Files
Pony-Alpha-2-Dataset-Training/datasets/04-architecture-design/architecture-design.jsonl
Pony Alpha 2 68453089ee feat: initial Alpha Brain 2 dataset release
Massive training corpus for AI coding models containing:
- 10 JSONL training datasets (641+ examples across coding, reasoning, planning, architecture, communication, debugging, security, workflows, error handling, UI/UX)
- 11 agent behavior specifications (explorer, planner, reviewer, debugger, executor, UI designer, Linux admin, kernel engineer, security architect, automation engineer, API architect)
- 7 skill definition files (coding, API engineering, kernel, Linux server, security architecture, server automation, UI/UX)
- Master README with project origin story and philosophy

Built by Pony Alpha 2 to help AI models learn expert-level coding approaches.
2026-03-13 16:26:29 +04:00

81 lines
107 KiB
JSON

{"requirement": "Build an e-commerce platform with product catalog, shopping cart, and order processing", "context": "Small team of 3-5 developers, expected traffic of 1000 concurrent users, tight deadline of 3 months", "architecture_decision": "Modular monolith with clear bounded contexts", "components": {"ProductCatalog": "Manages product information, categories, and inventory", "ShoppingCart": "Handles cart operations and session management", "OrderProcessing": "Processes orders and payments", "UserManagement": "Authentication and user profiles", "NotificationService": "Email and SMS notifications"}, "interfaces": {"API Gateway": "REST API with OpenAPI specification", "Module Communication": "In-process method calls with interfaces", "Database": "Single PostgreSQL database with schema separation"}, "tradeoffs": "Faster initial development, easier deployment and debugging, simpler transactions; harder to scale individual components independently, tighter coupling between modules", "anti_patterns_avoided": ["Distributed monolith with microservices overhead", "Shared database anti-pattern", "God object pattern", "Tight coupling through direct database access"], "code_example": "// Bounded Context Interface\npublic interface IProductCatalog\n{\n Task<Product> GetProductAsync(Guid productId);\n Task<IEnumerable<Product>> SearchProductsAsync(ProductSearchCriteria criteria);\n Task<bool> CheckAvailabilityAsync(Guid productId, int quantity);\n}\n\n// Module Registration with Dependency Injection\npublic static class ProductCatalogModule\n{\n public static IServiceCollection AddProductCatalog(this IServiceCollection services)\n {\n services.AddScoped<IProductCatalog, ProductCatalogService>();\n services.AddScoped<IProductRepository, ProductRepository>();\n services.AddDbContext<ProductCatalogContext>(options =>\n options.UseNpgsql(\"Name=ProductCatalog\"));\n return services;\n }\n}\n\n// Cross-cutting concern: Domain events\npublic interface IDomainEventDispatcher\n{\n 
Task DispatchAsync<TEvent>(TEvent @event) where TEvent : IDomainEvent;\n}\n\npublic class OrderPlacedEvent : IDomainEvent\n{\n public Guid OrderId { get; set; }\n public Guid CustomerId { get; set; }\n public decimal TotalAmount { get; set; }\n}"}
{"requirement": "Build a real-time collaborative document editing platform", "context": "Multiple users editing simultaneously, need conflict resolution, 10,000+ daily active users", "architecture_decision": "Event-driven architecture with operational transformation", "components": {"DocumentService": "Manages document storage and versioning", "TransformationEngine": "Handles conflict resolution through OT algorithms", "WebSocketManager": "Maintains real-time connections", "PresenceService": "Tracks user cursor positions and presence", "NotificationService": "Broadcasts updates to connected clients"}, "interfaces": {"WebSocket Protocol": "Custom binary protocol for efficient updates", "REST API": "Document metadata and initial fetch", "Event Bus": "Redis Pub/Sub for cross-instance communication"}, "tradeoffs": "Excellent real-time experience, natural conflict resolution, scales horizontally; increased complexity in transformation logic, eventual consistency challenges, higher memory usage", "anti_patterns_avoided": ["Last-write-wins conflict resolution", "Polling-based updates", "Monolithic WebSocket handlers", "Synchronous document locking"], "code_example": "// Operational Transformation Operation\npublic interface IOperation\n{\n string DocumentId { get; }\n int Version { get; }\n OperationType Type { get; }\n int Position { get; }\n string Content { get; }\n}\n\npublic class InsertOperation : IOperation\n{\n public string DocumentId { get; init; }\n public int Version { get; init; }\n public OperationType Type => OperationType.Insert;\n public int Position { get; init; }\n public string Content { get; init; }\n}\n\n// Transformation Service\npublic interface ITransformationService\n{\n Task<IOperation> TransformAsync(IOperation clientOp, IOperation serverOp);\n Task ApplyOperationAsync(string documentId, IOperation operation);\n}\n\npublic class OperationalTransformService : ITransformationService\n{\n public async Task<IOperation> TransformAsync(IOperation 
clientOp, IOperation serverOp)\n {\n // Implement transformation based on operation types\n if (clientOp.Type == OperationType.Insert && serverOp.Type == OperationType.Insert)\n {\n return await TransformInsertInsert(clientOp, serverOp);\n }\n // Handle other combinations...\n }\n \n public async Task ApplyOperationAsync(string documentId, IOperation operation)\n {\n await _documentRepository.ApplyOperationAsync(documentId, operation);\n await _eventBus.PublishAsync(new DocumentUpdatedEvent\n {\n DocumentId = documentId,\n Operation = operation\n });\n }\n}\n\n// WebSocket Handler\npublic class DocumentWebSocketHandler : WebSocketHandler\n{\n private readonly ITransformationService _transformService;\n private readonly IConnectionManager _connectionManager;\n \n public override async Task OnConnectedAsync(WebSocket socket, string documentId)\n {\n await _connectionManager.AddConnectionAsync(documentId, socket);\n await BroadcastPresenceAsync(documentId);\n }\n \n public override async Task ReceiveAsync(WebSocket socket, string documentId, string message)\n {\n var operation = JsonSerializer.Deserialize<IOperation>(message);\n var transformed = await _transformService.TransformAsync(operation, \n await _documentRepository.GetLatestOperationAsync(documentId));\n await _transformService.ApplyOperationAsync(documentId, transformed);\n }\n}"}
{"requirement": "Design a high-throughput payment processing system", "context": "Financial services with strict ACID requirements, 10,000 transactions/second, PCI-DSS compliance required", "architecture_decision": "CQRS with event sourcing for audit trail", "components": {"PaymentCommandHandler": "Handles payment processing commands", "PaymentQueryHandler": "Read-optimized queries", "EventStore": "Immutable event log", "ProjectionEngine": "Builds read models from events", "IdempotencyChecker": "Prevents duplicate transactions", "FraudDetectionService": "Real-time fraud analysis"}, "interfaces": {"Command API": "gRPC for low-latency commands", "Query API": "REST with GraphQL for flexible queries", "Event Stream": "Kafka for event distribution"}, "tradeoffs": "Perfect audit trail, scalable reads and writes independently, built-in replay capability; increased complexity, eventual consistency for queries, storage overhead for events", "anti_patterns_avoided": ["Single database for reads and writes", "Deleting transaction records", "Synchronous external API calls", "Shared state between command and query handlers"], "code_example": "// Payment Command\npublic record ProcessPaymentCommand(\n Guid PaymentId,\n decimal Amount,\n string Currency,\n string SourceAccount,\n string DestinationAccount,\n DateTime Timestamp\n) : ICommand;\n\n// Command Handler\npublic class PaymentCommandHandler : ICommandHandler<ProcessPaymentCommand>\n{\n private readonly IEventStore _eventStore;\n private readonly IIdempotencyChecker _idempotencyChecker;\n \n public async Task HandleAsync(ProcessPaymentCommand command)\n {\n // Check idempotency\n if (await _idempotencyChecker.ExistsAsync(command.PaymentId))\n {\n return;\n }\n \n // Load aggregate\n var payment = await _eventStore.LoadAsync<Payment>(command.PaymentId);\n \n // Execute domain logic\n payment.Process(command.Amount, command.SourceAccount, command.DestinationAccount);\n \n // Persist events\n await 
_eventStore.AppendAsync(payment.UncommittedEvents);\n \n // Publish to message bus\n await _messageBus.PublishAsync(payment.UncommittedEvents);\n \n payment.MarkEventsCommitted();\n }\n}\n\n// Payment Aggregate\npublic class Payment : AggregateRoot\n{\n public Guid Id { get; private set; }\n public decimal Amount { get; private set; }\n public PaymentStatus Status { get; private set; }\n \n public void Process(decimal amount, string source, string destination)\n {\n // Validate business rules\n if (Status != PaymentStatus.Pending)\n throw new InvalidOperationException(\"Payment already processed\");\n \n // Raise event\n RaiseEvent(new PaymentProcessedEvent\n {\n PaymentId = Id,\n Amount = amount,\n SourceAccount = source,\n DestinationAccount = destination,\n ProcessedAt = DateTime.UtcNow\n });\n }\n \n protected override void ApplyEvent(IDomainEvent @event)\n {\n switch (@event)\n {\n case PaymentProcessedEvent e:\n Status = PaymentStatus.Processed;\n Amount = e.Amount;\n break;\n }\n }\n}"}
{"requirement": "Build a scalable API gateway for microservices", "context": "50+ microservices, varying load patterns, need rate limiting and authentication", "architecture_decision": "API Gateway pattern with plugin architecture", "components": {"GatewayRouter": "Routes requests to appropriate services", "AuthenticationPlugin": "JWT validation and token refresh", "RateLimiterPlugin": "Per-IP and per-token rate limiting", "CachingLayer": "Response caching for GET requests", "MetricsCollector": "Request/response metrics", "CircuitBreaker": "Fail-fast for unhealthy services"}, "interfaces": {"Upstream Protocol": "HTTP/2 with gRPC support", "Downstream Protocol": "HTTP/1.1 and gRPC", "Plugin Interface": "Request/response pipeline hooks", "Configuration API": "Dynamic route updates"}, "tradeoffs": "Centralized cross-cutting concerns, simplified client experience, easier authentication enforcement; single point of failure, potential bottleneck, added latency", "anti_patterns_avoided": ["Tight coupling to specific services", "Business logic in gateway", "Synchronous service calls", "Hardcoded routing rules"], "code_example": "// Gateway Plugin Interface\npublic interface IGatewayPlugin\n{\n Task OnRequestAsync(GatewayContext context);\n Task OnResponseAsync(GatewayContext context);\n}\n\n// Authentication Plugin\npublic class AuthenticationPlugin : IGatewayPlugin\n{\n private readonly ITokenValidator _tokenValidator;\n private readonly ITokenRefresher _tokenRefresher;\n \n public async Task OnRequestAsync(GatewayContext context)\n {\n var token = ExtractToken(context.Request);\n \n if (string.IsNullOrEmpty(token))\n {\n context.Response = new UnauthorizedResponse();\n context.Abort();\n return;\n }\n \n var validationResult = await _tokenValidator.ValidateAsync(token);\n \n if (!validationResult.IsValid)\n {\n if (validationResult.ShouldRefresh)\n {\n var newToken = await _tokenRefresher.RefreshAsync(token);\n context.Request.Headers[\"Authorization\"] = $\"Bearer 
{newToken}\";\n }\n else\n {\n context.Response = new UnauthorizedResponse();\n context.Abort();\n return;\n }\n }\n \n context.User = validationResult.Principal;\n }\n \n public Task OnResponseAsync(GatewayContext context) => Task.CompletedTask;\n}\n\n// Gateway Router\npublic class GatewayRouter\n{\n private readonly IEnumerable<IGatewayPlugin> _plugins;\n private readonly IRouteTable _routeTable;\n \n public async Task HandleRequestAsync(HttpContext context)\n {\n var gatewayContext = new GatewayContext(context);\n \n // Execute plugins in order\n foreach (var plugin in _plugins)\n {\n await plugin.OnRequestAsync(gatewayContext);\n if (gatewayContext.Aborted) return;\n }\n \n // Route request\n var route = await _routeTable.MatchAsync(gatewayContext.Request);\n var response = await ForwardRequestAsync(route, gatewayContext.Request);\n \n gatewayContext.Response = response;\n \n // Execute response plugins\n foreach (var plugin in _plugins.Reverse())\n {\n await plugin.OnResponseAsync(gatewayContext);\n }\n }\n}"}
{"requirement": "Design a data pipeline for ETL operations", "context": "Processing 10TB of data daily, multiple data sources, real-time and batch processing needed", "architecture_decision": "Kappa architecture with stream processing", "components": {"DataIngestion": "Kafka for streaming data collection", "StreamProcessor": "Flink for real-time transformations", "BatchProcessor": "Spark for historical data", "DataLake": "Parquet files in S3", "MetadataStore": "Data catalog and lineage tracking", "QualityChecker": "Data quality validation"}, "interfaces": {"Ingestion API": "REST and Kafka producers", "Query Interface": "SQL on data lake", "Monitoring API": "Pipeline health and metrics"}, "tradeoffs": "Unified processing model, real-time capabilities, scales horizontally; requires stream processing expertise, higher operational complexity, potential data quality issues", "anti_patterns_avoided": ["Separate batch and streaming codebases", "Hardcoded schema dependencies", "Monolithic transformation jobs", "Ignoring late-arriving data"], "code_example": "// Stream Processing Topology\npublic class DataPipelineTopology\n{\n public static void BuildTopology(StreamExecutionEnvironment env)\n {\n // Source: Kafka topics\n var rawEvents = env.AddSource(new FlinkKafkaConsumer<Event>(\n \"raw-events\",\n new EventDeserializer(),\n GetKafkaProperties()));\n \n // Parse and validate\n var parsedEvents = rawEvents\n .Map(new EventParser())\n .Filter(new DataQualityFilter());\n \n // Transform and enrich\n var enrichedEvents = parsedEvents\n .KeyBy(e => e.UserId)\n .Connect(UserReferenceData.GetAsync())\n .FlatMap(new EnrichmentFunction());\n \n // Aggregate in windows\n var aggregated = enrichedEvents\n .Window(TumblingEventTimeWindows.Of(Time.Hours(1)))\n .Aggregate(new MetricsAggregator());\n \n // Sinks\n aggregated.AddSink(new DataLakeSink(\"aggregated-metrics\"));\n aggregated.AddSink(new MetricsSink(\"prometheus\"));\n enrichedEvents.AddSink(new 
AlertSink(\"critical-alerts\"));\n }\n}\n\n// Data Quality Filter\npublic class DataQualityFilter : FilterFunction<Event>\n{\n public override bool Filter(Event @event)\n {\n // Required fields\n if (string.IsNullOrEmpty(@event.Id) || \n @event.Timestamp == default ||\n @event.EventType == EventType.Unknown)\n {\n return false;\n }\n \n // Schema validation\n if (!SchemaRegistry.Validate(@event.EventType, @event.Data))\n {\n return false;\n }\n \n // Business rules\n if (@event.Value < 0 || @event.Value > MaxAllowedValue)\n {\n return false;\n }\n \n return true;\n }\n}\n\n// Exactly-once processing guarantee\npublic class ExactlyOnceProcessor\n{\n public async Task ProcessAsync(Event @event)\n {\n var transaction = await _database.BeginTransactionAsync();\n try\n {\n await _repository.SaveAsync(@event);\n await _kafkaProducer.ProduceAsync(new ProcessedEvent\n {\n EventId = @event.Id,\n ProcessedAt = DateTime.UtcNow\n });\n await transaction.CommitAsync();\n await _checkpointManager.CheckpointAsync(@event.Id);\n }\n catch\n {\n await transaction.RollbackAsync();\n throw;\n }\n }\n}"}
{"requirement": "Build a multi-tenant SaaS platform", "context": "1000+ organizations, data isolation required, varying feature sets per plan", "architecture_decision": "Shared database with tenant isolation middleware", "components": {"TenantResolver": "Identifies tenant from request", "TenantConfiguration": "Per-tenant feature flags", "DataIsolationLayer": "Row-level security", "TenantMigrations": "Database schema management", "BillingIntegration": "Usage-based billing", "TenantOnboarding": "Automated provisioning"}, "interfaces": {"Tenant Context": "Thread-local tenant storage", "Repository Pattern": "Tenant-aware data access", "Configuration API": "Dynamic tenant settings"}, "tradeoffs": "Efficient resource utilization, easier maintenance, lower costs; requires careful isolation implementation, potential noisy neighbor problem, complex migration strategy", "anti_patterns_avoided": ["Separate database per tenant for small tenants", "Cross-tenant data leakage", "Hardcoded tenant IDs", "Shared cache without tenant scoping"], "code_example": "// Tenant Resolution Middleware\npublic class TenantResolutionMiddleware\n{\n private readonly ITenantResolver _tenantResolver;\n \n public async Task InvokeAsync(HttpContext context, RequestDelegate next)\n {\n // Resolve tenant from various sources\n var tenant = await _tenantResolver.ResolveAsync(context.Request);\n \n if (tenant == null)\n {\n context.Response.StatusCode = 401;\n return;\n }\n \n // Store in context for dependency injection\n context.Items[\"Tenant\"] = tenant;\n \n // Set tenant scope for the request\n using (TenantContext.SetScope(tenant.Id))\n {\n await next(context);\n }\n }\n}\n\n// Tenant-Aware Repository\npublic class TenantAwareRepository<TEntity> : IRepository<TEntity> where TEntity : class\n{\n private readonly DbContext _context;\n private readonly string _tenantId;\n \n public TenantAwareRepository(DbContext context, ITenantContext tenantContext)\n {\n _context = context;\n _tenantId = 
tenantContext.TenantId;\n }\n \n public async Task<TEntity> GetByIdAsync(Guid id)\n {\n return await _context.Set<TEntity>()\n .AsNoTracking()\n .FirstOrDefaultAsync(e => EF.Property<string>(e, \"TenantId\") == _tenantId &&\n EF.Property<Guid>(e, \"Id\") == id);\n }\n \n public async Task AddAsync(TEntity entity)\n {\n _context.Entry(entity).Property(\"TenantId\").CurrentValue = _tenantId;\n await _context.Set<TEntity>().AddAsync(entity);\n }\n}\n\n// Row-Level Security with Global Query Filters\npublic class MultiTenantDbContext : DbContext\n{\n private readonly string _tenantId;\n \n public MultiTenantDbContext(ITenantContext tenantContext)\n {\n _tenantId = tenantContext.TenantId;\n }\n \n protected override void OnModelCreating(ModelBuilder modelBuilder)\n {\n // Apply global query filter for all tenant entities\n foreach (var entityType in modelBuilder.Model.GetEntityTypes())\n {\n if (entityType.ClrType.GetProperty(\"TenantId\") != null)\n {\n modelBuilder.Entity(entityType.ClrType)\n .HasQueryFilter(e => EF.Property<string>(e, \"TenantId\") == _tenantId);\n }\n }\n }\n}"}
{"requirement": "Design a caching strategy for a content delivery network", "context": "Global content distribution, 1M+ requests/second, dynamic and static content", "architecture_decision": "Multi-tier caching with cache hierarchy", "components": {"EdgeCache": "CDN edge locations for static content", "RegionalCache": "Regional cache clusters", "ApplicationCache": "In-memory cache for dynamic content", "CacheWarmer": "Proactive cache population", "InvalidationService": "Cache invalidation coordination", "MetricsCollector": "Cache hit/miss analytics"}, "interfaces": {"Cache Protocol": "Custom binary protocol", "Invalidation API": "Pub/Sub for cache updates", "Health Check": "Cache availability monitoring"}, "tradeoffs": "Reduced latency, lower database load, better scalability; cache consistency challenges, stale data risk, increased infrastructure complexity", "anti_patterns_avoided": ["Single cache tier", "Ignoring cache invalidation", "TTL as only invalidation strategy", "Cache stampede vulnerabilities"], "code_example": "// Multi-tier Cache Manager\npublic class CacheHierarchy : ICache\n{\n private readonly ICache _l1Cache; // Local memory\n private readonly ICache _l2Cache; // Redis cluster\n private readonly ICache _l3Cache; // Database/origin\n \n public async Task<T> GetAsync<T>(string key)\n {\n // L1: Local memory (fastest)\n var value = await _l1Cache.GetAsync<T>(key);\n if (value != null)\n {\n _metrics.RecordHit(CacheLevel.L1);\n return value;\n }\n \n // L2: Redis cluster (fast)\n value = await _l2Cache.GetAsync<T>(key);\n if (value != null)\n {\n await _l1Cache.SetAsync(key, value, TimeSpan.FromMinutes(5));\n _metrics.RecordHit(CacheLevel.L2);\n return value;\n }\n \n // L3: Origin (slowest)\n value = await _l3Cache.GetAsync<T>(key);\n if (value != null)\n {\n await _l2Cache.SetAsync(key, value, TimeSpan.FromHours(1));\n await _l1Cache.SetAsync(key, value, TimeSpan.FromMinutes(5));\n _metrics.RecordMiss();\n }\n \n return value;\n }\n}\n\n// Cache 
Stampede Prevention\npublic class StampedeProtectedCache : ICache\n{\n private readonly ICache _cache;\n private readonly IDistributedLock _lock;\n \n public async Task<T> GetOrLoadAsync<T>(string key, Func<Task<T>> loader)\n {\n var value = await _cache.GetAsync<T>(key);\n if (value != null) return value;\n \n // Acquire distributed lock to prevent stampede\n await using (await _lock.AcquireAsync(key))\n {\n // Double-check after acquiring lock\n value = await _cache.GetAsync<T>(key);\n if (value != null) return value;\n \n // Load and cache\n value = await loader();\n await _cache.SetAsync(key, value, GetExpiration(value));\n \n return value;\n }\n }\n}\n\n// Cache Invalidation Strategy\npublic class SmartInvalidation\n{\n public async Task InvalidateAsync(string pattern)\n {\n // Find all matching keys\n var keys = await _cache.SearchAsync(pattern);\n \n // Invalidate atomically\n var transaction = _cache.CreateTransaction();\n foreach (var key in keys)\n {\n transaction.DeleteAsync(key);\n }\n await transaction.ExecuteAsync();\n \n // Publish invalidation event\n await _eventBus.PublishAsync(new CacheInvalidatedEvent\n {\n Pattern = pattern,\n Keys = keys,\n Timestamp = DateTime.UtcNow\n });\n }\n}"}
{"requirement": "Design a GraphQL API for a social media platform", "context": "Complex relational data, varying client needs, mobile and web clients", "architecture_decision": "GraphQL with DataLoader and persisted queries", "components": {"GraphQLSchema": "Type definitions and resolvers", "DataLoader": "Batch loading and caching", "QueryComplexityAnalyzer": "Prevent expensive queries", "PersistedQueryStore": "Whitelist of allowed queries", "AuthContext": "Authorization in resolvers", "ErrorFormatter": "Consistent error responses"}, "interfaces": {"GraphQL Endpoint": "Single endpoint for all operations", "WebSocket": "Subscriptions for real-time updates", "Admin API": "Schema management"}, "tradeoffs": "Flexible queries, reduced over-fetching, type-safe clients; complex authorization implementation, potential for expensive queries, caching challenges", "anti_patterns_avoided": ["N+1 query problems", "Ignoring query complexity", "Direct database access in resolvers", "No rate limiting"], "code_example": "// GraphQL Schema with Authorization\npublic class QueryType : ObjectType<Query>\n{\n protected override void Configure(IObjectTypeDescriptor<Query> descriptor)\n {\n descriptor.Field(t => t.GetUser(default))\n .Type<UserType>()\n .Authorize(new[] { PolicyNames.CanViewUsers });\n \n descriptor.Field(t => t.GetPosts(default))\n .Type<NonNullType<ListType<NonNullType<PostType>>>>()\n .UsePaging<PostType>()\n .UseProjection()\n .UseFiltering()\n .UseSorting();\n }\n}\n\n// DataLoader for batch loading\npublic class UserDataLoader : DataLoaderBase<Guid, User>\n{\n private readonly IUserRepository _repository;\n \n protected override async Task<IReadOnlyDictionary<Guid, User>> LoadBatchAsync(\n IReadOnlyList<Guid> userIds)\n {\n var users = await _repository.GetByIdsAsync(userIds);\n return users.ToDictionary(u => u.Id);\n }\n}\n\n// Resolver with DataLoader\npublic class Query\n{\n public async Task<Post> GetPost(Guid id, [Service] UserDataLoader userLoader)\n {\n var 
post = await _repository.GetPostAsync(id);\n \n // User will be batched with other requests\n post.User = await userLoader.LoadAsync(post.AuthorId);\n \n return post;\n }\n}\n\n// Query Complexity Analysis\npublic class QueryComplexityMiddleware\n{\n private readonly int _maxComplexity;\n \n public async Task ExecuteAsync(QueryRequest request)\n {\n var complexity = await _analyzer.AnalyzeAsync(request.Query);\n \n if (complexity > _maxComplexity)\n {\n throw new QueryComplexityException(\n $\"Query complexity {complexity} exceeds maximum {_maxComplexity}\");\n }\n \n await _executor.ExecuteAsync(request);\n }\n}"}
{"requirement": "Build an authentication and authorization system", "context": "Multiple application types, third-party integrations, fine-grained permissions", "architecture_decision": "OAuth 2.0 + OpenID Connect with RBAC", "components": {"IdentityProvider": "User authentication and token issuance", "AuthorizationServer": "OAuth token management", "PermissionService": "Role and permission management", "TokenRevocation": "Token blacklist and refresh", "AuditLogger": "Security event logging", "MFAProvider": "Multi-factor authentication"}, "interfaces": {"OAuth Endpoints": "Standard OAuth 2.0 flows", "UserInfo Endpoint": "OpenID Connect user info", "Introspection": "Token validation API", "JWKS Endpoint": "Public key distribution"}, "tradeoffs": "Standard protocols, interoperable, centralized identity; complex implementation, token management overhead, dependency on external IdP", "anti_patterns_avoided": ["Custom authentication schemes", "Storing passwords in plain text", "Ignoring token revocation", "Hardcoded permission checks"], "code_example": "// OAuth Authorization Server\npublic class AuthorizationServer\n{\n public async Task<TokenResponse> AuthorizeAsync(AuthorizationRequest request)\n {\n // Validate client\n var client = await _clientStore.FindByIdAsync(request.ClientId);\n if (!client.ValidateSecret(request.ClientSecret))\n {\n throw new InvalidClientException();\n }\n \n // Validate user\n var user = await _userManager.AuthenticateAsync(\n request.Username, request.Password);\n if (user == null)\n {\n throw new InvalidGrantException();\n }\n \n // Generate tokens\n var accessToken = await _tokenGenerator.CreateAccessTokenAsync(user, client);\n var refreshToken = await _tokenGenerator.CreateRefreshTokenAsync(user);\n \n return new TokenResponse\n {\n AccessToken = accessToken,\n RefreshToken = refreshToken,\n ExpiresIn = 3600,\n TokenType = \"Bearer\"\n };\n }\n}\n\n// JWT Token Generator\npublic class JwtTokenGenerator\n{\n public async Task<string> 
CreateAccessTokenAsync(User user, Client client)\n {\n var claims = new List<Claim>\n {\n new Claim(JwtRegisteredClaimNames.Sub, user.Id),\n new Claim(JwtRegisteredClaimNames.Jti, Guid.NewGuid().ToString()),\n new Claim(JwtRegisteredClaimNames.Iat, DateTimeOffset.UtcNow.ToUnixTimeSeconds().ToString()),\n new Claim(\"scope\", string.Join(\" \", client.AllowedScopes))\n };\n \n // Add user permissions as claims\n var permissions = await _permissionService.GetPermissionsAsync(user);\n claims.AddRange(permissions.Select(p => new Claim(\"permission\", p)));\n \n var token = new JwtSecurityToken(\n issuer: _options.Issuer,\n audience: client.ClientId,\n claims: claims,\n expires: DateTime.UtcNow.AddHours(1),\n signingCredentials: _signingCredentials);\n \n return new JwtSecurityTokenHandler().WriteToken(token);\n }\n}\n\n// Policy-Based Authorization\npublic class PermissionAuthorizationHandler : AuthorizationHandler<PermissionRequirement>\n{\n protected override Task HandleRequirementAsync(\n AuthorizationHandlerContext context,\n PermissionRequirement requirement)\n {\n var user = context.User;\n if (user.HasClaim(c => c.Type == \"permission\" && c.Value == requirement.Permission))\n {\n context.Succeed(requirement);\n }\n \n return Task.CompletedTask;\n }\n}"}
{"requirement": "Design a message queue system for async processing", "context": "Decoupled services, varying message priorities, guaranteed delivery required", "architecture_decision": "RabbitMQ with DLQ and priority queues", "components": {"MessageProducer": "Publishes messages to queues", "MessageConsumer": "Processes messages from queues", "DeadLetterQueue": "Handles failed messages", "RetryPolicy": "Exponential backoff", "MessageRouter": "Routes based on content", "MonitoringService": "Queue health metrics"}, "interfaces": {"AMQP Protocol": "Standard messaging protocol", "Management API": "Queue and exchange management", "Monitoring Endpoint": "Queue statistics"}, "tradeoffs": "Reliable delivery, flexible routing, decoupled services; Operational complexity, message ordering challenges, infrastructure overhead", "anti_patterns_avoided": ["Ignoring message acknowledgments", "Unbounded queue sizes", "Loss of critical messages", "Tight coupling through shared queues"], "code_example": "// Message Publisher with Confirmations\npublic class MessagePublisher : IMessagePublisher\n{\n private readonly IConnection _connection;\n private readonly IModel _channel;\n \n public MessagePublisher()\n {\n var factory = new ConnectionFactory\n {\n HostName = _config.Host,\n Port = _config.Port,\n UserName = _config.Username,\n Password = _config.Password,\n VirtualHost = _config.VirtualHost\n };\n \n _connection = factory.CreateConnection();\n _channel = _connection.CreateModel();\n \n // Enable publisher confirms\n _channel.ConfirmSelect();\n \n // Declare exchange\n _channel.ExchangeDeclare(\n exchange: \"events\",\n type: ExchangeType.Topic,\n durable: true);\n }\n \n public async Task PublishAsync<T>(string routingKey, T message)\n {\n var body = JsonSerializer.SerializeToUtf8Bytes(message);\n var properties = _channel.CreateBasicProperties();\n properties.MessageId = Guid.NewGuid().ToString();\n properties.Timestamp = new 
AmqpTimestamp(DateTimeOffset.UtcNow.ToUnixTimeSeconds());\n properties.DeliveryMode = 2; // Persistent\n \n _channel.BasicPublish(\n exchange: \"events\",\n routingKey: routingKey,\n basicProperties: properties,\n body: body);\n \n // Wait for publisher confirmation\n await _channel.WaitForConfirmsOrDieAsync(TimeSpan.FromSeconds(5));\n }\n}\n\n// Message Consumer with Error Handling\npublic class MessageConsumer : IMessageConsumer\n{\n public async Task StartAsync<T>(string queue, string routingKey, Func<T, Task> handler)\n {\n _channel.QueueDeclare(\n queue: queue,\n durable: true,\n exclusive: false,\n autoDelete: false);\n \n _channel.QueueBind(queue, \"events\", routingKey);\n \n // Dead letter exchange\n var args = new Dictionary<string, object>\n {\n {\"x-dead-letter-exchange\", \"dlx\"},\n {\"x-dead-letter-routing-key\", queue}\n };\n \n var consumer = new EventingBasicConsumer(_channel);\n consumer.Received += async (sender, ea) =>\n {\n try\n {\n var message = JsonSerializer.Deserialize<T>(ea.Body.ToArray());\n await handler(message);\n _channel.BasicAck(ea.DeliveryTag, multiple: false);\n }\n catch (Exception ex)\n {\n _logger.LogError(ex, \"Error processing message\");\n \n // Retry with exponential backoff\n if (ea.BasicProperties.Headers.ContainsKey(\"x-retry-count\"))\n {\n var retryCount = (int)ea.BasicProperties.Headers[\"x-retry-count\"];\n if (retryCount < MaxRetries)\n {\n await RetryAsync(ea, retryCount + 1);\n return;\n }\n }\n else\n {\n await RetryAsync(ea, 1);\n return;\n }\n \n // Send to DLQ\n _channel.BasicNack(ea.DeliveryTag, multiple: false, requeue: false);\n }\n };\n \n _channel.BasicConsume(queue, autoAck: false, consumer);\n }\n}"}
{"requirement": "Build a CLI tool for developer productivity", "context": "Cross-platform, extensible command system, needs plugin architecture", "architecture_decision": "Command pattern with dependency injection and middleware pipeline", "components": {"CommandRouter": "Routes to appropriate command handler", "CommandRegistry": "Discovers and registers commands", "MiddlewarePipeline": "Pre/post processing hooks", "ConfigurationLoader": "Hierarchical config loading", "PluginLoader": "Dynamic plugin discovery", "OutputFormatter": "Multiple output formats"}, "interfaces": {"Command Interface": "Standard command contract", "Middleware Interface": "Pipeline hooks", "Plugin Interface": "Extension points"}, "tradeoffs": "Extensible, testable, maintainable; Higher initial complexity, requires DI container, learning curve", "anti_patterns_avoided": ["Switch statement dispatching", "Global state", "Hardcoded dependencies", "No error handling"], "code_example": "// Command Interface\npublic interface ICommand\n{\n string Name { get; }\n string Description { get; }\n Task<int> ExecuteAsync(CommandContext context);\n}\n\n// Command with Dependency Injection\npublic class DeployCommand : ICommand\n{\n private readonly IDeploymentService _deployment;\n private readonly IConfiguration _config;\n private readonly ILogger _logger;\n \n public string Name => \"deploy\";\n public string Description => \"Deploy application to specified environment\";\n \n public DeployCommand(IDeploymentService deployment, IConfiguration config, ILogger logger)\n {\n _deployment = deployment;\n _config = config;\n _logger = logger;\n }\n \n public async Task<int> ExecuteAsync(CommandContext context)\n {\n var environment = context.Arguments.GetValue(\"--environment\", \"dev\");\n var version = context.Arguments.GetValue(\"--version\", \"latest\");\n \n _logger.LogInformation($\"Deploying {version} to {environment}\");\n \n var result = await _deployment.DeployAsync(environment, version);\n \n return 
result.Success ? 0 : 1;\n }\n}\n\n// Middleware Pipeline\npublic interface ICommandMiddleware\n{\n Task<int> InvokeAsync(CommandContext context, CommandDelegate next);\n}\n\npublic class LoggingMiddleware : ICommandMiddleware\n{\n public async Task<int> InvokeAsync(CommandContext context, CommandDelegate next)\n {\n _logger.LogInformation($\"Executing: {context.CommandName}\");\n var stopwatch = Stopwatch.StartNew();\n \n try\n {\n var result = await next(context);\n stopwatch.Stop();\n _logger.LogInformation($\"Completed in {stopwatch.ElapsedMilliseconds}ms\");\n return result;\n }\n catch (Exception ex)\n {\n _logger.LogError(ex, \"Command failed\");\n return 1;\n }\n }\n}\n\n// Command Router with Middleware\npublic class CommandRouter\n{\n private readonly IEnumerable<ICommandMiddleware> _middlewares;\n private readonly ICommandRegistry _registry;\n \n public async Task<int> RouteAsync(string[] args)\n {\n var context = new CommandContext(args);\n var command = _registry.Find(context.CommandName);\n \n if (command == null)\n {\n _logger.LogError($\"Unknown command: {context.CommandName}\");\n return 1;\n }\n \n CommandDelegate pipeline = async (ctx) =>\n {\n using var scope = _scopeFactory.CreateScope();\n var handler = scope.ServiceProvider.GetRequiredService(command.GetType()) as ICommand;\n return await handler.ExecuteAsync(ctx);\n };\n \n // Build middleware pipeline\n foreach (var middleware in _middlewares.Reverse())\n {\n var next = pipeline;\n pipeline = async (ctx) => await middleware.InvokeAsync(ctx, next);\n }\n \n return await pipeline(context);\n }\n}"}
{"requirement": "Design error handling for a distributed system", "context": "Multiple services, network failures, partial degradation scenarios", "architecture_decision": "Circuit breaker pattern with fallback strategies", "components": {"CircuitBreaker": "Prevents cascading failures", "RetryPolicy": "Exponential backoff retry", "FallbackHandler": "Degraded functionality", "ErrorAggregator": "Error analytics and alerting", "HealthChecker": "Service health monitoring", "GracefulShutdown": "Clean service termination"}, "interfaces": {"Circuit State": "Open/Closed/Half-Open", "Fallback Delegate": "Alternative execution path", "Health Check Endpoint": "Liveness and readiness probes"}, "tradeoffs": "System resilience, graceful degradation, better UX; Added complexity, potential for stale fallback data, configuration overhead", "anti_patterns_avoided": ["Silent failures", "Cascading failures", "Infinite retries", "No monitoring"], "code_example": "// Circuit Breaker Implementation\npublic interface ICircuitBreaker\n{\n Task<T> ExecuteAsync<T>(Func<Task<T>> operation, Func<Task<T>> fallback);\n CircuitState State { get; }\n event EventHandler<CircuitStateChangedEventArgs> StateChanged;\n}\n\npublic class CircuitBreaker : ICircuitBreaker\n{\n private readonly CircuitBreakerOptions _options;\n private int _failureCount;\n private DateTime _lastFailureTime;\n private CircuitState _state = CircuitState.Closed;\n \n public async Task<T> ExecuteAsync<T>(Func<Task<T>> operation, Func<Task<T>> fallback)\n {\n if (_state == CircuitState.Open)\n {\n if (ShouldAttemptReset())\n {\n _state = CircuitState.HalfOpen;\n OnStateChanged(CircuitState.HalfOpen);\n }\n else\n {\n return await fallback();\n }\n }\n \n try\n {\n var result = await operation();\n OnSuccess();\n return result;\n }\n catch (Exception ex)\n {\n OnFailure(ex);\n \n if (_state == CircuitState.Open)\n {\n return await fallback();\n }\n \n throw;\n }\n }\n \n private void OnSuccess()\n {\n _failureCount = 0;\n if 
(_state == CircuitState.HalfOpen)\n {\n _state = CircuitState.Closed;\n OnStateChanged(CircuitState.Closed);\n }\n }\n \n private void OnFailure(Exception exception)\n {\n _failureCount++;\n _lastFailureTime = DateTime.UtcNow;\n \n if (_failureCount >= _options.FailureThreshold)\n {\n _state = CircuitState.Open;\n OnStateChanged(CircuitState.Open);\n }\n }\n \n private bool ShouldAttemptReset()\n {\n return DateTime.UtcNow - _lastFailureTime >= _options.OpenTimeout;\n }\n}\n\n// Resilient HTTP Client with Policies\npublic class ResilientHttpClient\n{\n private readonly IAsyncPolicy<HttpResponseMessage> _policy;\n \n public ResilientHttpClient(ICircuitBreaker circuitBreaker)\n {\n _policy = Policy\n .HandleResult<HttpResponseMessage>(r => !r.IsSuccessStatusCode)\n .Or<HttpRequestException>()\n .WaitAndRetryAsync(\n retryCount: 3,\n sleepDurationProvider: retryAttempt => \n TimeSpan.FromSeconds(Math.Pow(2, retryAttempt)),\n onRetry: (outcome, delay, retryCount, context) =>\n {\n _logger.LogWarning(\n $\"Retry {retryCount} after {delay.TotalSeconds}s due to: {outcome.Exception?.Message}\");\n })\n .WrapAsync(Policy\n .Handle<Exception>()\n .CircuitBreakerAsync(\n exceptionsAllowedBeforeBreaking: 5,\n durationOfBreak: TimeSpan.FromSeconds(30),\n onBreak: (ex, breakDelay) =>\n {\n _logger.LogError($\"Circuit broken for {breakDelay.TotalSeconds}s\");\n },\n onReset: () =>\n {\n _logger.LogInformation(\"Circuit reset\");\n }));\n }\n \n public async Task<HttpResponseMessage> GetAsync(string uri)\n {\n return await _policy.ExecuteAsync(async () =>\n {\n return await _httpClient.GetAsync(uri);\n });\n }\n}"}
{"requirement": "Design configuration management for microservices", "context": "100+ services, multiple environments, dynamic configuration updates", "architecture_decision": "Centralized configuration service with watch mechanism", "components": {"ConfigServer": "Centralized configuration storage", "ConfigWatcher": "Real-time configuration updates", "ConfigValidator": "Schema validation", "EncryptionService": "Sensitive data protection", "AuditLogger": "Configuration change tracking", "VersionControl": "Configuration history"}, "interfaces": {"REST API": "Configuration CRUD operations", "Watch API": "WebSocket for real-time updates", "Encryption": "Automatic encryption of secrets"}, "tradeoffs": "Centralized management, real-time updates, audit trail; Single point of failure, network dependency, configuration complexity", "anti_patterns_avoided": ["Hardcoded configuration", "Configuration in code", "No validation", "Secrets in plain text"], "code_example": "// Configuration Service\npublic class ConfigurationService : IConfigurationService\n{\n private readonly IConfigurationStore _store;\n private readonly IConfigValidator _validator;\n private readonly IConfigWatcher _watcher;\n private readonly IEncryptionService _encryption;\n \n public async Task<ConfigurationValue> GetAsync(string service, string key)\n {\n var config = await _store.GetAsync(service, key);\n \n // Decrypt if needed\n if (config.IsEncrypted)\n {\n config.Value = await _encryption.DecryptAsync(config.Value);\n }\n \n return config;\n }\n \n public async Task SetAsync(string service, string key, string value, bool isSecret = false)\n {\n // Validate configuration\n var schema = await _validator.GetSchemaAsync(service, key);\n if (schema != null)\n {\n var validation = schema.Validate(value);\n if (!validation.IsValid)\n {\n throw new ValidationException(validation.Errors);\n }\n }\n \n // Encrypt secrets\n var storedValue = isSecret ? 
await _encryption.EncryptAsync(value) : value;\n \n // Store configuration\n var config = new ConfigurationValue\n {\n Service = service,\n Key = key,\n Value = storedValue,\n IsEncrypted = isSecret,\n Version = Guid.NewGuid().ToString(),\n UpdatedAt = DateTime.UtcNow\n };\n \n await _store.SetAsync(config);\n \n // Notify watchers\n await _watcher.NotifyAsync(service, key, config);\n \n // Audit log\n await _auditLogger.LogAsync(new ConfigurationChangeEvent\n {\n Service = service,\n Key = key,\n Version = config.Version,\n ChangedBy = GetCurrentUser(),\n Timestamp = DateTime.UtcNow\n });\n }\n}\n\n// Configuration Watcher\npublic class ConfigurationWatcher : IConfigurationWatcher\n{\n private readonly ConcurrentDictionary<string, ConcurrentDictionary<string, List<Action<ConfigurationValue>>>> _watchers;\n \n public void Watch(string service, string key, Action<ConfigurationValue> onUpdate)\n {\n _watchers.GetOrAdd(service, _ => new())\n .GetOrAdd(key, _ => new())\n .Add(onUpdate);\n }\n \n public async Task NotifyAsync(string service, string key, ConfigurationValue value)\n {\n if (_watchers.TryGetValue(service, out var keys) &&\n keys.TryGetValue(key, out var callbacks))\n {\n await Task.WhenAll(callbacks.Select(callback =>\n {\n try\n {\n callback(value);\n return Task.CompletedTask;\n }\n catch (Exception ex)\n {\n _logger.LogError(ex, \"Error in configuration watcher callback\");\n return Task.CompletedTask;\n }\n }));\n }\n }\n}\n\n// Configuration Provider with Hot Reload\npublic class RemoteConfigurationProvider : ConfigurationProvider, IConfigurationSource\n{\n private readonly IConfigurationService _service;\n private readonly string _serviceName;\n private CancellationTokenSource _reloadToken;\n \n public override void Load()\n {\n // Load initial configuration\n var config = _service.GetAsync(_serviceName, \"*\").GetAwaiter().GetResult();\n \n foreach (var item in config)\n {\n Data[$\"{_serviceName}:{item.Key}\"] = item.Value;\n }\n \n // Start watching for 
changes\n StartWatching();\n }\n \n private void StartWatching()\n {\n _service.Watch(_serviceName, \"*\", async (config) =>\n {\n Data[$\"{_serviceName}:{config.Key}\"] = config.Value;\n OnReload();\n });\n }\n}"}
{"requirement": "Build a mobile backend API", "context": "iOS and Android apps, offline-first, sync capabilities", "architecture_decision": "GraphQL with sync protocol and offline queue", "components": {"GraphQLServer": "Unified API for all data", "SyncEngine": "Conflict resolution and synchronization", "OfflineQueue": "Queued operations when offline", "PushNotification": "Real-time updates", "DataSync": "Bidirectional data synchronization", "VersionMigration": "API version compatibility"}, "interfaces": {"GraphQL Endpoint": "Query and mutations", "Sync Protocol": "Custom sync protocol", "Push Service": "FCM/APNS integration"}, "tradeoffs": "Unified API, offline support, real-time updates; Complexity in conflict resolution, sync protocol overhead, migration challenges", "anti_patterns_avoided": ["Separate APIs per platform", "No offline support", "Ignoring network state", "Breaking changes"], "code_example": "// Sync Protocol\npublic interface ISyncProtocol\n{\n Task<SyncResponse> SyncAsync(SyncRequest request);\n}\n\npublic class SyncProtocol : ISyncProtocol\n{\n public async Task<SyncResponse> SyncAsync(SyncRequest request)\n {\n var response = new SyncResponse();\n \n // Process client changes\n foreach (var change in request.Changes)\n {\n try\n {\n await ProcessChangeAsync(change);\n response.SuccessfulChanges.Add(change.Id);\n }\n catch (ConflictException ex)\n {\n response.Conflicts.Add(new Conflict\n {\n Id = change.Id,\n ClientVersion = change.Version,\n ServerVersion = ex.ServerVersion,\n ServerData = ex.ServerData\n });\n }\n }\n \n // Fetch server changes since last sync\n var serverChanges = await _repository.GetChangesSinceAsync(\n request.LastSyncTime,\n request.DeviceId);\n \n response.Changes = serverChanges;\n response.SyncToken = GenerateSyncToken();\n \n return response;\n }\n \n private async Task ProcessChangeAsync(DataChange change)\n {\n var current = await _repository.GetAsync(change.EntityId);\n \n if (current.Version != 
change.BaseVersion)\n {\n throw new ConflictException(current);\n }\n \n // Apply change\n change.Version++;\n change.SyncedAt = DateTime.UtcNow;\n await _repository.SaveAsync(change);\n \n // Broadcast to other devices\n await _pushService.SendAsync(new DataChangedNotification\n {\n EntityId = change.EntityId,\n EntityType = change.EntityType,\n Version = change.Version\n });\n }\n}\n\n// GraphQL Schema with Sync Support\npublic class MutationType : ObjectType<Mutation>\n{\n protected override void Configure(IObjectTypeDescriptor<Mutation> descriptor)\n {\n descriptor.Field(m => m.CreateTodo(default))\n .Type<TodoType>()\n .UseMutationConvention();\n \n descriptor.Field(m => m.UpdateTodo(default))\n .Type<TodoType>()\n .UseMutationConvention();\n \n descriptor.Field(m => m.DeleteTodo(default))\n .Type<BooleanType>()\n .UseMutationConvention();\n }\n}\n\n// Offline Mutation Queue\npublic class OfflineQueue\n{\n private readonly Queue<MutationRequest> _queue;\n private readonly IConnectivityService _connectivity;\n \n public void Enqueue(MutationRequest mutation)\n {\n _queue.Enqueue(mutation);\n \n if (_connectivity.IsConnected)\n {\n _ = ProcessQueueAsync();\n }\n }\n \n private async Task ProcessQueueAsync()\n {\n while (_queue.TryDequeue(out var mutation))\n {\n try\n {\n await _graphqlClient.SendMutationAsync(mutation);\n await _storage.RemoveAsync(mutation.Id);\n }\n catch (Exception ex)\n {\n _logger.LogError(ex, \"Failed to process offline mutation\");\n _queue.Enqueue(mutation); // Re-queue\n break;\n }\n }\n }\n}"}
{"requirement": "Design a repository pattern implementation", "context": "Domain-driven design, multiple data sources, testability requirements", "architecture_decision": "Repository pattern with Unit of Work and specifications", "components": {"Repository": "Generic CRUD operations", "Specification": "Encapsulated query logic", "UnitOfWork": "Transaction management", "DataContext": "Database context abstraction", "Mapping": "Entity-DTO mapping"}, "interfaces": {"IRepository": "Standard repository interface", "IUnitOfWork": "Transaction management", "ISpecification": "Composable queries"}, "tradeoffs": "Clean separation, testable, encapsulated queries; Additional abstraction layer, potential N+1 queries, learning curve", "anti_patterns_avoided": ["Direct data access in business logic", "Repository returning domain entities to API", "No transaction management", "Complex specifications"], "code_example": "// Repository Interface\npublic interface IRepository<T> where T : Entity\n{\n Task<T> GetByIdAsync(Guid id);\n Task<IEnumerable<T>> ListAsync(ISpecification<T> spec);\n Task<T> FirstOrDefaultAsync(ISpecification<T> spec);\n Task AddAsync(T entity);\n Task UpdateAsync(T entity);\n Task DeleteAsync(T entity);\n}\n\n// Generic Repository Implementation\npublic class Repository<T> : IRepository<T> where T : Entity\n{\n private readonly DbContext _context;\n private readonly ISpecificationEvaluator<T> _evaluator;\n \n public async Task<IEnumerable<T>> ListAsync(ISpecification<T> spec)\n {\n var query = _context.Set<T>().AsQueryable();\n \n query = _evaluator.GetQuery(query, spec);\n \n return await query.ToListAsync();\n }\n \n public async Task AddAsync(T entity)\n {\n await _context.Set<T>().AddAsync(entity);\n }\n}\n\n// Specification Pattern\npublic interface ISpecification<T>\n{\n Expression<Func<T, bool>> Criteria { get; }\n List<Expression<Func<T, object>>> Includes { get; }\n List<string> IncludeStrings { get; }\n Expression<Func<T, object>> OrderBy { get; 
}\n}\n\npublic class Specification<T> : ISpecification<T>\n{\n public Expression<Func<T, bool>> Criteria { get; private set; }\n public List<Expression<Func<T, object>>> Includes { get; } = new();\n public List<string> IncludeStrings { get; } = new();\n public Expression<Func<T, object>> OrderBy { get; private set; }\n \n public Specification<T> Where(Expression<Func<T, bool>> criteria)\n {\n Criteria = criteria;\n return this;\n }\n \n public Specification<T> Include(Expression<Func<T, object>> include)\n {\n Includes.Add(include);\n return this;\n }\n}\n\n// Domain Specification\npublic class ActiveUsersSpecification : Specification<User>\n{\n public ActiveUsersSpecification(DateTime activeSince)\n {\n Criteria = u => u.LastLoginAt >= activeSince && u.IsActive;\n Include(u => u.Profile);\n Include(u => u.Roles);\n OrderBy = u => u.LastLoginAt;\n }\n}\n\n// Usage in Service\npublic class UserService\n{\n private readonly IRepository<User> _repository;\n private readonly IUnitOfWork _unitOfWork;\n \n public async Task<IEnumerable<User>> GetActiveUsersAsync(DateTime since)\n {\n var spec = new ActiveUsersSpecification(since);\n return await _repository.ListAsync(spec);\n }\n \n public async Task UpdateUserAsync(User user)\n {\n await _repository.UpdateAsync(user);\n await _unitOfWork.SaveChangesAsync();\n }\n}"}
{"requirement": "Design dependency injection for a modular application", "context": "Plugin architecture, dynamic module loading, lifetimes management", "architecture_decision": "Container-based DI with module registration", "components": {"DIContainer": "Inversion of control container", "ModuleRegistry": "Dynamic module discovery", "ServiceLocator": "Service resolution", "LifetimeManager": "Scoped and singleton management", "ModuleInitializer": "Module startup logic"}, "interfaces": {"IServiceCollection": "Service registration", "IServiceProvider": "Service resolution", "IModule": "Module interface"}, "tradeoffs": "Loose coupling, testable, modular; Runtime overhead, complex dependencies, potential for service locator anti-pattern", "anti_patterns_avoided": ["Service locator pattern", "Static service access", "Circular dependencies", "Unmanaged lifetimes"], "code_example": "// Module Interface\npublic interface IModule\n{\n void ConfigureServices(IServiceCollection services);\n void Configure(IApplicationBuilder app);\n}\n\n// Concrete Module\npublic class UserModule : IModule\n{\n public void ConfigureServices(IServiceCollection services)\n {\n services.AddScoped<IUserService, UserService>();\n services.AddScoped<IUserRepository, UserRepository>();\n services.AddSingleton<IPasswordHasher, BCryptPasswordHasher>();\n }\n \n public void Configure(IApplicationBuilder app)\n {\n app.UseMiddleware<UserContextMiddleware>();\n }\n}\n\n// Module Registry\npublic class ModuleRegistry\n{\n private readonly List<IModule> _modules = new();\n \n public void RegisterModule<T>() where T : IModule, new()\n {\n var module = new T();\n _modules.Add(module);\n }\n \n public void ConfigureServices(IServiceCollection services)\n {\n foreach (var module in _modules)\n {\n module.ConfigureServices(services);\n }\n }\n \n public void Configure(IApplicationBuilder app)\n {\n foreach (var module in _modules)\n {\n module.Configure(app);\n }\n }\n}\n\n// Service Factory with Lifetime 
Management\npublic class ServiceFactory : IServiceFactory\n{\n private readonly IServiceProvider _provider;\n private readonly ConcurrentDictionary<string, Lazy<object>> _singletons;\n private readonly AsyncLocal<Dictionary<string, object>> _scoped;\n \n public T GetService<T>() where T : class\n {\n return _provider.GetRequiredService<T>();\n }\n \n public T GetRequiredService<T>() where T : class\n {\n return _provider.GetRequiredService<T>();\n }\n \n public T GetScopedService<T>() where T : class\n {\n var scopeId = GetScopeId();\n \n if (_scoped.Value?.TryGetValue(typeof(T).FullName, out var service) == true)\n {\n return (T)service;\n }\n \n var newService = _provider.GetRequiredService<T>();\n _scoped.Value ??= new();\n _scoped.Value[typeof(T).FullName] = newService;\n \n return newService;\n }\n}\n\n// Decorator Pattern with DI\npublic class CachedUserService : IUserService\n{\n private readonly IUserService _inner;\n private readonly IMemoryCache _cache;\n \n public CachedUserService(IUserService inner, IMemoryCache cache)\n {\n _inner = inner;\n _cache = cache;\n }\n \n public async Task<User> GetUserAsync(Guid id)\n {\n var cacheKey = $\"user:{id}\";\n \n return await _cache.GetOrCreateAsync(cacheKey, async entry =>\n {\n entry.AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(5);\n return await _inner.GetUserAsync(id);\n });\n }\n \n // Other methods forwarded to _inner\n}\n\n// Decorator Registration\nservices.Decorate<IUserService, CachedUserService>();"}
{"requirement": "Design a database schema for a time-series monitoring system", "context": "Millions of metrics per second, retention policies, aggregation queries", "architecture_decision": "Time-series database with downsampling", "components": {"IngestionService": "High-throughput metric ingestion", "DownsamplingService": "Automated data rollup", "RetentionManager": "Automated data expiration", "QueryEngine": "Optimized time-range queries", "AlertingEngine": "Threshold-based alerts"}, "interfaces": {"Ingestion API": "Batch metric submission", "Query API": "Time-range aggregations", "Alert API": "Rule management"}, "tradeoffs": "Excellent write performance, efficient storage, fast queries; Specialized database, limited query flexibility, operational complexity", "anti_patterns_avoided": ["Relational database for time-series", "No downsampling", "Unbounded data growth", "Expensive full scans"], "code_example": "// Time-series Schema Design\npublic class MetricSchema\n{\n public class RawMetrics { public Guid SeriesId; public DateTime Timestamp; public double Value; }\n public class MetricRollup5m { public double Min; public double Max; public double Sum; }\n}"}
{"requirement": "Build a real-time analytics dashboard", "context": "Live metrics, multiple visualization types, 1000+ concurrent viewers", "architecture_decision": "WebSocket with server-sent events and pre-aggregation", "components": {"WebSocketManager": "Real-time connection handling", "EventAggregator": "Collects and buffers events", "PreAggregator": "Computes common aggregations", "DashboardState": "Client state synchronization"}, "interfaces": {"WebSocket Protocol": "JSON-based message format", "REST API": "Historical data queries"}, "tradeoffs": "Real-time updates, efficient bandwidth use, scalable; Complex state management, eventual consistency", "anti_patterns_avoided": ["Polling for updates", "Sending raw events", "No compression"], "code_example": "public class DashboardWebSocketManager { public async Task BroadcastMetricAsync(string metric, MetricValue value) { /* broadcast to subscribers */ } }"}
{"requirement": "Design a plugin system for a developer tool", "context": "Third-party extensions, sandboxing, version management", "architecture_decision": "Plugin architecture with isolation and lifecycle management", "components": {"PluginLoader": "Dynamic plugin discovery and loading", "PluginRegistry": "Plugin metadata and versions", "Sandbox": "Execution isolation"}, "interfaces": {"Plugin Interface": "Standard plugin contract", "API Surface": "Exposed host functionality"}, "tradeoffs": "Extensible, sandboxed, version-managed; Complex security, performance overhead", "anti_patterns_avoided": ["Unsandboxed execution", "Breaking API changes", "No version constraints"], "code_example": "public interface IPlugin { string Id { get; } string Version { get; } Task InitializeAsync(IPluginContext context); }"}
{"requirement": "Design a search service for e-commerce products", "context": "Millions of products, faceted search, autocomplete, relevance ranking", "architecture_decision": "Elasticsearch with synonym expansion and boosting", "components": {"IndexingService": "Product data synchronization", "SearchEngine": "Query parsing and execution", "RelevanceScorer": "Custom ranking algorithms"}, "interfaces": {"Search API": "Full-text and faceted search", "Autocomplete API": "Type-ahead suggestions"}, "tradeoffs": "Fast search, flexible queries, scalable; Indexing overhead, eventual consistency", "anti_patterns_avoided": ["Database LIKE queries", "No synonym handling", "Static relevance"], "code_example": "public async Task<SearchResults> SearchAsync(SearchRequest request) { var query = new MultiMatchQuery { Fields = new[] { \"name^3\", \"description^2\" } }; }"}
{"requirement": "Design a workflow orchestration system", "context": "Complex business processes, human approvals, external integrations", "architecture_decision": "State machine with saga pattern", "components": {"WorkflowEngine": "Process execution and state management", "StateStore": "Workflow state persistence", "CompensationHandler": "Rollback logic"}, "interfaces": {"Workflow Definition": "DSL for process definition", "Execution API": "Start and manage workflows"}, "tradeoffs": "Reliable execution, compensatable, auditable; Complex state management", "anti_patterns_avoided": ["Long-running transactions", "No compensation", "Ignoring failures"], "code_example": "public class OrderWorkflow { public static WorkflowDefinition Define() { return new WorkflowBuilder().Transition(WorkflowState.Started, WorkflowState.Validating); } }"}
{"requirement": "Design a session management system for web applications", "context": "Stateless servers, distributed deployment, session affinity not required", "architecture_decision": "Distributed session store with sticky session fallback", "components": {"SessionStore": "Redis-backed session storage", "SessionMiddleware": "Request/session binding", "SessionCleanup": "Expired session removal"}, "interfaces": {"Session API": "Get/set session data", "Events": "Session lifecycle events"}, "tradeoffs": "Scalable, stateless servers, shared sessions; Redis dependency, serialization overhead", "anti_patterns_avoided": ["In-memory sessions", "Session fixation vulnerabilities", "No expiration"], "code_example": "public class DistributedSession : ISession { public async Task SetAsync(string key, byte[] value) { _data[key] = value; } }"}
{"requirement": "Design an API versioning strategy", "context": "Long-lived API, breaking changes required, multiple client versions", "architecture_decision": "URL-based versioning with deprecation timeline", "components": {"VersionRouter": "Routes based on API version", "VersionValidator": "Validates version support", "DeprecationMonitor": "Tracks version usage"}, "interfaces": {"Versioned API": "Version-specific endpoints", "Negotiation": "Version selection logic"}, "tradeoffs": "Clear versioning, backward compatibility; API surface duplication, maintenance overhead", "anti_patterns_avoided": ["Breaking changes without versioning", "Unlimited versions", "No deprecation policy"], "code_example": "[ApiVersion(\"2.0\")]\n[Route(\"api/v{version:apiVersion}/[controller]\")]\npublic class ProductsV2Controller : ControllerBase { }"}
{"requirement": "Design a monitoring and alerting system", "context": "Distributed microservices, real-time alerting, metric visualization", "architecture_decision": "Time-series metrics with rule-based alerting", "components": {"MetricCollector": "Gathers metrics from services", "MetricProcessor": "Processes and aggregates metrics", "AlertEngine": "Evaluates alert rules"}, "interfaces": {"Metrics API": "Metric submission", "Alert API": "Alert rule management"}, "tradeoffs": "Comprehensive monitoring, real-time alerts; Operational complexity, alert fatigue risk", "anti_patterns_avoided": ["No baselines", "Alerting on every metric", "Missing context"], "code_example": "public class AlertEngine { public async Task EvaluateAsync(AlertRule rule) { var violations = metrics.Where(dp => IsViolation(dp.Value, rule)); } }"}
{"requirement": "Design a distributed tracing system", "context": "Microservices architecture, debugging cross-service requests", "architecture_decision": "OpenTelemetry with distributed context propagation", "components": {"Tracer": "Span creation and management", "ContextPropagator": "Cross-process context", "SpanExporter": "Send traces to backend"}, "interfaces": {"Tracing API": "Manual instrumentation", "Auto-instrumentation": "Automatic tracing"}, "tradeoffs": "Complete request visibility, debugging insights; Overhead, storage costs", "anti_patterns_avoided": ["Tracing everything", "Missing context", "No sampling"], "code_example": "using var span = _tracer.StartActiveSpan(\"CreateOrder\"); span.SetAttribute(\"order.customer_id\", request.CustomerId);"}
{"requirement": "Design a rate limiting system", "context": "API protection, fair usage, DDoS mitigation", "architecture_decision": "Token bucket with sliding window", "components": {"RateLimiter": "Core rate limiting logic", "PolicyEngine": "Rate limit policies", "DistributedCounter": "Redis-backed counters"}, "interfaces": {"Rate Limit API": "Check and consume tokens", "Policy API": "Manage rate limits"}, "tradeoffs": "Flexible policies, distributed support; Redis dependency, eventual consistency", "anti_patterns_avoided": ["Only IP-based limits", "No burst handling", "Fixed window"], "code_example": "public class TokenBucketRateLimiter : IRateLimiter { public async Task<RateLimitResult> CheckAsync(string key) { /* token bucket logic */ } }"}
{"requirement": "Build a file upload service", "context": "Large files, resumable uploads, virus scanning required", "architecture_decision": "Chunked uploads with background processing", "components": {"UploadController": "Handles chunk upload requests", "ChunkStorage": "Temporary chunk storage", "FileAssembler": "Combines chunks into final file", "VirusScanner": "Security scanning", "CDNPublisher": "Distributes to CDN"}, "interfaces": {"Upload API": "Chunk upload endpoints", "Status API": "Upload progress tracking"}, "tradeoffs": "Resumable, parallel uploads, secure scanning; Complex state management, storage overhead", "anti_patterns_avoided": ["Monolithic uploads", "No virus scanning", "Blocking operations"], "code_example": "public async Task<IActionResult> UploadChunk(UploadChunkRequest request) { await _chunkStorage.StoreAsync(request.FileId, request.ChunkNumber, request.Data); }"}
{"requirement": "Design a notification system", "context": "Multiple channels (email, SMS, push), user preferences, delivery tracking", "architecture_decision": "Message queue with provider abstraction", "components": {"NotificationService": "Queues notifications", "ProviderRouter": "Routes to appropriate provider", "TemplateEngine": "Message formatting", "DeliveryTracker": "Delivery status tracking", "PreferenceStore": "User notification settings"}, "interfaces": {"Notification API": "Send notifications", "Provider Interface": "Provider abstraction"}, "tradeoffs": "Multi-channel support, user control, trackable; Provider complexity, delivery delays", "anti_patterns_avoided": ["Tight coupling to providers", "Ignoring preferences", "No delivery tracking"], "code_example": "public interface INotificationProvider { Task<DeliveryResult> SendAsync(NotificationMessage message); }"}
{"requirement": "Build a recommendation engine", "context": "E-commerce product recommendations, real-time personalization", "architecture_decision": "Hybrid collaborative filtering with ML models", "components": {"RecommendationService": "Generates recommendations", "ModelStore": "ML model management", "FeatureStore": "User and item features", "RankingEngine": "Re-ranking logic", "CacheLayer": "Recommendation caching"}, "interfaces": {"Recommendation API": "Get recommendations", "Feedback API": "User interaction tracking"}, "tradeoffs": "Personalized, scalable, fast; Complex ML pipeline, cold start problem", "anti_patterns_avoided": ["Static recommendations", "Ignoring user feedback", "No diversity"], "code_example": "public async Task<IEnumerable<Product>> GetRecommendationsAsync(Guid userId) { var features = await _featureStore.GetFeaturesAsync(userId); }"}
{"requirement": "Design an internationalization system", "context": "Multiple languages, date/time formats, currencies", "architecture_decision": "Resource-based localization with format providers", "components": {"LocalizationService": "Translation lookup", "FormatProvider": "Date/time/currency formatting", "TranslationEditor": "Translation management UI", "ResourceCache": "Cached translations"}, "interfaces": {"Localization API": "Get localized strings", "Format API": "Format values for locale"}, "tradeoffs": "User-friendly, locale-aware, maintainable; Translation overhead, storage complexity", "anti_patterns_avoided": ["Hardcoded strings", "Ignoring RTL languages", "No format awareness"], "code_example": "public string Localize(string key, string locale = \"en\") { return _resources.GetResource(locale, key); }"}
{"requirement": "Build a logging infrastructure", "context": "Distributed systems, structured logging, log aggregation", "architecture_decision": "Structured logging with centralized aggregation", "components": {"Logger": "Structured log creation", "LogShipper": "Sends logs to aggregator", "Indexer": "Log indexing and search", "AlertAnalyzer": "Error pattern detection"}, "interfaces": {"Logging API": "Log creation", "Query API": "Log search"}, "tradeoffs": "Searchable, structured, aggregatable; Parsing overhead, storage costs", "anti_patterns_avoided": ["Unstructured logs", "Silent failures", "Missing context"], "code_example": "_logger.LogInformation(\"OrderCreated\", new { OrderId = order.Id, CustomerId = order.CustomerId });"}
{"requirement": "Design a feature flag system", "context": "Progressive rollouts, A/B testing, instant rollbacks", "architecture_decision": "Dynamic configuration with targeting rules", "components": {"FeatureFlagStore": "Flag storage", "TargetingEngine": "User targeting logic", "RolloutController": "Gradual rollout management", "MetricsCollector": "Feature usage tracking"}, "interfaces": {"Feature API": "Check feature flags", "Admin API": "Flag management"}, "tradeoffs": "Safe deployments, targeted rollouts, instant disable; Configuration complexity", "anti_patterns_avoided": ["Redeploying for flags", "No targeting", "Missing metrics"], "code_example": "if (await _featureFlags.IsEnabledAsync(\"new-checkout\", user)) { /* use new flow */ }"}
{"requirement": "Build a job scheduling system", "context": "Recurring tasks, distributed execution, fault tolerance", "architecture_decision": "Quartz-based scheduler with persistence", "components": {"Scheduler": "Job scheduling engine", "JobStore": "Job definition storage", "ExecutionPool": "Worker thread pool", "RetryHandler": "Failed job retry logic"}, "interfaces": {"Schedule API": "Manage jobs", "Job Interface": "Job execution contract"}, "tradeoffs": "Reliable execution, flexible scheduling, fault tolerant; Complexity, resource management", "anti_patterns_avoided": ["In-memory scheduling", "No retry logic", "Blocking jobs"], "code_example": "public class SyncJob : IJob { public Task Execute(IJobExecutionContext context) { /* sync logic */ } }"}
{"requirement": "Design an event sourcing implementation", "context": "Audit requirements, temporal queries, event replay", "architecture_decision": "Event store with snapshot optimization", "components": {"EventStore": "Immutable event log", "SnapshotStore": "Periodic state snapshots", "EventBus": "Event publication", "ProjectionBuilder": "Read model construction"}, "interfaces": {"Event API": "Event persistence", "Replay API": "Event replay"}, "tradeoffs": "Complete audit, temporal queries, replay capability; Storage overhead, complexity", "anti_patterns_avoided": ["Deleting events", "No snapshots", "Event mutation"], "code_example": "public async Task AppendAsync(Guid streamId, IEnumerable<IDomainEvent> events) { await _store.AppendAsync(streamId, events); }"}
{"requirement": "Build a webhooks delivery system", "context": "Third-party integrations, reliable delivery required", "architecture_decision": "Queue-based delivery with retry logic", "components": {"WebhookReceiver": "Accepts webhook registrations", "DeliveryQueue": "Message queue for webhooks", "DeliveryWorker": "Processes webhook delivery", "RetryManager": "Failed delivery retry logic"}, "interfaces": {"Webhook API": "Webhook registration", "Delivery API": "Webhook delivery"}, "tradeoffs": "Reliable delivery, asynchronous, retryable; Queue management, complexity", "anti_patterns_avoided": ["Synchronous delivery", "No retry logic", "Ignoring failures"], "code_example": "public async Task DeliverAsync(Webhook webhook, object payload) { await _httpClient.PostAsync(webhook.Url, payload); }"}
{"requirement": "Design a permission system", "context": "Fine-grained permissions, dynamic policies, inheritance", "architecture_decision": "RBAC with policy-based extensions", "components": {"RoleStore": "Role definitions", "PermissionStore": "Permission definitions", "PolicyEngine": "Policy evaluation", "PermissionCache": "Cached permissions"}, "interfaces": {"Authorization API": "Permission checks", "Policy API": "Policy management"}, "tradeoffs": "Flexible, granular, cacheable; Complex policies, evaluation overhead", "anti_patterns_avoided": ["Hardcoded permissions", "No inheritance", "Missing context"], "code_example": "public async Task<bool> CheckPermissionAsync(Guid userId, string resource, string action) { /* policy evaluation */ }"}
{"requirement": "Build a data export service", "context": "Large datasets, multiple formats, long-running exports", "architecture_decision": "Async job with streaming and chunking", "components": {"ExportJob": "Export job management", "DataStreamer": "Streamed data retrieval", "FormatConverter": "Format transformation", "ChunkWriter": "Chunked file writing"}, "interfaces": {"Export API": "Start exports", "Download API": "Retrieve exports"}, "tradeoffs": "Scalable, memory-efficient, resumable; Complex state, cleanup challenges", "anti_patterns_avoided": ["Synchronous exports", "Loading all data", "No cleanup"], "code_example": "public async Task<string> StartExportAsync(ExportRequest request) { /* create job, return ID */ }"}
{"requirement": "Design a database migration system", "context": "Multiple databases, rollback support, zero-downtime", "architecture_decision": "Versioned migrations with backward compatibility", "components": {"MigrationRunner": "Migration execution", "VersionTracker": "Migration state tracking", "RollbackHandler": "Rollback logic", "ValidationEngine": "Pre-migration validation"}, "interfaces": {"Migration API": "Apply migrations", "Rollback API": "Rollback migrations"}, "tradeoffs": "Controlled changes, reversible, tracked; Complexity, testing overhead", "anti_patterns_avoided": ["Manual schema changes", "No rollback", "Breaking changes"], "code_example": "public class AddUserEmailMigration : Migration { public override void Up() { AddColumn(\"users\", \"email\"); } }"}
{"requirement": "Build a real-time chat system", "context": "Multi-user rooms, message persistence, online presence", "architecture_decision": "WebSocket with message broker", "components": {"WebSocketHandler": "Connection management", "MessageBroker": "Message routing", "MessageStore": "Message persistence", "PresenceTracker": "User online status"}, "interfaces": {"Chat API": "Send/receive messages", "Presence API": "Online status"}, "tradeoffs": "Real-time, scalable, persistent; Connection management, complexity", "anti_patterns_avoided": ["Polling", "No persistence", "Missing acknowledgments"], "code_example": "public async Task SendMessageAsync(Guid roomId, ChatMessage message) { await _broker.PublishAsync(roomId, message); }"}
{"requirement": "Design a backup and disaster recovery system", "context": "Critical data, automated backups, point-in-time recovery", "architecture_decision": "Incremental backups with point-in-time recovery", "components": {"BackupScheduler": "Automated backup scheduling", "BackupStorage": "Secure backup storage", "RecoveryManager": "Restore orchestration", "IntegrityChecker": "Backup verification"}, "interfaces": {"Backup API": "Trigger backups", "Recovery API": "Restore data"}, "tradeoffs": "Data protection, automated recovery, verified; Storage costs, complexity", "anti_patterns_avoided": ["No testing", "Single backup copy", "No encryption"], "code_example": "public async Task<BackupResult> CreateBackupAsync(BackupRequest request) { /* backup logic */ }"}
{"requirement": "Build an API documentation system", "context": "Multiple APIs, versioning, interactive docs", "architecture_decision": "OpenAPI with dynamic generation", "components": {"SchemaGenerator": "OpenAPI schema generation", "DocumentationRenderer": "Interactive UI", "ExampleStore": "Request/response examples", "VersionManager": "API version tracking"}, "interfaces": {"Documentation API": "Fetch documentation", "Schema API": "Get OpenAPI schema"}, "tradeoffs": "Always current, interactive, standardized; Generation overhead, complexity", "anti_patterns_avoided": ["Manual docs", "Outdated examples", "No versioning"], "code_example": "public OpenApiDocument GenerateSchema() { /* OpenAPI generation */ }"}
{"requirement": "Design a content management system", "context": "Multi-tenant, versioned content, workflow support", "architecture_decision": "Headless CMS with content modeling", "components": {"ContentRepository": "Content storage", "ContentModel": "Content type definitions", "WorkflowEngine": "Approval workflows", "VersionControl": "Content versioning"}, "interfaces": {"Content API": "Content CRUD", "Workflow API": "Workflow management"}, "tradeoffs": "Flexible, versioned, workflow-capable; Complex modeling, storage overhead", "anti_patterns_avoided": ["Hardcoded schemas", "No versioning", "Direct database access"], "code_example": "public async Task<Content> CreateContentAsync(string contentType, JObject data) { /* content creation */ }"}
{"requirement": "Build a telemetry and analytics system", "context": "User behavior tracking, funnel analysis, cohort analysis", "architecture_decision": "Event-based analytics with batch processing", "components": {"EventCollector": "Event ingestion", "EventPipeline": "Data processing pipeline", "AnalyticsEngine": "Analysis computation", "ReportGenerator": "Report generation"}, "interfaces": {"Tracking API": "Event submission", "Analytics API": "Query analytics"}, "tradeoffs": "Rich insights, flexible, scalable; Processing delays, storage costs", "anti_patterns_avoided": ["PII leakage", "No validation", "Missing context"], "code_example": "public async Task TrackEventAsync(string eventName, Dictionary<string, object> properties) { /* event tracking */ }"}
{"requirement": "Design a secrets management system", "context": "Sensitive data, rotation requirements, audit trail", "architecture_decision": "Encrypted storage with rotation support", "components": {"SecretStore": "Encrypted secret storage", "RotationManager": "Secret rotation", "AuditLogger": "Access logging", "AccessPolicy": "Access control"}, "interfaces": {"Secret API": "Get/set secrets", "Rotation API": "Rotate secrets"}, "tradeoffs": "Secure, audited, rotatable; Complex access control, encryption overhead", "anti_patterns_avoided": ["Secrets in code", "No rotation", "Weak encryption"], "code_example": "public async Task<string> GetSecretAsync(string key) { return await _store.DecryptAsync(key); }"}
{"requirement": "Build a bulk import system", "context": "Large CSV/Excel files, validation, error handling", "architecture_decision": "Streaming import with batch processing", "components": {"FileParser": "File parsing and validation", "BatchProcessor": "Batched database writes", "ErrorCollector": "Error aggregation", "ProgressTracker": "Import progress"}, "interfaces": {"Import API": "Start imports", "Progress API": "Track progress"}, "tradeoffs": "Memory-efficient, resumable, validated; Complex error handling", "anti_patterns_avoided": ["Loading entire file", "Silent failures", "No validation"], "code_example": "public async Task<ImportResult> ImportAsync(Stream file, ImportMapping mapping) { /* streaming import */ }"}
{"requirement": "Design a service mesh integration", "context": "Microservices, traffic management, security", "architecture_decision": "Sidecar proxy pattern", "components": {"Proxy": "Envoy sidecar", "ControlPlane": "Configuration management", "ServiceDiscovery": "Service registration", "TrafficSplitter": "Canary deployments"}, "interfaces": {"Proxy API": "Proxy configuration", "Discovery API": "Service registration"}, "tradeoffs": "Traffic control, security, observability; Operational complexity, latency", "anti_patterns_avoided": ["Direct service-to-service", "No mutual TLS", "Missing metrics"], "code_example": "public class ServiceMeshProxy { public async Task RouteAsync(Request request) { /* proxy logic */ } }"}
{"requirement": "Build a cron job monitoring system", "context": "Scheduled jobs, failure detection, alerts", "architecture_decision": "Heartbeat-based monitoring", "components": {"HeartbeatTracker": "Job heartbeat monitoring", "FailureDetector": "Missed heartbeat detection", "AlertManager": "Alert triggering", "Dashboard": "Status visualization"}, "interfaces": {"Heartbeat API": "Heartbeat submission", "Status API": "Job status"}, "tradeoffs": "Proactive monitoring, alerting, visibility; Heartbeat overhead", "anti_patterns_avoided": ["No monitoring", "Silent failures", "Missing context"], "code_example": "public async Task RecordHeartbeatAsync(string jobId) { await _store.RecordAsync(jobId, DateTime.UtcNow); }"}
{"requirement": "Design a multi-region deployment strategy", "context": "Global user base, low latency requirements, disaster recovery", "architecture_decision": "Active-active with data replication", "components": {"TrafficRouter": "Geo-based routing", "DataReplicator": "Cross-region replication", "FailoverManager": "Region failover", "ConflictResolver": "Data conflict resolution"}, "interfaces": {"Routing API": "Traffic routing", "Replication API": "Data sync"}, "tradeoffs": "Low latency, high availability, disaster recovery; Complex consistency, higher costs", "anti_patterns_avoided": ["Single region", "No failover", "Ignoring conflicts"], "code_example": "public async Task<RouteResult> RouteAsync(Request request) { return _nearestRegion; }"}
{"requirement": "Build a performance profiling system", "context": "Production debugging, performance analysis", "architecture_decision": "Sampling profiler with flame graphs", "components": {"Profiler": "Execution sampling", "TraceCollector": "Call trace collection", "Analyzer": "Performance analysis", "Visualizer": "Flame graph generation"}, "interfaces": {"Profiling API": "Start/stop profiling", "Analysis API": "Performance reports"}, "tradeoffs": "Production-safe, detailed insights; Sampling overhead, storage", "anti_patterns_avoided": ["Profiling everything", "High overhead", "No anonymization"], "code_example": "public async Task<ProfileResult> ProfileAsync(Func<Task> operation) { /* profiling logic */ }"}
{"requirement": "Design a canary deployment system", "context": "Progressive rollouts, automated rollbacks", "architecture_decision": "Traffic-based canary with metrics validation", "components": {"TrafficSplitter": "Traffic allocation", "MetricsValidator": "Health validation", "RollbackTrigger": "Automated rollback", "ProgressController": "Canary progression"}, "interfaces": {"Canary API": "Start canary", "Metrics API": "Health metrics"}, "tradeoffs": "Safe deployments, automated rollback, gradual rollout; Complex validation, longer deployments", "anti_patterns_avoided": ["No validation", "Instant full rollout", "Ignoring metrics"], "code_example": "public async Task<CanaryResult> StartCanaryAsync(CanaryConfig config) { /* canary logic */ }"}
{"requirement": "Build a database connection pooling system", "context": "High concurrency, connection limits, performance optimization", "architecture_decision": "HikariCP-style pooling with validation", "components": {"ConnectionPool": "Connection management", "Validator": "Connection health checks", "EvictionPolicy": "Idle connection eviction", "MetricsCollector": "Pool metrics"}, "interfaces": {"Pool API": "Get/return connections", "Config API": "Pool configuration"}, "tradeoffs": "Reduced overhead, better performance, controlled; Connection limits, validation overhead", "anti_patterns_avoided": ["Unlimited connections", "No validation", "No metrics"], "code_example": "public async Task<IDbConnection> GetConnectionAsync() { return await _pool.GetAsync(); }"}
{"requirement": "Design a distributed lock system", "context": "Distributed coordination, resource contention", "architecture_decision": "Redis-based distributed locks with auto-renewal", "components": {"LockManager": "Lock acquisition/release", "RenewalService": "Lock auto-renewal", "WaitQueue": "Waiting request queue", "Monitor": "Lock state monitoring"}, "interfaces": {"Lock API": "Acquire/release locks", "Monitor API": "Lock state"}, "tradeoffs": "Distributed coordination, auto-renewal, fair queueing; Redis dependency, network latency", "anti_patterns_avoided": ["In-process locks", "No expiration", "No renewal"], "code_example": "public async Task<IDistributedLock> AcquireLockAsync(string key, TimeSpan expiration) { /* lock acquisition */ }"}
{"requirement": "Build a GraphQL subscription system", "context": "Real-time updates, filtered subscriptions", "architecture_decision": "WebSocket-based subscriptions with Redis pub/sub", "components": {"SubscriptionManager": "Subscription lifecycle", "EventBroker": "Event distribution", "FilterEngine": "Subscription filtering", "ReconnectionHandler": "Reconnection management"}, "interfaces": {"Subscription API": "Subscribe/unsubscribe", "Event API": "Event publication"}, "tradeoffs": "Real-time updates, filtered events, scalable; WebSocket complexity, state management", "anti_patterns_avoided": ["No filtering", "Unbounded events", "Missing reconnection"], "code_example": "public async Task<ISubscriptionResult> SubscribeAsync(string query, IObserver<GraphQLResult> observer) { /* subscription logic */ }"}
{"requirement": "Design a blue-green deployment system", "context": "Zero-downtime deployments, instant rollback", "architecture_decision": "Traffic switching with identical environments", "components": {"EnvironmentManager": "Environment provisioning", "TrafficSwitch": "Traffic routing", "HealthChecker": "Environment health", "RollbackTrigger": "Instant rollback"}, "interfaces": {"Deploy API": "Deploy to environment", "Switch API": "Switch traffic"}, "tradeoffs": "Zero downtime, instant rollback, safe; Double resources, complex orchestration", "anti_patterns_avoided": ["In-place upgrades", "No rollback", "Unequal environments"], "code_example": "public async Task<DeploymentResult> DeployAsync(DeploymentRequest request) { /* deployment logic */ }"}
{"requirement": "Build a log aggregation pipeline", "context": "Centralized logging, searchability, retention", "architecture_decision": "ELK stack with log shipping", "components": {"LogShipper": "Log forwarding", "LogParser": "Log parsing and structuring", "Indexer": "Elasticsearch indexing", "RetentionManager": "Log retention policies"}, "interfaces": {"Shipment API": "Log submission", "Query API": "Log search"}, "tradeoffs": "Centralized, searchable, scalable; Infrastructure complexity, parsing overhead", "anti_patterns_avoided": ["Local logs only", "No structure", "Unbounded retention"], "code_example": "public async Task ShipLogAsync(LogEntry entry) { await _indexer.IndexAsync(entry); }"}
{"requirement": "Design a API gateway rate limiting", "context": "Distributed rate limiting, fairness", "architecture_decision": "Redis token bucket with sliding window", "components": {"RateLimiter": "Rate limit enforcement", "PolicyStore": "Rate limit policies", "DistributedCounter": "Redis-backed counters", "MetricsCollector": "Usage metrics"}, "interfaces": {"Limit API": "Check/consume tokens", "Policy API": "Manage policies"}, "tradeoffs": "Distributed, fair, scalable; Redis dependency, eventual consistency", "anti_patterns_avoided": ["Local only", "No fairness", "Hardcoded limits"], "code_example": "public async Task<LimitResult> CheckLimitAsync(string key, int tokens) { /* rate limiting logic */ }"}
{"requirement": "Build a microservice communication layer", "context": "Service-to-service communication, resilience", "architecture_decision": "gRPC with service mesh", "components": {"gRPCServer": "gRPC server", "gRPCClient": "gRPC client", "ServiceDiscovery": "Service registration", "LoadBalancer": "Request distribution"}, "interfaces": {"Service API": "gRPC service definitions", "Discovery API": "Service registration"}, "tradeoffs": "High performance, type-safe, efficient; Protocol complexity, debugging challenges", "anti_patterns_avoided": ["REST for internal", "No discovery", "Hardcoded endpoints"], "code_example": "public class GreeterService : Greeter.GreeterBase { public override Task<HelloReply> SayHello(HelloRequest request) { /* gRPC logic */ } }"}
{"requirement": "Design a shadow database system", "context": "Testing with production data, performance validation", "architecture_decision": "Async replication with query shadowing", "components": {"Replicator": "Data replication", "QueryInterceptor": "Query interception", "ShadowExecutor": "Shadow query execution", "Comparator": "Result comparison"}, "interfaces": {"Shadow API": "Shadow query execution", "Comparison API": "Result comparison"}, "tradeoffs": "Safe testing, real data, performance insights; Extra load, storage costs", "anti_patterns_avoided": ["Direct production queries", "No comparison", "Missing replication"], "code_example": "public async Task<TResult> ShadowQueryAsync<TResult>(Func<Task<TResult>> query) { /* shadow execution */ }"}
{"requirement": "Build a state machine workflow engine", "context": "Complex business processes, state transitions", "architecture_decision": "State machine with event-driven transitions", "components": {"StateMachine": "State and transition definitions", "EventProcessor": "Event handling", "StateStore": "State persistence", "TransitionLogger": "Audit log"}, "interfaces": {"State API": "State management", "Event API": "Event submission"}, "tradeoffs": "Clear logic, auditable, debuggable; Complex definitions, state explosion", "anti_patterns_avoided": ["Spaghetti code", "No audit trail", "Missing states"], "code_example": "public class OrderStateMachine : StateMachine<OrderState> { public OrderStateMachine() { DefineTransition(OrderState.Created, OrderState.Validated); } }"}
{"requirement": "Design a GraphQL federation system", "context": "Multiple GraphQL services, unified schema", "architecture_decision": "Apollo Federation with composed schema", "components": {"Gateway": "Query routing", "Subgraph": "Service-specific schemas", "SchemaComposer": "Schema composition", "EntityResolver": "Cross-service entity resolution"}, "interfaces": {"Gateway API": "Unified GraphQL endpoint", "Federation API": "Subgraph schemas"}, "tradeoffs": "Unified API, service autonomy, scalable; Composition complexity, network latency", "anti_patterns_avoided": ["Monolithic schema", "Tight coupling", "Missing references"], "code_example": "[Key]\n[ExtendServiceType]\npublic class Product { } // Federation entity"}
{"requirement": "Build a distributed cache invalidation system", "context": "Multi-instance caching, consistency", "architecture_decision": "Pub/sub-based invalidation", "components": {"CacheStore": "Local cache", "InvalidationBus": "Invalidation events", "SubscriptionManager": "Event subscriptions", "ConsistencyChecker": "Consistency validation"}, "interfaces": {"Cache API": "Cache operations", "Invalidation API": "Invalidate cache"}, "tradeoffs": "Consistent, distributed, scalable; Event overhead, eventual consistency", "anti_patterns_avoided": ["No invalidation", "Local only", "Race conditions"], "code_example": "public async Task InvalidateAsync(string key) { await _bus.PublishAsync(new CacheInvalidated { Key = key }); }"}
{"requirement": "Design a service discovery system", "context": "Dynamic service instances, load balancing", "architecture_decision": "Consul-based service discovery", "components": {"ServiceRegistry": "Service registration", "HealthChecker": "Health monitoring", "DNSResolver": "Service DNS", "LoadBalancer": "Request distribution"}, "interfaces": {"Registration API": "Register services", "Discovery API": "Discover services"}, "tradeoffs": "Dynamic discovery, health-aware, self-healing; Consul dependency, network overhead", "anti_patterns_avoided": ["Hardcoded endpoints", "No health checks", "Static configuration"], "code_example": "public async Task RegisterAsync(ServiceInstance instance) { await _consul.Agent.ServiceRegister(instance); }"}
{"requirement": "Build a metric aggregation pipeline", "context": "High-frequency metrics, downsampling, retention", "architecture_decision": "Time-series aggregation with rollup", "components": {"MetricCollector": "Metric ingestion", "RollupProcessor": "Data rollup", "RetentionManager": "Data retention", "QueryEngine": "Optimized queries"}, "interfaces": {"Ingestion API": "Metric submission", "Query API": "Metric queries"}, "tradeoffs": "Scalable storage, efficient queries, managed retention; Processing overhead, complexity", "anti_patterns_avoided": ["Raw storage only", "No rollup", "Unbounded growth"], "code_example": "public async Task AggregateAsync(Metric metric) { await _rollup.ProcessAsync(metric); }"}
{"requirement": "Design a chaos engineering platform", "context": "Resilience testing, failure injection", "architecture_decision": "Controlled fault injection", "components": {"ExperimentRunner": "Chaos experiments", "FaultInjector": "Fault injection", "MetricsCollector": "Impact measurement", "RollbackManager": "Auto-rollback"}, "interfaces": {"Experiment API": "Run experiments", "Metrics API": "Impact metrics"}, "tradeoffs": "Resilience validation, controlled failures, learning; Risk, complexity", "anti_patterns_avoided": ["Uncontrolled failures", "No monitoring", "Production risks"], "code_example": "public async Task<ExperimentResult> RunExperimentAsync(ExperimentConfig config) { /* chaos experiment */ }"}
{"requirement": "Build a GraphQL query complexity analyzer", "context": "Query optimization, resource protection", "architecture_decision": "AST-based complexity calculation", "components": {"Parser": "Query parsing", "ComplexityCalculator": "Complexity scoring", "Validator": "Query validation", "Limiter": "Complexity limits"}, "interfaces": {"Analysis API": "Analyze queries", "Validation API": "Validate complexity"}, "tradeoffs": "Resource protection, optimization insights; Calculation overhead, complexity", "anti_patterns_avoided": ["No limits", "Ignoring depth", "Unbounded queries"], "code_example": "public int AnalyzeComplexity(DocumentNode query) { /* complexity calculation */ }"}
{"requirement": "Design a database sharding system", "context": "High write volume, horizontal scaling", "architecture_decision": "Consistent hashing with rebalancing", "components": {"ShardRouter": "Shard routing", "ShardManager": "Shard management", "Rebalancer": "Shard rebalancing", "MigrationTool": "Data migration"}, "interfaces": {"Router API": "Route queries", "Shard API": "Manage shards"}, "tradeoffs": "Horizontal scaling, distributed load; Query complexity, rebalancing overhead", "anti_patterns_avoided": ["Single shard", "Hot spots", "No rebalancing"], "code_example": "public async Task<IDbConnection> RouteAsync(Guid id) { var shard = _router.GetShard(id); }"}
{"requirement": "Build a container orchestration adapter", "context": "Kubernetes deployment, scaling, management", "architecture_decision": "Operator pattern with CRDs", "components": {"Operator": "Custom resource management", "Controller": "Reconciliation loop", "ResourceManager": "Resource allocation", "StatusReporter": "Status reporting"}, "interfaces": {"CRD API": "Custom resources", "Status API": "Resource status"}, "tradeoffs": "Kubernetes-native, declarative, automated; K8s dependency, complexity", "anti_patterns_avoided": ["Imperative management", "No reconciliation", "Missing status"], "code_example": "public class CustomOperator : Operator<CustomResource> { protected override async Task ReconcileAsync(CustomResource resource) { /* reconciliation */ } }"}
{"requirement": "Design a GraphQL error handling system", "context": "User-friendly errors, debugging info", "architecture_decision": "Structured errors with extensions", "components": {"ErrorFormatter": "Error formatting", "ErrorHandler": "Error catching", "ExtensionsBuilder": "Error extensions", "Reporter": "Error reporting"}, "interfaces": {"Error API": "Error handling", "Reporting API": "Error reporting"}, "tradeoffs": "User-friendly, debuggable, structured; Format complexity, overhead", "anti_patterns_avoided": ["Stack traces to users", "Generic errors", "Missing context"], "code_example": "public GraphQLError FormatError(Exception ex) { return new GraphQLError(\"User-friendly message\", extensions: new { code = \"ERROR_CODE\" }); }"}
{"requirement": "Build a feature experimentation platform", "context": "A/B testing, feature flags, analytics", "architecture_decision": "Experiment framework with analytics", "components": {"ExperimentEngine": "Experiment execution", "VariantAssigner": "Variant assignment", "MetricsCollector": "Metrics tracking", "Analyzer": "Results analysis"}, "interfaces": {"Experiment API": "Manage experiments", "Assignment API": "Get variants"}, "tradeoffs": "Data-driven decisions, flexible, measurable; Analysis complexity, overhead", "anti_patterns_avoided": ["No metrics", "Bias in assignment", "Short experiments"], "code_example": "public async Task<string> AssignVariantAsync(string experiment, string userId) { /* variant assignment */ }"}
{"requirement": "Design a service level objective system", "context": "SLO monitoring, alerting, compliance", "architecture_decision": "Time-window error budget calculation", "components": {"SLICalculator": "SLO measurement", "ErrorBudget": "Error budget tracking", "AlertManager": "Budget alerts", "Reporter": "SLO reporting"}, "interfaces": {"SLO API": "Define SLOs", "Query API": "Query SLO status"}, "tradeoffs": "Clear objectives, automated alerting, data-driven; Configuration complexity", "anti_patterns_avoided": ["No SLOs", "Wrong metrics", "No alerting"], "code_example": "public async Task<double> CalculateSLOAsync(string service, TimeSpan window) { /* SLO calculation */ }"}
{"requirement": "Build a GraphQL persisted query system", "context": "Performance optimization, security", "architecture_decision": "Query registry with hashes", "components": {"QueryRegistry": "Query storage", "HashValidator": "Query validation", "Executor": "Query execution", "Cache": "Query caching"}, "interfaces": {"Registration API": "Register queries", "Execution API": "Execute queries"}, "tradeoffs": "Secure, performant, cacheable; Registration overhead, complexity", "anti_patterns_avoided": ["Allowing all queries", "No validation", "Missing hashes"], "code_example": "public async Task<GraphQLResult> ExecutePersistedAsync(string hash, Variables variables) { /* persisted execution */ }"}
{"requirement": "Design a microservice communication pattern", "context": "Service composition, data aggregation", "architecture_decision": "GraphQL federation with entity resolution", "components": {"Gateway": "Query orchestration", "Subgraph": "Service-specific GraphQL", "EntityResolver": "Cross-service resolution", "Composer": "Response composition"}, "interfaces": {"Gateway API": "Unified endpoint", "Subgraph API": "Service schemas"}, "tradeoffs": "Unified API, service autonomy, flexible; Network latency, complexity", "anti_patterns_avoided": ["Tight coupling", "No federation", "Missing entities"], "code_example": "[ReferenceResolver]\npublic async Task<Product> ResolveProductAsync(IEnumerable<string> ids) { /* entity resolution */ }"}
{"requirement": "Build a distributed transaction coordinator", "context": "Multi-service transactions, consistency", "architecture_decision": "Saga pattern with compensation", "components": {"SagaCoordinator": "Saga execution", "StepExecutor": "Step execution", "Compensator": "Compensation logic", "StateStore": "Saga state"}, "interfaces": {"Saga API": "Execute sagas", "Compensation API": "Compensation logic"}, "tradeoffs": "Distributed transactions, compensatable, auditable; Complexity, eventual consistency", "anti_patterns_avoided": ["2PC", "No compensation", "Missing state"], "code_example": "public async Task<SagaResult> ExecuteAsync(SagaDefinition saga) { /* saga execution */ }"}
{"requirement": "Design a GraphQL subscription resolver", "context": "Real-time updates, filtered subscriptions", "architecture_decision": "WebSocket with event filtering", "components": {"SubscriptionManager": "Subscription lifecycle", "EventFilter": "Event filtering", "Publisher": "Event publishing", "ConnectionManager": "Connection management"}, "interfaces": {"Subscription API": "Subscribe/unsubscribe", "Publish API": "Publish events"}, "tradeoffs": "Real-time, filtered, scalable; State management, WebSocket complexity", "anti_patterns_avoided": ["No filtering", "Unbounded events", "Missing cleanup"], "code_example": "public async Task<ISubscription> SubscribeAsync(string query, IObserver<object> observer) { /* subscription logic */ }"}
{"requirement": "Build a GraphQL caching layer", "context": "Performance optimization, reduced database load", "architecture_decision": "Response caching with query normalization", "components": {"CacheKeyGenerator": "Query-based cache keys", "ResponseCache": "Cached responses", "InvalidationService": "Cache invalidation", "QueryNormalizer": "Query normalization"}, "interfaces": {"Cache API": "Cache operations", "Invalidation API": "Invalidation triggers"}, "tradeoffs": "Improved performance, reduced load; Cache complexity, staleness", "anti_patterns_avoided": ["Caching mutations", "No invalidation", "TTL only"], "code_example": "public async Task<GraphQLResult> ExecuteCachedAsync(string query, Variables vars) { var key = _normalizer.Normalize(query); }"}
{"requirement": "Design a database read replica system", "context": "High read volume, read scalability", "architecture_decision": "Primary-replica replication with read routing", "components": {"ReplicaManager": "Replica health tracking", "ReadRouter": "Read query routing", "ReplicationMonitor": "Replication lag monitoring", "FailoverHandler": "Replica failover"}, "interfaces": {"Router API": "Query routing", "Health API": "Replica health"}, "tradeoffs": "Read scalability, improved performance; Replication lag, complexity", "anti_patterns_avoided": ["Writes to replicas", "Ignoring lag", "No health checks"], "code_example": "public async Task<TResult> ExecuteReadAsync<TResult>(Func<Task<TResult>> query) { var replica = _router.SelectReplica(); /* execute query against selected replica */ }"}
{"requirement": "Build a GraphQL batch query executor", "context": "N+1 query prevention, batch operations", "architecture_decision": "DataLoader with batch execution", "components": {"DataLoader": "Batch loading", "BatchExecutor": "Batch query execution", "CacheManager": "Result caching", "Scheduler": "Batch scheduling"}, "interfaces": {"Loader API": "Load entities", "Batch API": "Batch execution"}, "tradeoffs": "Reduced queries, better performance; Complexity, caching challenges", "anti_patterns_avoided": ["N+1 queries", "No batching", "Missing cache"], "code_example": "public class UserDataLoader : DataLoaderBase<Guid, User> { protected override async Task<IReadOnlyDictionary<Guid, User>> LoadBatchAsync(IReadOnlyList<Guid> ids) { /* batch fetch users by id */ } }"}
{"requirement": "Design a microservice API composition", "context": "Aggregating data from multiple services", "architecture_decision": "GraphQL gateway with federation", "components": {"Gateway": "Query orchestration", "SubgraphClient": "Service clients", "Composer": "Response composition", "Cache": "Aggregated cache"}, "interfaces": {"Gateway API": "Unified endpoint", "Composition API": "Data composition"}, "tradeoffs": "Unified API, flexible; Network overhead, complexity", "anti_patterns_avoided": ["Tight coupling", "No caching", "Sequential calls"], "code_example": "public async Task<Product> GetProductAsync(Guid id) { var productTask = _productClient.GetAsync(id); var reviewsTask = _reviewsClient.GetAsync(id); await Task.WhenAll(productTask, reviewsTask); /* compose product with reviews */ }"}
{"requirement": "Build a GraphQL schema stitching system", "context": "Combining multiple GraphQL services", "architecture_decision": "Schema stitching with type merging", "components": {"SchemaStitcher": "Schema composition", "TypeMerger": "Type merging", "DelegateResolver": "Query delegation", "TransformRegistry": "Schema transforms"}, "interfaces": {"Stitching API": "Schema stitching", "Delegation API": "Query delegation"}, "tradeoffs": "Unified schema, service autonomy; Complexity, network overhead", "anti_patterns_avoided": ["Monolithic schema", "No delegation", "Type conflicts"], "code_example": "public StitchingBuilder AddService(string name, string url) { /* add subgraph */ }"}
{"requirement": "Design a GraphQL real-time subscription system", "context": "Live updates, event-driven subscriptions", "architecture_decision": "WebSocket with event streaming", "components": {"SubscriptionManager": "Subscription lifecycle", "EventStreamer": "Event streaming", "FilterEngine": "Event filtering", "ConnectionPool": "Connection management"}, "interfaces": {"Subscription API": "Manage subscriptions", "Event API": "Event publishing"}, "tradeoffs": "Real-time updates, scalable; Connection complexity, state management", "anti_patterns_avoided": ["No filtering", "Unbounded events", "Missing cleanup"], "code_example": "public async Task<ISubscription> SubscribeAsync(SubscriptionRequest request, IObserver<GraphQLResult> observer) { /* subscription logic */ }"}