err) => err.message.includes('5') || err.name === 'AbortError'),
};
}
async run<T>(task: (signal: AbortSignal) => Promise<T>): Promise<T> {
const controller = new AbortController();
const deadlineTimer = setTimeout(() => controller.abort(), this.config.deadlineMs);
try {
for (let attempt = 1; attempt <= this.config.maxAttempts; attempt++) {
try {
return await task(controller.signal);
} catch (err) {
const error = err as Error;
if (attempt === this.config.maxAttempts || !this.config.isRetryable(error)) {
throw error;
}
const delay = Math.min(
this.config.baseDelayMs * Math.pow(2, attempt - 1),
this.config.maxDelayMs
);
await new Promise((res) => setTimeout(res, delay));
}
}
throw new Error('Execution exhausted without success');
} finally {
clearTimeout(deadlineTimer);
}
}
}
**Architecture Rationale:**
- `AbortController` is integrated natively, allowing downstream fetchers to cancel pending work when the deadline expires.
- Backoff is capped to prevent thundering herd scenarios during widespread outages.
- Error classification (`isRetryable`) prevents retrying non-idempotent or client-side validation errors.
### 2. Request Deduplication & In-Flight Caching
Concurrent components often request identical data. Without deduplication, you trigger redundant network calls and waste bandwidth. The following cache stores pending promises, ensuring only one request executes per key.
```typescript
interface CacheEntry<T> {
  promise: Promise<T>;
  createdAt: number;
}

/**
 * Collapses concurrent requests for the same key onto a single in-flight
 * promise. An entry is considered fresh for `ttlMs` after creation and is
 * evicted when its promise settles.
 */
class RequestDeduplicator<T> {
  private store = new Map<string, CacheEntry<T>>();
  private ttlMs: number;

  constructor(ttlMs = 5000) {
    this.ttlMs = ttlMs;
  }

  /**
   * Returns the cached in-flight promise for `key` when one exists and is
   * still fresh; otherwise invokes `fetcher`, caches its promise, and
   * returns it. Concurrent callers with the same key share one execution.
   */
  async resolve(key: string, fetcher: () => Promise<T>): Promise<T> {
    const existing = this.store.get(key);
    const isFresh = existing && Date.now() - existing.createdAt < this.ttlMs;
    if (isFresh) return existing.promise;

    const freshPromise = fetcher().finally(() => {
      // Fix: only evict the entry this promise created. A stale promise that
      // settles late must not delete a newer entry that has since replaced
      // it, which would silently break deduplication for in-flight callers.
      if (this.store.get(key)?.promise === freshPromise) {
        this.store.delete(key);
      }
    });
    this.store.set(key, { promise: freshPromise, createdAt: Date.now() });
    return freshPromise;
  }

  /** Drops any cached entry for `key`, forcing the next resolve to refetch. */
  invalidate(key: string): void {
    this.store.delete(key);
  }
}
```
**Architecture Rationale:**
- Returns the exact same promise reference for concurrent identical calls, guaranteeing single execution.
- `.finally()` ensures cache cleanup occurs regardless of success or failure, preventing memory leaks.
- TTL is evaluated on access, not on a background timer, reducing CPU overhead.
### 3. Concurrency Gating & Sequential Queuing
Unbounded concurrency destroys performance. Two complementary patterns solve this: a concurrency gate for parallelizable work, and a sequential queue for order-dependent mutations.
/**
 * Caps the number of concurrently executing async tasks. Callers over the
 * limit wait until an in-flight task settles before starting their own.
 */
class ConcurrencyGate {
  private active = new Set<Promise<unknown>>();
  private limit: number;

  constructor(limit: number) {
    this.limit = limit;
  }

  /**
   * Runs `task` once a slot is free. The returned promise settles with the
   * task's own result or rejection.
   */
  async execute<T>(task: () => Promise<T>): Promise<T> {
    // Fix: re-check after every wake-up. One settling task releases *all*
    // waiters, so a single `if` would let the pool overshoot `limit`.
    while (this.active.size >= this.limit) {
      // Fix: swallow the raced rejection — a failing *other* task must not
      // make this caller's execute() reject before its own task even starts.
      await Promise.race(this.active).catch(() => {});
    }
    const tracked = task().finally(() => this.active.delete(tracked));
    this.active.add(tracked);
    return tracked;
  }
}
/**
 * Executes enqueued async tasks strictly one at a time, in FIFO order.
 * Each caller receives a promise that settles with its own task's outcome.
 */
class SequentialQueue<T> {
  private pending: Array<{ task: () => Promise<T>; resolve: (v: T) => void; reject: (e: Error) => void }> = [];
  private running = false;

  /** Adds `task` to the queue; resolves/rejects once that task has run. */
  async enqueue(task: () => Promise<T>): Promise<T> {
    return new Promise<T>((resolve, reject) => {
      this.pending.push({ task, resolve, reject });
      this.drain();
    });
  }

  /** Processes queued entries until empty; re-entrant calls are no-ops. */
  private async drain(): Promise<void> {
    if (this.running) return;
    if (this.pending.length === 0) return;
    this.running = true;
    // shift() returns undefined exactly when the queue is empty, ending the loop.
    for (let entry = this.pending.shift(); entry !== undefined; entry = this.pending.shift()) {
      try {
        const value = await entry.task();
        entry.resolve(value);
      } catch (err) {
        entry.reject(err as Error);
      }
    }
    this.running = false;
  }
}
**Architecture Rationale:**
- `ConcurrencyGate` uses `Promise.race` to yield control when the slot limit is reached, avoiding busy-waiting.
- `SequentialQueue` guarantees FIFO execution, critical for database writes or state mutations where order determines correctness.
- Both patterns isolate task execution from scheduling logic, making them composable.
### 4. Deterministic Polling & Event Signaling
Long-running operations require polling, and decoupled components require event broadcasting. Both must handle lifecycle cleanup and error states explicitly.
interface PollConfig {
  intervalMs: number;
  // null means poll indefinitely until stopPredicate accepts a result.
  maxIterations: number | null;
  stopPredicate: (result: unknown) => boolean;
  // 'abort' rethrows, 'returnError' returns a structured error, 'continue' keeps polling.
  onError: 'abort' | 'continue' | 'returnError';
}

/**
 * Polls `fetcher` at a fixed interval until `stopPredicate` accepts a
 * result, the iteration budget runs out, or an error is handled per
 * `config.onError`.
 */
class IntervalPoller {
  /**
   * @returns Structured outcome: 'complete' with the accepted data,
   * 'exhausted' when maxIterations is reached, or 'error' (onError =
   * 'returnError'). With onError = 'abort', the fetcher's error is rethrown.
   */
  static async monitor<T>(
    fetcher: () => Promise<T>,
    config: PollConfig
  ): Promise<{ status: 'complete' | 'exhausted' | 'error'; data?: T; error?: Error; iterations: number }> {
    let iterations = 0;
    while (config.maxIterations === null || iterations < config.maxIterations) {
      iterations++;
      try {
        const result = await fetcher();
        if (config.stopPredicate(result)) {
          return { status: 'complete', data: result, iterations };
        }
      } catch (err) {
        if (config.onError === 'abort') throw err;
        if (config.onError === 'returnError') return { status: 'error', error: err as Error, iterations };
        // 'continue': swallow and keep polling.
      }
      // Fix: only sleep when another iteration remains. The original slept
      // after the final iteration too, delaying the 'exhausted' return by a
      // full interval for no benefit.
      const hasNext = config.maxIterations === null || iterations < config.maxIterations;
      if (hasNext) {
        await new Promise((r) => setTimeout(r, config.intervalMs));
      }
    }
    return { status: 'exhausted', iterations };
  }
}
/**
 * Minimal synchronous pub/sub bus. subscribe() returns an unsubscribe
 * function for easy lifecycle cleanup (e.g. React useEffect teardown).
 */
class SignalBus {
  private registry = new Map<string, Set<(...args: unknown[]) => void>>();

  /** Registers `handler` for `event`; returns a function that removes it. */
  subscribe(event: string, handler: (...args: unknown[]) => void): () => void {
    const handlers = this.registry.get(event) ?? new Set();
    handlers.add(handler);
    this.registry.set(event, handlers);
    return () => this.unsubscribe(event, handler);
  }

  /** Removes a previously registered handler; no-op if absent. */
  unsubscribe(event: string, handler: (...args: unknown[]) => void): void {
    this.registry.get(event)?.delete(handler);
  }

  /** Synchronously invokes every handler registered for `event`. */
  broadcast(event: string, ...payload: unknown[]): void {
    const handlers = this.registry.get(event);
    if (!handlers) return;
    // Fix: iterate a snapshot. Set iteration visits elements added during
    // the walk, so a handler that subscribes another handler mid-broadcast
    // would otherwise trigger it in the same pass (possibly unboundedly).
    for (const handler of [...handlers]) {
      handler(...payload);
    }
  }
}
**Architecture Rationale:**
- Polling returns a structured status object, eliminating ambiguous null/undefined returns.
- `SignalBus` returns an unsubscribe function, enabling React `useEffect` cleanup patterns without manual tracking.
- Error handling in polling is explicit, preventing silent failures during long-running checks.
### Pitfall Guide
| Pitfall | Explanation | Production Fix |
|---|---|---|
| Unbounded Concurrency Storms | Mapping over large arrays and firing all promises simultaneously exhausts connection pools and triggers rate limits. | Wrap execution in ConcurrencyGate or batch processing. Limit to 3-5 concurrent operations for network I/O. |
| Dangling Promise Chains | Forgetting to attach .catch() or try/catch leaves rejected promises unhandled, causing memory leaks and Node.js crashes. | Always chain .catch() or use try/catch. Enable unhandledrejection listeners in dev to catch leaks early. |
| Race Conditions on Shared State | Multiple async operations read/write the same variable without synchronization, causing corrupted state. | Use SequentialQueue for mutations, or adopt atomic state managers (Zustand, Redux Toolkit) with middleware. |
| Retry Storms on Non-Idempotent Endpoints | Retrying POST/PUT requests after network timeouts can duplicate data or charge users twice. | Classify errors: only retry 5xx/timeout. For 4xx or client errors, fail fast. Implement idempotency keys on the backend. |
| Memory Leaks in Event Listeners | Subscribing to events without cleanup accumulates handlers, degrading performance over time. | Always capture the unsubscribe function returned by .subscribe() and call it on component unmount or scope exit. |
| Blocking the Event Loop | Running heavy synchronous computation inside async chains stalls the main thread, freezing UI. | Offload CPU-bound work to Web Workers or setTimeout chunking. Keep promise chains I/O focused. |
| Ignoring Cancellation Signals | Continuing fetches after a component unmounts or user navigates away wastes bandwidth and causes state updates on dead references. | Pass AbortController.signal to all fetchers. Check signal.aborted before resolving state. |
## Production Bundle
### Action Checklist & Decision Matrix
| Scenario | Recommended Approach | Why | Cost Impact |
|---|---|---|---|
| Fetching independent dashboard widgets | Promise.all() with timeout wrapper | Maximizes parallelism; fails fast if critical data is missing | Low (standard network cost) |
| Bulk data synchronization (100+ items) | ConcurrencyGate + batch processing | Prevents rate limiting and memory spikes | Medium (slightly longer total time, but stable) |
| User form submission with validation | SequentialQueue | Guarantees order; prevents duplicate submissions | Low |
| Real-time job status tracking | IntervalPoller with stop predicate | Predictable resource usage; explicit completion states | Low |
| Decoupled micro-frontend communication | SignalBus with unsubscribe cleanup | Avoids tight coupling; safe lifecycle management | Low |
| Third-party API with known flakiness | ResilientExecutor + idempotency keys | Handles transient failures without data corruption | Medium (retry overhead) |
### Configuration Template
// async-config.ts
// Central tuning knobs for the async control-plane utilities.
export const ASYNC_CONFIG = {
  resilience: {
    maxAttempts: 3,
    baseDelayMs: 800,
    maxDelayMs: 5000,
    deadlineMs: 15000,
    // Fix: match an actual 5xx status code or a timeout, not any message
    // containing the digit '5' (the original `includes('5')` would retry
    // e.g. "validation failed on row 5"). AbortError stays retryable so a
    // per-attempt timeout abort can be retried within the overall deadline.
    isRetryable: (err: Error) =>
      /\b5\d{2}\b/.test(err.message) || err.name === 'AbortError' || /timeout/i.test(err.message),
  },
  cache: {
    ttlMs: 8000,
    maxKeys: 50, // Prevent unbounded memory growth
  },
  concurrency: {
    networkLimit: 4, // Cap for parallel network I/O
    cpuLimit: 2, // Cap for parallel CPU-bound work
  },
  polling: {
    intervalMs: 2000,
    maxIterations: 30,
    onError: 'continue' as const,
  },
};
### Quick Start Guide
- **Initialize the control plane:** Import `ResilientExecutor`, `RequestDeduplicator`, and `ConcurrencyGate`. Instantiate them with your `ASYNC_CONFIG` values.
- **Wrap network calls:** Replace raw `fetch()` or axios calls with `executor.run(() => fetcher(signal))` and route identical keys through `deduplicator.resolve()`.
- **Gate batch operations:** Map your task array to `() => gate.execute(task)` and await `Promise.all()` on the resulting promises.
- **Add observability:** Attach `.then()`/`.catch()` handlers that log execution time, attempt count, and failure reasons to your monitoring system.
- **Validate under load:** Use tools like k6 or autocannon to simulate 50-100 concurrent requests. Verify that concurrency caps hold, retries back off correctly, and memory remains stable.
These patterns transform JavaScript's promise model from a reactive utility into a deterministic control system. By enforcing boundaries, classifying failures, and managing lifecycle explicitly, you build async workflows that degrade gracefully, consume resources predictably, and maintain data integrity under production stress.