nal });
      if (!response.ok) {
        throw new NetworkError(response.status, await this.extractErrorBody(response));
      }
      return (await response.json()) as T;
    } finally {
      clearTimeout(timer);
    }
  }

  private async extractErrorBody(response: Response): Promise<Record<string, unknown>> {
    try {
      return await response.json();
    } catch {
      return { message: response.statusText };
    }
  }
}
```
**Architecture Rationale:** The `finally` block guarantees timer cleanup regardless of success or failure. `AbortController` is wired directly to the request, ensuring the browser drops the connection immediately on timeout. Error extraction is isolated to prevent JSON parsing failures from masking the original HTTP error.
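For orientation, the wiring described above pairs a timer with the controller's signal. The following is a standalone sketch of that pattern under assumed parameter names, not the exact opening of `execute()`:

```typescript
// Sketch of the timeout pattern described above; names are illustrative.
async function fetchWithTimeout<T>(url: string, timeoutMs: number, init?: RequestInit): Promise<T> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs); // aborts the request on timeout
  try {
    const response = await fetch(url, { ...init, signal: controller.signal });
    if (!response.ok) throw new Error(`HTTP ${response.status}`);
    return (await response.json()) as T;
  } finally {
    clearTimeout(timer); // guaranteed cleanup, mirroring the finally block above
  }
}
```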
### Step 2: Typed Error Hierarchy
Catching generic `Error` objects forces developers to parse strings or check `name` properties. A structured error hierarchy enables precise handling.
```typescript
class NetworkError extends Error {
  constructor(readonly statusCode: number, readonly payload: Record<string, unknown>) {
    super(`HTTP ${statusCode}`);
    this.name = 'NetworkError';
  }
}

class TimeoutError extends Error {
  constructor(readonly durationMs: number) {
    super(`Request exceeded ${durationMs}ms`);
    this.name = 'TimeoutError';
  }
}
```

**Architecture Rationale:** Extending `Error` with `readonly` properties preserves stack traces while attaching machine-readable context. This allows UI layers to map `statusCode` to user-facing messages without string matching.
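As a usage sketch, the UI layer can branch on the error class and `statusCode` instead of matching message strings; the messages below are illustrative, not part of the error classes:

```typescript
// Hypothetical mapping from typed errors to user-facing copy.
function toUserMessage(err: unknown): string {
  if (err instanceof TimeoutError) {
    return 'The server took too long to respond. Please try again.';
  }
  if (err instanceof NetworkError) {
    const byStatus: Record<number, string> = {
      401: 'Your session has expired. Please sign in again.',
      403: 'You do not have permission to perform this action.',
      404: 'The requested resource could not be found.',
    };
    return byStatus[err.statusCode] ?? `Request failed (HTTP ${err.statusCode}).`;
  }
  return 'An unexpected error occurred.';
}
```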
### Step 3: Retry Orchestration with Jitter
Linear backoff causes thundering herd problems when multiple clients retry simultaneously. Adding jitter randomizes the delay, distributing load across the server cluster.
```typescript
interface RetryPolicy {
  maxAttempts: number;
  baseDelayMs: number;
  factor: number;
  retryableCodes: number[];
}

class RetryOrchestrator {
  private policy: RetryPolicy;

  constructor(policy: RetryPolicy) {
    this.policy = policy;
  }

  async execute<T>(transport: HttpTransport, endpoint: string, init?: RequestInit): Promise<T> {
    let lastError: Error | null = null;
    // attempt runs 0..maxAttempts-1, so maxAttempts is the total number of tries
    for (let attempt = 0; attempt < this.policy.maxAttempts; attempt++) {
      try {
        return await transport.execute<T>(endpoint, init);
      } catch (err) {
        lastError = err as Error;
        if (err instanceof NetworkError && !this.policy.retryableCodes.includes(err.statusCode)) {
          throw err; // Non-retryable client error
        }
        if (err instanceof TimeoutError) {
          throw err; // Timeouts usually indicate network partition, not server load
        }
      }
      if (attempt < this.policy.maxAttempts - 1) {
        const delay = this.policy.baseDelayMs * Math.pow(this.policy.factor, attempt);
        const jitter = Math.random() * 500;
        await new Promise(res => setTimeout(res, delay + jitter));
      }
    }
    throw lastError!;
  }
}
```
**Architecture Rationale:** The orchestrator delegates execution to the transport, intercepting failures before they bubble up. Jitter (`Math.random() * 500`) prevents synchronized retries. Client errors outside the retryable list (most 4xx codes) are immediately re-thrown to avoid wasting bandwidth on invalid requests.
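A brief usage sketch, assuming the `transport` instance from Step 1; the policy values mirror the configuration template later in this guide:

```typescript
// Illustrative wiring: a 503 is retried with backoff, a 404 throws immediately.
const orchestrator = new RetryOrchestrator({
  maxAttempts: 3,
  baseDelayMs: 800,
  factor: 2,
  retryableCodes: [408, 429, 500, 502, 503, 504],
});

const profile = await orchestrator.execute<{ id: string; name: string }>(transport, '/users/me');
```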
### Step 4: In-Memory Cache with Deduplication
Simple TTL caches suffer from stampedes: identical requests fire simultaneously before the first response populates the cache. Request deduplication solves this.
```typescript
interface CacheEntry<T> {
  data: T;
  expiresAt: number;
}

class CacheRegistry {
  private store = new Map<string, CacheEntry<unknown>>();
  private pending = new Map<string, Promise<unknown>>();

  async get<T>(key: string, ttlMs: number, fetchFn: () => Promise<T>): Promise<T> {
    const cached = this.store.get(key) as CacheEntry<T> | undefined;
    if (cached && Date.now() < cached.expiresAt) {
      return cached.data;
    }
    if (this.pending.has(key)) {
      return this.pending.get(key) as Promise<T>;
    }
    const promise = fetchFn().then(data => {
      this.store.set(key, { data, expiresAt: Date.now() + ttlMs });
      this.pending.delete(key);
      return data;
    }).catch(err => {
      this.pending.delete(key);
      throw err;
    });
    this.pending.set(key, promise);
    return promise;
  }

  invalidate(key: string): void {
    this.store.delete(key);
    this.pending.delete(key);
  }
}
```
**Architecture Rationale:** The `pending` map holds unresolved promises. Subsequent calls for the same key receive the same promise, guaranteeing a single network request. Error handling removes the pending entry so failed requests can be retried later.
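To see the deduplication in action, two overlapping reads of the same key share a single network request; the endpoint, key, and 30-second TTL are arbitrary examples:

```typescript
const registry = new CacheRegistry();
const loadUser = () => fetch('/api/users/42').then(r => r.json() as Promise<Record<string, unknown>>);

// Both calls start before the first response arrives, so loadUser runs only once
// and both promises resolve with the same payload.
const [first, second] = await Promise.all([
  registry.get('user:42', 30_000, loadUser),
  registry.get('user:42', 30_000, loadUser),
]);
```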
### Step 5: Upload Progress Bridge
The Fetch specification deliberately excludes upload progress events for security and streaming complexity. Production apps must bridge to XMLHttpRequest while maintaining a Promise-based API.
```typescript
interface UploadProgress {
  percent: number;
  loaded: number;
  total: number;
}

function uploadWithProgress(file: File, url: string, onProgress: (p: UploadProgress) => void): Promise<Record<string, unknown>> {
  return new Promise((resolve, reject) => {
    const xhr = new XMLHttpRequest();
    xhr.open('POST', url);
    xhr.upload.onprogress = (e) => {
      if (e.lengthComputable) {
        onProgress({
          percent: Math.round((e.loaded / e.total) * 100),
          loaded: e.loaded,
          total: e.total
        });
      }
    };
    xhr.onload = () => {
      if (xhr.status >= 200 && xhr.status < 300) {
        try {
          resolve(JSON.parse(xhr.responseText));
        } catch {
          reject(new Error('Upload succeeded but the response was not valid JSON'));
        }
      } else {
        reject(new Error(`Upload failed: ${xhr.status}`));
      }
    };
    xhr.onerror = () => reject(new Error('Network failure during upload'));
    xhr.send(file);
  });
}
```
**Architecture Rationale:** Wrapping XHR in a Promise preserves async/await ergonomics while exposing granular progress data. The bridge is isolated to upload operations; downloads and standard requests remain on `fetch` for better streaming support.
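A usage sketch, assuming a file input and a `<progress>` element exist in the markup; the element IDs and endpoint are made up:

```typescript
const input = document.querySelector<HTMLInputElement>('#avatar-input');
const bar = document.querySelector<HTMLProgressElement>('#upload-progress');

input?.addEventListener('change', async () => {
  const file = input.files?.[0];
  if (!file) return;
  const result = await uploadWithProgress(file, '/api/uploads', (p) => {
    if (bar) {
      bar.max = 100;
      bar.value = p.percent; // drive the progress bar from the callback
    }
  });
  console.log('Upload complete:', result);
});
```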
### Step 6: File Download with Memory Management
Blob URLs consume memory until explicitly revoked. Forgetting cleanup causes gradual heap growth in long-running SPAs.
```typescript
async function downloadResource(url: string, filename: string): Promise<void> {
  const response = await fetch(url);
  if (!response.ok) throw new Error(`Download failed: ${response.status}`);
  const blob = await response.blob();
  const objectUrl = URL.createObjectURL(blob);
  const anchor = document.createElement('a');
  anchor.href = objectUrl;
  anchor.download = filename;
  anchor.style.display = 'none';
  document.body.appendChild(anchor);
  anchor.click();
  // Cleanup sequence
  document.body.removeChild(anchor);
  URL.revokeObjectURL(objectUrl);
}
```
**Architecture Rationale:** The anchor is appended to the DOM to satisfy browser security requirements for programmatic clicks. Immediate removal and `revokeObjectURL` prevent memory leaks. The blob is discarded after the click, freeing the underlying memory.
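If you want the cleanup guaranteed even when the DOM manipulation throws (the approach recommended under pitfall 2 below), a try/finally variant of the same function could look like this sketch:

```typescript
async function downloadResourceSafe(url: string, filename: string): Promise<void> {
  const response = await fetch(url);
  if (!response.ok) throw new Error(`Download failed: ${response.status}`);
  const objectUrl = URL.createObjectURL(await response.blob());
  const anchor = document.createElement('a');
  try {
    anchor.href = objectUrl;
    anchor.download = filename;
    anchor.style.display = 'none';
    document.body.appendChild(anchor);
    anchor.click();
  } finally {
    // Runs even if appendChild or click throws, so no blob URL is orphaned.
    anchor.remove();
    URL.revokeObjectURL(objectUrl);
  }
}
```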
## Pitfall Guide

**1. Assuming Fetch Rejects on HTTP Errors**

**Explanation:** `fetch` only rejects on network failures or aborted requests. A 404 or 500 resolves successfully with `response.ok === false`.

**Fix:** Always check `response.ok` or `response.status` before parsing. Wrap the failure in a typed error class to standardize handling.
**2. Memory Leaks from Blob URLs**

**Explanation:** `URL.createObjectURL` allocates memory that persists until `revokeObjectURL` is called or the document unloads. Long-running apps accumulate orphaned blobs.

**Fix:** Pair every `createObjectURL` with a synchronous `revokeObjectURL` after the download trigger. Use try/finally to guarantee cleanup.
**3. Thundering Herd from Linear Retries**

**Explanation:** Multiple clients retrying at fixed intervals synchronize their requests, overwhelming the server during recovery.

**Fix:** Implement exponential backoff with random jitter. Add a maximum delay cap to prevent excessive wait times.
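A sketch of a capped, jittered delay calculation; the 10-second ceiling and the "full jitter" variant are illustrative choices, not values taken from the retry code above:

```typescript
// Exponential backoff with a hard cap and full jitter
// (delay drawn uniformly from [0, cappedExponential)).
function backoffDelay(attempt: number, baseMs = 800, factor = 2, capMs = 10_000): number {
  const capped = Math.min(capMs, baseMs * Math.pow(factor, attempt));
  return Math.random() * capped;
}
```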
**4. Cache Stampedes**

**Explanation:** When a cache entry expires, dozens of concurrent requests fire identical network calls, wasting bandwidth and CPU.

**Fix:** Maintain a pending promise map. Route duplicate requests to the in-flight promise instead of spawning new fetches.
**5. Orphaned Abort Signals**

**Explanation:** Creating `AbortController` instances without clearing timeouts or handling `AbortError` leaves timers running and consumes event loop cycles.

**Fix:** Always clear the timeout in a `finally` block. Catch `AbortError` separately to distinguish timeouts from network failures.
**6. Blocking the Main Thread with Synchronous Patterns**

**Explanation:** Using synchronous XHR or heavy JSON parsing on the main thread causes jank, especially with large payloads.

**Fix:** Keep all network operations asynchronous. For massive JSON responses, consider streaming parsers or Web Workers for deserialization.
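A minimal sketch of off-main-thread parsing with a module worker; the worker file name and the bundler-style `new URL(..., import.meta.url)` resolution are assumptions about your build setup:

```typescript
// json-parse.worker.ts (hypothetical file), containing only:
// self.onmessage = (e: MessageEvent<string>) => {
//   (self as unknown as Worker).postMessage(JSON.parse(e.data));
// };

// Main thread: hand the raw text to the worker and await the parsed object.
function parseOffThread<T>(rawJson: string): Promise<T> {
  return new Promise((resolve, reject) => {
    const worker = new Worker(new URL('./json-parse.worker.ts', import.meta.url), { type: 'module' });
    worker.onmessage = (e: MessageEvent<T>) => { resolve(e.data); worker.terminate(); };
    worker.onerror = () => { reject(new Error('Worker failed to parse payload')); worker.terminate(); };
    worker.postMessage(rawJson);
  });
}
```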
**7. Over-Fetching with Unbounded Concurrency**

**Explanation:** `Promise.all` fires every request simultaneously. On mobile networks or rate-limited APIs, this triggers throttling or connection drops.

**Fix:** Implement batched concurrency limits. Process requests in chunks using a sliding window or queue-based executor.
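A sketch of the chunk-based option, which keeps at most `limit` requests in flight at once (a true sliding window would refill each slot as it frees up); the usage line assumes the `apiRequest` wrapper from the configuration template below:

```typescript
// Process tasks in fixed-size chunks so no more than `limit` requests
// are in flight at once. Results preserve input order.
async function runInChunks<T>(tasks: Array<() => Promise<T>>, limit: number): Promise<T[]> {
  const results: T[] = [];
  for (let i = 0; i < tasks.length; i += limit) {
    const chunk = tasks.slice(i, i + limit);
    results.push(...await Promise.all(chunk.map(task => task())));
  }
  return results;
}

// Usage: at most 4 concurrent profile fetches (ids and User are illustrative).
// const users = await runInChunks(ids.map(id => () => apiRequest<User>(`/users/${id}`)), 4);
```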
## Production Bundle

### Action Checklist

### Decision Matrix
| Scenario | Recommended Approach | Why | Cost Impact |
|---|---|---|---|
| Simple script or one-off request | Native `fetch` | Minimal overhead, no abstraction needed | Low |
| Production SPA with multiple endpoints | Custom HttpPipeline | Standardized errors, caching, retries, timeouts | Medium (dev time) |
| Complex state synchronization | TanStack Query / SWR | Built-in caching, background refetch, devtools | High (learning curve) |
| Large file uploads with progress | XHR bridge + Promise | Native `fetch` lacks upload progress events | Low |
| High-frequency polling | Server-Sent Events or WebSockets | HTTP polling wastes bandwidth and increases latency | Medium (infra) |
### Configuration Template
```typescript
// network.config.ts
import { HttpTransport } from './HttpTransport';
import { RetryOrchestrator } from './RetryOrchestrator';
import { CacheRegistry } from './CacheRegistry';

export const transport = new HttpTransport({
  baseURL: process.env.API_URL ?? 'https://api.production.io',
  defaultHeaders: {
    'Content-Type': 'application/json',
    'X-Client-Version': '2.4.1',
  },
  timeoutMs: 6000,
});

export const retryPolicy = new RetryOrchestrator({
  maxAttempts: 3,
  baseDelayMs: 800,
  factor: 2,
  retryableCodes: [408, 429, 500, 502, 503, 504],
});

export const cache = new CacheRegistry();

// Usage wrapper
export async function apiRequest<T>(endpoint: string, init?: RequestInit, cacheTTL?: number): Promise<T> {
  const fetchFn = () => retryPolicy.execute<T>(transport, endpoint, init);
  if (cacheTTL && init?.method?.toUpperCase() === 'GET') {
    return cache.get<T>(endpoint, cacheTTL, fetchFn);
  }
  return fetchFn();
}
```
### Quick Start Guide
- **Define the transport layer:** Instantiate `HttpTransport` with your base URL, default headers, and a timeout threshold. Ensure `AbortController` is wired to every request.
- **Configure retry behavior:** Set up `RetryOrchestrator` with a maximum of 3 attempts, a base delay of 800ms, and a factor of 2. Include jitter to prevent synchronized retries.
- **Enable caching for GET requests:** Wrap read operations with `CacheRegistry.get()`, specifying a TTL (e.g., 30000ms). The registry automatically deduplicates concurrent requests.
- **Handle errors explicitly:** Catch `NetworkError` and `TimeoutError` separately, and map status codes to user-facing messages without parsing error strings (see the sketch after this list).
- **Test under degradation:** Use browser DevTools network throttling to simulate 3G and offline states. Verify timeouts fire correctly, retries respect backoff, and the UI remains responsive.
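Putting the pieces together, a typical call site looks like the following sketch; the endpoint, TTL, UI helpers, and import paths are illustrative assumptions:

```typescript
import { apiRequest } from './network.config';
import { NetworkError, TimeoutError } from './errors'; // path is an assumption

declare function showToast(message: string): void; // hypothetical UI helper
declare function redirectToLogin(): void;          // hypothetical UI helper

interface Dashboard { widgets: string[] }

async function loadDashboard(): Promise<Dashboard | null> {
  try {
    // GET request: retried per policy and cached for 30 seconds.
    return await apiRequest<Dashboard>('/dashboard', { method: 'GET' }, 30_000);
  } catch (err) {
    if (err instanceof TimeoutError) {
      showToast('The server is not responding. Please check your connection.');
    } else if (err instanceof NetworkError && err.statusCode === 401) {
      redirectToLogin();
    } else {
      showToast('Something went wrong loading the dashboard.');
    }
    return null;
  }
}
```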