# D1 Patterns & Best Practices

## Pagination

```typescript
async function getUsers({ page, pageSize }: { page: number; pageSize: number }, env: Env) {
  const offset = (page - 1) * pageSize;
  const [countResult, dataResult] = await env.DB.batch([
    env.DB.prepare('SELECT COUNT(*) as total FROM users'),
    env.DB.prepare('SELECT * FROM users ORDER BY created_at DESC LIMIT ? OFFSET ?').bind(pageSize, offset)
  ]);
  const total = countResult.results[0].total as number;
  return { data: dataResult.results, total, page, pageSize, totalPages: Math.ceil(total / pageSize) };
}
```

## Conditional Queries

```typescript
async function searchUsers(filters: { name?: string; email?: string; active?: boolean }, env: Env) {
  const conditions: string[] = [];
  const params: (string | number)[] = [];
  if (filters.name) { conditions.push('name LIKE ?'); params.push(`%${filters.name}%`); }
  if (filters.email) { conditions.push('email = ?'); params.push(filters.email); }
  if (filters.active !== undefined) { conditions.push('active = ?'); params.push(filters.active ? 1 : 0); }
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
  return await env.DB.prepare(`SELECT * FROM users ${whereClause}`).bind(...params).all();
}
```

## Bulk Insert

```typescript
async function bulkInsertUsers(users: Array<{ name: string; email: string }>, env: Env) {
  const stmt = env.DB.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
  const batch = users.map(user => stmt.bind(user.name, user.email));
  return await env.DB.batch(batch);
}
```

## Caching with KV

```typescript
async function getCachedUser(userId: number, env: { DB: D1Database; CACHE: KVNamespace }) {
  const cacheKey = `user:${userId}`;
  const cached = await env.CACHE.get(cacheKey, 'json');
  if (cached) return cached;
  const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
  if (user) await env.CACHE.put(cacheKey, JSON.stringify(user), { expirationTtl: 300 });
  return user;
}
```
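Read-through caching needs a matching invalidation path on writes. The sketch below is a hypothetical companion to `getCachedUser` (the `updateUser` helper and its columns are illustrative, not part of this file): it updates the row in D1, then deletes the KV entry so the next read repopulates the cache.

```typescript
// Hypothetical write path: keep KV in sync with D1 by deleting the cached copy after an update.
async function updateUser(
  userId: number,
  changes: { name?: string; email?: string },
  env: { DB: D1Database; CACHE: KVNamespace }
) {
  await env.DB
    .prepare('UPDATE users SET name = COALESCE(?, name), email = COALESCE(?, email) WHERE id = ?')
    .bind(changes.name ?? null, changes.email ?? null, userId)
    .run();
  // Same key scheme as getCachedUser above
  await env.CACHE.delete(`user:${userId}`);
}
```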
## Query Optimization

```typescript
// ✅ Use indexes in WHERE clauses
const users = await env.DB.prepare('SELECT * FROM users WHERE email = ?').bind(email).all();

// ✅ Limit result sets
const recentPosts = await env.DB.prepare('SELECT * FROM posts ORDER BY created_at DESC LIMIT 100').all();

// ✅ Use batch() for multiple independent queries
const [user, posts, comments] = await env.DB.batch([
  env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId),
  env.DB.prepare('SELECT * FROM posts WHERE user_id = ?').bind(userId),
  env.DB.prepare('SELECT * FROM comments WHERE user_id = ?').bind(userId)
]);

// ❌ Avoid N+1 queries
for (const post of posts.results) {
  const author = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(post.user_id).first(); // Bad: multiple round trips
}

// ✅ Use JOINs instead
const postsWithAuthors = await env.DB.prepare(`
  SELECT posts.*, users.name as author_name
  FROM posts
  JOIN users ON posts.user_id = users.id
`).all();
```

## Multi-Tenant SaaS

```typescript
// Each tenant gets its own database, bound as TENANT_<ID>
export default {
  async fetch(request: Request, env: { [key: `TENANT_${string}`]: D1Database }) {
    const tenantId = request.headers.get('X-Tenant-ID');
    const db = tenantId ? env[`TENANT_${tenantId}`] : undefined;
    if (!db) return new Response('Unknown tenant', { status: 400 });
    const data = await db.prepare('SELECT * FROM records').all();
    return Response.json(data.results);
  }
}
```

## Session Storage

```typescript
async function createSession(userId: number, token: string, env: Env) {
  // Store in SQLite's datetime format ('YYYY-MM-DD HH:MM:SS') so it compares correctly against CURRENT_TIMESTAMP
  const expiresAt = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString().replace('T', ' ').slice(0, 19);
  return await env.DB.prepare('INSERT INTO sessions (user_id, token, expires_at) VALUES (?, ?, ?)').bind(userId, token, expiresAt).run();
}

async function validateSession(token: string, env: Env) {
  return await env.DB.prepare('SELECT s.*, u.email FROM sessions s JOIN users u ON s.user_id = u.id WHERE s.token = ? AND s.expires_at > CURRENT_TIMESTAMP').bind(token).first();
}
```
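Expired sessions accumulate unless something deletes them. A minimal cleanup sketch, assuming a Cron Trigger is configured for the Worker (the schedule itself is not shown here):

```typescript
// Hypothetical scheduled job: prune expired sessions so the table stays small.
export default {
  async scheduled(controller: ScheduledController, env: { DB: D1Database }, ctx: ExecutionContext) {
    const result = await env.DB
      .prepare('DELETE FROM sessions WHERE expires_at <= CURRENT_TIMESTAMP')
      .run();
    console.log(`Deleted ${result.meta.changes} expired sessions`);
  }
};
```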
## Analytics/Events

```typescript
async function logEvent(event: { type: string; userId?: number; metadata: object }, env: Env) {
  return await env.DB.prepare('INSERT INTO events (type, user_id, metadata) VALUES (?, ?, ?)').bind(event.type, event.userId ?? null, JSON.stringify(event.metadata)).run();
}

async function getEventStats(startDate: string, endDate: string, env: Env) {
  return await env.DB.prepare('SELECT type, COUNT(*) as count FROM events WHERE timestamp BETWEEN ? AND ? GROUP BY type ORDER BY count DESC').bind(startDate, endDate).all();
}
```

## Read Replication Pattern (Paid Plans)

Read replication is routed through the Sessions API on the existing `DB` binding: session reads can be served by the nearest replica, while writes and queries made outside a session execute on the primary.

```typescript
interface Env { DB: D1Database }

export default {
  async fetch(request: Request, env: Env) {
    if (request.method === 'GET') {
      // Reads: let the session start at any replica for lower latency
      const session = env.DB.withSession('first-unconstrained');
      const users = await session.prepare('SELECT * FROM users WHERE active = 1').all();
      return Response.json(users.results);
    }

    if (request.method === 'POST') {
      const { name, email } = await request.json() as { name: string; email: string };
      const result = await env.DB.prepare('INSERT INTO users (name, email) VALUES (?, ?)').bind(name, email).run();

      // Read-after-write: query the primary directly for consistency (replication lag ~100ms-2s)
      const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(result.meta.last_row_id).first();
      return Response.json(user, { status: 201 });
    }

    return new Response('Method not allowed', { status: 405 });
  }
}
```

- **Use replicas for**: analytics dashboards, search results, public queries (eventual consistency OK)
- **Use primary for**: read-after-write, financial transactions, authentication (consistency required)

## Migrations & Bulk Transformations

```typescript
// Schema migration: create indexes, then refresh the query planner's statistics.
// Run long migrations from a Cron Trigger or an admin-only route, not inline with user requests.
async function runMigration(env: Env) {
  await env.DB.prepare('CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)').run();
  await env.DB.prepare('CREATE INDEX IF NOT EXISTS idx_posts_user ON posts(user_id)').run();
  await env.DB.prepare('ANALYZE').run();
}

// Bulk transformation in batches to keep each round trip (and each transaction) small
async function transformLargeDataset(env: Env) {
  const BATCH_SIZE = 1000;
  let offset = 0;
  while (true) {
    const rows = await env.DB.prepare('SELECT id, data FROM legacy LIMIT ? OFFSET ?').bind(BATCH_SIZE, offset).all();
    if (rows.results.length === 0) break;
    const updates = rows.results.map(row =>
      env.DB.prepare('UPDATE legacy SET new_data = ? WHERE id = ?').bind(transform(row.data), row.id) // transform() is application-specific
    );
    await env.DB.batch(updates);
    offset += BATCH_SIZE;
  }
}
```

## Time Travel & Backups

```bash
wrangler d1 time-travel restore <db-name> --timestamp="2024-01-15T14:30:00Z" # Point-in-time restore
wrangler d1 time-travel info <db-name> # Current bookmark and restore window (7 days free, 30 days paid)
wrangler d1 export <db-name> --remote --output=./backup.sql # Full export
wrangler d1 export <db-name> --remote --no-schema --output=./data.sql # Data only
wrangler d1 execute <db-name> --remote --file=./backup.sql # Import
```
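## Session Bookmarks Across Requests

A minimal sketch of carrying read-replication consistency across requests, assuming read replication is enabled on the database; the `x-d1-bookmark` header name is an arbitrary illustrative choice. The client echoes the bookmark back, so its next read starts from a point at least as recent as what it last saw.

```typescript
interface Env { DB: D1Database }

export default {
  async fetch(request: Request, env: Env) {
    // Resume from the client's last bookmark, or start at any replica
    const bookmark = request.headers.get('x-d1-bookmark') ?? 'first-unconstrained';
    const session = env.DB.withSession(bookmark);

    const users = await session.prepare('SELECT * FROM users WHERE active = 1').all();

    // Return the new bookmark so the client can send it with its next request
    return Response.json(users.results, {
      headers: { 'x-d1-bookmark': session.getBookmark() ?? '' }
    });
  }
};
```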