Loading source
Pulling the file list, source metadata, and syntax-aware rendering for this listing.
Source from repo
Part of a 72-plugin marketplace with 112 AI agents and 146 skills for Claude Code development automation.
Files
Skill
Size
Entrypoint
Format
Open file
Syntax-highlighted preview of this file as included in the skill package.
references/advanced-patterns.md
# Python Performance Optimization — Advanced Reference

Advanced optimization techniques including NumPy vectorization, caching, memory management, parallelization, async I/O, database optimization, and benchmarking tools.

## Advanced Optimization

### Pattern 11: NumPy for Numerical Operations

```python
import timeit
import numpy as np

def python_sum(n):
    """Sum using pure Python."""
    return sum(range(n))

def numpy_sum(n):
    """Sum using NumPy."""
    return np.arange(n).sum()

n = 1000000

python_time = timeit.timeit(lambda: python_sum(n), number=100)
numpy_time = timeit.timeit(lambda: numpy_sum(n), number=100)

print(f"Python: {python_time:.4f}s")
print(f"NumPy: {numpy_time:.4f}s")
print(f"Speedup: {python_time/numpy_time:.2f}x")

# Vectorized operations
def python_multiply():
    """Element-wise multiplication in Python."""
    a = list(range(100000))
    b = list(range(100000))
    return [x * y for x, y in zip(a, b)]

def numpy_multiply():
    """Vectorized multiplication in NumPy."""
    a = np.arange(100000)
    b = np.arange(100000)
    return a * b

py_time = timeit.timeit(python_multiply, number=100)
np_time = timeit.timeit(numpy_multiply, number=100)

print(f"\nPython multiply: {py_time:.4f}s")
print(f"NumPy multiply: {np_time:.4f}s")
print(f"Speedup: {py_time/np_time:.2f}x")
```

### Pattern 12: Caching with functools.lru_cache

```python
from functools import lru_cache
import timeit

def fibonacci_slow(n):
    """Recursive fibonacci without caching."""
    if n < 2:
        return n
    return fibonacci_slow(n-1) + fibonacci_slow(n-2)

@lru_cache(maxsize=None)
def fibonacci_fast(n):
    """Recursive fibonacci with caching."""
    if n < 2:
        return n
    return fibonacci_fast(n-1) + fibonacci_fast(n-2)

# Massive speedup for recursive algorithms
n = 30

slow_time = timeit.timeit(lambda: fibonacci_slow(n), number=1)
fast_time = timeit.timeit(lambda: fibonacci_fast(n), number=1000)

print(f"Without cache (1 run): {slow_time:.4f}s")
print(f"With cache (1000 runs): {fast_time:.4f}s")

# Cache info
print(f"Cache info: {fibonacci_fast.cache_info()}")
```

### Pattern 13: Using __slots__ for Memory

```python
import sys

class RegularClass:
    """Regular class with __dict__."""
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

class SlottedClass:
    """Class with __slots__ for memory efficiency."""
    __slots__ = ['x', 'y', 'z']

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

# Memory comparison
regular = RegularClass(1, 2, 3)
slotted = SlottedClass(1, 2, 3)

print(f"Regular class size: {sys.getsizeof(regular)} bytes")
print(f"Slotted class size: {sys.getsizeof(slotted)} bytes")

# Significant savings with many instances
regular_objects = [RegularClass(i, i+1, i+2) for i in range(10000)]
slotted_objects = [SlottedClass(i, i+1, i+2) for i in range(10000)]

print(f"\nMemory for 10000 regular objects: ~{sys.getsizeof(regular) * 10000} bytes")
print(f"Memory for 10000 slotted objects: ~{sys.getsizeof(slotted) * 10000} bytes")
```

### Pattern 14: Multiprocessing for CPU-Bound Tasks

```python
import multiprocessing as mp
import time

def cpu_intensive_task(n):
    """CPU-intensive calculation."""
    return sum(i**2 for i in range(n))

def sequential_processing():
    """Process tasks sequentially."""
    start = time.time()
    results = [cpu_intensive_task(1000000) for _ in range(4)]
    elapsed = time.time() - start
    return elapsed, results

def parallel_processing():
    """Process tasks in parallel."""
    start = time.time()
    with mp.Pool(processes=4) as pool:
        results = pool.map(cpu_intensive_task, [1000000] * 4)
    elapsed = time.time() - start
    return elapsed, results

if __name__ == "__main__":
    seq_time, seq_results = sequential_processing()
    par_time, par_results = parallel_processing()

    print(f"Sequential: {seq_time:.2f}s")
    print(f"Parallel: {par_time:.2f}s")
    print(f"Speedup: {seq_time/par_time:.2f}x")
```

### Pattern 15: Async I/O for I/O-Bound Tasks

```python
import asyncio
import aiohttp
import time
import requests

urls = [
    "https://httpbin.org/delay/1",
    "https://httpbin.org/delay/1",
    "https://httpbin.org/delay/1",
    "https://httpbin.org/delay/1",
]

def synchronous_requests():
    """Synchronous HTTP requests."""
    start = time.time()
    results = []
    for url in urls:
        response = requests.get(url)
        results.append(response.status_code)
    elapsed = time.time() - start
    return elapsed, results

async def async_fetch(session, url):
    """Async HTTP request."""
    async with session.get(url) as response:
        return response.status

async def asynchronous_requests():
    """Asynchronous HTTP requests."""
    start = time.time()
    async with aiohttp.ClientSession() as session:
        tasks = [async_fetch(session, url) for url in urls]
        results = await asyncio.gather(*tasks)
    elapsed = time.time() - start
    return elapsed, results

# Async is much faster for I/O-bound work
sync_time, sync_results = synchronous_requests()
async_time, async_results = asyncio.run(asynchronous_requests())

print(f"Synchronous: {sync_time:.2f}s")
print(f"Asynchronous: {async_time:.2f}s")
print(f"Speedup: {sync_time/async_time:.2f}x")
```

## Database Optimization

### Pattern 16: Batch Database Operations

```python
import sqlite3
import time

def create_db():
    """Create test database."""
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
    return conn

def slow_inserts(conn, count):
    """Insert records one at a time."""
    start = time.time()
    cursor = conn.cursor()
    for i in range(count):
        cursor.execute("INSERT INTO users (name) VALUES (?)", (f"User {i}",))
        conn.commit()  # Commit each insert
    elapsed = time.time() - start
    return elapsed

def fast_inserts(conn, count):
    """Batch insert with single commit."""
    start = time.time()
    cursor = conn.cursor()
    data = [(f"User {i}",) for i in range(count)]
    cursor.executemany("INSERT INTO users (name) VALUES (?)", data)
    conn.commit()  # Single commit
    elapsed = time.time() - start
    return elapsed

# Benchmark
conn1 = create_db()
slow_time = slow_inserts(conn1, 1000)

conn2 = create_db()
fast_time = fast_inserts(conn2, 1000)

print(f"Individual inserts: {slow_time:.4f}s")
print(f"Batch insert: {fast_time:.4f}s")
print(f"Speedup: {slow_time/fast_time:.2f}x")
```

### Pattern 17: Query Optimization

```python
# Use indexes for frequently queried columns
"""
-- Slow: No index
SELECT * FROM users WHERE email = '[email protected]';

-- Fast: With index
CREATE INDEX idx_users_email ON users(email);
SELECT * FROM users WHERE email = '[email protected]';
"""

# Use query planning
import sqlite3

conn = sqlite3.connect("example.db")
cursor = conn.cursor()

# Analyze query performance
cursor.execute("EXPLAIN QUERY PLAN SELECT * FROM users WHERE email = ?", ("[email protected]",))
print(cursor.fetchall())

# Use SELECT only needed columns
# Slow: SELECT *
# Fast: SELECT id, name
```

## Memory Optimization

### Pattern 18: Detecting Memory Leaks

```python
import tracemalloc
import gc

def memory_leak_example():
    """Example that leaks memory."""
    leaked_objects = []

    for i in range(100000):
        # Objects added but never removed
        leaked_objects.append([i] * 100)

    # In real code, this would be an unintended reference

def track_memory_usage():
    """Track memory allocations."""
    tracemalloc.start()

    # Take snapshot before
    snapshot1 = tracemalloc.take_snapshot()

    # Run code
    memory_leak_example()

    # Take snapshot after
    snapshot2 = tracemalloc.take_snapshot()

    # Compare
    top_stats = snapshot2.compare_to(snapshot1, 'lineno')

    print("Top 10 memory allocations:")
    for stat in top_stats[:10]:
        print(stat)

    tracemalloc.stop()

# Monitor memory
track_memory_usage()

# Force garbage collection
gc.collect()
```

### Pattern 19: Iterators vs Lists

```python
import sys

def process_file_list(filename):
    """Load entire file into memory."""
    with open(filename) as f:
        lines = f.readlines()  # Loads all lines
        return sum(1 for line in lines if line.strip())

def process_file_iterator(filename):
    """Process file line by line."""
    with open(filename) as f:
        return sum(1 for line in f if line.strip())

# Iterator uses constant memory
# List loads entire file into memory
```

### Pattern 20: Weakref for Caches

```python
import weakref

class CachedResource:
    """Resource that can be garbage collected."""
    def __init__(self, data):
        self.data = data

# Regular cache prevents garbage collection
regular_cache = {}

def get_resource_regular(key):
    """Get resource from regular cache."""
    if key not in regular_cache:
        regular_cache[key] = CachedResource(f"Data for {key}")
    return regular_cache[key]

# Weak reference cache allows garbage collection
weak_cache = weakref.WeakValueDictionary()

def get_resource_weak(key):
    """Get resource from weak cache."""
    resource = weak_cache.get(key)
    if resource is None:
        resource = CachedResource(f"Data for {key}")
        weak_cache[key] = resource
    return resource

# When no strong references exist, objects can be GC'd
```

## Benchmarking Tools

### Custom Benchmark Decorator

```python
import time
from functools import wraps

def benchmark(func):
    """Decorator to benchmark function execution."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        print(f"{func.__name__} took {elapsed:.6f} seconds")
        return result
    return wrapper

@benchmark
def slow_function():
    """Function to benchmark."""
    time.sleep(0.5)
    return sum(range(1000000))

result = slow_function()
```

### Performance Testing with pytest-benchmark

```python
# Install: pip install pytest-benchmark

def test_list_comprehension(benchmark):
    """Benchmark list comprehension."""
    result = benchmark(lambda: [i**2 for i in range(10000)])
    assert len(result) == 10000

def test_map_function(benchmark):
    """Benchmark map function."""
    result = benchmark(lambda: list(map(lambda x: x**2, range(10000))))
    assert len(result) == 10000

# Run with: pytest test_performance.py --benchmark-compare
```