Loading source
Pulling the file list, source metadata, and syntax-aware rendering for this listing.
Source from repo
A comprehensive collection of Agent Skills for context engineering, multi-agent architectures, and production agent systems.
Files
Skill
Size
Entrypoint
Format
Open file
Syntax-highlighted preview of this file as included in the skill package.
skills/filesystem-context/references/implementation-patterns.md
# Filesystem Context Implementation Patterns

This reference provides detailed implementation patterns for filesystem-based context engineering.

## Pattern Catalog

### 1. Scratch Pad Manager

A centralized manager for handling large tool outputs and intermediate results.

```python
import os
import json
from datetime import datetime
from pathlib import Path

class ScratchPadManager:
    """Manages temporary file storage for agent context offloading."""

    def __init__(self, base_path: str = "scratch", token_threshold: int = 2000):
        self.base_path = Path(base_path)
        self.base_path.mkdir(parents=True, exist_ok=True)
        self.token_threshold = token_threshold
        self.manifest = {}

    def should_offload(self, content: str) -> bool:
        """Determine if content exceeds threshold for offloading."""
        # Rough token estimate: 1 token ≈ 4 characters
        estimated_tokens = len(content) // 4
        return estimated_tokens > self.token_threshold

    def offload(self, content: str, source: str, summary: str = None) -> dict:
        """Write content to file, return reference."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{source}_{timestamp}.txt"
        file_path = self.base_path / filename

        file_path.write_text(content)

        reference = {
            "type": "file_reference",
            "path": str(file_path),
            "source": source,
            "timestamp": timestamp,
            "size_chars": len(content),
            "summary": summary or self._extract_summary(content)
        }

        self.manifest[filename] = reference
        return reference

    def _extract_summary(self, content: str, max_chars: int = 500) -> str:
        """Extract first meaningful content as summary."""
        lines = content.strip().split('\n')
        summary_lines = []
        char_count = 0

        for line in lines:
            if char_count + len(line) > max_chars:
                break
            summary_lines.append(line)
            char_count += len(line)

        return '\n'.join(summary_lines)

    def cleanup(self, max_age_hours: int = 24):
        """Remove scratch files older than threshold."""
        cutoff = datetime.now().timestamp() - (max_age_hours * 3600)

        for file_path in self.base_path.glob("*.txt"):
            if file_path.stat().st_mtime < cutoff:
                file_path.unlink()
                if file_path.name in self.manifest:
                    del self.manifest[file_path.name]
```

### 2. Plan Persistence

Structured plan storage with progress tracking.

```python
import yaml
from dataclasses import dataclass, field, asdict
from enum import Enum
from pathlib import Path
from typing import List, Optional

class StepStatus(Enum):
    PENDING = "pending"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    BLOCKED = "blocked"
    CANCELLED = "cancelled"

@dataclass
class PlanStep:
    id: int
    description: str
    status: StepStatus = StepStatus.PENDING
    notes: Optional[str] = None

@dataclass
class AgentPlan:
    objective: str
    steps: List[PlanStep] = field(default_factory=list)
    status: str = "in_progress"

    def save(self, path: str = "scratch/current_plan.yaml"):
        """Persist plan to filesystem."""
        # Ensure the target directory exists so the documented default
        # path works on first use.
        Path(path).parent.mkdir(parents=True, exist_ok=True)
        data = {
            "objective": self.objective,
            "status": self.status,
            "steps": [
                {
                    "id": s.id,
                    "description": s.description,
                    "status": s.status.value,
                    "notes": s.notes
                }
                for s in self.steps
            ]
        }
        with open(path, 'w') as f:
            yaml.dump(data, f, default_flow_style=False)

    @classmethod
    def load(cls, path: str = "scratch/current_plan.yaml") -> "AgentPlan":
        """Load plan from filesystem."""
        with open(path, 'r') as f:
            data = yaml.safe_load(f)

        plan = cls(objective=data["objective"], status=data.get("status", "in_progress"))
        for step_data in data.get("steps", []):
            plan.steps.append(PlanStep(
                id=step_data["id"],
                description=step_data["description"],
                status=StepStatus(step_data["status"]),
                notes=step_data.get("notes")
            ))
        return plan

    def current_step(self) -> Optional[PlanStep]:
        """Get the first non-completed step."""
        for step in self.steps:
            if step.status != StepStatus.COMPLETED:
                return step
        return None

    def complete_step(self, step_id: int, notes: str = None):
        """Mark step as completed."""
        for step in self.steps:
            if step.id == step_id:
                step.status = StepStatus.COMPLETED
                if notes:
                    step.notes = notes
                break
```

### 3. Sub-Agent Workspace

File-based communication between agents.

```python
from pathlib import Path
from datetime import datetime
import json

class AgentWorkspace:
    """Manages file-based workspace for an agent."""

    def __init__(self, agent_id: str, base_path: str = "workspace/agents"):
        self.agent_id = agent_id
        self.path = Path(base_path) / agent_id
        self.path.mkdir(parents=True, exist_ok=True)

        # Standard files
        self.findings_file = self.path / "findings.md"
        self.status_file = self.path / "status.json"
        self.log_file = self.path / "activity.log"

    def write_finding(self, content: str, append: bool = True):
        """Write or append a finding."""
        mode = 'a' if append else 'w'
        with open(self.findings_file, mode) as f:
            if append:
                f.write(f"\n---\n## {datetime.now().isoformat()}\n\n")
            f.write(content)

    def update_status(self, status: str, progress: float = None, details: dict = None):
        """Update agent status for coordinator visibility."""
        status_data = {
            "agent_id": self.agent_id,
            "status": status,
            "updated_at": datetime.now().isoformat(),
            "progress": progress,
            "details": details or {}
        }
        self.status_file.write_text(json.dumps(status_data, indent=2))

    def log(self, message: str):
        """Append to activity log."""
        with open(self.log_file, 'a') as f:
            f.write(f"[{datetime.now().isoformat()}] {message}\n")

    def read_peer_findings(self, peer_id: str) -> str:
        """Read findings from another agent's workspace."""
        peer_path = self.path.parent / peer_id / "findings.md"
        if peer_path.exists():
            return peer_path.read_text()
        return ""


class CoordinatorWorkspace:
    """Coordinator that reads from sub-agent workspaces."""

    def __init__(self, base_path: str = "workspace/agents"):
        self.base_path = Path(base_path)

    def get_all_statuses(self) -> dict:
        """Collect status from all sub-agents."""
        statuses = {}
        for agent_dir in self.base_path.iterdir():
            if agent_dir.is_dir():
                status_file = agent_dir / "status.json"
                if status_file.exists():
                    statuses[agent_dir.name] = json.loads(status_file.read_text())
        return statuses

    def aggregate_findings(self) -> str:
        """Combine all agent findings into synthesis."""
        findings = []
        for agent_dir in self.base_path.iterdir():
            if agent_dir.is_dir():
                findings_file = agent_dir / "findings.md"
                if findings_file.exists():
                    findings.append(f"# {agent_dir.name}\n\n{findings_file.read_text()}")
        return "\n\n".join(findings)
```

### 4. Dynamic Skill Loader

Load skill content on demand.

```python
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional
import yaml

@dataclass
class SkillMetadata:
    name: str
    description: str
    path: str
    triggers: List[str] = field(default_factory=list)

class SkillLoader:
    """Manages dynamic loading of agent skills."""

    def __init__(self, skills_path: str = "skills"):
        self.skills_path = Path(skills_path)
        self.skill_index = self._build_index()

    def _build_index(self) -> dict:
        """Build index of available skills from SKILL.md frontmatter."""
        index = {}
        for skill_dir in self.skills_path.iterdir():
            if skill_dir.is_dir():
                skill_file = skill_dir / "SKILL.md"
                if skill_file.exists():
                    metadata = self._parse_frontmatter(skill_file)
                    if metadata:
                        index[metadata.name] = metadata
        return index

    def _parse_frontmatter(self, path: Path) -> Optional[SkillMetadata]:
        """Extract YAML frontmatter from skill file."""
        content = path.read_text()
        if content.startswith('---'):
            end = content.find('---', 3)
            if end > 0:
                frontmatter = yaml.safe_load(content[3:end])
                return SkillMetadata(
                    name=frontmatter.get('name', path.parent.name),
                    description=frontmatter.get('description', ''),
                    path=str(path),
                    triggers=frontmatter.get('triggers', [])
                )
        return None

    def get_static_context(self) -> str:
        """Generate minimal static context listing available skills."""
        lines = ["Available skills (load with read_file when relevant):"]
        for name, meta in self.skill_index.items():
            lines.append(f"- {name}: {meta.description[:100]}")
        return "\n".join(lines)

    def load_skill(self, name: str) -> str:
        """Load full skill content."""
        if name in self.skill_index:
            return Path(self.skill_index[name].path).read_text()
        raise ValueError(f"Unknown skill: {name}")

    def find_relevant_skills(self, query: str) -> List[str]:
        """Find skills that might be relevant to a query."""
        query_lower = query.lower()
        relevant = []
        for name, meta in self.skill_index.items():
            if any(trigger in query_lower for trigger in meta.triggers):
                relevant.append(name)
            elif name.replace('-', ' ') in query_lower:
                relevant.append(name)
        return relevant
```

### 5. Terminal Output Persistence

Capture and persist terminal sessions.

```python
import subprocess
from pathlib import Path
from datetime import datetime
from typing import List
import re

class TerminalCapture:
    """Captures and persists terminal output for agent access."""

    def __init__(self, terminals_path: str = "terminals"):
        self.terminals_path = Path(terminals_path)
        self.terminals_path.mkdir(parents=True, exist_ok=True)
        self.session_counter = 0

    def run_command(self, command: str, capture: bool = True) -> dict:
        """Run command and optionally capture output to file."""
        self.session_counter += 1

        # SECURITY: shell=True runs the string through the system shell.
        # Only pass trusted, agent-authored commands here — never raw
        # user or model input without sanitization.
        result = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True
        )

        output = {
            "command": command,
            "exit_code": result.returncode,
            "stdout": result.stdout,
            "stderr": result.stderr,
            "timestamp": datetime.now().isoformat()
        }

        if capture:
            output["file"] = self._persist_output(output)

        return output

    def _persist_output(self, output: dict) -> str:
        """Write output to terminal file."""
        filename = f"{self.session_counter}.txt"
        file_path = self.terminals_path / filename

        content = f"""---
command: {output['command']}
exit_code: {output['exit_code']}
timestamp: {output['timestamp']}
---

=== STDOUT ===
{output['stdout']}

=== STDERR ===
{output['stderr']}
"""
        file_path.write_text(content)
        return str(file_path)

    def grep_terminals(self, pattern: str, context_lines: int = 3) -> List[dict]:
        """Search all terminal outputs for pattern."""
        matches = []
        regex = re.compile(pattern, re.IGNORECASE)

        for term_file in self.terminals_path.glob("*.txt"):
            content = term_file.read_text()
            lines = content.split('\n')

            for i, line in enumerate(lines):
                if regex.search(line):
                    start = max(0, i - context_lines)
                    end = min(len(lines), i + context_lines + 1)
                    matches.append({
                        "file": str(term_file),
                        "line_number": i + 1,
                        "context": '\n'.join(lines[start:end])
                    })

        return matches
```

### 6. Self-Modification Guard

Safe pattern for agent self-learning.

```python
import yaml
from pathlib import Path
from datetime import datetime
from typing import Any

class PreferenceStore:
    """Guarded storage for agent-learned preferences."""

    MAX_ENTRIES = 100
    MAX_VALUE_LENGTH = 1000

    def __init__(self, path: str = "agent/preferences.yaml"):
        self.path = Path(path)
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.preferences = self._load()

    def _load(self) -> dict:
        """Load preferences from file."""
        if self.path.exists():
            return yaml.safe_load(self.path.read_text()) or {}
        return {}

    def _save(self):
        """Persist preferences to file."""
        self.path.write_text(yaml.dump(self.preferences, default_flow_style=False))

    def remember(self, key: str, value: Any, source: str = "user"):
        """Store a preference with validation."""
        # Validate key
        if not key or len(key) > 100:
            raise ValueError("Invalid key length")

        # Validate value
        value_str = str(value)
        if len(value_str) > self.MAX_VALUE_LENGTH:
            raise ValueError(f"Value exceeds max length of {self.MAX_VALUE_LENGTH}")

        # Check entry limit
        if len(self.preferences) >= self.MAX_ENTRIES and key not in self.preferences:
            raise ValueError(f"Max entries ({self.MAX_ENTRIES}) reached")

        # Store with metadata
        self.preferences[key] = {
            "value": value,
            "source": source,
            "updated_at": datetime.now().isoformat()
        }
        self._save()

    def recall(self, key: str, default: Any = None) -> Any:
        """Retrieve a preference."""
        entry = self.preferences.get(key)
        if entry:
            return entry["value"]
        return default

    def list_all(self) -> dict:
        """Get all preferences for context injection."""
        return {k: v["value"] for k, v in self.preferences.items()}

    def forget(self, key: str):
        """Remove a preference."""
        if key in self.preferences:
            del self.preferences[key]
            self._save()
```

## Integration Example

Combining patterns in an agent harness:

```python
class FilesystemContextAgent:
    """Agent with filesystem-based context management."""

    def __init__(self):
        self.scratch = ScratchPadManager()
        self.skills = SkillLoader()
        self.preferences = PreferenceStore()
        self.workspace = AgentWorkspace("main_agent")

    def handle_tool_output(self, tool_name: str, output: str) -> str:
        """Process tool output, offloading if necessary."""
        if self.scratch.should_offload(output):
            ref = self.scratch.offload(output, source=tool_name)
            return f"[{tool_name} output saved to {ref['path']}. Summary: {ref['summary'][:200]}]"
        return output

    def get_system_prompt(self) -> str:
        """Build system prompt with dynamic skill references."""
        base_prompt = "You are a helpful assistant."
        skill_context = self.skills.get_static_context()
        user_prefs = self.preferences.list_all()

        pref_section = ""
        if user_prefs:
            pref_section = "\n\nUser preferences:\n" + "\n".join(
                f"- {k}: {v}" for k, v in user_prefs.items()
            )

        return f"{base_prompt}\n\n{skill_context}{pref_section}"
```

## File Organization Best Practices

```
project/
├── scratch/                  # Ephemeral working files
│   ├── tool_outputs/         # Large tool results
│   │   └── search_20260107.txt
│   └── plans/                # Active task plans
│       └── current_plan.yaml
├── workspace/                # Agent workspaces
│   └── agents/
│       ├── research_agent/
│       │   ├── findings.md
│       │   └── status.json
│       └── code_agent/
│           ├── findings.md
│           └── status.json
├── agent/                    # Agent configuration
│   ├── preferences.yaml      # Learned preferences
│   └── patterns.md           # Discovered patterns
├── skills/                   # Loadable skills
│   └── {skill-name}/
│       └── SKILL.md
├── terminals/                # Terminal output
│   ├── 1.txt
│   └── 2.txt
└── history/                  # Chat history archives
    └── session_001.txt
```

## Token Accounting Metrics

Track these metrics to validate filesystem patterns:

1. **Static context ratio**: tokens in static context / total tokens
2. **Dynamic load rate**: how often skills/files are loaded per task
3. **Offload savings**: tokens saved by writing to files vs keeping in context
4. **Retrieval precision**: percentage of loaded content actually used

Target benchmarks:
- Static context ratio < 20%
- Offload savings > 50% for tool-heavy workflows
- Retrieval precision > 70% (loaded content is relevant)