Initial commit: Flutter 无书应用项目
This commit is contained in:
44
.trae/skills/planning-with-files/scripts/check-complete.ps1
Normal file
44
.trae/skills/planning-with-files/scripts/check-complete.ps1
Normal file
@@ -0,0 +1,44 @@
|
||||
# Check if all phases in task_plan.md are complete
# Always exits 0 -- uses stdout for status reporting
# Used by Stop hook to report task completion status

param(
    [string]$PlanFile = "task_plan.md"
)

if (-not (Test-Path $PlanFile)) {
    Write-Host '[planning-with-files] No task_plan.md found -- no active planning session.'
    exit 0
}

# Load the whole plan once; all counting is regex-based on this string.
$planText = Get-Content $PlanFile -Raw

# Count occurrences of a regex pattern in the given text.
function Count-Matches([string]$Text, [string]$Pattern) {
    ([regex]::Matches($Text, $Pattern)).Count
}

$total      = Count-Matches $planText "### Phase"

# Prefer the "**Status:** <state>" line format...
$done       = Count-Matches $planText "\*\*Status:\*\* complete"
$inProgress = Count-Matches $planText "\*\*Status:\*\* in_progress"
$pending    = Count-Matches $planText "\*\*Status:\*\* pending"

# ...falling back to inline [state] markers when no **Status:** lines exist.
if (($done + $inProgress + $pending) -eq 0) {
    $done       = Count-Matches $planText "\[complete\]"
    $inProgress = Count-Matches $planText "\[in_progress\]"
    $pending    = Count-Matches $planText "\[pending\]"
}

# Report status -- always exit 0, incomplete task is a normal state
if (($total -gt 0) -and ($done -eq $total)) {
    Write-Host "[planning-with-files] ALL PHASES COMPLETE ($done/$total). If the user has additional work, add new phases to task_plan.md before starting."
} else {
    Write-Host "[planning-with-files] Task in progress ($done/$total phases complete). Update progress.md before stopping."
    if ($inProgress -gt 0) {
        Write-Host "[planning-with-files] $inProgress phase(s) still in progress."
    }
    if ($pending -gt 0) {
        Write-Host "[planning-with-files] $pending phase(s) pending."
    }
}
exit 0
|
||||
46
.trae/skills/planning-with-files/scripts/check-complete.sh
Normal file
46
.trae/skills/planning-with-files/scripts/check-complete.sh
Normal file
@@ -0,0 +1,46 @@
|
||||
#!/bin/bash
# Check if all phases in task_plan.md are complete
# Always exits 0 — uses stdout for status reporting
# Used by Stop hook to report task completion status

PLAN_FILE="${1:-task_plan.md}"

if [ ! -f "$PLAN_FILE" ]; then
    echo "[planning-with-files] No task_plan.md found — no active planning session."
    exit 0
fi

# count_in_plan MODE PATTERN — print how many plan lines match PATTERN.
# MODE "fixed" uses literal matching (-F); "regex" uses basic regex.
# Never fails: grep's non-zero "no match" status is swallowed.
count_in_plan() {
    local mode="$1" pattern="$2"
    if [ "$mode" = "fixed" ]; then
        grep -cF -- "$pattern" "$PLAN_FILE" || true
    else
        grep -c -- "$pattern" "$PLAN_FILE" || true
    fi
}

TOTAL=$(count_in_plan regex "### Phase")

# Prefer the "**Status:** <state>" line format...
COMPLETE=$(count_in_plan fixed "**Status:** complete")
IN_PROGRESS=$(count_in_plan fixed "**Status:** in_progress")
PENDING=$(count_in_plan fixed "**Status:** pending")

# ...falling back to inline [state] markers when no **Status:** lines exist.
if [ "$COMPLETE" -eq 0 ] && [ "$IN_PROGRESS" -eq 0 ] && [ "$PENDING" -eq 0 ]; then
    COMPLETE=$(count_in_plan regex "\[complete\]")
    IN_PROGRESS=$(count_in_plan regex "\[in_progress\]")
    PENDING=$(count_in_plan regex "\[pending\]")
fi

# Default to 0 if empty
: "${TOTAL:=0}"
: "${COMPLETE:=0}"
: "${IN_PROGRESS:=0}"
: "${PENDING:=0}"

# Report status (always exit 0 — incomplete task is a normal state)
if [ "$COMPLETE" -eq "$TOTAL" ] && [ "$TOTAL" -gt 0 ]; then
    echo "[planning-with-files] ALL PHASES COMPLETE ($COMPLETE/$TOTAL). If the user has additional work, add new phases to task_plan.md before starting."
else
    echo "[planning-with-files] Task in progress ($COMPLETE/$TOTAL phases complete). Update progress.md before stopping."
    if [ "$IN_PROGRESS" -gt 0 ]; then
        echo "[planning-with-files] $IN_PROGRESS phase(s) still in progress."
    fi
    if [ "$PENDING" -gt 0 ]; then
        echo "[planning-with-files] $PENDING phase(s) pending."
    fi
fi
exit 0
|
||||
34
.trae/skills/planning-with-files/scripts/check-continue.sh
Normal file
34
.trae/skills/planning-with-files/scripts/check-continue.sh
Normal file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
# Verify that every file the Continue IDE integration needs is present.
set -euo pipefail

# Files checked in this exact order so the "Missing:" output is stable.
required_files=(
    ".continue/prompts/planning-with-files.prompt"
    ".continue/skills/planning-with-files/SKILL.md"
    ".continue/skills/planning-with-files/examples.md"
    ".continue/skills/planning-with-files/reference.md"
    ".continue/skills/planning-with-files/scripts/init-session.sh"
    ".continue/skills/planning-with-files/scripts/init-session.ps1"
    ".continue/skills/planning-with-files/scripts/check-complete.sh"
    ".continue/skills/planning-with-files/scripts/check-complete.ps1"
    ".continue/skills/planning-with-files/scripts/session-catchup.py"
)

missing=0
for f in "${required_files[@]}"; do
    if [ ! -f "$f" ]; then
        echo "Missing: $f"
        missing=1
    fi
done

if [ "$missing" -ne 0 ]; then
    exit 1
fi

# Continue only loads prompt files that carry a .prompt extension.
case ".continue/prompts/planning-with-files.prompt" in
    *.prompt) ;;
    *) echo "Prompt file must end with .prompt"; exit 1 ;;
esac

echo "Continue integration files look OK."
|
||||
166
.trae/skills/planning-with-files/scripts/init-session.ps1
Normal file
166
.trae/skills/planning-with-files/scripts/init-session.ps1
Normal file
@@ -0,0 +1,166 @@
|
||||
# Initialize planning files for a new session
# Usage: .\init-session.ps1 [-Template TYPE] [project-name]
# Templates: default, analytics
#
# Creates task_plan.md, findings.md and progress.md in the current directory,
# never overwriting a file that already exists.

param(
    [string]$ProjectName = "project",
    [string]$Template = "default"
)

# Session date, interpolated into the progress.md here-strings below.
$DATE = Get-Date -Format "yyyy-MM-dd"

# Resolve template directory (skill root is one level up from scripts/)
$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
$SkillRoot = Split-Path -Parent $ScriptDir
$TemplateDir = Join-Path $SkillRoot "templates"

Write-Host "Initializing planning files for: $ProjectName (template: $Template)"

# Validate template — unknown names fall back to "default" with a warning.
if ($Template -ne "default" -and $Template -ne "analytics") {
    Write-Host "Unknown template: $Template (available: default, analytics). Using default."
    $Template = "default"
}

# Create task_plan.md if it doesn't exist.
# Analytics template is copied from disk when available; otherwise the
# built-in default plan below is written.
if (-not (Test-Path "task_plan.md")) {
    $AnalyticsPlan = Join-Path $TemplateDir "analytics_task_plan.md"
    if ($Template -eq "analytics" -and (Test-Path $AnalyticsPlan)) {
        Copy-Item $AnalyticsPlan "task_plan.md"
    } else {
        @"
# Task Plan: [Brief Description]

## Goal
[One sentence describing the end state]

## Current Phase
Phase 1

## Phases

### Phase 1: Requirements & Discovery
- [ ] Understand user intent
- [ ] Identify constraints
- [ ] Document in findings.md
- **Status:** in_progress

### Phase 2: Planning & Structure
- [ ] Define approach
- [ ] Create project structure
- **Status:** pending

### Phase 3: Implementation
- [ ] Execute the plan
- [ ] Write to files before executing
- **Status:** pending

### Phase 4: Testing & Verification
- [ ] Verify requirements met
- [ ] Document test results
- **Status:** pending

### Phase 5: Delivery
- [ ] Review outputs
- [ ] Deliver to user
- **Status:** pending

## Decisions Made
| Decision | Rationale |
|----------|-----------|

## Errors Encountered
| Error | Resolution |
|-------|------------|
"@ | Out-File -FilePath "task_plan.md" -Encoding UTF8
    }
    Write-Host "Created task_plan.md"
} else {
    Write-Host "task_plan.md already exists, skipping"
}

# Create findings.md if it doesn't exist (same copy-or-default pattern).
if (-not (Test-Path "findings.md")) {
    $AnalyticsFindings = Join-Path $TemplateDir "analytics_findings.md"
    if ($Template -eq "analytics" -and (Test-Path $AnalyticsFindings)) {
        Copy-Item $AnalyticsFindings "findings.md"
    } else {
        @"
# Findings & Decisions

## Requirements
-

## Research Findings
-

## Technical Decisions
| Decision | Rationale |
|----------|-----------|

## Issues Encountered
| Issue | Resolution |
|-------|------------|

## Resources
-
"@ | Out-File -FilePath "findings.md" -Encoding UTF8
    }
    Write-Host "Created findings.md"
} else {
    Write-Host "findings.md already exists, skipping"
}

# Create progress.md if it doesn't exist.
# The two here-strings differ only in the Phase-1 name and the log table
# (Query Log for analytics, Test Results for default); $DATE expands in both.
if (-not (Test-Path "progress.md")) {
    if ($Template -eq "analytics") {
        @"
# Progress Log

## Session: $DATE

### Current Status
- **Phase:** 1 - Data Discovery
- **Started:** $DATE

### Actions Taken
-

### Query Log
| Query | Result Summary | Interpretation |
|-------|---------------|----------------|

### Errors
| Error | Resolution |
|-------|------------|
"@ | Out-File -FilePath "progress.md" -Encoding UTF8
    } else {
        @"
# Progress Log

## Session: $DATE

### Current Status
- **Phase:** 1 - Requirements & Discovery
- **Started:** $DATE

### Actions Taken
-

### Test Results
| Test | Expected | Actual | Status |
|------|----------|--------|--------|

### Errors
| Error | Resolution |
|-------|------------|
"@ | Out-File -FilePath "progress.md" -Encoding UTF8
    }
    Write-Host "Created progress.md"
} else {
    Write-Host "progress.md already exists, skipping"
}

Write-Host ""
Write-Host "Planning files initialized!"
Write-Host "Files: task_plan.md, findings.md, progress.md"
|
||||
179
.trae/skills/planning-with-files/scripts/init-session.sh
Normal file
179
.trae/skills/planning-with-files/scripts/init-session.sh
Normal file
@@ -0,0 +1,179 @@
|
||||
#!/bin/bash
# Initialize planning files for a new session
# Usage: ./init-session.sh [--template TYPE] [project-name]
# Templates: default, analytics
#
# Creates task_plan.md, findings.md and progress.md in the current directory,
# never overwriting a file that already exists.

set -e

# Parse arguments: any non-flag argument is taken as the project name.
TEMPLATE="default"
PROJECT_NAME="project"

while [[ $# -gt 0 ]]; do
    case "$1" in
        --template|-t)
            TEMPLATE="$2"
            shift 2
            ;;
        *)
            PROJECT_NAME="$1"
            shift
            ;;
    esac
done

# Session date, interpolated into the progress.md heredocs below.
DATE=$(date +%Y-%m-%d)

# Resolve template directory (skill root is one level up from scripts/)
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
SKILL_ROOT="$(dirname "$SCRIPT_DIR")"
TEMPLATE_DIR="$SKILL_ROOT/templates"

echo "Initializing planning files for: $PROJECT_NAME (template: $TEMPLATE)"

# Validate template — unknown names fall back to "default" with a warning.
if [ "$TEMPLATE" != "default" ] && [ "$TEMPLATE" != "analytics" ]; then
    echo "Unknown template: $TEMPLATE (available: default, analytics). Using default."
    TEMPLATE="default"
fi

# Create task_plan.md if it doesn't exist.
# Analytics template is copied from disk when available; otherwise the
# built-in default plan below is written. The heredoc delimiter is quoted
# ('EOF') so the plan body is literal — no variable expansion.
if [ ! -f "task_plan.md" ]; then
    if [ "$TEMPLATE" = "analytics" ] && [ -f "$TEMPLATE_DIR/analytics_task_plan.md" ]; then
        cp "$TEMPLATE_DIR/analytics_task_plan.md" task_plan.md
    else
        cat > task_plan.md << 'EOF'
# Task Plan: [Brief Description]

## Goal
[One sentence describing the end state]

## Current Phase
Phase 1

## Phases

### Phase 1: Requirements & Discovery
- [ ] Understand user intent
- [ ] Identify constraints
- [ ] Document in findings.md
- **Status:** in_progress

### Phase 2: Planning & Structure
- [ ] Define approach
- [ ] Create project structure
- **Status:** pending

### Phase 3: Implementation
- [ ] Execute the plan
- [ ] Write to files before executing
- **Status:** pending

### Phase 4: Testing & Verification
- [ ] Verify requirements met
- [ ] Document test results
- **Status:** pending

### Phase 5: Delivery
- [ ] Review outputs
- [ ] Deliver to user
- **Status:** pending

## Decisions Made
| Decision | Rationale |
|----------|-----------|

## Errors Encountered
| Error | Resolution |
|-------|------------|
EOF
    fi
    echo "Created task_plan.md"
else
    echo "task_plan.md already exists, skipping"
fi

# Create findings.md if it doesn't exist (same copy-or-default pattern).
if [ ! -f "findings.md" ]; then
    if [ "$TEMPLATE" = "analytics" ] && [ -f "$TEMPLATE_DIR/analytics_findings.md" ]; then
        cp "$TEMPLATE_DIR/analytics_findings.md" findings.md
    else
        cat > findings.md << 'EOF'
# Findings & Decisions

## Requirements
-

## Research Findings
-

## Technical Decisions
| Decision | Rationale |
|----------|-----------|

## Issues Encountered
| Issue | Resolution |
|-------|------------|

## Resources
-
EOF
    fi
    echo "Created findings.md"
else
    echo "findings.md already exists, skipping"
fi

# Create progress.md if it doesn't exist.
# These heredocs use an UNQUOTED delimiter so $DATE expands. The two bodies
# differ only in the Phase-1 name and the log table (Query Log for
# analytics, Test Results for default).
if [ ! -f "progress.md" ]; then
    if [ "$TEMPLATE" = "analytics" ]; then
        cat > progress.md << EOF
# Progress Log

## Session: $DATE

### Current Status
- **Phase:** 1 - Data Discovery
- **Started:** $DATE

### Actions Taken
-

### Query Log
| Query | Result Summary | Interpretation |
|-------|---------------|----------------|

### Errors
| Error | Resolution |
|-------|------------|
EOF
    else
        cat > progress.md << EOF
# Progress Log

## Session: $DATE

### Current Status
- **Phase:** 1 - Requirements & Discovery
- **Started:** $DATE

### Actions Taken
-

### Test Results
| Test | Expected | Actual | Status |
|------|----------|--------|--------|

### Errors
| Error | Resolution |
|-------|------------|
EOF
    fi
    echo "Created progress.md"
else
    echo "progress.md already exists, skipping"
fi

echo ""
echo "Planning files initialized!"
echo "Files: task_plan.md, findings.md, progress.md"
|
||||
352
.trae/skills/planning-with-files/scripts/session-catchup.py
Normal file
352
.trae/skills/planning-with-files/scripts/session-catchup.py
Normal file
@@ -0,0 +1,352 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Session Catchup Script for planning-with-files
|
||||
|
||||
Session-agnostic scanning: finds the most recent planning file update across
|
||||
ALL sessions, then collects all conversation from that point forward through
|
||||
all subsequent sessions until now.
|
||||
|
||||
Supports multiple AI IDEs:
|
||||
- Claude Code (.claude/projects/)
|
||||
- OpenCode (.local/share/opencode/storage/)
|
||||
|
||||
Usage: python3 session-catchup.py [project-path]
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional, Tuple
|
||||
|
||||
PLANNING_FILES = ['task_plan.md', 'progress.md', 'findings.md']
|
||||
|
||||
|
||||
def detect_ide() -> str:
    """Detect which IDE is being used based on environment and file structure.

    Returns 'claude-code', 'opencode', or 'unknown'.
    """
    # An explicit OpenCode data dir in the environment wins outright.
    if os.environ.get('OPENCODE_DATA_DIR'):
        return 'opencode'

    # Otherwise probe each IDE's on-disk footprint, preferring Claude Code
    # when both directories happen to exist.
    home = Path.home()
    if (home / '.claude').exists():
        return 'claude-code'
    if (home / '.local' / 'share' / 'opencode').exists():
        return 'opencode'

    return 'unknown'
|
||||
|
||||
|
||||
def get_project_dir_claude(project_path: str) -> Path:
    """Convert a project path to Claude Code's session-storage directory.

    Claude Code stores transcripts under ~/.claude/projects/<name>, where
    <name> is the project path with '/' and '_' turned into '-' and a
    guaranteed leading '-'.
    """
    # Order matters: the leading '-' check happens BEFORE '_' replacement,
    # mirroring Claude Code's own sanitization.
    name = project_path.replace('/', '-')
    if not name.startswith('-'):
        name = '-' + name
    return Path.home() / '.claude' / 'projects' / name.replace('_', '-')
|
||||
|
||||
|
||||
def get_project_dir_opencode(project_path: str) -> Optional[Path]:
    """Return OpenCode's storage root, or None when it does not exist.

    OpenCode keeps sessions under <data_dir>/storage/session/{projectHash}/;
    this resolves only the storage root — session discovery happens
    separately. The data dir comes from $OPENCODE_DATA_DIR, defaulting to
    ~/.local/share/opencode.
    """
    fallback = str(Path.home() / '.local' / 'share' / 'opencode')
    storage = Path(os.environ.get('OPENCODE_DATA_DIR', fallback)) / 'storage'
    return storage if storage.exists() else None
|
||||
|
||||
|
||||
def get_sessions_sorted(project_dir: Path) -> List[Path]:
    """Return main-session .jsonl files sorted newest-first by mtime.

    Sub-agent transcripts (files named 'agent-*.jsonl') are excluded.
    """
    candidates = (
        path
        for path in project_dir.glob('*.jsonl')
        if not path.name.startswith('agent-')
    )
    return sorted(candidates, key=lambda path: path.stat().st_mtime, reverse=True)
|
||||
|
||||
|
||||
def get_sessions_sorted_opencode(storage_dir: Path) -> List[Path]:
    """Return OpenCode session files sorted newest-first by mtime.

    OpenCode stores sessions at: storage/session/{projectHash}/{sessionID}.json
    """
    session_root = storage_dir / 'session'
    if not session_root.exists():
        return []

    # Flatten every project-hash directory's *.json files into one list.
    found = [
        session_file
        for project_dir in session_root.iterdir()
        if project_dir.is_dir()
        for session_file in project_dir.glob('*.json')
    ]
    return sorted(found, key=lambda path: path.stat().st_mtime, reverse=True)
|
||||
|
||||
|
||||
def get_session_first_timestamp(session_file: Path) -> Optional[str]:
    """Return the 'timestamp' value of the first message in a session file.

    Scans the .jsonl transcript line by line and returns the first truthy
    'timestamp' found; returns None when the file is unreadable or no line
    carries a timestamp.

    Fix: the original used bare ``except:`` clauses, which also swallow
    KeyboardInterrupt/SystemExit; only the errors this loop can legitimately
    produce are caught now.
    """
    try:
        with open(session_file, 'r') as f:
            for line in f:
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Tolerate corrupt or partially-written lines.
                    continue
                # A valid JSON line may still be a list/str/number.
                if not isinstance(data, dict):
                    continue
                ts = data.get('timestamp')
                if ts:
                    return ts
    except OSError:
        # Missing/unreadable file — treated as "no timestamp known".
        pass
    return None
|
||||
|
||||
|
||||
def scan_for_planning_update(session_file: Path) -> Tuple[int, Optional[str]]:
    """
    Quickly scan a session file for planning file updates.
    Returns (line_number, filename) of last update, or (-1, None) if none found.
    """
    last_update_line = -1
    last_update_file = None

    try:
        with open(session_file, 'r') as f:
            for line_num, line in enumerate(f):
                # Cheap substring pre-filter: avoid json.loads for lines that
                # cannot possibly contain a Write/Edit tool call.
                if '"Write"' not in line and '"Edit"' not in line:
                    continue

                try:
                    data = json.loads(line)
                    # Only assistant messages carry tool_use content blocks.
                    if data.get('type') != 'assistant':
                        continue

                    content = data.get('message', {}).get('content', [])
                    if not isinstance(content, list):
                        continue

                    for item in content:
                        if item.get('type') != 'tool_use':
                            continue
                        tool_name = item.get('name', '')
                        if tool_name not in ('Write', 'Edit'):
                            continue

                        file_path = item.get('input', {}).get('file_path', '')
                        # Record every hit; later lines overwrite earlier
                        # ones, so the final values describe the LAST
                        # planning-file update in the transcript.
                        for pf in PLANNING_FILES:
                            if file_path.endswith(pf):
                                last_update_line = line_num
                                last_update_file = pf
                                break
                except json.JSONDecodeError:
                    # Corrupt/partial line — keep scanning.
                    continue
    except Exception:
        # Best-effort scan: an unreadable file reports "no update found".
        pass

    return last_update_line, last_update_file
|
||||
|
||||
|
||||
def extract_messages_from_session(session_file: Path, after_line: int = -1) -> List[Dict]:
    """
    Extract conversation messages from a session file.
    If after_line >= 0, only extract messages after that line.
    If after_line < 0, extract all messages.

    Returns a list of dicts with keys: 'role', 'content', 'line', 'session'
    (plus 'tools' for assistant messages).
    """
    result = []

    try:
        with open(session_file, 'r') as f:
            for line_num, line in enumerate(f):
                if after_line >= 0 and line_num <= after_line:
                    continue

                try:
                    msg = json.loads(line)
                except json.JSONDecodeError:
                    # Corrupt/partial line — keep scanning.
                    continue

                msg_type = msg.get('type')
                is_meta = msg.get('isMeta', False)

                if msg_type == 'user' and not is_meta:
                    content = msg.get('message', {}).get('content', '')
                    # Content may be a plain string or a list of content
                    # blocks; for a list, take the first text block (or ''
                    # if there is none — the for/else handles that case).
                    if isinstance(content, list):
                        for item in content:
                            if isinstance(item, dict) and item.get('type') == 'text':
                                content = item.get('text', '')
                                break
                        else:
                            content = ''

                    if content and isinstance(content, str):
                        # Skip system/command messages
                        if content.startswith(('<local-command', '<command-', '<task-notification')):
                            continue
                        # Ignore very short messages (acknowledgements etc.).
                        if len(content) > 20:
                            result.append({
                                'role': 'user',
                                'content': content,
                                'line': line_num,
                                # First 8 chars of the session id, used as a
                                # marker when the report spans sessions.
                                'session': session_file.stem[:8]
                            })

                elif msg_type == 'assistant':
                    msg_content = msg.get('message', {}).get('content', '')
                    text_content = ''
                    tool_uses = []

                    if isinstance(msg_content, str):
                        text_content = msg_content
                    elif isinstance(msg_content, list):
                        for item in msg_content:
                            if item.get('type') == 'text':
                                text_content = item.get('text', '')
                            elif item.get('type') == 'tool_use':
                                # Summarize each tool call compactly for the
                                # catchup report.
                                tool_name = item.get('name', '')
                                tool_input = item.get('input', {})
                                if tool_name == 'Edit':
                                    tool_uses.append(f"Edit: {tool_input.get('file_path', 'unknown')}")
                                elif tool_name == 'Write':
                                    tool_uses.append(f"Write: {tool_input.get('file_path', 'unknown')}")
                                elif tool_name == 'Bash':
                                    # Truncate long shell commands.
                                    cmd = tool_input.get('command', '')[:80]
                                    tool_uses.append(f"Bash: {cmd}")
                                elif tool_name == 'AskUserQuestion':
                                    tool_uses.append("AskUserQuestion")
                                else:
                                    tool_uses.append(f"{tool_name}")

                    if text_content or tool_uses:
                        result.append({
                            'role': 'assistant',
                            # Cap assistant text at 600 chars to keep the
                            # report readable.
                            'content': text_content[:600] if text_content else '',
                            'tools': tool_uses,
                            'line': line_num,
                            'session': session_file.stem[:8]
                        })
    except Exception:
        # Best-effort extraction: return whatever was parsed so far.
        pass

    return result
|
||||
|
||||
|
||||
def main():
    """Print a catchup report of all conversation since the most recent
    planning-file update in any previous session.

    Reads the project path from argv[1] (default: cwd). Produces no output
    when there is nothing to catch up on.
    """
    project_path = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()

    # Detect IDE
    ide = detect_ide()

    if ide == 'opencode':
        # OpenCode's session format is not parsed yet — print guidance only.
        print("\n[planning-with-files] OpenCode session catchup is not yet fully supported")
        print("OpenCode uses a different session storage format (.json) than Claude Code (.jsonl)")
        print("Session catchup requires parsing OpenCode's message storage structure.")
        print("\nWorkaround: Manually read task_plan.md, progress.md, and findings.md to catch up.")
        return

    # Claude Code path
    project_dir = get_project_dir_claude(project_path)

    if not project_dir.exists():
        return

    # Need at least one previous session besides the current one.
    sessions = get_sessions_sorted(project_dir)
    if len(sessions) < 2:
        return

    # Skip the current session (most recently modified = index 0)
    previous_sessions = sessions[1:]

    # Find the most recent planning file update across ALL previous sessions
    # Sessions are sorted newest first, so we scan in order
    update_session = None
    update_line = -1
    update_file = None
    update_session_idx = -1

    for idx, session in enumerate(previous_sessions):
        line, filename = scan_for_planning_update(session)
        if line >= 0:
            update_session = session
            update_line = line
            update_file = filename
            update_session_idx = idx
            break

    if not update_session:
        # No planning file updates found in any previous session
        return

    # Collect ALL messages from the update point forward, across all sessions
    all_messages = []

    # 1. Get messages from the session with the update (after the update line)
    messages_from_update_session = extract_messages_from_session(update_session, after_line=update_line)
    all_messages.extend(messages_from_update_session)

    # 2. Get ALL messages from sessions between update_session and current
    # These are sessions[1:update_session_idx] (newer than update_session)
    intermediate_sessions = previous_sessions[:update_session_idx]

    # Process from oldest to newest for correct chronological order
    for session in reversed(intermediate_sessions):
        messages = extract_messages_from_session(session, after_line=-1)  # Get all messages
        all_messages.extend(messages)

    if not all_messages:
        return

    # Output catchup report
    print(f"\n[planning-with-files] SESSION CATCHUP DETECTED (IDE: {ide})")
    print(f"Last planning update: {update_file} in session {update_session.stem[:8]}...")

    sessions_covered = update_session_idx + 1
    if sessions_covered > 1:
        print(f"Scanning {sessions_covered} sessions for unsynced context")

    print(f"Unsynced messages: {len(all_messages)}")

    print("\n--- UNSYNCED CONTEXT ---")

    # Show up to 100 messages (the most recent ones win when truncating).
    MAX_MESSAGES = 100
    if len(all_messages) > MAX_MESSAGES:
        print(f"(Showing last {MAX_MESSAGES} of {len(all_messages)} messages)\n")
        messages_to_show = all_messages[-MAX_MESSAGES:]
    else:
        messages_to_show = all_messages

    current_session = None
    for msg in messages_to_show:
        # Show session marker when it changes
        if msg.get('session') != current_session:
            current_session = msg.get('session')
            print(f"\n[Session: {current_session}...]")

        if msg['role'] == 'user':
            print(f"USER: {msg['content'][:300]}")
        else:
            if msg.get('content'):
                print(f"CLAUDE: {msg['content'][:300]}")
            if msg.get('tools'):
                # Show at most four tool calls per assistant message.
                print(f"  Tools: {', '.join(msg['tools'][:4])}")

    print("\n--- RECOMMENDED ---")
    print("1. Run: git diff --stat")
    print("2. Read: task_plan.md, progress.md, findings.md")
    print("3. Update planning files based on above context")
    print("4. Continue with task")


if __name__ == '__main__':
    main()
|
||||
294
.trae/skills/planning-with-files/scripts/sync-ide-folders.py
Normal file
294
.trae/skills/planning-with-files/scripts/sync-ide-folders.py
Normal file
@@ -0,0 +1,294 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
sync-ide-folders.py — Syncs shared files from the canonical source
|
||||
(skills/planning-with-files/) to all IDE-specific folders.
|
||||
|
||||
Run this from the repo root before releases:
|
||||
python scripts/sync-ide-folders.py
|
||||
|
||||
What it syncs:
|
||||
- Templates (findings.md, progress.md, task_plan.md)
|
||||
- References (examples.md, reference.md)
|
||||
- Scripts (check-complete.sh/.ps1, init-session.sh/.ps1, session-catchup.py)
|
||||
|
||||
What it NEVER touches:
|
||||
- SKILL.md (IDE-specific frontmatter differs per IDE)
|
||||
- IDE-specific files (hooks, prompts, package.json, steering files)
|
||||
|
||||
Use --dry-run to preview changes without writing anything.
|
||||
Use --verify to check for drift without making changes (exits 1 if drift found).
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import shutil
|
||||
import sys
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
|
||||
# ─── Canonical source ──────────────────────────────────────────────
|
||||
CANONICAL = Path("skills/planning-with-files")
|
||||
|
||||
# ─── Shared source files (relative to CANONICAL) ──────────────────
|
||||
TEMPLATES = [
|
||||
"templates/findings.md",
|
||||
"templates/progress.md",
|
||||
"templates/task_plan.md",
|
||||
]
|
||||
|
||||
REFERENCES = [
|
||||
"examples.md",
|
||||
"reference.md",
|
||||
]
|
||||
|
||||
SCRIPTS = [
|
||||
"scripts/check-complete.sh",
|
||||
"scripts/check-complete.ps1",
|
||||
"scripts/init-session.sh",
|
||||
"scripts/init-session.ps1",
|
||||
"scripts/session-catchup.py",
|
||||
]
|
||||
|
||||
# ─── IDE sync manifests ───────────────────────────────────────────
|
||||
# Each IDE maps: canonical_source_file -> target_path (relative to repo root)
|
||||
# Only files listed here are synced. Everything else is untouched.
|
||||
|
||||
def _build_manifest(base, *, ref_style="flat", template_dirs=None,
                    include_scripts=True, extra_template_dirs=None):
    """Build a sync manifest for an IDE folder.

    Args:
        base: IDE skill folder path (e.g. ".gemini/skills/planning-with-files")
        ref_style: "flat" = examples.md at root, "subdir" = references/examples.md,
            "skip" = don't sync references (IDE uses a custom format)
        template_dirs: list of template subdirs (default: ["templates/"])
        include_scripts: whether to sync scripts
        extra_template_dirs: additional dirs to also receive template copies

    Returns:
        dict mapping canonical source file -> target path (repo-root relative).
    """
    base_path = Path(base)
    mapping = {}

    # Templates land in each configured template dir under their bare filename.
    primary_dirs = ["templates/"] if template_dirs is None else template_dirs
    for tdir in primary_dirs:
        for src in TEMPLATES:
            mapping[src] = str(base_path / tdir / Path(src).name)

    # Extra template locations (e.g. assets/templates/ in codex, codebuddy)
    # get suffixed keys so they don't collide with the primary entries.
    for tdir in (extra_template_dirs or []):
        for src in TEMPLATES:
            mapping[f"{src}__extra_{tdir}"] = str(base_path / tdir / Path(src).name)

    # References: flat (at the skill root) or under references/.
    if ref_style == "flat":
        mapping.update({ref: str(base_path / ref) for ref in REFERENCES})
    elif ref_style == "subdir":
        mapping.update({ref: str(base_path / "references" / ref) for ref in REFERENCES})

    if include_scripts:
        mapping.update({script: str(base_path / script) for script in SCRIPTS})

    return mapping
|
||||
|
||||
|
||||
# One manifest per supported IDE folder: canonical source -> target path.
# An empty manifest ({}) means the folder is maintained by hand, not synced.
IDE_MANIFESTS = {
    ".cursor": _build_manifest(
        ".cursor/skills/planning-with-files",
        ref_style="flat",
        include_scripts=False,
        # Cursor hooks are IDE-specific, not synced
    ),

    ".gemini": _build_manifest(
        ".gemini/skills/planning-with-files",
        ref_style="subdir",
        include_scripts=True,
    ),

    ".codex": _build_manifest(
        ".codex/skills/planning-with-files",
        ref_style="subdir",
        include_scripts=True,
    ),

    # .openclaw, .kilocode, .adal, .agent removed in v2.24.0 (IDE audit)
    # These IDEs use the standard Agent Skills spec — install via npx skills add

    ".pi": _build_manifest(
        ".pi/skills/planning-with-files",
        ref_style="flat",
        include_scripts=True,
        # package.json and README.md are IDE-specific, not synced
    ),

    ".continue": _build_manifest(
        ".continue/skills/planning-with-files",
        ref_style="flat",
        template_dirs=[],  # Continue has no templates dir
        include_scripts=True,
        # .continue/prompts/ is IDE-specific, not synced
    ),

    ".codebuddy": _build_manifest(
        ".codebuddy/skills/planning-with-files",
        ref_style="subdir",
        include_scripts=True,
    ),

    ".factory": _build_manifest(
        ".factory/skills/planning-with-files",
        ref_style="skip",  # Uses combined references.md, not synced
        include_scripts=True,
    ),

    ".opencode": _build_manifest(
        ".opencode/skills/planning-with-files",
        ref_style="flat",
        include_scripts=False,
    ),

    # Kiro: maintained under .kiro/ (skill + wrappers); not synced from canonical scripts/.
    ".kiro": {},
}
|
||||
|
||||
|
||||
# ─── Utility functions ─────────────────────────────────────────────
|
||||
|
||||
def file_hash(path):
    """Compute the SHA-256 digest of a file's contents.

    Args:
        path: File location, as a str or ``pathlib.Path``.

    Returns:
        Lowercase hex digest string, or ``None`` when the file
        does not exist (used by callers to detect missing targets).
    """
    try:
        contents = Path(path).read_bytes()
    except FileNotFoundError:
        return None
    return hashlib.sha256(contents).hexdigest()
|
||||
|
||||
|
||||
def sync_file(src, dst, *, dry_run=False):
    """Copy ``src`` to ``dst`` unless they are already identical.

    Args:
        src: Canonical source file (``pathlib.Path``).
        dst: Destination file (``pathlib.Path``); parents are created.
        dry_run: When True, report what would happen without writing.

    Returns:
        ``(action, detail)`` tuple; action is one of "updated",
        "created", "skipped" (already identical), or "missing_src".
    """
    if not src.exists():
        return "missing_src", f"Canonical file not found: {src}"

    source_digest = file_hash(src)
    target_digest = file_hash(dst)
    if source_digest == target_digest:
        return "skipped", "Already up to date"

    # file_hash() returns None for a nonexistent destination.
    if target_digest is None:
        action = "created"
    else:
        action = "updated"

    if not dry_run:
        dst.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(src, dst)

    prefix = "Would " if dry_run else ""
    return action, f"{prefix}{action}: {dst}"
|
||||
|
||||
|
||||
# ─── Main ──────────────────────────────────────────────────────────
|
||||
|
||||
def parse_args(argv=None):
    """Build the CLI parser and evaluate the given arguments.

    Args:
        argv: Optional argument list; ``None`` means ``sys.argv[1:]``.

    Returns:
        ``argparse.Namespace`` with boolean ``dry_run`` and ``verify``.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Sync shared planning-with-files assets from canonical source "
            "to IDE-specific folders."
        )
    )
    # Both options are simple boolean switches.
    for flag, help_text in (
        ("--dry-run", "Preview changes without writing files."),
        ("--verify", "Check for drift only; exit with code 1 if drift is found."),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
    return parser.parse_args(argv)
|
||||
|
||||
|
||||
def main(argv=None):
    """Sync (or verify) every IDE folder against the canonical source.

    Args:
        argv: Optional CLI argument list, forwarded to parse_args().

    Side effects:
        Copies files (unless --dry-run or --verify), prints a progress
        report to stdout, and calls sys.exit() — non-zero when the
        canonical source is missing or (in verify mode) drift is found.
    """
    args = parse_args(argv)
    dry_run = args.dry_run
    verify = args.verify

    # Must run from repo root
    if not CANONICAL.exists():
        print(f"Error: Canonical source not found at {CANONICAL}/")
        print("Run this script from the repo root.")
        sys.exit(1)

    print(f"{'[DRY RUN] ' if dry_run else ''}{'[VERIFY] ' if verify else ''}"
          f"Syncing from {CANONICAL}/\n")

    # Counters for the final summary; "drift" is only incremented in verify mode.
    stats = {"updated": 0, "created": 0, "skipped": 0, "missing_src": 0, "drift": 0}

    for ide_name, manifest in sorted(IDE_MANIFESTS.items()):
        # Skip IDEs whose base directory doesn't exist
        ide_root = Path(ide_name)
        if not ide_root.exists():
            continue

        print(f"  {ide_name}/")
        ide_changes = 0

        for canonical_key, target_path in sorted(manifest.items()):
            # Handle __extra_ keys (canonical key contains __extra_ suffix)
            canonical_rel = canonical_key.split("__extra_")[0]
            src = CANONICAL / canonical_rel
            dst = Path(target_path)

            if verify:
                # Verify mode: just check for drift
                src_hash = file_hash(src)
                dst_hash = file_hash(dst)
                if src_hash and dst_hash and src_hash != dst_hash:
                    print(f"    DRIFT: {dst}")
                    stats["drift"] += 1
                    ide_changes += 1
                elif src_hash and not dst_hash:
                    # Canonical file exists but the target copy is absent.
                    print(f"    MISSING: {dst}")
                    stats["drift"] += 1
                    ide_changes += 1
            else:
                action, detail = sync_file(src, dst, dry_run=dry_run)
                stats[action] += 1
                if action in ("updated", "created"):
                    print(f"    {action.upper()}: {dst}")
                    ide_changes += 1

        if ide_changes == 0:
            print("    (up to date)")

    # Summary
    print(f"\n{'-' * 50}")
    if verify:
        total_drift = stats["drift"]
        if total_drift > 0:
            print(f"DRIFT DETECTED: {total_drift} file(s) out of sync.")
            print("Run 'python scripts/sync-ide-folders.py' to fix.")
            # Non-zero exit so CI jobs can fail on drift.
            sys.exit(1)
        else:
            print("All IDE folders are in sync.")
            sys.exit(0)
    else:
        print(f"  Updated: {stats['updated']}")
        print(f"  Created: {stats['created']}")
        print(f"  Skipped: {stats['skipped']} (already up to date)")
        if stats["missing_src"] > 0:
            print(f"  Missing: {stats['missing_src']} (canonical source not found)")
        if dry_run:
            print("\n  This was a dry run. No files were modified.")
            print("  Run without --dry-run to apply changes.")
|
||||
|
||||
|
||||
# Allow use both as a script and as an importable module (e.g. for tests).
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user