Loading source
Pulling the file list, source metadata, and syntax-aware rendering for this listing.
Source from repo
Search, analyze, and interact with Xiaohongshu (RedNote/小红书) content via a local MCP server and shell scripts.
Files
Skill
Size
Entrypoint
Format
Open file
Syntax-highlighted preview of this file as included in the skill package.
scripts/track-topic.py
#!/usr/bin/env python3
"""
Xiaohongshu (RedNote) trending-topic tracker.

Usage:
    python track-topic.py <topic> [--limit N] [--feishu] [--output FILE]

Examples:
    python track-topic.py "DeepSeek" --limit 5 --feishu
    python track-topic.py "春节旅游" --limit 10 --output report.md
"""

import argparse
import json
import os
import subprocess
import sys
import tempfile
from datetime import datetime
from pathlib import Path

# Directory containing this script (lives inside xiaohongshu/scripts).
SCRIPT_DIR = Path(__file__).parent.resolve()
XHS_SCRIPTS = SCRIPT_DIR


def find_feishu_scripts() -> Path:
    """Locate the scripts directory of the feishu-docs skill.

    Only a fixed allow-list of skill roots is searched, and the resolved
    candidate must still live under one of those roots (re-checked after
    resolve() so a symlink cannot escape the sandbox). Returns a default
    path — which may not exist — when nothing is found.
    """
    allowed_roots = [
        SCRIPT_DIR.parent.parent,                             # sibling skill directory
        Path.home() / ".openclaw" / "workspace" / "skills",
        Path.home() / ".claude" / "skills",
    ]
    for root in allowed_roots:
        candidate = (root / "feishu-docs" / "scripts").resolve()
        # Containment check after resolve() guards against symlink escape.
        if candidate.is_dir() and any(
            str(candidate).startswith(str(r.resolve()) + os.sep) for r in allowed_roots
        ):
            return candidate
    return allowed_roots[0] / "feishu-docs" / "scripts"  # default (may not exist)


FEISHU_SCRIPTS = find_feishu_scripts()


def call_xhs_mcp(tool: str, args: dict) -> dict:
    """Invoke a Xiaohongshu MCP tool through the local mcp-call.sh wrapper.

    Returns the parsed JSON payload on success, or {} on any failure
    (non-zero exit, timeout, MCP-level error, unparseable output).
    Exits the process if the wrapper script itself is missing.
    """
    mcp_call = XHS_SCRIPTS / "mcp-call.sh"
    if not mcp_call.exists():
        print(f"❌ 找不到 xiaohongshu skill: {mcp_call}", file=sys.stderr)
        sys.exit(1)

    try:
        result = subprocess.run(
            [str(mcp_call), tool, json.dumps(args)],
            capture_output=True, text=True, timeout=120
        )
    except subprocess.TimeoutExpired:
        # BUGFIX: a hung MCP call previously crashed the whole script with an
        # unhandled TimeoutExpired; treat it like any other failed call.
        print(f"❌ MCP 调用失败: timeout ({tool})", file=sys.stderr)
        return {}

    if result.returncode != 0:
        print(f"❌ MCP 调用失败: {result.stderr}", file=sys.stderr)
        return {}

    try:
        response = json.loads(result.stdout)
        if "result" in response and "content" in response["result"]:
            content = response["result"]["content"]
            # BUGFIX: guard against an empty content list (was content[0] unconditionally).
            if not content:
                return {}
            text = content[0].get("text", "{}")
            return json.loads(text) if text else {}
        elif "error" in response:
            print(f"⚠️ MCP 错误: {response['error'].get('message', 'Unknown')}", file=sys.stderr)
            return {}
        return response
    except json.JSONDecodeError:
        return {}


def search_feeds(keyword: str) -> list:
    """Search Xiaohongshu for *keyword*; return only real note feeds.

    Entries whose modelType is not "note" (e.g. hot_query suggestions)
    are filtered out.
    """
    print(f"🔍 搜索: {keyword}")
    result = call_xhs_mcp("search_feeds", {"keyword": keyword})
    feeds = result.get("feeds", [])
    return [f for f in feeds if f.get("modelType") == "note"]


def get_feed_detail(feed_id: str, xsec_token: str, load_comments: bool = True) -> dict:
    """Fetch the detail payload for one post (optionally with all comments)."""
    args = {
        "feed_id": feed_id,
        "xsec_token": xsec_token,
        "load_all_comments": load_comments
    }
    result = call_xhs_mcp("get_feed_detail", args)
    return result.get("data", {})


def format_timestamp(ts) -> str:
    """Render a millisecond epoch timestamp as "YYYY-MM-DD HH:MM".

    Returns "未知" for falsy or unparseable input.
    """
    if not ts:
        return "未知"
    try:
        dt = datetime.fromtimestamp(ts / 1000)
        return dt.strftime("%Y-%m-%d %H:%M")
    # BUGFIX: was a bare `except:` which also swallowed SystemExit /
    # KeyboardInterrupt; only the errors fromtimestamp can raise are caught.
    except (TypeError, ValueError, OSError, OverflowError):
        return "未知"


def get_comments_list(post: dict) -> list:
    """Return the comment list of *post*, tolerating both payload shapes.

    The MCP server returns either {"comments": {"list": [...]}} or a bare
    list; anything else yields [].
    """
    comments = post.get("comments", {})
    if isinstance(comments, dict):
        return comments.get("list", [])
    elif isinstance(comments, list):
        return comments
    return []


def generate_report(keyword: str, posts: list) -> str:
    """Build the full Markdown tracking report for *keyword* from *posts*."""
    now = datetime.now().strftime("%Y-%m-%d %H:%M")

    report = f"""# 🔥 小红书热点跟踪报告

**话题:** {keyword}
**生成时间:** {now}
**收录帖子:** {len(posts)} 篇

---

## 📊 概览

"""

    # Aggregate stats; likedCount arrives as a string (or None), hence int(... or 0).
    total_likes = sum(int(p.get("note", {}).get("interactInfo", {}).get("likedCount", 0) or 0) for p in posts)
    total_comments = sum(len(get_comments_list(p)) for p in posts)

    report += f"""| 指标 | 数值 |
|------|------|
| 总帖子数 | {len(posts)} |
| 总点赞数 | {total_likes:,} |
| 总评论数 | {total_comments} |

---

## 📝 热帖详情

"""

    for i, post in enumerate(posts, 1):
        note = post.get("note", {})
        comments = get_comments_list(post)

        title = note.get("title", "无标题")
        desc = note.get("desc", "")
        user = note.get("user", {}).get("nickname", "匿名")
        time_str = format_timestamp(note.get("time"))
        interact = note.get("interactInfo", {})
        likes = interact.get("likedCount", "0")
        collected = interact.get("collectedCount", "0")

        report += f"""### {i}. {title}

**作者:** {user}
**时间:** {time_str}
**互动:** ❤️ {likes} 赞 · ⭐ {collected} 收藏

**正文:**

> {desc[:500]}{"..." if len(desc) > 500 else ""}

"""

        if comments:
            report += f"""**热门评论 ({len(comments)} 条):**

"""
            # Show at most the first five comments.
            for comment in comments[:5]:
                c_user = comment.get("userInfo", {}).get("nickname", "匿名")
                c_content = comment.get("content", "")
                c_likes = comment.get("likeCount", 0)
                report += f"- **{c_user}** ({c_likes}赞): {c_content[:100]}\n"

            if len(comments) > 5:
                report += f"- *... 还有 {len(comments) - 5} 条评论*\n"

        report += "\n---\n\n"

    # Comment-section hot-keyword summary.
    report += """## 💬 评论区热点关键词

"""

    # Placeholder keyword extraction (a real NLP pass could slot in here).
    all_comments = [
        c.get("content", "")
        for post in posts
        for c in get_comments_list(post)
    ]

    if all_comments:
        report += f"共 {len(all_comments)} 条评论,主要讨论方向:\n\n"
        report += "- 用户对该话题的关注度较高\n"
        report += "- 评论区互动活跃\n"
    else:
        report += "暂无足够评论数据进行分析\n"

    # Trend section: thresholds are heuristic (likes > 1000 / > 100,
    # comments > 50 / > 10).
    heat = "🔥🔥🔥 高" if total_likes > 1000 else "🔥🔥 中" if total_likes > 100 else "🔥 低"
    activity = "活跃" if total_comments > 50 else "一般" if total_comments > 10 else "较低"
    report += f"""
---

## 📈 趋势分析

基于以上热帖和评论数据,该话题在小红书上呈现以下特点:

1. **热度指数**: {heat}
2. **互动活跃度**: {activity}
3. **内容类型**: 以图文笔记为主

---

*报告由 OpenClaw 小红书热点跟踪工具自动生成*
"""

    return report


def export_to_feishu(title: str, content: str) -> str:
    """Export *content* as a Feishu document titled *title*.

    Returns the importer's stdout (which contains the document link) or
    "" on failure.
    """
    import_script = FEISHU_SCRIPTS / "doc-import.sh"
    if not import_script.exists():
        print(f"❌ 找不到 feishu-docs skill: {import_script}", file=sys.stderr)
        return ""

    print("📤 导出到飞书文档...")

    # BUGFIX: was a fixed, predictable /tmp/xhs_report.md (clobber/symlink
    # risk on shared hosts) that was never deleted; use a private temp file
    # and always clean it up.
    tmp = tempfile.NamedTemporaryFile(
        "w", encoding="utf-8", suffix=".md", delete=False
    )
    try:
        tmp.write(content)
        tmp.close()
        try:
            result = subprocess.run(
                [str(import_script), title, "--file", tmp.name],
                capture_output=True, text=True, timeout=60
            )
        except subprocess.TimeoutExpired:
            print("⚠️ 飞书导出失败: timeout", file=sys.stderr)
            return ""
    finally:
        os.unlink(tmp.name)

    if result.returncode != 0:
        print(f"⚠️ 飞书导出失败: {result.stderr}", file=sys.stderr)
        return ""

    # The importer prints the document link on stdout.
    output = result.stdout
    print(output)
    return output


def main():
    """CLI entry point: search, fetch details, build and emit the report."""
    parser = argparse.ArgumentParser(description="小红书热点跟踪工具")
    parser.add_argument("keyword", help="要跟踪的话题/关键词")
    parser.add_argument("--limit", "-n", type=int, default=10, help="获取帖子数量 (默认 10)")
    parser.add_argument("--feishu", "-f", action="store_true", help="导出到飞书文档")
    parser.add_argument("--output", "-o", help="输出 Markdown 文件路径")
    parser.add_argument("--no-comments", action="store_true", help="不获取评论")

    args = parser.parse_args()

    # 1. Search for posts.
    feeds = search_feeds(args.keyword)
    if not feeds:
        print("❌ 未找到相关帖子")
        sys.exit(1)

    print(f"✅ 找到 {len(feeds)} 条帖子")

    # 2. Fetch per-post details.
    posts = []
    selected = feeds[:args.limit]
    for i, feed in enumerate(selected, 1):
        feed_id = feed.get("id")
        xsec_token = feed.get("xsecToken")
        # BUGFIX: skip malformed entries instead of calling MCP with None args.
        if not feed_id or not xsec_token:
            continue
        title = feed.get("noteCard", {}).get("displayTitle", "")

        print(f"📖 [{i}/{len(selected)}] 获取: {title[:30]}...")

        detail = get_feed_detail(feed_id, xsec_token, not args.no_comments)
        if detail:
            posts.append(detail)

    if not posts:
        print("❌ 未能获取帖子详情")
        sys.exit(1)

    print(f"✅ 成功获取 {len(posts)} 篇帖子详情")

    # 3. Build the report.
    print("📝 生成报告...")
    report = generate_report(args.keyword, posts)

    # 4. Emit it.
    if args.output:
        output_path = Path(args.output)
        output_path.write_text(report, encoding="utf-8")
        print(f"✅ 报告已保存: {output_path}")

    if args.feishu:
        doc_title = f"小红书热点跟踪: {args.keyword} ({datetime.now().strftime('%m-%d')})"
        export_to_feishu(doc_title, report)

    if not args.output and not args.feishu:
        # Default: dump to stdout.
        print("\n" + "="*60 + "\n")
        print(report)

    return report


if __name__ == "__main__":
    main()