#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
使用 Playwright + Cookies 爬取小红书展览信息
"""

import json
from pathlib import Path
from urllib.parse import quote

from playwright.sync_api import sync_playwright

# Load Xiaohongshu cookies from the config file
CONFIG_FILE = Path(__file__).parent.parent / "config" / "xiaohongshu_cookies.json"
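# The config file is expected to be a flat JSON object mapping cookie names to
# cookie values, e.g. {"web_session": "...", "a1": "..."} (key names illustrative).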
XHS_COOKIES_DICT = {}
try:
    with open(CONFIG_FILE, "r", encoding="utf-8") as f:
        XHS_COOKIES_DICT = json.load(f)
    print(f"✓ Loaded Xiaohongshu cookie config: {len(XHS_COOKIES_DICT)} cookies")
except Exception as e:
    print(f"⚠ Failed to load cookie config: {e}; continuing without cookies")

# Convert to Playwright's cookie format (one dict per cookie, scoped to .xiaohongshu.com)
XHS_COOKIES = [
    {"name": name, "value": value, "domain": ".xiaohongshu.com", "path": "/"}
    for name, value in XHS_COOKIES_DICT.items()
]

def search_xiaohongshu(keyword):
    """搜索小红书笔记"""
    exhibitions = []
    
    with sync_playwright() as p:
        # Disable the automation flag so headless Chromium is harder to detect.
        browser = p.chromium.launch(
            headless=True,
            args=['--disable-blink-features=AutomationControlled'],
        )

        # Set a desktop user agent on the browser context (the idiomatic
        # Playwright way) and attach the pre-loaded cookies before opening a page.
        context = browser.new_context(
            user_agent=(
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
            )
        )
        context.add_cookies(XHS_COOKIES)
        page = context.new_page()
        
        try:
            # Build the search URL; URL-encode the keyword since it may contain
            # spaces and non-ASCII characters.
            url = (
                "https://www.xiaohongshu.com/search_result"
                f"?keyword={quote(keyword)}&source=web_search_result_notes"
            )
            print(f"Visiting: {url}")

            page.goto(url, wait_until="domcontentloaded", timeout=60000)
            page.wait_for_timeout(5000)  # give client-side JS time to render results
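            # Note: search results lazy-load on scroll; scrolling the page (e.g. with
            # page.mouse.wheel) before reading the DOM may surface more cards than
            # the first batch.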
            
            # Grab the rendered HTML (also saved to disk below for analysis)
            content = page.content()
            print(f"Page length: {len(content)}")
            
            # Try to locate note cards; Xiaohongshu's markup changes often, so fall
            # back through more than one selector.
            notes = page.query_selector_all("section.note-item")
            if not notes:
                notes = page.query_selector_all("div.note-card")
            print(f"Found {len(notes)} notes")
            
            for note in notes[:10]:
                try:
                    # Try several selectors for the note title
                    title_el = note.query_selector("div.title, span.title, h3.title, a.title")
                    title = title_el.inner_text().strip() if title_el else ""

                    user_el = note.query_selector("div.username, span.username, div.nickname")
                    user = user_el.inner_text().strip() if user_el else ""

                    # Fall back to the first line of the card's full text
                    if not title:
                        title = note.inner_text().strip().split('\n')[0][:50]

                    if title and len(title) > 2:
                        exhibitions.append({
                            "title": title,
                            "user": user or "unknown",
                            "source": "小红书"
                        })
                        print(f"  ✓ {title[:30]}... by @{user or 'unknown'}")
                except Exception as e:
                    print(f"Failed to parse note: {e}")
            
            # Save the full rendered page for offline selector analysis
            Path("data").mkdir(parents=True, exist_ok=True)
            with open("data/xiaohongshu_page.html", "w", encoding="utf-8") as f:
                f.write(content)
            print("Saved page to data/xiaohongshu_page.html")
            
        except Exception as e:
            print(f"爬取失败：{e}")
        
        browser.close()
    
    return exhibitions


if __name__ == "__main__":
    print("=" * 50)
    print("小红书爬虫 - 北京看展 2026")
    print("=" * 50)
    
    exhibitions = search_xiaohongshu("北京看展 2026")
    
    print(f"\n找到 {len(exhibitions)} 篇笔记：")
    for ex in exhibitions[:5]:
        print(f"  - {ex['title']} by @{ex['user']}")
    
    # Save results (create the output directory if it doesn't exist yet)
    Path("data").mkdir(parents=True, exist_ok=True)
    with open("data/xiaohongshu_exhibitions.json", "w", encoding="utf-8") as f:
        json.dump(exhibitions, f, ensure_ascii=False, indent=2)
    
    print(f"\n结果已保存到 data/xiaohongshu_exhibitions.json")
