#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
继续爬取国家博物馆和首都博物馆
"""

import json
import time
from pathlib import Path

from playwright.sync_api import sync_playwright

def crawl_chnmuseum_detail():
    """National Museum of China -- detailed crawl with multiple fallback URLs.

    Tries each candidate URL in order, extracts lines that look like
    exhibition titles from the rendered page text, and stops at the first
    URL that yields any results.

    Returns:
        list[dict]: de-duplicated records with ``title``/``venue``/``area``/
        ``source``/``price`` keys; empty list if every URL fails.
    """
    print("【国家博物馆】尝试多种方式爬取...")
    exhibitions = []

    # Candidate URLs, most specific page first.
    urls = [
        "https://www.chnmuseum.cn/zl/zhanlanyugao/",
        "https://www.chnmuseum.cn/zl/",
        "https://www.chnmuseum.cn/",
    ]

    # Section headers that must not be recorded as exhibition titles.
    skip_phrases = ("展览预告", "正在展出", "常设展览")

    with sync_playwright() as p:
        browser = p.chromium.launch(
            headless=True,
            args=['--disable-blink-features=AutomationControlled'],
        )
        page = browser.new_page()
        page.set_extra_http_headers({
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
        })

        for url in urls:
            try:
                print(f"   尝试：{url}")
                page.goto(url, wait_until="domcontentloaded", timeout=30000)
                page.wait_for_timeout(8000)  # let JS-rendered content settle

                text = page.inner_text("body")
                print(f"   页面长度：{len(text)}")

                # A single check suffices: any text containing "展览" also
                # contains "展" (the original `or "展" in text` was redundant).
                if "展" in text:
                    for line in text.split("\n"):
                        line = line.strip()
                        # Heuristic: exhibition names mention "展" and have a
                        # plausible length; drop known section headers.
                        if ("展" in line and 5 < len(line) < 80
                                and not any(s in line for s in skip_phrases)):
                            exhibitions.append({
                                "title": line,
                                "venue": "中国国家博物馆",
                                "area": "东城区",
                                "source": "国家博物馆官网",
                                "price": "免费（需预约）"
                            })

                if exhibitions:
                    break  # first URL that yields results wins

            except Exception as e:
                # Best-effort crawler: log and fall through to the next URL.
                print(f"   失败：{e}")

        browser.close()

    # De-duplicate by title, preserving first-seen order.  Every appended
    # title already contains "展", so no second content check is needed.
    seen = set()
    unique = []
    for ex in exhibitions:
        if ex["title"] not in seen:
            seen.add(ex["title"])
            unique.append(ex)

    print(f"   ✓ 获取 {len(unique)} 个展览")
    return unique


def crawl_capital_museum():
    """Capital Museum -- try several candidate URLs until one responds.

    Returns:
        list[dict]: records with ``title``/``venue``/``area``/``source``
        keys; empty list if every URL fails or yields no usable text.
    """
    print("\n【首都博物馆】尝试多个 URL...")
    results = []

    candidate_urls = [
        "http://www.capitalmuseum.org.cn/",
        "https://www.capitalmuseum.org.cn/",
        "http://www.capitalmuseum.com.cn/",
        "https://www.capitalmuseum.com.cn/",
    ]

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        page = browser.new_page()

        for candidate in candidate_urls:
            try:
                print(f"   尝试：{candidate}")
                page.goto(candidate, wait_until="domcontentloaded", timeout=20000)
                page.wait_for_timeout(5000)

                body_text = page.inner_text("body")
                print(f"   页面长度：{len(body_text)}")

                # Anything shorter is assumed to be an error/stub page.
                if len(body_text) > 100:
                    for raw_line in body_text.split("\n"):
                        stripped = raw_line.strip()
                        # Heuristic filter: plausible-length lines mentioning "展".
                        if "展" in stripped and 5 < len(stripped) < 60:
                            results.append({
                                "title": stripped,
                                "venue": "首都博物馆",
                                "area": "西城区",
                                "source": "首都博物馆官网"
                            })
                    if results:
                        break  # stop at the first URL that produced data

            except Exception as err:
                # Best-effort: log the failure and try the next URL.
                print(f"   失败：{err}")

        browser.close()

    print(f"   ✓ 获取 {len(results)} 个展览")
    return results


def crawl_beijing_museum_bureau():
    """Beijing Municipal Cultural Heritage Bureau -- crawl via jina.ai reader.

    Fetches a text rendering of the bureau's homepage through the r.jina.ai
    proxy and extracts lines that look like exhibition titles.

    Returns:
        list[dict]: records with ``title``/``source`` keys; empty list on
        any failure (network error, HTTP error, missing ``requests``).
    """
    print("\n【北京市文物局】尝试爬取...")
    exhibitions = []

    try:
        import requests  # local import keeps the dependency optional
        url = "https://r.jina.ai/http://wwj.beijing.gov.cn/"
        response = requests.get(url, timeout=30)
        # Fail fast on HTTP errors instead of parsing an error page as content
        # (the original silently scanned 404/5xx bodies).
        response.raise_for_status()
        text = response.text

        print(f"   页面长度：{len(text)}")

        if "展览" in text or "博物馆" in text:
            for line in text.split("\n"):
                line = line.strip()
                # Heuristic: plausible-length lines mentioning "展".
                if "展" in line and 5 < len(line) < 80:
                    exhibitions.append({
                        "title": line,
                        "source": "北京市文物局官网"
                    })

        print(f"   ✓ 获取 {len(exhibitions)} 条信息")
    except Exception as e:
        # Best-effort crawler: any failure is logged, empty list returned.
        print(f"   ✗ 失败：{e}")

    return exhibitions


if __name__ == "__main__":
    print("=" * 60)
    print("继续爬取剩余数据源 - P2 任务")
    print("=" * 60)
    
    all_exhibitions = []
    
    # 国家博物馆
    chnmuseum = crawl_chnmuseum_detail()
    all_exhibitions.extend(chnmuseum)
    
    time.sleep(2)
    
    # 首都博物馆
    capital = crawl_capital_museum()
    all_exhibitions.extend(capital)
    
    time.sleep(1)
    
    # 北京市文物局
    bureau = crawl_beijing_museum_bureau()
    all_exhibitions.extend(bureau)
    
    # 保存结果
    with open("data/remaining_exhibitions.json", "w", encoding="utf-8") as f:
        json.dump({
            "chnmuseum": chnmuseum,
            "capital": capital,
            "bureau": bureau,
            "total": len(all_exhibitions)
        }, f, ensure_ascii=False, indent=2)
    
    print("\n" + "=" * 60)
    print(f"总计：{len(all_exhibitions)} 个展览")
    print("=" * 60)
