File size: 4,605 Bytes
7277266
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
# app.py

import streamlit as st
from playwright.sync_api import sync_playwright, Error
from bs4 import BeautifulSoup
import pandas as pd
import time

# --- Page configuration ---
st.set_page_config(page_title="实时电影场次查询", page_icon="🎬")
st.title("🎬 实时电影场次查询")

# --- Playwright scraping function ---
# Streamlit caches the result for 10 minutes so refreshes don't re-scrape.
@st.cache_data(ttl=600)
def fetch_movie_data_live(url: str) -> pd.DataFrame:
    """Scrape and parse live movie showtimes from the given cinema page.

    Launches a headless Chromium browser via Playwright, waits for the
    showtime list container to render, then parses the page HTML with
    BeautifulSoup.

    Args:
        url: The cinema page URL to scrape.

    Returns:
        A DataFrame with one row per showtime (movie name, viewing date,
        start/end time, language, hall), or an empty DataFrame when
        nothing could be fetched or parsed.
    """
    st.info("正在启动后台浏览器,实时抓取数据,请稍候...")
    all_showtimes_data = []

    try:
        with sync_playwright() as p:
            # Chromium is expected to be pre-installed in the Docker image.
            browser = p.chromium.launch(headless=True)
            page = browser.new_page()

            try:
                page.goto(url, timeout=60000)
                # Wait for the key container, then give client-side
                # rendering a moment to settle before grabbing the HTML.
                page.wait_for_selector('div.movie-list-container', timeout=30000)
                time.sleep(2)
                html_content = page.content()
            finally:
                # Always release the browser, even if navigation fails.
                browser.close()

        st.info("数据抓取完成,正在解析...")

        soup = BeautifulSoup(html_content, 'html.parser')
        movie_show_lists = soup.find_all('div', class_='show-list')

        if not movie_show_lists:
            st.error("解析失败:在页面中未找到电影列表。")
            return pd.DataFrame()

        for movie_section in movie_show_lists:
            movie_name_tag = movie_section.find('h2', class_='movie-name')
            # Use `is None`: a bs4 Tag's truthiness is its child count, so
            # `if not tag` would wrongly skip a present-but-empty tag.
            if movie_name_tag is None:
                continue
            movie_name = movie_name_tag.text.strip()

            date_tags = movie_section.find_all('span', class_='date-item')
            plist_containers = movie_section.find_all('div', class_='plist-container')

            for date_tag in date_tags:
                # Collapse internal whitespace in the date label.
                viewing_date = ' '.join(date_tag.text.strip().split())
                date_index = date_tag.get('data-index')

                # Match the showtime container to this date via data-index.
                correct_plist = next(
                    (c for c in plist_containers if c.get('data-index') == date_index),
                    None,
                )
                if correct_plist is None:
                    continue

                tbody = correct_plist.find('tbody')
                if tbody is None:
                    # Guard: without this, a missing <tbody> raises an
                    # AttributeError that the `except Error` below does
                    # not catch, crashing the app.
                    continue

                for show_row in tbody.find_all('tr'):
                    try:
                        start_time = show_row.find('span', class_='begin-time').text.strip()
                        end_time_raw = show_row.find('span', class_='end-time').text.strip()
                        end_time = end_time_raw.replace('散场', '')
                        language = show_row.find('span', class_='lang').text.strip()
                        hall = show_row.find('span', class_='hall').text.strip()

                        all_showtimes_data.append({
                            '电影名称': movie_name,
                            '观影日期': viewing_date,
                            '开始时间': start_time,
                            '结束时间': end_time,
                            '语言版本': language,
                            '影厅': hall,
                        })
                    except AttributeError:
                        # A row missing any expected <span> is skipped.
                        continue

        if not all_showtimes_data:
            return pd.DataFrame()

        return pd.DataFrame(all_showtimes_data)

    except Error as e:
        st.error(f"Playwright 在服务器上运行时出错: {e}")
        st.error("这可能是由于Hugging Face服务器资源临时紧张或目标网站反爬虫策略导致。请稍后刷新重试。")
        return pd.DataFrame()


# --- Main application logic ---
cinema_url = "https://www.maoyan.com/cinema/15050?poi=97785807"
df = fetch_movie_data_live(cinema_url)

if df.empty:
    st.warning("未能加载到任何场次信息。")
else:
    st.success("实时数据加载成功!")

    # Two-step drill-down UI: pick a movie first, then a date.
    movie_options = df['电影名称'].unique()
    selected_movie = st.selectbox("请选择电影:", movie_options)

    if selected_movie:
        st.subheader(f"《{selected_movie}》的场次信息")
        movie_rows = df[df['电影名称'] == selected_movie]

        date_options = movie_rows['观影日期'].unique()
        selected_date = st.selectbox("请选择日期:", date_options)

        if selected_date:
            # Drop the columns already fixed by the two selections.
            display_rows = (
                movie_rows[movie_rows['观影日期'] == selected_date]
                .drop(columns=['电影名称', '观影日期'])
                .reset_index(drop=True)
            )
            st.dataframe(display_rows, use_container_width=True)