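"""リアルタイム姿勢分析アプリ (Real-time Posture Analyzer)

Streams webcam video through streamlit-webrtc, runs MediaPipe Pose on each
frame, draws the detected skeleton, and reports simple posture heuristics
(forward head, rounded shoulders, side tilt, pelvic tilt) in Japanese and
English.

Dependencies (pip): streamlit, streamlit-webrtc, opencv-python, mediapipe, av
"""
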
import threading
import time

import av
import cv2
import mediapipe as mp
import streamlit as st
from streamlit_webrtc import webrtc_streamer, WebRtcMode

# Initialize MediaPipe Pose
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

# Session state holds UI-only flags (main script thread)
if 'camera_access' not in st.session_state:
    st.session_state.camera_access = False

# The webrtc video callback runs on a worker thread that has no Streamlit
# script context, so st.session_state cannot be used there. The latest report
# is instead passed back through a lock-protected container.
status_lock = threading.Lock()
shared_status = {"text": "カメラを起動してください (Please enable camera)"}

# One shared Pose instance: rebuilding the model on every frame is expensive.
# Pose.process is not re-entrant, so calls are serialized with a lock.
pose_lock = threading.Lock()
pose_detector = mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
    model_complexity=1
)

def analyze_posture(image):
    """Analyze posture on a BGR frame; return the annotated frame and a status string."""
    # MediaPipe expects RGB input, while av/OpenCV deliver BGR frames
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    with pose_lock:
        results = pose_detector.process(image_rgb)

    annotated_image = image.copy()
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
            mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2)
        )
        posture_status = check_posture(results.pose_landmarks)
    else:
        posture_status = "キーポイントが検出されませんでした (Key points not detected)"

    return annotated_image, posture_status

def check_posture(landmarks):
    """Analyze posture and return a markdown report.

    MediaPipe landmark coordinates are normalized to [0, 1] with the origin at
    the top-left of the frame (y grows downward), so every threshold below
    works in normalized units and no pixel dimensions are needed.
    """
    
    # Get key points
    left_shoulder = landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER]
    right_shoulder = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]
    left_hip = landmarks.landmark[mp_pose.PoseLandmark.LEFT_HIP]
    right_hip = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_HIP]
    left_ear = landmarks.landmark[mp_pose.PoseLandmark.LEFT_EAR]
    right_ear = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_EAR]
    nose = landmarks.landmark[mp_pose.PoseLandmark.NOSE]
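    # Each landmark also carries a `visibility` score; the heuristics below
    # assume the upper body is fully in frame and do not check it.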
    
    # Rough sitting heuristic: when seated at a desk, the hips appear nearly
    # level with the shoulders in the frame
    sitting = left_hip.y < left_shoulder.y + 0.1 or right_hip.y < right_shoulder.y + 0.1
    
    messages = []
    
    # Check for forward head posture ("text neck"): ears and nose dropping to
    # or below shoulder level
    head_forward = (left_ear.y > left_shoulder.y + 0.1 or right_ear.y > right_shoulder.y + 0.1) and \
                   (nose.y > left_shoulder.y or nose.y > right_shoulder.y)
    if head_forward:
        messages.append("• 頭が前に傾いています (テキストネック) (Head tilted forward - text neck)")
    
    # Check for rounded shoulders: shoulders drifting horizontally past the
    # hips (a rough proxy that works best facing the camera)
    shoulders_rounded = left_shoulder.x > left_hip.x + 0.05 or right_shoulder.x < right_hip.x - 0.05
    if shoulders_rounded:
        messages.append("• 肩が丸まっています (Rounded shoulders)")
    
    # Check for side tilt
    shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
    hip_diff = abs(left_hip.y - right_hip.y)
    if shoulder_diff > 0.05 or hip_diff > 0.05:
        messages.append("• 体が横に傾いています (Asymmetrical posture)")
    
    # Check pelvis position
    if sitting and (left_hip.y < left_shoulder.y + 0.15 or right_hip.y < right_shoulder.y + 0.15):
        messages.append("• 骨盤が前に傾いています (Pelvis tilted forward)")
    
    # Generate final report
    if messages:
        report = [
            f"**{'座り姿勢' if sitting else '立ち姿勢'} - 問題が検出されました ({'Sitting' if sitting else 'Standing'} - problems detected):**",
            *messages,
            "\n**アドバイス (Recommendations):**",
            "• 頭をまっすぐに保ち、耳が肩の上にくるように (Keep your head straight, ears over shoulders)",
            "• 肩を後ろに引き下げて (Pull shoulders back and down)",
            "• 背中をまっすぐに保ち、横に傾かないように (Keep your back straight, avoid side tilting)",
            "• 座るときは坐骨で支えるように (When sitting, support your weight on sitting bones)"
        ]
    else:
        report = [
            f"**完璧な姿勢です ({'座り姿勢' if sitting else '立ち姿勢'})! (Perfect posture {'sitting' if sitting else 'standing'})**",
            "すべてのキーポイントが正しい位置にあります (All key points are in correct position)",
            "\n**アドバイス (Advice):**",
            "• 一日中姿勢に気を付けてください (Continue to monitor your posture throughout the day)"
        ]
    
    return "\n\n".join(report)

def video_frame_callback(frame):
    """Process each video frame (runs on the webrtc worker thread)."""
    img = frame.to_ndarray(format="bgr24")

    try:
        analyzed_img, posture_status = analyze_posture(img)
        # No st.session_state or st.* UI calls here: this thread has no
        # Streamlit script context. Publish the status via the shared container.
        with status_lock:
            shared_status["text"] = posture_status
        return av.VideoFrame.from_ndarray(analyzed_img, format="bgr24")
    except Exception as e:
        with status_lock:
            shared_status["text"] = f"処理エラー: {e} (Processing error)"
        return av.VideoFrame.from_ndarray(img, format="bgr24")

def main():
    st.set_page_config(layout="wide")
    st.title("📷 リアルタイム姿勢分析アプリ (Real-time Posture Analyzer)")
    
    # Two columns: camera view on the left, live report on the right
    col1, col2 = st.columns([2, 1])
    webrtc_ctx = None
    
    with col1:
        st.header("カメラビュー (Camera View)")
        
        if not st.session_state.camera_access:
            st.warning("⚠️ カメラを使用するには許可が必要です (Camera access requires permission)")
            if st.button("カメラアクセスを許可 (Allow camera access)"):
                st.session_state.camera_access = True
                st.rerun()
        else:
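            # SENDRECV both uploads the webcam stream and returns the annotated
            # frames; the public Google STUN server handles NAT traversal only
            # (no TURN relay is configured, so strict NATs may still fail)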
            webrtc_ctx = webrtc_streamer(
                key="posture-analysis",
                mode=WebRtcMode.SENDRECV,
                video_frame_callback=video_frame_callback,
                media_stream_constraints={"video": True, "audio": False},
                async_processing=True,
                rtc_configuration={
                    "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
                }
            )
            
    with col2:
        st.header("姿勢分析結果 (Posture Analysis)")
        status_placeholder = st.empty()

        if webrtc_ctx is not None and webrtc_ctx.state.playing:
            # The callback thread cannot update Streamlit widgets, and the
            # script only reruns on interaction, so poll the shared status
            # while the stream is live to keep the report current
            while webrtc_ctx.state.playing:
                with status_lock:
                    status_placeholder.markdown(shared_status["text"])
                time.sleep(0.5)
            status_placeholder.markdown("カメラが停止しました (Camera stopped)")
        else:
            with status_lock:
                status_placeholder.markdown(shared_status["text"])

if __name__ == "__main__":
    main()