fizzarif7 committed
Commit ce4fe6e · verified · 1 Parent(s): 6b3a032

Upload 8 files

Files changed (9)
  1. .gitattributes +1 -0
  2. ibtehaj dataset.parquet +3 -0
  3. index.html +50 -0
  4. legal.py +261 -0
  5. man.jpg +0 -0
  6. pdf_data.json +3 -0
  7. requirement.txt +12 -0
  8. script.js +106 -0
  9. style.css +154 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pdf_data.json filter=lfs diff=lfs merge=lfs -text
ibtehaj dataset.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcf30ef8425b78d6420c65065a161542a0a51daf3fcf7e26073f82daa1f958b7
+ size 25068229
index.html ADDED
@@ -0,0 +1,50 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+   <meta charset="UTF-8" />
+   <title>Legal Assistant</title>
+   <link rel="stylesheet" href="style.css" />
+ </head>
+ <body>
+   <h1 class="title">Legal Assistant</h1>
+
+   <div class="container">
+     <div class="left-panel">
+       <img src="man.jpg" alt="Profile" />
+     </div>
+
+     <div class="center-panel">
+       <div class="top-label">
+         <textarea id="topLabel" readonly>Dictate your legal question!</textarea>
+       </div>
+
+       <div class="qa-section">
+         <div class="input-area">
+           <label>Ask your legal question:</label>
+           <textarea id="question" rows="10"></textarea>
+         </div>
+         <div class="output-area">
+           <label>Answer:</label>
+           <textarea id="answer" rows="10" readonly></textarea>
+         </div>
+       </div>
+
+       <div class="button-panel">
+         <button onclick="handleDictate()">🎙 Dictate</button>
+         <button onclick="generateAnswer()">🧾 Generate Response</button>
+         <button onclick="readAloud()">🔊 Read Aloud</button>
+         <button onclick="uploadMP3()">🎵 Upload MP3</button>
+         <button onclick="saveQA()">🖨 Save/Print</button>
+         <button onclick="resetApp()">🧹 Reset</button>
+       </div>
+     </div>
+
+     <div class="right-panel">
+       <label>Conversation History:</label>
+       <textarea id="history" readonly></textarea>
+     </div>
+   </div>
+
+   <script src="script.js"></script>
+ </body>
+ </html>
legal.py ADDED
@@ -0,0 +1,261 @@
+ from flask import Flask, request, jsonify, send_from_directory
+ import speech_recognition as sr
+ import threading
+ import datetime
+ import pyttsx3
+ from langdetect import detect
+ from huggingface_hub import login
+ from sentence_transformers import SentenceTransformer
+ from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM
+ import faiss
+ import numpy as np
+ import pandas as pd
+ import json
+ import webbrowser
+ from pydub import AudioSegment
+ import os
+ from werkzeug.utils import secure_filename
+ import tempfile
+
+ app = Flask(__name__, static_folder='.')  # Serve static files from the current directory
+
+ # Load the Hugging Face API key from an environment variable
+ hf_token = os.environ.get("API_KEY")
+ if not hf_token:
+     # Fall back to a .env file if the variable is not set in the environment
+     from dotenv import load_dotenv
+     load_dotenv()
+     hf_token = os.environ.get("API_KEY")
+     if not hf_token:
+         raise ValueError("Hugging Face API key not found. Please set 'API_KEY' as an environment variable or in a .env file.")
+
+ login(token=hf_token)
+
+ # QA model
+ qa_model = AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2")
+ qa_tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
+ qa_pipeline = pipeline("question-answering", model=qa_model, tokenizer=qa_tokenizer)
+
+ # Summarization model
+ summarizer_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
+ summarizer_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
+ summarizer_pipeline = pipeline("summarization", model=summarizer_model, tokenizer=summarizer_tokenizer)
+
+ embed_model = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L6-v2")
+
+ # Load the Parquet dataset
+ df_parquet = pd.read_parquet("ibtehaj dataset.parquet")
+ corpus_parquet = df_parquet["text"].dropna().tolist()
+
+ # Load the JSON dataset
+ with open("pdf_data.json", "r", encoding="utf-8") as f:
+     json_data = json.load(f)
+
+ # Extract text from the JSON entries
+ corpus_json = []
+ for entry in json_data:
+     if isinstance(entry, dict) and "text" in entry:
+         text = entry["text"].strip()
+         if text:
+             corpus_json.append(text)
+
+ # Combine both corpora
+ corpus = corpus_parquet + corpus_json
+
+ # Compute embeddings
+ embeddings = embed_model.encode(corpus, show_progress_bar=True, batch_size=16)
+
+ # Build the FAISS index
+ index = faiss.IndexFlatL2(embeddings.shape[1])
+ index.add(np.array(embeddings))
+
+ def rag_answer(question: str, k: int = 3) -> str:
+     q_emb = embed_model.encode([question])
+     D, I = index.search(q_emb, k)
+     context = "\n\n".join(corpus[i] for i in I[0] if 0 <= i < len(corpus))
+
+     if not context.strip():
+         return "Context is empty. Try rephrasing the question."
+
+     try:
+         result = qa_pipeline(question=question, context=context)
+         raw_answer = result.get("answer", "No answer found.")
+
+         # Summarize if the answer is too long (>40 words or 300 characters)
+         if len(raw_answer.split()) > 40 or len(raw_answer) > 300:
+             summary = summarizer_pipeline(raw_answer, max_length=50, min_length=15, do_sample=False)
+             summarized_answer = summary[0]['summary_text']
+         else:
+             summarized_answer = raw_answer
+
+         return f"Answer: {summarized_answer}\n\n[Context Used]:\n{context[:500]}..."
+     except Exception as e:
+         return f"Error: {e}"
+
+ # Global TTS engine (global so playback can be stopped)
+ tts_engine = None
+
+ def init_tts_engine():
+     global tts_engine
+     if tts_engine is None:
+         tts_engine = pyttsx3.init()
+         tts_engine.setProperty('rate', 150)
+         tts_engine.setProperty('volume', 1.0)
+         voices = tts_engine.getProperty('voices')
+         for v in voices:
+             if "zira" in v.name.lower() or "female" in v.name.lower():
+                 tts_engine.setProperty('voice', v.id)
+                 break
+
+ init_tts_engine()
+
+ # Global state (simplified for the web context)
+ conversation_history = []
+ last_question_text = ""
+ last_answer_text = ""
+
+ @app.route('/')
+ def serve_index():
+     return send_from_directory('.', 'index.html')
+
+ @app.route('/<path:path>')
+ def serve_static_files(path):
+     return send_from_directory('.', path)
+
+ @app.route('/answer', methods=['POST'])
+ def generate_answer_endpoint():
+     global last_question_text, last_answer_text, conversation_history
+     data = request.get_json()
+     question = data.get('question', '').strip()
+
+     if not question:
+         return jsonify({"answer": "Please provide a question."}), 400
+
+     last_question_text = question
+     timestamp = datetime.datetime.now().strftime("%H:%M:%S")
+     conversation_history.append({"role": "user", "time": timestamp, "text": question})
+
+     ans = rag_answer(question)
+     last_answer_text = ans
+     conversation_history.append({"role": "bot", "time": timestamp, "text": ans})
+
+     return jsonify({"answer": ans})
+
+ @app.route('/read-aloud', methods=['POST'])
+ def read_aloud_endpoint():
+     data = request.get_json()
+     text_to_read = data.get('text', '').strip()
+
+     if not text_to_read:
+         return jsonify({"status": "No text provided to read."}), 400
+
+     temp_audio_path = None
+     try:
+         # Write the speech audio to a temporary file
+         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
+             temp_audio_path = fp.name
+
+         tts_engine.save_to_file(text_to_read, temp_audio_path)
+         tts_engine.runAndWait()
+
+         # The frontend's SpeechSynthesis API already handles playback, so this
+         # endpoint only matters for server-side TTS; a real app would stream
+         # the file back with Flask's send_file instead of discarding it.
+         return jsonify({"status": "TTS audio generated (server-side)."})
+     except Exception as e:
+         return jsonify({"status": f"Error during TTS: {str(e)}"}), 500
+     finally:
+         if temp_audio_path and os.path.exists(temp_audio_path):
+             os.remove(temp_audio_path)
+
+
+ @app.route('/upload-mp3', methods=['POST'])
+ def upload_mp3_endpoint():
+     global last_question_text, last_answer_text, conversation_history
+
+     if 'file' not in request.files:
+         return jsonify({"message": "No file part"}), 400
+     file = request.files['file']
+     if file.filename == '':
+         return jsonify({"message": "No selected file"}), 400
+     if file:
+         filename = secure_filename(file.filename)
+         # Save the upload and its WAV conversion in a temporary directory
+         with tempfile.TemporaryDirectory() as tmpdir:
+             mp3_path = os.path.join(tmpdir, filename)
+             file.save(mp3_path)
+
+             wav_path = os.path.join(tmpdir, filename.replace(".mp3", ".wav"))
+             try:
+                 sound = AudioSegment.from_mp3(mp3_path)
+                 sound.export(wav_path, format="wav")
+             except Exception as e:
+                 return jsonify({"message": f"Error converting MP3 to WAV: {e}"}), 500
+
+             try:
+                 recognizer = sr.Recognizer()
+                 with sr.AudioFile(wav_path) as src:
+                     audio = recognizer.record(src)
+                 text = recognizer.recognize_google(audio)
+             except sr.UnknownValueError:
+                 return jsonify({"message": "Speech not understood."}), 400
+             except sr.RequestError as e:
+                 return jsonify({"message": f"Speech recognition service error: {e}"}), 500
+
+             # Store the transcription temporarily (could be handled differently)
+             transcript_path = os.path.join(tmpdir, "transcription.txt")
+             with open(transcript_path, "w", encoding="utf-8") as f:
+                 f.write(text)
+
+             # Option: summarize or generate an answer from the transcription.
+             # For this web integration, return it and let the frontend decide.
+             return jsonify({
+                 "message": "MP3 transcribed successfully.",
+                 "transcription": text
+             })
+
+ @app.route('/summarize', methods=['POST'])
+ def summarize_endpoint():
+     data = request.get_json()
+     text_to_summarize = data.get('text', '').strip()
+
+     if not text_to_summarize:
+         return jsonify({"summary": "No text provided for summarization."}), 400
+
+     def chunk_text(text, max_chunk_size=4000):
+         sentences = text.split(". ")
+         chunks = []
+         current_chunk = ""
+         for sentence in sentences:
+             # Add the sentence length plus 2 for ". "
+             if len(current_chunk) + len(sentence) + 2 < max_chunk_size:
+                 current_chunk += sentence + ". "
+             else:
+                 chunks.append(current_chunk.strip())
+                 current_chunk = sentence + ". "
+         if current_chunk:
+             chunks.append(current_chunk.strip())
+         return chunks
+
+     try:
+         chunks = chunk_text(text_to_summarize)
+         summaries = [
+             summarizer_pipeline(chunk, max_length=150, min_length=50, do_sample=False)[0]["summary_text"]
+             for chunk in chunks
+         ]
+         final_input = " ".join(summaries)
+         final_summary = summarizer_pipeline(final_input, max_length=150, min_length=50, do_sample=False)[0]["summary_text"]
+         return jsonify({"summary": final_summary})
+     except Exception as e:
+         return jsonify({"summary": f"Error during summarization: {e}"}), 500
+
+ @app.route('/history', methods=['GET'])
+ def get_history():
+     return jsonify({"history": conversation_history})
+
+ if __name__ == '__main__':
+     # The datasets must sit in the same directory as this script:
+     #   ibtehaj dataset.parquet
+     #   pdf_data.json
+     #   man.jpg (for the image)
+     app.run(debug=True)  # debug=True enables automatic reloading on code changes
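
Note: as committed, legal.py re-embeds the full corpus and rebuilds the FAISS index on every startup, which is slow given the ~25 MB Parquet and ~47 MB JSON datasets. A minimal startup-cache sketch, not part of this commit: the file name corpus.faiss is hypothetical, while faiss.write_index and faiss.read_index are the standard faiss persistence calls.

import os
import faiss
import numpy as np

INDEX_PATH = "corpus.faiss"  # hypothetical cache file name

def load_or_build_index(corpus, embed_model):
    # Reuse a cached index when one exists on disk
    if os.path.exists(INDEX_PATH):
        return faiss.read_index(INDEX_PATH)
    # Otherwise embed the corpus once and cache the result for the next run
    embeddings = embed_model.encode(corpus, show_progress_bar=True, batch_size=16)
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(np.array(embeddings))
    faiss.write_index(index, INDEX_PATH)
    return index

Note that this only pays off because the corpus is static within a commit; the cache file must be deleted whenever the datasets change.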
man.jpg ADDED
pdf_data.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8556bdcc80fe496120c4f527e17d032dbaa699358026007a107545b9b71e2944
+ size 46924676
requirement.txt ADDED
@@ -0,0 +1,12 @@
+ flask
+ faiss-cpu
+ sentence-transformers
+ transformers
+ huggingface_hub
+ pyttsx3
+ speechrecognition
+ pydub
+ pandas
+ langdetect
+ numpy
+ python-dotenv
script.js ADDED
@@ -0,0 +1,106 @@
+ // Handle Generate button click (called from the button's onclick in index.html)
+ async function generateAnswer() {
+   const question = document.getElementById("question").value.trim();
+   const responseBox = document.getElementById("answer");
+   const historyBox = document.getElementById("history");
+
+   if (!question) {
+     responseBox.value = "Please enter your legal question.";
+     return;
+   }
+
+   responseBox.value = "Generating...";
+
+   try {
+     const res = await fetch("/answer", {
+       method: "POST",
+       headers: { "Content-Type": "application/json" },
+       body: JSON.stringify({ question })
+     });
+
+     const data = await res.json();
+     const answer = data.answer || "No answer received.";
+
+     responseBox.value = answer;
+
+     // Update top label
+     document.getElementById("topLabel").value = question;
+
+     // Update history
+     historyBox.value += `\n\nYou: ${question}\nBot: ${answer}`;
+   } catch (err) {
+     responseBox.value = `Error: ${err}`;
+   }
+ }
+
+ // Handle Reset button
+ function resetApp() {
+   document.getElementById("question").value = "";
+   document.getElementById("answer").value = "";
+   document.getElementById("topLabel").value = "Dictate your legal question!";
+ }
+
+ // Handle Read Aloud
+ function readAloud() {
+   const text = document.getElementById("answer").value;
+   if (!text.trim()) return;
+   const synth = window.speechSynthesis;
+   const utterance = new SpeechSynthesisUtterance(text);
+   synth.speak(utterance);
+ }
+
+ // Handle Save
+ function saveQA() {
+   const question = document.getElementById("question").value.trim();
+   const answer = document.getElementById("answer").value.trim();
+
+   if (!question || !answer) {
+     alert("Nothing to save.");
+     return;
+   }
+
+   const blob = new Blob([`Question:\n${question}\n\nAnswer:\n${answer}`], { type: "text/plain" });
+   const link = document.createElement("a");
+   link.href = URL.createObjectURL(blob);
+   link.download = "QnA.txt";
+   document.body.appendChild(link);
+   link.click();
+   document.body.removeChild(link);
+ }
+
+ // Handle Upload MP3 (disabled in the web version)
+ function uploadMP3() {
+   alert("📁 MP3 upload is only supported in the desktop assistant.");
+ }
+
+ // Handle Dictate (Web Speech API)
+ function handleDictate() {
+   if (!('webkitSpeechRecognition' in window)) {
+     alert("Speech recognition not supported in this browser. Use Chrome.");
+     return;
+   }
+
+   const recognition = new webkitSpeechRecognition();
+   recognition.lang = "en-US";
+   recognition.interimResults = false;
+   recognition.maxAlternatives = 1;
+
+   document.getElementById("topLabel").value = "Listening... 🎙";
+
+   recognition.onresult = function (event) {
+     const transcript = event.results[0][0].transcript;
+     document.getElementById("question").value = transcript;
+     document.getElementById("topLabel").value = transcript;
+   };
+
+   recognition.onerror = function (event) {
+     console.error("Speech recognition error:", event.error);
+     document.getElementById("topLabel").value = "Could not recognize speech.";
+   };
+
+   recognition.onend = function () {
+     console.log("Speech recognition ended.");
+   };
+
+   recognition.start();
+ }
style.css ADDED
@@ -0,0 +1,154 @@
+ /* === GENERAL === */
+ body {
+   margin: 0;
+   font-family: Arial, sans-serif;
+   background-color: #b6edf0;
+ }
+
+ h1.title {
+   text-align: center;
+   font-size: 48px;
+   font-weight: bold;
+   color: #333;
+   margin-top: 20px;
+ }
+
+ /* === CONTAINER LAYOUT === */
+ .container {
+   display: flex;
+   flex-direction: row;
+   padding: 10px;
+   margin: 10px;
+   gap: 10px;
+ }
+
+ /* === LEFT PANEL (Image) === */
+ .left-panel {
+   background-color: #ffffff;
+   width: 240px;
+   padding: 10px;
+   border-radius: 20px;
+   text-align: center;
+   padding-top: 90px;
+ }
+
+ .left-panel img {
+   width: 220px;
+   height: 260px;
+   border-radius: 10px;
+   margin-top: 60px;
+ }
+
+ /* === CENTER PANEL === */
+ .center-panel {
+   flex-grow: 1;
+   background-color: #b0dde9;
+   border-radius: 20px;
+   padding: 10px;
+   display: flex;
+   flex-direction: column;
+   gap: 10px;
+ }
+
+ /* Top Center Label */
+ .top-label textarea {
+   width: 97%;
+   height: 100px;
+   font-size: 14px;
+   font-family: Arial, sans-serif;
+   background-color: #f0f0f0;
+   border-radius: 10px;
+   border: 1px solid #ccc;
+   padding: 12px;
+   resize: none;
+ }
+
+ /* Q&A Section */
+ .qa-section {
+   display: flex;
+   gap: 10px;
+ }
+
+ .input-area, .output-area {
+   flex: 1;
+   display: flex;
+   flex-direction: column;
+   gap: 5px;
+   background-color: #ffffff;
+   padding: 10px;
+   border-radius: 20px;
+ }
+
+ textarea {
+   resize: none;
+   font-family: Arial, sans-serif;
+   font-size: 14px;
+   padding: 10px;
+   border-radius: 10px;
+   border: 1px solid #ccc;
+   min-height: 180px;
+   background-color: #f9fbe7;
+ }
+
+ /* === BUTTON PANEL === */
+ .button-panel {
+   display: flex;
+   flex-wrap: wrap;
+   justify-content: space-between;
+   gap: 10px;
+   padding: 10px;
+   background-color: #ffffff;
+   border-radius: 20px;
+ }
+
+ .button-panel button {
+   flex: 1;
+   min-width: 140px;
+   height: 45px;
+   border: none;
+   font-size: 14px;
+   font-weight: bold;
+   border-radius: 25px;
+   cursor: pointer;
+   transition: 0.2s ease-in-out;
+ }
+
+ .button-panel button:hover {
+   transform: scale(1.05);
+ }
+
+ /* Individual button colors to match the Tkinter version */
+ .button-panel button:nth-child(1) { background-color: #80deea; color: black; }
+ .button-panel button:nth-child(2) { background-color: #c5e1a5; color: black; }
+ .button-panel button:nth-child(3) { background-color: #ffccbc; color: black; }
+ .button-panel button:nth-child(4) { background-color: #8ad5d5; color: black; }
+ .button-panel button:nth-child(5) { background-color: #ffe082; color: black; }
+ .button-panel button:nth-child(6) { background-color: #d7ccc8; color: black; }
+
+ /* === RIGHT PANEL (History) === */
+ .right-panel {
+   width: 300px;
+   background-color: #9ed4dc;
+   border-radius: 20px;
+   padding: 10px;
+   display: flex;
+   flex-direction: column;
+   gap: 10px;
+ }
+
+ .right-panel label {
+   font-weight: bold;
+ }
+
+ #history {
+   flex: 1;
+   height: 500px;
+   resize: none;
+   border-radius: 10px;
+   padding: 10px;
+   font-family: Consolas, monospace;
+   font-size: 13px;
+   border: 1px solid #ccc;
+   background-color: #ffffff;
+   overflow-y: auto;
+ }
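
A quick smoke test once the server is running via `python legal.py`: a sketch assuming the default Flask port 5000, the requests package, and a hypothetical question and MP3 file name.

import requests

BASE = "http://127.0.0.1:5000"

# Ask a question through the /answer endpoint; the first call can be slow
# while the models download and warm up, hence the generous timeout.
resp = requests.post(
    BASE + "/answer",
    json={"question": "What is the punishment for defamation?"},
    timeout=300,
)
print(resp.json()["answer"])

# Transcribe an MP3 through /upload-mp3 (sample.mp3 is a placeholder file)
with open("sample.mp3", "rb") as f:
    r = requests.post(BASE + "/upload-mp3", files={"file": f})
print(r.json().get("transcription"))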