import os

# Install runtime dependencies at startup (in a Hugging Face Space these would
# normally be declared in requirements.txt instead of installed here).
os.system('pip install transformers')
os.system('pip install gradio')
os.system('pip install requests')

import requests
import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline

# Inference client for chat completion
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Question-answering pipeline (loaded here but not used by the current UI flow)
qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")

def respond(message, system_message, max_tokens, temperature, top_p):
    """Stream a chat completion for a single user message and return the full text."""
    messages = [{"role": "system", "content": system_message}]
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream the completion and accumulate the delta tokens into one string
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token is not None:
            response += token
    return response
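
# Example of a direct call (hypothetical prompt values), assuming the hosted
# zephyr-7b-beta Inference API endpoint is reachable:
#   respond(
#       "Summarize the key facts of the case.",
#       "You are a neutral court clerk.",
#       max_tokens=256, temperature=0.7, top_p=0.95,
#   )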

def generate_prosecution_argument(case_details):
    system_message = (
        "You are an expert Prosecution Attorney. Provide the best and most detailed arguments "
        "to prosecute the case based on the given case details. Include thorough analysis, "
        "evidence presentation, and any relevant legal precedents."
    )
    arguments = respond(case_details, system_message, max_tokens=1024, temperature=0.7, top_p=0.95)
    return arguments

def generate_defense_argument(prosecution_argument):
    system_message = (
        "You are an expert Defense Attorney. Provide the best and most detailed arguments "
        "to defend the case in response to the prosecution's argument given below. Include "
        "thorough analysis, evidence presentation, and any relevant legal precedents."
    )
    arguments = respond(prosecution_argument, system_message, max_tokens=1024, temperature=0.7, top_p=0.95)
    return arguments
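
# Example of the intended end-to-end chain (hypothetical case text): the defense
# argument is generated from the prosecution's output, not from the raw case details:
#   prosecution = generate_prosecution_argument("Defendant is accused of theft; ...")
#   defense = generate_defense_argument(prosecution)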

# Custom CSS for a clean layout
custom_css = """
body {
    background-color: #ffffff;
    color: #000000;
    font-family: Arial, sans-serif;
}
.gradio-container {
    max-width: 1000px;
    margin: 0 auto;
    padding: 20px;
    background-color: #ffffff;
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
}
.gr-button {
    background-color: #ffffff !important;
    border-color: #ffffff !important;
    color: #000000 !important;
    margin: 5px;
}
.gr-button:hover {
    background-color: #ffffff !important;
    border-color: #004085 !important;
}
.gr-input, .gr-textbox, .gr-slider, .gr-markdown, .gr-chatbox {
    border-radius: 4px;
    border: 1px solid #ced4da;
    background-color: #ffffff !important;
    color: #000000 !important;
}
.gr-input:focus, .gr-textbox:focus, .gr-slider:focus {
    border-color: #ffffff;
    outline: 0;
    box-shadow: 0 0 0 0.2rem rgba(255, 255, 255, 1.0);
}
#flagging-button {
    display: none;
}
footer {
    display: none;
}
.chatbox .chat-container .chat-message {
    background-color: #ffffff !important;
    color: #000000 !important;
}
.chatbox .chat-container .chat-message-input {
    background-color: #ffffff !important;
    color: #000000 !important;
}
.gr-markdown {
    background-color: #ffffff !important;
    color: #000000 !important;
}
.gr-markdown h1, .gr-markdown h2, .gr-markdown h3, .gr-markdown h4, .gr-markdown h5, .gr-markdown h6, .gr-markdown p, .gr-markdown ul, .gr-markdown ol, .gr-markdown li {
    color: #000000 !important;
}
.score-box {
    width: 60px;
    height: 60px;
    display: flex;
    align-items: center;
}
"""

# Gradio Interface
with gr.Blocks(css=custom_css) as demo:
    with gr.Column():
        gr.Markdown("# Court Argument Simulation\n### Provide Initial Case Details")
        case_details = gr.Textbox(label="Case Details", lines=5, placeholder="Enter initial case details here...")
        evidence = gr.Textbox(label="Evidence", lines=3, placeholder="Enter evidence details here...")
        witness_statements = gr.Textbox(label="Witness Statements", lines=3, placeholder="Enter witness statements here...")
        legal_references = gr.Textbox(label="Legal References", lines=3, placeholder="Enter legal references here...")

        prosecution_argument = gr.Textbox(label="Prosecution's Argument", lines=10, placeholder="Prosecution's Argument...")
        defense_argument = gr.Textbox(label="Defense's Argument", lines=10, placeholder="Defense's Argument...")
        
        def run_simulation(case_details, evidence, witness_statements, legal_references):
            full_case_details = f"Case Details: {case_details}\n\nEvidence: {evidence}\n\nWitness Statements: {witness_statements}\n\nLegal References: {legal_references}"
            prosecution_arg = generate_prosecution_argument(full_case_details)
            defense_arg = generate_defense_argument(prosecution_arg)
            return prosecution_arg, defense_arg
        
        simulate_btn = gr.Button("Start Argument Simulation")
        simulate_btn.click(run_simulation, inputs=[case_details, evidence, witness_statements, legal_references], outputs=[prosecution_argument, defense_argument])
        
        clear_btn = gr.Button("Clear")
        clear_btn.click(lambda: ("", "", "", "", "", ""), None, [case_details, evidence, witness_statements, legal_references, prosecution_argument, defense_argument])

demo.launch()