cynnix69 commited on
Commit
9369350
·
verified ·
1 Parent(s): 655b384

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +146 -0
app.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ from openai import OpenAI
4
+ from dotenv import load_dotenv
5
+ import datetime
6
+ import tempfile
7
+
# Load environment variables from .env file (for local testing)
load_dotenv()

# Get API key from environment variables
# NOTE(review): no guard when the key is unset — OPENROUTER_API_KEY may be
# None here; confirm how the OpenAI client behaves with a None api_key.
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")

# Initialize OpenAI client with OpenRouter base URL
# (OpenRouter exposes an OpenAI-compatible API, so the stock client works.)
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)

# Permanent system prompt (your specific instructions).
# Prepended to every conversation in generate_response; not user-editable.
SYSTEM_PROMPT = """You are an **AI Code Generator**. Your task is to **write complete, production-ready code** with no explanations unless explicitly asked. Follow these rules:

1. **Always prioritize code** over text. If the user asks for a solution, respond ONLY with:
- A full code block (with imports, error handling, and tests if applicable).
- Brief comments in the code (no paragraphs).

2. **Never provide summaries, plans, or step-by-step guides** unless the user requests them with:
- "Explain..." or "How does this work?"

3. **Assume technical proficiency**: Skip introductory notes (e.g., "Here's how to...")."""

# List of available models: OpenRouter model identifiers shown in the UI
# dropdown (provider/model-name form).
AVAILABLE_MODELS = [
    "openai/gpt-3.5-turbo",
    "openai/gpt-4",
    "anthropic/claude-3-opus",
    "anthropic/claude-3-sonnet",
    "meta-llama/llama-3-70b-instruct",
    "google/gemini-pro",
    "deepseek/deepseek-chat-v3-0324:free"
]
42
+
def generate_response(message, model_name, temperature=0.7, history=None):
    """Generate an assistant reply for *message* via OpenRouter.

    Args:
        message: The user's current prompt; a falsy value short-circuits.
        model_name: OpenRouter model identifier (e.g. "openai/gpt-4").
        temperature: Sampling temperature forwarded to the API.
        history: Optional list of (user_msg, bot_msg) tuples from prior turns.

    Returns:
        The model's reply text, or an "Error: ..." string if the API call fails.
    """
    # Bug fix: the original returned a tuple ("", []) here while every other
    # path returns a str; the caller stores the result straight into the chat
    # history, so an empty submit corrupted it with a tuple.
    if not message:
        return ""

    # Always lead with the locked system prompt.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]

    # Replay prior turns so the model sees the full conversation.
    for user_msg, bot_msg in history or []:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})

    # Add current message last.
    messages.append({"role": "user", "content": message})

    # Keep the try body minimal: only the network call can reasonably raise.
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=temperature
        )
        return response.choices[0].message.content
    except Exception as e:
        # Surface the failure in the chat instead of crashing the UI.
        return f"Error: {str(e)}"
68
+
def save_chat_history(history):
    """Write the system prompt plus the chat transcript to a file for download.

    Args:
        history: List of (user_msg, bot_msg) tuples.

    Returns:
        Path of the written .txt transcript file.
    """
    parts = [f"System Prompt:\n{SYSTEM_PROMPT}\n\n", "Chat History:\n\n"]
    for user_msg, bot_msg in history:
        parts.append(f"User: {user_msg}\n")
        parts.append(f"Assistant: {bot_msg}\n\n")

    # Bug fix: the original computed this timestamped filename but never used
    # it — the download arrived as a random tmpXXXX.txt. Write under the
    # intended name in the system temp directory instead.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"code_generator_chat_{timestamp}.txt"
    path = os.path.join(tempfile.gettempdir(), filename)

    # Context manager + explicit encoding instead of manual open/close.
    with open(path, "w", encoding="utf-8") as fh:
        fh.write("".join(parts))

    return path
85
+
# Gradio UI: layout and event wiring. Statement order here determines the
# rendered layout, so the structure is intentionally left as-is.
with gr.Blocks(title="AI Code Generator") as demo:
    gr.Markdown("# AI Code Generator (via OpenRouter)")
    gr.Markdown("### System prompt is locked to code-generation mode")

    # Store chat history across turns as a list of (user_msg, bot_msg) tuples.
    chat_history = gr.State([])
    # NOTE(review): declared but never read or written by any handler below.
    download_file = gr.State(None)

    with gr.Row():
        with gr.Column(scale=3):
            # Model picker; defaults to the first entry of AVAILABLE_MODELS.
            model_dropdown = gr.Dropdown(
                choices=AVAILABLE_MODELS,
                value=AVAILABLE_MODELS[0],
                label="Select Model"
            )

            temperature_slider = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature"
            )

    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(height=400)
            user_input = gr.Textbox(
                label="Your Message",
                placeholder="Type your coding request here...",
                lines=3
            )

    with gr.Row():
        submit_btn = gr.Button("Generate Code", variant="primary")
        # Hidden until the first reply produces a transcript file.
        download_btn = gr.DownloadButton("Download Chat", visible=False)
        clear_btn = gr.ClearButton([user_input, chatbot])

    def respond(message, history, model_name, temperature):
        """Handle one chat turn: query the model, extend the history, and
        refresh the downloadable transcript.

        Returns (new_history_state, chatbot_value, download_button_update).
        """
        response = generate_response(message, model_name, temperature, history)
        updated_history = history + [(message, response)]
        # Regenerate the downloadable transcript after every turn and
        # reveal the download button pointing at the new file.
        file_path = save_chat_history(updated_history)
        return updated_history, updated_history, gr.DownloadButton(value=file_path, visible=True)

    # Button click: run respond, then clear the input box.
    submit_btn.click(
        respond,
        [user_input, chat_history, model_dropdown, temperature_slider],
        [chat_history, chatbot, download_btn]
    ).then(
        lambda: "", None, user_input
    )

    # Pressing Enter in the textbox mirrors the button's behavior.
    user_input.submit(
        respond,
        [user_input, chat_history, model_dropdown, temperature_slider],
        [chat_history, chatbot, download_btn]
    ).then(
        lambda: "", None, user_input
    )

demo.launch()