abhishekdeshmukh committed
Commit · 666c071
1 Parent(s): 0d00fe6
First
Browse files
- .idea/.gitignore +8 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/llm.iml +8 -0
- .idea/misc.xml +7 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- app.py +86 -0
- requirements.txt +4 -0
.idea/.gitignore
ADDED
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/llm.iml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="Python 3.8" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
.idea/misc.xml
ADDED
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.8" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" />
+</project>
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/llm.iml" filepath="$PROJECT_DIR$/.idea/llm.iml" />
+    </modules>
+  </component>
+</project>
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
app.py
ADDED
@@ -0,0 +1,86 @@
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+# Initialize model and tokenizer
+model_name = "deepseek-ai/DeepSeek-V3-0324"
+
+print("Loading tokenizer...")
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+print("Loading model...")
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    torch_dtype=torch.float16,  # Use float16 for efficiency
+    device_map="auto",  # Automatically determine device mapping
+    trust_remote_code=True
+)
+
+# Set model to evaluation mode
+model.eval()
+
+
+def generate_response(message, chat_history, system_prompt="You are a helpful AI assistant.", max_length=2048,
+                      temperature=0.7):
+    # Format the conversation
+    full_prompt = f"{system_prompt}\n\nUser: {message}\nAssistant:"
+
+    # Add chat history if it exists
+    if chat_history:
+        history_text = ""
+        for user_msg, assistant_msg in chat_history:
+            history_text += f"User: {user_msg}\nAssistant: {assistant_msg}\n"
+        full_prompt = f"{system_prompt}\n\n{history_text}User: {message}\nAssistant:"
+
+    # Tokenize input
+    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
+
+    # Generate response
+    with torch.no_grad():
+        outputs = model.generate(
+            inputs.input_ids,
+            max_length=max_length,
+            temperature=temperature,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id,
+            top_p=0.9,
+            repetition_penalty=1.1
+        )
+
+    # Decode and return the response
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Extract only the assistant's response
+    response = response.split("Assistant:")[-1].strip()
+
+    return response
+
+
+# Create the Gradio interface
+with gr.Blocks(css="footer {visibility: hidden}") as demo:
+    gr.Markdown("# DeepSeek V3 Chatbot")
+    gr.Markdown("Welcome! This is a chatbot powered by the DeepSeek-V3-0324 model.")
+
+    chatbot = gr.Chatbot(height=600)
+    msg = gr.Textbox(label="Type your message here...", placeholder="Hello! How can I help you today?")
+    clear = gr.Button("Clear Conversation")
+
+
+    def user(user_message, history):
+        return "", history + [[user_message, None]]
+
+
+    def bot(history):
+        user_message = history[-1][0]
+        bot_message = generate_response(user_message, history[:-1])
+        history[-1][1] = bot_message
+        return history
+
+
+    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, chatbot, chatbot
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)
+
+demo.queue()
+demo.launch()
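A note on app.py above: the prompt is assembled by hand as a "User:/Assistant:" transcript, and max_length=2048 bounds the prompt and the completion together, so a long chat history can leave little or no room for the reply. A minimal alternative sketch, assuming the checkpoint ships a chat template (recent DeepSeek instruct checkpoints do) and reusing the tokenizer and model loaded above; generate_response_templated is a hypothetical helper, not part of this commit:

def generate_response_templated(message, chat_history,
                                system_prompt="You are a helpful AI assistant.",
                                max_new_tokens=512, temperature=0.7):
    # Build a structured message list instead of a hand-formatted transcript.
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # apply_chat_template renders the prompt format the model was trained on.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_new_tokens=max_new_tokens,  # caps only the completion, not prompt + completion
            temperature=temperature,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

Slicing off the prompt tokens also makes response extraction exact, instead of relying on the literal string "Assistant:" surviving in the decoded output.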
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+gradio==4.19.2
+torch==2.2.0
+transformers==4.37.2
+accelerate==0.27.2
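To try the Space locally, install the pinned dependencies with pip install -r requirements.txt and run python app.py. Be aware that DeepSeek-V3-0324 is a very large mixture-of-experts checkpoint (hundreds of billions of parameters), so device_map="auto" with float16 will not fit on typical single-GPU hardware. A common smoke test, sketched here with a hypothetical stand-in model that is not part of the commit, is to point model_name at a small causal LM first and only then switch to the full model:

# Hypothetical smoke-test swap: any small causal LM exercises the UI end to end.
# TinyLlama uses the Llama architecture, which transformers 4.37.x supports.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"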