NassimeBejaia commited on
Commit
5d50da3
·
verified ·
1 Parent(s): 590e331

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -25
app.py CHANGED
@@ -1,28 +1,19 @@
1
- import requests
2
 
3
- api_key = "YOUR_API_KEY" # or use os.getenv("DEEPSEEK_API_KEY") if set in your env
 
4
 
5
- headers = {
6
- "Authorization": f"Bearer {api_key}",
7
- "Content-Type": "application/json"
8
- }
 
 
 
 
 
 
9
 
10
- url = "https://api.deepseek.com/v1/chat/completions"
11
-
12
- data = {
13
- "model": "deepseek-chat",
14
- "messages": [
15
- {"role": "user", "content": "Correct and improve this sentence: 'I goed to the park and play.'"}
16
- ],
17
- "temperature": 0.7,
18
- "max_tokens": 100
19
- }
20
-
21
- response = requests.post(url, headers=headers, json=data)
22
-
23
- if response.status_code == 200:
24
- print("✅ Response from DeepSeek:")
25
- print(response.json()["choices"][0]["message"]["content"])
26
- else:
27
- print("❌ Error:")
28
- print(response.status_code, response.text)
 
1
from groq import Groq

# Set up the Groq API client with its default configuration
# (presumably picks up credentials such as GROQ_API_KEY from the
# environment — confirm against deployment config).
client = Groq()

# Ask the Llama-4 Scout model a question, requesting a streamed reply
# so output can be shown incrementally instead of after the full response.
completion = client.chat.completions.create(
    model="meta-llama/llama-4-scout-17b-16e-instruct",  # target model
    messages=[
        {"role": "user", "content": "Hello, how can I improve my coding skills?"},
    ],
    temperature=1,            # sampling randomness (higher = more random)
    max_completion_tokens=1024,  # upper bound on generated tokens
    top_p=1,                  # nucleus-sampling cutoff
    stream=True,              # yield the answer as incremental chunks
    stop=None,                # no custom stop sequences
)

# Echo each streamed delta to stdout as soon as it arrives; a chunk's
# delta content may be None, which is rendered as the empty string.
for chunk in completion:
    piece = chunk.choices[0].delta.content
    print(piece if piece is not None else "", end="")