NassimeBejaia committed on
Commit
d36aa50
·
verified ·
1 Parent(s): d6a1a6b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -17
app.py CHANGED
@@ -1,19 +1,38 @@
 
1
  from groq import Groq
2
 
3
- # Initialize the Groq client
4
- client = Groq()
5
-
6
- # Create a chat completion request
7
- completion = client.chat.completions.create(
8
- model="meta-llama/llama-4-scout-17b-16e-instruct", # Specify the model
9
- messages=[{"role": "user", "content": "Hello, how can I improve my coding skills?"}], # Example message
10
- temperature=1, # Controls randomness (higher is more random)
11
- max_completion_tokens=1024, # Max tokens in the response
12
- top_p=1, # Controls the diversity via nucleus sampling
13
- stream=True, # Use streaming to receive chunks of the response
14
- stop=None, # Define stopping criteria (optional)
15
- )
16
-
17
- # Process and print the response
18
- for chunk in completion:
19
- print(chunk.choices[0].delta.content or "", end="") # Print the content from the response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys

from groq import Groq


def main() -> None:
    """Stream one chat completion from the Groq API and print it to stdout.

    Reads the API key from the DEEPSEEK_API_KEY environment variable (the
    name the deployment already uses — kept for backward compatibility even
    though the client is Groq's), sends a fixed example prompt, and streams
    the model's reply chunk by chunk.

    Raises:
        ValueError: if the API key environment variable is unset or empty.
    """
    api_key = os.getenv("DEEPSEEK_API_KEY")
    if not api_key:
        raise ValueError("API key is missing. Please check your environment variables.")

    client = Groq(api_key=api_key)

    # The single user message sent to the model.
    messages = [{"role": "user", "content": "Hello, how can I improve my coding skills?"}]

    try:
        print("Calling API...")
        completion = client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",  # model to use
            messages=messages,
            temperature=1,              # creativity of the response
            max_completion_tokens=1024,  # cap on response length
            top_p=1,                    # nucleus-sampling threshold
            stream=True,                # receive the reply incrementally
        )
        print("Streaming response received...")
        # Each chunk carries an incremental delta; content may be None on
        # role/finish chunks, so fall back to the empty string.
        for chunk in completion:
            print(chunk.choices[0].delta.content or "", end="")
        print()  # terminate the streamed output with a newline
    except Exception as e:
        # Top-level script boundary: report the failure on stderr and exit
        # nonzero instead of printing to stdout and exiting 0.
        print(f"Error occurred: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()