Update app.py
app.py
CHANGED
@@ -1,25 +1,46 @@
+import os
 import streamlit as st
-from
+from groq import Groq
 from fpdf import FPDF # For PDF generation

-
-llama_model = "meta-llama/Llama-2-7b-hf" # Update with the specific Llama model you want to use
+api = "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"

-#
+# --- Groq API Setup ---
+client = Groq(api_key=api)
+
+# Function to get code from StarCoder model
 def generate_code(summary, language):
     try:
-
-
+        chat_completion = client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": f"Generate efficient {language} code for the following task without any explanations or comments: {summary}",
+                }
+            ],
+            model="llama3-8b-8192", # Specify the model
+            stream=False,
+        )
+        generated_code = chat_completion.choices[0].message.content
         return generated_code
     except Exception as e:
         st.error(f"Error generating code: {e}")
         return ""

-# Function to explain the generated code using Llama
+# Function to explain the generated code using Llama
 def explain_code(code):
     try:
-
-
+        chat_completion = client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": f"Explain the following code descriptively and attractively so the user can easily understand it:\n\n{code}",
+                }
+            ],
+            model="llama3-8b-8192",
+            stream=False,
+        )
+        explanation = chat_completion.choices[0].message.content
         return explanation
     except Exception as e:
         st.error(f"Error explaining code: {e}")
@@ -38,7 +59,7 @@ def save_code_as_pdf(code, file_name="generated_code.pdf"):
 st.set_page_config(page_title="Generative AI Code Generator", page_icon="🧑‍💻", layout="wide")

 # Page Title
-st.title("🚀 Generative AI Code Generator Using
+st.title("🚀 Generative AI Code Generator Using StarCoder")

 # Input Fields
 summary = st.text_area("📝 Enter the Task Summary", "For example: Create a function to add two numbers.")
@@ -79,4 +100,4 @@ if st.button("Generate New Code"):

 # Footer Information
 st.markdown("---")
-st.write("🚀 Powered by **Streamlit**, **
+st.write("🚀 Powered by **Streamlit**, **Groq**, and **StarCoder** | Deployed on Hugging Face")
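Both new functions send a single user message to Groq's chat completions endpoint and read the reply from choices[0].message.content, with the model fixed to llama3-8b-8192. Below is a minimal sketch of that shared call pattern, assuming the key is read from a GROQ_API_KEY environment variable instead of being hard-coded; the variable name and the ask_llm helper are illustrative, not part of the commit.

import os

from groq import Groq

# Assumption: the key comes from the environment rather than a literal in app.py.
client = Groq(api_key=os.environ["GROQ_API_KEY"])

def ask_llm(prompt: str) -> str:
    """Send one user prompt to Groq and return the model's text reply."""
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama3-8b-8192",  # same model string the commit uses
        stream=False,
    )
    return chat_completion.choices[0].message.content

generate_code and explain_code differ only in the prompt they wrap around this call, so a helper like this keeps the Groq-specific details in one place.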
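The second hunk's context line references a save_code_as_pdf(code, file_name="generated_code.pdf") helper whose body is outside this diff. A rough sketch of what such a helper might look like, using the FPDF import already at the top of app.py; the font and cell height are assumptions.

from fpdf import FPDF

def save_code_as_pdf(code, file_name="generated_code.pdf"):
    """Write the generated code to a simple one-column PDF (illustrative sketch)."""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Courier", size=10)      # monospaced font suits code listings
    for line in code.splitlines() or [""]:
        pdf.multi_cell(0, 5, line)        # wrap long lines across the page width
    pdf.output(file_name)
    return file_name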
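The last hunk sits inside the if st.button("Generate New Code"): handler, whose body is also outside this diff. A speculative sketch of how that handler could chain generate_code, explain_code, and save_code_as_pdf together; only the button label and the summary text area appear in the diff, while the language selectbox, code display, and download button are assumptions.

language = st.selectbox("Choose a language", ["Python", "JavaScript", "C++"])  # assumed widget

if st.button("Generate New Code"):
    code = generate_code(summary, language)
    if code:
        st.code(code)                      # show the generated snippet
        st.markdown(explain_code(code))    # and its plain-language explanation
        pdf_path = save_code_as_pdf(code)
        with open(pdf_path, "rb") as f:
            st.download_button("Download as PDF", f, file_name=pdf_path)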