Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,46 +1,25 @@
|
|
1 |
-
import os
|
2 |
import streamlit as st
|
3 |
-
#from groq import Groq
|
4 |
-
#from fpdf import FPDF # For PDF generation
|
5 |
from transformers import pipeline
|
|
|
6 |
|
7 |
-
|
|
|
8 |
|
9 |
-
#
|
10 |
-
client = Groq(api_key=api)
|
11 |
-
# Function to get code from StarCoder model
|
12 |
def generate_code(summary, language):
|
13 |
try:
|
14 |
-
|
15 |
-
|
16 |
-
{
|
17 |
-
"role": "user",
|
18 |
-
"content": f"Generate efficient {language} code for the following task without any explanations or comments: {summary}",
|
19 |
-
}
|
20 |
-
],
|
21 |
-
model="llama3-8b-8192", # Specify the model
|
22 |
-
stream=False,
|
23 |
-
)
|
24 |
-
generated_code = chat_completion.choices[0].message.content
|
25 |
return generated_code
|
26 |
except Exception as e:
|
27 |
st.error(f"Error generating code: {e}")
|
28 |
return ""
|
29 |
|
30 |
-
# Function to explain the generated code using Llama
|
31 |
def explain_code(code):
|
32 |
try:
|
33 |
-
|
34 |
-
|
35 |
-
{
|
36 |
-
"role": "user",
|
37 |
-
"content": f"Explain the following code descriptively and attractively so the user can easily understand it:\n\n{code}",
|
38 |
-
}
|
39 |
-
],
|
40 |
-
model="llama3-8b-8192",
|
41 |
-
stream=False,
|
42 |
-
)
|
43 |
-
explanation = chat_completion.choices[0].message.content
|
44 |
return explanation
|
45 |
except Exception as e:
|
46 |
st.error(f"Error explaining code: {e}")
|
@@ -59,7 +38,7 @@ def save_code_as_pdf(code, file_name="generated_code.pdf"):
|
|
59 |
st.set_page_config(page_title="Generative AI Code Generator", page_icon="π§βπ»", layout="wide")
|
60 |
|
61 |
# Page Title
|
62 |
-
st.title("π Generative AI Code Generator Using
|
63 |
|
64 |
# Input Fields
|
65 |
summary = st.text_area("π Enter the Task Summary", "For example: Create a function to add two numbers.")
|
@@ -100,4 +79,4 @@ if st.button("Generate New Code"):
|
|
100 |
|
101 |
# Footer Information
|
102 |
st.markdown("---")
|
103 |
-
st.write("π Powered by **Streamlit**, **
|
|
|
|
|
1 |
import streamlit as st
|
|
|
|
|
2 |
from transformers import pipeline
|
3 |
+
from fpdf import FPDF # For PDF generation
|
4 |
|
5 |
+
# Hugging Face model id shared by the generation and explanation pipelines.
# Swap this for any other causal-LM checkpoint to change the backing model.
llama_model = "meta-llama/Llama-2-7b-hf"  # Update with the specific Llama model you want to use
|
7 |
|
8 |
+
# Function to get code from Hugging Face Llama model
|
|
|
|
|
9 |
def generate_code(summary, language):
    """Generate `language` source code for the task described in `summary`.

    Uses a Hugging Face text-generation pipeline backed by `llama_model`.

    Args:
        summary: Natural-language description of the task to implement.
        language: Target programming language name, interpolated into the prompt.

    Returns:
        The raw generated text from the model, or "" if generation fails
        (the error is surfaced to the user via `st.error` rather than raised).
    """
    try:
        # Building a text-generation pipeline loads the full model weights;
        # doing that on every call reloads a multi-GB checkpoint per request.
        # Cache the pipeline on the function object and reuse it.
        generator = getattr(generate_code, "_generator", None)
        if generator is None:
            generator = pipeline('text-generation', model=llama_model, tokenizer=llama_model)
            generate_code._generator = generator
        # max_length caps the TOTAL token count (prompt + completion).
        generated_code = generator(
            f"Generate {language} code: {summary}", max_length=150
        )[0]['generated_text']
        return generated_code
    except Exception as e:
        # Best-effort UX: report in the Streamlit UI and return an empty
        # string so callers can continue without a try/except of their own.
        st.error(f"Error generating code: {e}")
        return ""
|
17 |
|
18 |
+
# Function to explain the generated code using Llama (or a suitable summarization model)
|
19 |
def explain_code(code):
|
20 |
try:
|
21 |
+
explainer = pipeline('summarization', model=llama_model, tokenizer=llama_model) # You can use a summarization model or a specific Llama-based explanation model
|
22 |
+
explanation = explainer(f"Explain the following code:\n\n{code}", max_length=250, min_length=50)[0]['summary_text']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
return explanation
|
24 |
except Exception as e:
|
25 |
st.error(f"Error explaining code: {e}")
|
|
|
38 |
st.set_page_config(page_title="Generative AI Code Generator", page_icon="π§βπ»", layout="wide")
|
39 |
|
40 |
# Page Title
|
41 |
+
st.title("π Generative AI Code Generator Using Hugging Face Llama")
|
42 |
|
43 |
# Input Fields
|
44 |
summary = st.text_area("π Enter the Task Summary", "For example: Create a function to add two numbers.")
|
|
|
79 |
|
80 |
# Footer Information
|
81 |
st.markdown("---")
|
82 |
+
st.write("π Powered by **Streamlit**, **Hugging Face**, and **Transformers** | Deployed on Hugging Face")
|