Commit: with some changes with access

Files changed:
- README.md (+12 -1)
- __pycache__/app.cpython-312.pyc (+0 -0)
- app.py (+71 -13)
README.md
CHANGED
@@ -17,7 +17,7 @@ This is an interactive demo of the CodeLlama-7b model for generating code completions
 
 ## Features
 
-- Interactive code generation with CodeLlama-7b
+- Interactive code generation with the CodeLlama-7b model
 - Adjustable parameters (temperature, max length, etc.)
 - Example prompts to get started quickly
 - Real-time generation with timing information
@@ -42,6 +42,17 @@ The demo includes several example prompts to help you get started:
 - Binary search tree class
 - Asynchronous data fetching function
 
+## Authentication Requirements
+
+**Important**: CodeLlama is a gated model that requires authentication to access. To use this demo:
+
+1. You must accept the model's license at [meta-llama/CodeLlama-7b-hf](https://huggingface.co/meta-llama/CodeLlama-7b-hf)
+2. You need to set your Hugging Face token in the Space's settings:
+   - Go to Settings > Repository Secrets > Add
+   - Create a secret named `HF_TOKEN` with your Hugging Face token as the value
+
+Without proper authentication, the demo will show a limited interface with instructions.
+
 ## Technical Details
 
 This demo uses:
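The new README section covers the Space-side setup; as a convenience, here is a minimal, hypothetical sketch (not part of this commit) for verifying a token locally before storing it as the `HF_TOKEN` secret. It assumes the `huggingface_hub` package is installed:

```python
# Hypothetical local check, not part of this commit: verify an HF token
# before adding it as the HF_TOKEN repository secret.
import os

from huggingface_hub import whoami  # assumes huggingface_hub is installed

token = os.environ.get("HF_TOKEN")  # same variable name the Space reads
if token:
    # whoami() raises if the token is invalid; otherwise returns account info
    print(f"Token is valid for user: {whoami(token=token)['name']}")
else:
    print("HF_TOKEN is not set")
```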
__pycache__/app.cpython-312.pyc
ADDED
Binary file (7.54 kB)
app.py
CHANGED
@@ -6,7 +6,7 @@ import os
 import time
 
 # Model configuration
-MODEL_NAME = "meta-llama/CodeLlama-7b-hf"
+MODEL_NAME = "meta-llama/CodeLlama-7b-hf"  # Using CodeLlama as requested
 
 # Default example prompts
 EXAMPLES = [
@@ -16,25 +16,66 @@ EXAMPLES = [
     ["async def fetch_data(url: str):"]
 ]
 
-# Load model with error handling
+# Load model with error handling and authentication
 def load_model():
     try:
         print("Loading model and tokenizer...")
-
-        # Load tokenizer and configure the generation pipeline
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-
-        pipeline = transformers.pipeline(
-            "text-generation",
-            model=MODEL_NAME,
-            torch_dtype=torch.float16,
-            device_map="auto",
-        )
+
+        # Get Hugging Face token from environment variable
+        # This will be set in the Hugging Face Space settings
+        hf_token = os.environ.get("HF_TOKEN")
+
+        # If running locally and token is not set, try to use the token from git config
+        if not hf_token:
+            try:
+                # Extract token from git config if available
+                import subprocess
+                git_url = subprocess.check_output(["git", "config", "--get", "remote.origin.url"]).decode().strip()
+                if "@huggingface.co" in git_url:
+                    # Extract token from a URL of the form https://username:[email protected]/...
+                    hf_token = git_url.split("@")[0].split(":")[-1] if git_url.count(":") >= 2 else None
+                    if hf_token:
+                        print("Using token from git config")
+            except Exception as e:
+                print(f"Could not extract token from git config: {str(e)}")
+
+        # Load tokenizer with token if available
+        if hf_token:
+            print("Using Hugging Face token for authentication")
+            tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=hf_token)
+
+            # Configure the pipeline with token
+            pipeline = transformers.pipeline(
+                "text-generation",
+                model=MODEL_NAME,
+                torch_dtype=torch.float16,
+                device_map="auto",
+                token=hf_token
+            )
+        else:
+            # Try without token (will only work if model is public or user is logged in)
+            print("No Hugging Face token found, trying without authentication")
+            tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+
+            pipeline = transformers.pipeline(
+                "text-generation",
+                model=MODEL_NAME,
+                torch_dtype=torch.float16,
+                device_map="auto",
+            )
 
         print("Model loaded successfully!")
         return tokenizer, pipeline
     except Exception as e:
         print(f"Error loading model: {str(e)}")
+        # Try to provide a more helpful error message
+        if "gated repo" in str(e) or "401" in str(e):
+            print("\nIMPORTANT: CodeLlama is a gated model that requires authentication.")
+            print("To use this model, you need to:")
+            print("1. Accept the model's license at https://huggingface.co/meta-llama/CodeLlama-7b-hf")
+            print("2. Set your Hugging Face token in the Space's settings")
+            print("   (Settings > Repository Secrets > Add > HF_TOKEN)")
+
         # Return None to indicate failure
         return None, None
 
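One note on the git-config fallback above: the credential parsing is the fragile part, so here is a small standalone sketch of the same split logic against a hypothetical remote URL (the username and token values are made up for illustration):

```python
# Standalone check of the token-extraction logic above.
# The URL below is hypothetical; a Space remote looks similar when
# credentials are embedded as https://username:[email protected]/...
git_url = "https://someuser:[email protected]/spaces/someuser/demo"

if "@huggingface.co" in git_url:
    # Everything before "@" is the scheme plus credentials; the token, when
    # present, follows the second ":" (the first belongs to "https:").
    hf_token = git_url.split("@")[0].split(":")[-1] if git_url.count(":") >= 2 else None
    print(hf_token)  # -> "hf_hypothetical_token"
```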
@@ -71,7 +112,7 @@ def generate_code(prompt, max_length=200, temperature=0.1, top_p=0.95, top_k=10):
         return f"Error generating code: {str(e)}"
 
 # Load the model and tokenizer
-print("Initializing CodeLlama...")
+print("Initializing CodeLlama-7b...")
 tokenizer, pipeline = load_model()
 
 # Create the Gradio interface
@@ -79,6 +120,20 @@ with gr.Blocks(title="CodeLlama Code Generation") as demo:
     gr.Markdown("# CodeLlama Code Generation")
     gr.Markdown("Enter a code prompt and CodeLlama will complete it for you.")
 
+    # Add a note about authentication if needed
+    if tokenizer is None or pipeline is None:
+        gr.Markdown("""
+        ## ⚠️ Authentication Required
+
+        This demo requires authentication to access the CodeLlama model.
+
+        To use this model, you need to:
+        1. Accept the model's license at [meta-llama/CodeLlama-7b-hf](https://huggingface.co/meta-llama/CodeLlama-7b-hf)
+        2. Set your Hugging Face token in the Space's settings (Settings > Repository Secrets > Add > HF_TOKEN)
+
+        The demo will show a limited interface until authentication is set up.
+        """)
+
     with gr.Row():
         with gr.Column():
             prompt = gr.Textbox(
@@ -151,6 +206,9 @@
     - **Top-p**: Controls diversity via nucleus sampling
     - **Top-k**: Controls diversity via top-k sampling
 
+    **Note**: CodeLlama is a gated model that requires authentication. If you're seeing authentication errors,
+    please follow the instructions at the top of the page.
+
     Created by DheepLearning
     """)
 
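Finally, a hedged sketch of how the loaded pipeline is typically exercised. The body of `generate_code` is not shown in this diff, so the call below only mirrors the defaults visible in its signature (max_length=200, temperature=0.1, top_p=0.95, top_k=10); it is illustrative, not the Space's exact code:

```python
# Illustrative only: exercise the pipeline returned by load_model().
tokenizer, pipeline = load_model()
if pipeline is not None:
    outputs = pipeline(
        "def fibonacci(n):",   # a simple code prompt
        max_new_tokens=200,    # stands in for the max_length=200 default
        do_sample=True,        # sampling enables temperature/top_p/top_k
        temperature=0.1,
        top_p=0.95,
        top_k=10,
    )
    print(outputs[0]["generated_text"])
else:
    print("Model failed to load; see the authentication notes above.")
```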