dolphinium
committed on
Commit
·
15039e8
1
Parent(s):
043003f
refactor: Remove max_output_tokens from generation configuration for LLM and visualization code
Browse files- connections.py +1 -1
- data_processing.py +1 -1
connections.py
CHANGED
@@ -41,7 +41,7 @@ def initialize_connections():
|
|
41 |
|
42 |
# 3. Initialize the LLM
|
43 |
genai.configure(api_key=config.GEMINI_API_KEY)
|
44 |
-
llm_model = genai.GenerativeModel('gemini-2.5-flash', generation_config=genai.types.GenerationConfig(temperature=0
|
45 |
print(f"✅ LLM Model '{llm_model.model_name}' initialized.")
|
46 |
|
47 |
print("✅ System Initialized Successfully.")
|
|
|
41 |
|
42 |
# 3. Initialize the LLM
|
43 |
genai.configure(api_key=config.GEMINI_API_KEY)
|
44 |
+
llm_model = genai.GenerativeModel('gemini-2.5-flash', generation_config=genai.types.GenerationConfig(temperature=0))
|
45 |
print(f"✅ LLM Model '{llm_model.model_name}' initialized.")
|
46 |
|
47 |
print("✅ System Initialized Successfully.")
|
data_processing.py
CHANGED
@@ -133,7 +133,7 @@ def llm_generate_visualization_code(llm_model, query_context, facet_data):
|
|
133 |
"""Generates Python code for visualization based on query and data."""
|
134 |
prompt = get_visualization_code_prompt(query_context, facet_data)
|
135 |
try:
|
136 |
-
generation_config = genai.types.GenerationConfig(temperature=0
|
137 |
response = llm_model.generate_content(prompt, generation_config=generation_config)
|
138 |
code = re.sub(r'^```python\s*|```$', '', response.text, flags=re.MULTILINE)
|
139 |
return code
|
|
|
133 |
"""Generates Python code for visualization based on query and data."""
|
134 |
prompt = get_visualization_code_prompt(query_context, facet_data)
|
135 |
try:
|
136 |
+
generation_config = genai.types.GenerationConfig(temperature=0)
|
137 |
response = llm_model.generate_content(prompt, generation_config=generation_config)
|
138 |
code = re.sub(r'^```python\s*|```$', '', response.text, flags=re.MULTILINE)
|
139 |
return code
|