Skip API key validation for Ollama and update version
Files changed:
- README.md (+1, -1)
- app.py (+3, -3)
- helpers/llm_helper.py (+6, -4)
- requirements.txt (+1, -1)
README.md (CHANGED)

@@ -85,7 +85,7 @@ Visit the respective websites to obtain the API keys.
 
 SlideDeck AI allows the use of offline LLMs to generate the contents of the slide decks. This is typically suitable for individuals or organizations who would like to use self-hosted LLMs for privacy concerns, for example.
 
-Offline LLMs are made available via Ollama. Therefore, a pre-requisite here is to have [Ollama installed](https://ollama.com/download) on the system and the desired [LLM](https://ollama.com/search) pulled locally.
+Offline LLMs are made available via Ollama. Therefore, a pre-requisite here is to have [Ollama installed](https://ollama.com/download) on the system and the desired [LLM](https://ollama.com/search) pulled locally. You should choose a model based on your hardware capacity. If you have no GPU, [gemma3:1b](https://ollama.com/library/gemma3:1b) can be a suitable model to run on CPU only.
 
 In addition, the `RUN_IN_OFFLINE_MODE` environment variable needs to be set to `True` to enable the offline mode. This, for example, can be done using a `.env` file or from the terminal. The typical steps to use SlideDeck AI in offline mode (in a `bash` shell) are as follows:
 
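The only prose change here adds model-selection guidance for offline use. The surrounding paragraph says `RUN_IN_OFFLINE_MODE` must be set to `True`, via a `.env` file or the terminal, before the app is started. The diff does not show how the flag is consumed, so the following is only a minimal Python sketch, assuming the value is loaded with `python-dotenv` and read from the environment; the helper name `is_offline_mode` is hypothetical.

```python
# Minimal sketch (not repository code): reading RUN_IN_OFFLINE_MODE,
# assuming python-dotenv is available for .env support.
import os

from dotenv import load_dotenv


def is_offline_mode() -> bool:
    """Return True when RUN_IN_OFFLINE_MODE is set to a truthy value."""
    load_dotenv()  # loads a .env file, if present, without overriding existing vars
    return os.getenv('RUN_IN_OFFLINE_MODE', 'False').strip().lower() in ('true', '1', 'yes')


if __name__ == '__main__':
    print('Offline mode enabled:', is_offline_mode())
```

With the flag set, for example `RUN_IN_OFFLINE_MODE=True` in `.env`, the app can take the Ollama code path shown in the app.py change below instead of asking for an API key.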
app.py (CHANGED)

@@ -159,11 +159,11 @@ with st.sidebar:
 
     if RUN_IN_OFFLINE_MODE:
         llm_provider_to_use = st.text_input(
-            label='2: Enter Ollama model name to use (e.g.,
+            label='2: Enter Ollama model name to use (e.g., gemma3:1b):',
             help=(
                 'Specify a correct, locally available LLM, found by running `ollama list`, for'
-                ' example `mistral:v0.2
-                ' and supported GPU is strongly recommended.'
+                ' example, `gemma3:1b`, `mistral:v0.2`, and `mistral-nemo:latest`. Having an'
+                ' Ollama-compatible and supported GPU is strongly recommended.'
             )
         )
     api_key_token: str = ''
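The updated help text tells users to pick a model name that appears in the output of `ollama list`. As a rough illustration only (not code from the repository), a Python sketch that validates a user-entered name against that output could look like this; the function name `is_model_pulled` is hypothetical.

```python
# Hypothetical helper (not part of SlideDeck AI): check whether a model name
# entered in the UI appears in the local `ollama list` output.
import subprocess


def is_model_pulled(model_name: str) -> bool:
    """Return True if `ollama list` reports the given model as locally available."""
    try:
        result = subprocess.run(
            ['ollama', 'list'],
            capture_output=True, text=True, check=True, timeout=10,
        )
    except (OSError, subprocess.SubprocessError):
        # Ollama is not installed, not on PATH, or not responding
        return False

    # Each non-header line of `ollama list` begins with the model name, e.g. gemma3:1b
    listed = {line.split()[0] for line in result.stdout.splitlines()[1:] if line.strip()}
    return model_name.strip() in listed


if __name__ == '__main__':
    print(is_model_pulled('gemma3:1b'))
```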
helpers/llm_helper.py (CHANGED)

@@ -95,11 +95,13 @@ def is_valid_llm_provider_model(
     if not provider or not model or provider not in GlobalConfig.VALID_PROVIDERS:
         return False
 
-    if
-
+    if provider != GlobalConfig.PROVIDER_OLLAMA:
+        # No API key is required for offline Ollama models
+        if not api_key:
+            return False
 
-
-
+        if api_key and API_KEY_REGEX.match(api_key) is None:
+            return False
 
     if provider == GlobalConfig.PROVIDER_AZURE_OPENAI:
         valid_url = urllib3.util.parse_url(azure_endpoint_url)
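The net effect is that an API key is required and pattern-checked only for providers other than Ollama. Below is a self-contained sketch of that behaviour with stand-in names; `PROVIDER_OLLAMA`, `VALID_PROVIDERS`, and the key pattern are illustrative values, not the ones used by SlideDeck AI.

```python
# Stand-in sketch of the validation behaviour; the constants and regex here
# are illustrative, not SlideDeck AI's actual values.
import re

PROVIDER_OLLAMA = 'ol'
VALID_PROVIDERS = {'ol', 'hf', 'gg'}
API_KEY_REGEX = re.compile(r'^[\w-]{6,}$')  # hypothetical "looks like a key" pattern


def is_valid_provider_model(provider: str, model: str, api_key: str) -> bool:
    if not provider or not model or provider not in VALID_PROVIDERS:
        return False

    if provider != PROVIDER_OLLAMA:
        # Online providers must supply a plausible-looking API key
        if not api_key or API_KEY_REGEX.match(api_key) is None:
            return False

    return True


# Ollama needs no key; an online provider without one is rejected
assert is_valid_provider_model('ol', 'gemma3:1b', '') is True
assert is_valid_provider_model('hf', 'mistralai/Mistral-7B-Instruct', '') is False
assert is_valid_provider_model('hf', 'mistralai/Mistral-7B-Instruct', 'hf_abcdef123456') is True
```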
requirements.txt (CHANGED)

@@ -37,4 +37,4 @@ anyio==4.4.0
 
 httpx~=0.27.2
 huggingface-hub~=0.24.5
-ollama~=0.
+ollama~=0.5.1
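The `~=0.5.1` pin is a compatible-release specifier: it accepts any 0.5.x release at or above 0.5.1 and excludes 0.6.0. A quick check with the `packaging` library (assumed available; it is not part of this diff) illustrates the behaviour.

```python
# Illustration of the compatible-release operator used in requirements.txt.
# Assumes the `packaging` library is installed (pip install packaging).
from packaging.specifiers import SpecifierSet

spec = SpecifierSet('~=0.5.1')
print('0.5.1' in spec)  # True
print('0.5.9' in spec)  # True
print('0.6.0' in spec)  # False
```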