import gradio as gr
import subprocess
import time

# --- Ollama Helper Functions ---

def check_ollama_running():
    """Checks if the Ollama service is accessible."""
    try:
        subprocess.run(["ollama", "ps"], check=True, capture_output=True, timeout=5)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
        return False


def get_ollama_models():
    """Gets a list of locally available Ollama models."""
    # No explicit check_ollama_running() guard here: if Ollama is unreachable,
    # `ollama list` fails and the except branch returns []. In a container,
    # Ollama should already be running by the time this is called.
    try:
        result = subprocess.run(["ollama", "list"], check=True, capture_output=True, text=True, timeout=10)
        models = []
        lines = result.stdout.strip().split("\n")
        if len(lines) > 1:
            for line in lines[1:]:
                parts = line.split()
                if parts:
                    models.append(parts[0])
        # Sorted and de-duplicated for a consistent dropdown.
        return sorted(set(models))
    except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired) as e:
        print(f"Error in get_ollama_models: {e}")  # Log the failure for debugging.
        return []


# --- Core Logic ---
# Typing speed simulation.
CHAR_DELAY = 0.02  # Adjust for desired speed (0.01 is fast, 0.05 is slower).


def reasoning_ollama_stream(model_name, prompt, mode):
    """Streams a response from an Ollama model with simulated typing speed."""
    if not model_name:
        yield "Error: No model selected. Please choose a model."
        return
    if not prompt.strip():
        yield "Error: Prompt cannot be empty."
        return

    # Robustness check, even in Docker where Ollama should already be up.
    if not check_ollama_running():
        yield "Error: Ollama service does not seem to be running or accessible. Please start Ollama."
        return

    # Runtime check: the Dockerfile aims to pull models, but this confirms they are actually present.
    available_models_runtime = get_ollama_models()
    if model_name not in available_models_runtime:
        yield f"Error: Model '{model_name}' selected, but not found by Ollama at runtime. Available: {available_models_runtime}. Please ensure it was pulled."
        return

    # Append the selected mode as a slash suffix to the prompt.
    prompt_with_mode = f"{prompt.strip()} /{mode}"
    command = ["ollama", "run", model_name]
    displayed_response = ""
    try:
        process = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            bufsize=1,
        )
        process.stdin.write(prompt_with_mode + "\n")
        process.stdin.close()

        for line_chunk in iter(process.stdout.readline, ""):
            if not line_chunk and process.poll() is not None:  # Process ended and no more output.
                break
            for char in line_chunk:
                displayed_response += char
                yield displayed_response
                if char.strip():  # Only sleep for non-whitespace characters.
                    time.sleep(CHAR_DELAY)

        process.stdout.close()
        return_code = process.wait(timeout=10)  # Don't block indefinitely waiting for exit.

        if return_code != 0:
            error_output = process.stderr.read()
            error_message = f"\n\n--- Ollama Error (code {return_code}) ---\n{error_output.strip()}"
            if displayed_response and not displayed_response.endswith(error_message):
                displayed_response += error_message
            elif not displayed_response:
                displayed_response = error_message.strip()
            yield displayed_response
            return

        if not displayed_response.strip() and return_code == 0:
            yield "Model returned an empty response."
        elif displayed_response:
            yield displayed_response

    except FileNotFoundError:
        yield "Error: 'ollama' command not found. Please ensure Ollama is installed and in your PATH (or the Dockerfile is correct)."
    except subprocess.TimeoutExpired:  # Raised by process.wait() above.
        yield "Error: Ollama process timed out while waiting for completion."
        if displayed_response:
            yield displayed_response
    except Exception as e:
        yield f"An unexpected error occurred: {str(e)}"
        if displayed_response:
            yield displayed_response


# --- Gradio UI ---
# This runs once when the script starts. In Docker, it queries the Ollama
# instance inside the container after the models have been pulled by CMD.
AVAILABLE_MODELS = get_ollama_models()
QWEN_MODELS = [m for m in AVAILABLE_MODELS if "qwen" in m.lower()]

INITIAL_MODEL = None
# Prioritize qwen3:4b if available, then any other Qwen model, then anything else.
if "qwen3:4b" in AVAILABLE_MODELS:
    INITIAL_MODEL = "qwen3:4b"
elif QWEN_MODELS:
    INITIAL_MODEL = QWEN_MODELS[0]
elif AVAILABLE_MODELS:
    INITIAL_MODEL = AVAILABLE_MODELS[0]
# If no models are found, INITIAL_MODEL stays None and the dropdown shows "No models found...".

with gr.Blocks(title="Qwen3 x Ollama", theme=gr.themes.Soft()) as demo:
    gr.HTML(
        """