Upload goonsai_civitprompt_LLM.ipynb
goonsai_civitprompt_LLM.ipynb
ADDED
{"cells":[{"cell_type":"code","source":["#@title Install Dependencies and Set Up GPU\n","!pip install llama-cpp-python huggingface_hub --quiet\n","print(\"Dependencies installed.\")\n","\n","#@title Select a Model { run: \"auto\" }\n","model_name = \"qwen2.5-1.5B-civitai-nsfw-v1\" #@param [\"gemma3-1B-goonsai-nsfw-100k\", \"qwen2.5-1.5B-civitai-nsfw-v1\", \"qwen2.5-3B-goonsai-nsfw-100k\", \"qwen3-1.7B-civitai-nsfw-v1\"]\n","\n","# Download the selected model\n","from huggingface_hub import hf_hub_download\n","model_filename = f\"{model_name}/{model_name}-BF16.gguf\"\n","model_path = hf_hub_download(\n"," repo_id=\"goonsai-com/civitaiprompts\",\n"," filename=model_filename,\n"," local_dir=\"./models\"\n",")\n","print(f\"Downloaded {model_name} to: {model_path}\")\n","\n","#@title Load and Run Model on T4 GPU\n","from llama_cpp import Llama\n","\n","# Load the model\n","llm = Llama(\n"," model_path=model_path,\n"," n_ctx=2048, # Context length\n"," n_batch=512, # Batch size\n"," n_gpu_layers=-1, # Offload all layers to T4 GPU\n"," verbose=False\n",")\n","\n"],"metadata":{"id":"J7itQqSrK1TG"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["\n","#@markdown Define a simple prompt\n","input_prompt = \"a woman in a red bikini on the beach\" #@param {type:\"string\"}\n","#@markdown ...using settings:\n","system_message = \"make a prompt from this\" #@param {type:\"string\"}\n","full_prompt = f\"{system_message}:\\n\\n{input_prompt}\"\n","temperature = 0.7 #@param {type:\"slider\", min:0.1, max:1.5, step:0.01}\n","top_p = 0.9 #@param {type:\"slider\", min:0.7, max:0.9, step:0.01}\n","\n","# Generate a detailed prompt\n","output = llm(\n"," prompt=full_prompt, # Use full_prompt with system message\n"," max_tokens=512, # Adjust based on desired output length\n"," temperature=temperature, # Controls randomness\n"," top_p=top_p, # Nucleus sampling\n"," stop=[\"\\n\"] # Stop at newline for cleaner output\n",")\n","\n","# Print the generated prompt\n","print(\"Generated Prompt:\\n---------\\n\")\n","print(output[\"choices\"][0][\"text\"].replace(',', ',\\n'))\n","\n","\n","\n"],"metadata":{"id":"aq10GOl6KoxJ"},"execution_count":null,"outputs":[]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[],"authorship_tag":"ABX9TyMpt1C32fzmsJACKhoEMnIO"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
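Because sampling is stochastic, a single call can land on a weak expansion. One simple workaround, sketched below using the same llm, full_prompt, and top_p from the notebook, is to draw a few candidates at a slightly higher temperature and pick the best by eye:

# Sketch (not in the original notebook): sample several candidate prompts
# and choose the best one manually.
for i in range(3):
    out = llm(
        prompt=full_prompt,
        max_tokens=512,
        temperature=1.0,   # higher temperature -> more varied candidates
        top_p=top_p,
        stop=["\n"],
    )
    print(f"--- candidate {i + 1} ---")
    print(out["choices"][0]["text"].strip())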