	Upload 3 files
Files changed:
- app.py (+41 -10)
- llmdolphin.py (+232 -54)
- llmenv.py (+23 -0)
app.py CHANGED

@@ -4,8 +4,8 @@ from tagger.utils import gradio_copy_text, COPY_ACTION_JS
 from tagger.tagger import convert_danbooru_to_e621_prompt, insert_recom_prompt
 from genimage import generate_image
 from llmdolphin import (get_llm_formats, get_dolphin_model_format,
-    get_dolphin_models, get_dolphin_model_info, select_dolphin_model,
-    select_dolphin_format, add_dolphin_models, get_dolphin_sysprompt,
+    get_dolphin_models, get_dolphin_model_info, select_dolphin_model, get_dolphin_loras, select_dolphin_lora,
+    add_dolphin_loras, select_dolphin_format, add_dolphin_models, get_dolphin_sysprompt,
     get_dolphin_sysprompt_mode, select_dolphin_sysprompt, get_dolphin_languages,
     select_dolphin_language, dolphin_respond, dolphin_parse, respond_playground)
 
@@ -27,7 +27,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
                         chat_submit = gr.Button("Send", scale=1, variant="primary")
                         chat_clear = gr.Button("Clear", scale=1, variant="secondary")
                     with gr.Accordion("Additional inputs", open=False):
-                        chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]
+                        chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]), label="Message format")
                        chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
                        with gr.Row():
                            chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
@@ -35,13 +35,18 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
                            chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                            chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                            chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
+                        with gr.Accordion("Loras", open=True, visible=False):
+                            chat_lora = gr.Dropdown(choices=get_dolphin_loras(), value=get_dolphin_loras()[0], allow_custom_value=True, label="Lora")
+                            chat_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Lora scale")
+                            chat_add_lora_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/ggml-org/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-F16-GGUF/blob/main/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-f16.gguf", lines=1)
+                            chat_add_lora_submit = gr.Button("Update lists of loras")
                    with gr.Accordion("Add models", open=False):
                        chat_add_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/mradermacher/MagnumChronos-i1-GGUF/blob/main/MagnumChronos.i1-Q4_K_M.gguf", lines=1)
                        chat_add_format = gr.Dropdown(choices=get_llm_formats(), value=get_llm_formats()[0], label="Message format")
                        chat_add_submit = gr.Button("Update lists of models")
                    with gr.Accordion("Modes", open=True):
-                        chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0]
-                        chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]
+                        chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0], allow_custom_value=True, label="Model")
+                        chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]), label="Model info")
                        with gr.Row():
                            chat_mode = gr.Dropdown(choices=get_dolphin_sysprompt_mode(), value=get_dolphin_sysprompt_mode()[0], allow_custom_value=False, label="Mode")
                            chat_lang = gr.Dropdown(choices=get_dolphin_languages(), value="English", allow_custom_value=True, label="Output language")
@@ -68,9 +73,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
                Don't worry about the strange appearance, **it's just a bug of Gradio!**""", elem_classes="title")
        pg_chatbot = gr.Chatbot(scale=1, show_copy_button=True, show_share_button=False)
        with gr.Accordion("Additional inputs", open=False):
-            pg_chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0]
-            pg_chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]
-            pg_chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]
+            pg_chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0], allow_custom_value=True, label="Model")
+            pg_chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]), label="Model info")
+            pg_chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]), label="Message format")
            pg_chat_sysmsg = gr.Textbox(value="You are a helpful assistant.", label="System message")
            with gr.Row():
                pg_chat_tokens = gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens")
@@ -78,6 +83,11 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
                pg_chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                pg_chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                pg_chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
+            with gr.Accordion("Loras", open=True, visible=False):
+                pg_chat_lora = gr.Dropdown(choices=get_dolphin_loras(), value=get_dolphin_loras()[0], allow_custom_value=True, label="Lora")
+                pg_chat_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Lora scale")
+                pg_chat_add_lora_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/ggml-org/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-F16-GGUF/blob/main/LoRA-Qwen2.5-14B-Instruct-abliterated-v2-f16.gguf", lines=1)
+                pg_chat_add_lora_submit = gr.Button("Update lists of loras")
        with gr.Accordion("Add models", open=True):
            pg_chat_add_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/mradermacher/MagnumChronos-i1-GGUF/blob/main/MagnumChronos.i1-Q4_K_M.gguf", lines=1)
            pg_chat_add_format = gr.Dropdown(choices=get_llm_formats(), value=get_llm_formats()[0], label="Message format")
@@ -90,7 +100,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
            #clear_btn="Clear",
            submit_btn="Send",
            #additional_inputs_accordion='gr.Accordion(label="Additional Inputs", open=False)',
-            additional_inputs=[pg_chat_model, pg_chat_sysmsg, pg_chat_tokens, pg_chat_temperature, pg_chat_topp, pg_chat_topk, pg_chat_rp,
+            additional_inputs=[pg_chat_model, pg_chat_sysmsg, pg_chat_tokens, pg_chat_temperature, pg_chat_topp, pg_chat_topk, pg_chat_rp,
+                               pg_chat_lora, pg_chat_lora_scale, state],
            chatbot=pg_chatbot
        )
    gr.LoginButton()
@@ -99,7 +110,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
    gr.on(
        triggers=[chat_msg.submit, chat_submit.click],
        fn=dolphin_respond,
-        inputs=[chat_msg, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, state],
+        inputs=[chat_msg, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, chat_lora, chat_lora_scale, state],
        outputs=[chatbot],
        queue=True,
        show_progress="full",
@@ -113,6 +124,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
    .success(lambda: None, None, chatbot, queue=False)
    chat_format.change(select_dolphin_format, [chat_format, state], [chat_format, state], queue=False)\
    .success(lambda: None, None, chatbot, queue=False)
+    chat_lora.change(select_dolphin_lora, [chat_lora, state], [chat_lora, state], queue=True, show_progress="full")\
+    .success(lambda: None, None, chatbot, queue=False)
    chat_mode.change(select_dolphin_sysprompt, [chat_mode, state], [chat_sysmsg, state], queue=False)
    chat_lang.change(select_dolphin_language, [chat_lang, state], [chat_sysmsg, state], queue=False)
    gr.on(
@@ -123,6 +136,14 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
        queue=True,
        trigger_mode="once",
    )
+    gr.on(
+        triggers=[chat_add_lora_text.submit, chat_add_lora_submit.click],
+        fn=add_dolphin_loras,
+        inputs=[chat_add_lora_text],
+        outputs=[chat_lora],
+        queue=True,
+        trigger_mode="once",
+    )
 
    copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS)
    copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS)
@@ -131,6 +152,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
 
    pg_chat_model.change(select_dolphin_model, [pg_chat_model, state], [pg_chat_model, pg_chat_format, pg_chat_model_info, state], queue=True, show_progress="full")
    pg_chat_format.change(select_dolphin_format, [pg_chat_format, state], [pg_chat_format, state], queue=False)
+    pg_chat_lora.change(select_dolphin_lora, [pg_chat_lora, state], [pg_chat_lora, state], queue=True, show_progress="full")
    gr.on(
        triggers=[pg_chat_add_text.submit, pg_chat_add_submit.click],
        fn=add_dolphin_models,
@@ -139,6 +161,15 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
        queue=True,
        trigger_mode="once",
    )
+    gr.on(
+        triggers=[pg_chat_add_lora_text.submit, pg_chat_add_lora_submit.click],
+        fn=add_dolphin_loras,
+        inputs=[pg_chat_add_lora_text],
+        outputs=[pg_chat_lora],
+        queue=True,
+        trigger_mode="once",
+    )
+
 
 if __name__ == "__main__":
    app.queue()
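The app.py side of this commit adds a (currently hidden) "Loras" accordion to both chat panels and wires the new controls to the llmdolphin helpers: chat_lora / pg_chat_lora feed select_dolphin_lora, and the "Update lists of loras" textbox/button pair feeds add_dolphin_loras through gr.on() to refresh the dropdown. The following is a minimal, self-contained sketch of that same gr.on() wiring pattern; refresh_loras and the LORAS list are illustrative stand-ins, not the Space's actual helpers.

    # Sketch of the Textbox/Button -> function -> Dropdown refresh pattern used above.
    # refresh_loras is a stand-in for add_dolphin_loras (which would validate/download a GGUF LoRA).
    import gradio as gr

    LORAS = ["none"]

    def refresh_loras(query: str):
        # Stand-in: just record the query and return an update for the Dropdown.
        if query and query not in LORAS:
            LORAS.append(query)
        return gr.update(choices=LORAS, value=LORAS[-1])

    with gr.Blocks() as demo:
        lora = gr.Dropdown(choices=LORAS, value=LORAS[0], allow_custom_value=True, label="Lora")
        add_text = gr.Textbox(label="URL or Repo ID", lines=1)
        add_submit = gr.Button("Update lists of loras")
        gr.on(
            triggers=[add_text.submit, add_submit.click],
            fn=refresh_loras,
            inputs=[add_text],
            outputs=[lora],
            queue=True,
            trigger_mode="once",
        )

    if __name__ == "__main__":
        demo.launch()

As in the diff, trigger_mode="once" keeps the refresh from being re-triggered while a previous run of the same event is still pending.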
    	
llmdolphin.py CHANGED

@@ -4,6 +4,8 @@ from pathlib import Path
 import re
 import torch
 import gc
+import os
+import urllib
 from typing import Any
 from huggingface_hub import hf_hub_download, HfApi
 from llama_cpp import Llama
@@ -15,14 +17,17 @@ from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
 import wrapt_timeout_decorator
 from llama_cpp_agent.messages_formatter import MessagesFormatter
 from formatter import mistral_v1_formatter, mistral_v2_formatter, mistral_v3_tekken_formatter
-from llmenv import llm_models, llm_models_dir, llm_formats, llm_languages, dolphin_system_prompt
+from llmenv import llm_models, llm_models_dir, llm_loras, llm_loras_dir, llm_formats, llm_languages, dolphin_system_prompt
 import subprocess
 subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 
 
-
+llm_models_list = []
+llm_loras_list = []
 default_llm_model_filename = list(llm_models.keys())[0]
+default_llm_lora_filename = list(llm_loras.keys())[0]
 device = "cuda" if torch.cuda.is_available() else "cpu"
+HF_TOKEN = os.getenv("HF_TOKEN", False)
 
 
 def to_list(s: str):
@@ -68,43 +73,170 @@ def is_japanese(s: str):
    return False
 
 
-def 
-
-
-
-
-
-
+def get_dir_size(path: str):
+    total = 0
+    with os.scandir(path) as it:
+        for entry in it:
+            if entry.is_file():
+                total += entry.stat().st_size
+            elif entry.is_dir():
+                total += get_dir_size(entry.path)
+    return total
+
+
+def get_dir_size_gb(path: str):
+    try:
+        size_gb = get_dir_size(path) / (1024 ** 3)
+        print(f"Dir size: {size_gb:.2f} GB ({path})")
+    except Exception as e:
+        size_gb = 999
+        print(f"Error while retrieving the used storage: {e}.")
+    finally:
+        return size_gb
+
+
+def clean_dir(path: str, size_gb: float, limit_gb: float):
+    try:
+        files = os.listdir(path)
+        files = [os.path.join(path, f) for f in files if f.endswith(".gguf") and default_llm_model_filename not in f and default_llm_lora_filename not in f]
+        files.sort(key=os.path.getatime, reverse=False)
+        req_bytes = int((size_gb - limit_gb) * (1024 ** 3))
+        for file in files:
+            if req_bytes < 0: break
+            size = os.path.getsize(file)
+            Path(file).unlink()
+            req_bytes -= size
+            print(f"Deleted: {file}")
+    except Exception as e:
+        print(e)
+
+
+def update_storage(path: str, limit_gb: float=50.0):
+    size_gb = get_dir_size_gb(path)
+    if size_gb > limit_gb:
+        print("Cleaning storage...")
+        clean_dir(path, size_gb, limit_gb)
+        #get_dir_size_gb(path)
+
+
+def split_hf_url(url: str):
+    try:
+        s = list(re.findall(r'^(?:https?://huggingface.co/)(?:(datasets|spaces)/)?(.+?/.+?)/\w+?/.+?/(?:(.+)/)?(.+?.\w+)(?:\?download=true)?$', url)[0])
+        if len(s) < 4: return "", "", "", ""
+        repo_id = s[1]
+        if s[0] == "datasets": repo_type = "dataset"
+        elif s[0] == "spaces": repo_type = "space"
+        else: repo_type = "model"
+        subfolder = urllib.parse.unquote(s[2]) if s[2] else None
+        filename = urllib.parse.unquote(s[3])
+        return repo_id, filename, subfolder, repo_type
+    except Exception as e:
+        print(e)
+
+
+def hf_url_exists(url: str):
+    hf_token = HF_TOKEN
+    repo_id, filename, subfolder, repo_type = split_hf_url(url)
+    api = HfApi(token=hf_token)
+    return api.file_exists(repo_id=repo_id, filename=filename, repo_type=repo_type, token=hf_token)
+
+
+def get_repo_type(repo_id: str):
+    try:
+        api = HfApi(token=HF_TOKEN)
+        if api.repo_exists(repo_id=repo_id, repo_type="dataset", token=HF_TOKEN): return "dataset"
+        elif api.repo_exists(repo_id=repo_id, repo_type="space", token=HF_TOKEN): return "space"
+        elif api.repo_exists(repo_id=repo_id, token=HF_TOKEN): return "model"
+        else: return None
+    except Exception as e:
+        print(e)
+        raise Exception(f"Repo not found: {repo_id} {e}")
+
+
+def get_hf_blob_url(repo_id: str, repo_type: str, path: str):
+    if repo_type == "model": return f"https://huggingface.co/{repo_id}/blob/main/{path}"
+    elif repo_type == "dataset": return f"https://huggingface.co/datasets/{repo_id}/blob/main/{path}"
+    elif repo_type == "space": return f"https://huggingface.co/spaces/{repo_id}/blob/main/{path}"
+
+
+def get_gguf_url(s: str):
+    def find_gguf(d: dict, keys: dict):
+        paths = []
+        for key, size in keys.items():
+            if size != 0: l = [p for p, s in d.items() if key.lower() in p.lower() and s < size]
+            else: l = [p for p in d.keys() if key.lower() in p.lower()]
+            if len(l) > 0: paths.append(l[0])
+        if len(paths) > 0: return paths[0]
+        return list(d.keys())[0]
+
+    try:
+        if s.lower().endswith(".gguf"): return s
+        repo_type = get_repo_type(s)
+        if repo_type is None: return s
+        repo_id = s
+        api = HfApi(token=HF_TOKEN)
+        gguf_dict = {i.path: i.size for i in api.list_repo_tree(repo_id=repo_id, repo_type=repo_type, recursive=True, token=HF_TOKEN) if i.path.endswith(".gguf")}
+        if len(gguf_dict) == 0: return s
+        return get_hf_blob_url(repo_id, repo_type, find_gguf(gguf_dict, {"Q5_K_M": 6000000000, "Q4_K_M": 0, "Q4": 0}))
+    except Exception as e:
+        print(e)
+        return s
+
+
+def download_hf_file(directory, url, progress=gr.Progress(track_tqdm=True)):
+    hf_token = HF_TOKEN
+    repo_id, filename, subfolder, repo_type = split_hf_url(url)
+    try:
+        print(f"Downloading {url} to {directory}")
+        if subfolder is not None: path = hf_hub_download(repo_id=repo_id, filename=filename, subfolder=subfolder, repo_type=repo_type, local_dir=directory, token=hf_token)
+        else: path = hf_hub_download(repo_id=repo_id, filename=filename, repo_type=repo_type, local_dir=directory, token=hf_token)
+        return path
    model_files = Path(llm_models_dir).glob('*.gguf')
    for path in model_files:
-
-
-
-    llm_models_tupled_list = list_uniq(llm_models_tupled_list)
-    return llm_models_tupled_list
-
-
-def download_llm_models():
-    global llm_models_tupled_list
-    llm_models_tupled_list = []
-    for k, v in llm_models.items():
-        try:
-            hf_hub_download(repo_id = v[0], filename = k, local_dir = llm_models_dir)
-        except Exception:
-            continue
-        name = k
-        value = k
-        llm_models_tupled_list.append((name, value))
 
 
 def download_llm_model(filename: str):
-    if not 
    try:
-        hf_hub_download(repo_id
    except Exception as e:
        print(e)
        return default_llm_model_filename
-
    return filename
 
 
@@ -122,9 +254,18 @@ def select_dolphin_model(filename: str, state: dict, progress=gr.Progress(track_
    value = download_llm_model(filename)
    progress(1, desc="Model loaded.")
    md = get_dolphin_model_info(filename)
    return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md), state
 
 
 def select_dolphin_format(format_name: str, state: dict):
    set_state(state, "override_llm_format", llm_formats[format_name])
    return gr.update(value=format_name), state
@@ -134,7 +275,11 @@ download_llm_model(default_llm_model_filename)
 
 
 def get_dolphin_models():
-    return 
 
 
 def get_llm_formats():
@@ -157,33 +302,41 @@ def get_dolphin_model_format(filename: str):
 
 def add_dolphin_models(query: str, format_name: str):
    global llm_models
-    api = HfApi()
-    add_models = {}
-    format = llm_formats[format_name]
-    filename = ""
-    repo = ""
    try:
-
-
-
-
-
-
-
-
-
-
-
-            if not api.repo_exists(repo_id = repo) or not api.file_exists(repo_id = repo, filename = filename): return gr.update()
-            add_models[filename] = [repo, format]
        else: return gr.update()
    except Exception as e:
        print(e)
        return gr.update()
    llm_models = (llm_models | add_models).copy()
-
    choices = get_dolphin_models()
-    return gr.update(choices=choices, value=choices[-1]
 
 
 def get_dolphin_sysprompt(state: dict={}):
@@ -221,6 +374,7 @@ def get_raw_prompt(msg: str):
    return re.sub(r'[*/:_"#\n]', ' ', ", ".join(m)).lower() if m else ""
 
 
 @torch.inference_mode()
 @spaces.GPU(duration=59)
 def dolphin_respond(
@@ -233,6 +387,8 @@ def dolphin_respond(
    top_p: float = 0.95,
    top_k: int = 40,
    repeat_penalty: float = 1.1,
    state: dict = {},
    progress=gr.Progress(track_tqdm=True),
 ):
@@ -244,12 +400,18 @@ def dolphin_respond(
        if override_llm_format: chat_template = override_llm_format
        else: chat_template = llm_models[model][1]
 
        llm = Llama(
            model_path=str(model_path),
-            flash_attn=True,
            n_gpu_layers=81, # 81
            n_batch=1024,
            n_ctx=8192, #8192
        )
        provider = LlamaCppPythonProvider(llm)
 
@@ -339,6 +501,8 @@ def dolphin_respond_auto(
    top_p: float = 0.95,
    top_k: int = 40,
    repeat_penalty: float = 1.1,
    state: dict = {},
    progress=gr.Progress(track_tqdm=True),
 ):
@@ -351,12 +515,18 @@ def dolphin_respond_auto(
        if override_llm_format: chat_template = override_llm_format
        else: chat_template = llm_models[model][1]
 
        llm = Llama(
            model_path=str(model_path),
-            flash_attn=True,
            n_gpu_layers=81, # 81
            n_batch=1024,
            n_ctx=8192, #8192
        )
        provider = LlamaCppPythonProvider(llm)
 
@@ -452,6 +622,8 @@ def respond_playground(
    top_p: float = 0.95,
    top_k: int = 40,
    repeat_penalty: float = 1.1,
    state: dict = {},
    progress=gr.Progress(track_tqdm=True),
 ):
@@ -462,12 +634,18 @@ def respond_playground(
        if override_llm_format: chat_template = override_llm_format
        else: chat_template = llm_models[model][1]
 
        llm = Llama(
            model_path=str(model_path),
-            flash_attn=True,
            n_gpu_layers=81, # 81
            n_batch=1024,
            n_ctx=8192, #8192
        )
        provider = LlamaCppPythonProvider(llm)
     | 
| 194 | 
         
            +
                except Exception as e:
         
     | 
| 195 | 
         
            +
                    print(f"Failed to download: {e}")
         
     | 
| 196 | 
         
            +
                    return None
         
     | 
| 197 | 
         
            +
             
     | 
| 198 | 
         
            +
             
     | 
| 199 | 
         
            +
            def update_llm_model_list():
         
     | 
| 200 | 
         
            +
                global llm_models_list
         
     | 
| 201 | 
         
            +
                llm_models_list = []
         
     | 
| 202 | 
         
            +
                for k in llm_models.keys():
         
     | 
| 203 | 
         
            +
                    llm_models_list.append(k)
         
     | 
| 204 | 
         
             
                model_files = Path(llm_models_dir).glob('*.gguf')
         
     | 
| 205 | 
         
             
                for path in model_files:
         
     | 
| 206 | 
         
            +
                    llm_models_list.append(path.name)
         
     | 
| 207 | 
         
            +
                llm_models_list = list_uniq(llm_models_list)
         
     | 
| 208 | 
         
            +
                return llm_models_list
         
     | 
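
Taken together, these new helpers resolve a repo id or Hub URL to a downloadable GGUF blob. A minimal usage sketch, not part of the commit (the repo id below is a placeholder):

from llmdolphin import get_gguf_url, hf_url_exists, download_hf_file

url = get_gguf_url("some-user/Some-Model-GGUF")  # accepts a repo id or a direct .gguf URL
if hf_url_exists(url):                           # verifies the blob actually exists on the Hub
    local_path = download_hf_file("./llm_models", url)  # returns the local path, or None on failure
    print(local_path)
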
 
 
 def download_llm_model(filename: str):
+    if filename not in llm_models.keys(): return default_llm_model_filename
     try:
+        hf_hub_download(repo_id=llm_models[filename][0], filename=filename, local_dir=llm_models_dir, token=HF_TOKEN)
     except Exception as e:
         print(e)
         return default_llm_model_filename
+    update_llm_model_list()
+    return filename
+
+
+def update_llm_lora_list():
+    global llm_loras_list
+    llm_loras_list = list(llm_loras.keys()).copy()
+    model_files = Path(llm_loras_dir).glob('*.gguf')
+    for path in model_files:
+        llm_loras_list.append(path.name)
+    llm_loras_list = list_uniq([""] + llm_loras_list)
+    return llm_loras_list
+
+
+def download_llm_lora(filename: str):
+    if not filename in llm_loras.keys(): return ""
+    try:
+        download_hf_file(llm_loras_dir, llm_loras[filename])
+    except Exception as e:
+        print(e)
+        return ""
+    update_llm_lora_list()
     return filename
 
 
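
A quick sketch of how the new lora download path behaves; illustrative only, the filename is a placeholder key (real keys come from llm_loras in llmenv.py):

from llmdolphin import download_llm_lora, update_llm_lora_list

name = download_llm_lora("some-adapter.gguf")  # returns "" if the key is unknown or the download fails
print(name)
print(update_llm_lora_list()[:3])              # the list keeps a leading "" entry so "no lora" stays selectable
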
     value = download_llm_model(filename)
     progress(1, desc="Model loaded.")
     md = get_dolphin_model_info(filename)
+    update_storage(llm_models_dir)
     return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md), state
 
 
+def select_dolphin_lora(filename: str, state: dict, progress=gr.Progress(track_tqdm=True)):
+    progress(0, desc="Loading lora...")
+    value = download_llm_lora(filename)
+    progress(1, desc="Lora loaded.")
+    update_storage(llm_loras_dir)
+    return gr.update(value=value, choices=get_dolphin_loras()), state
+
+
 def select_dolphin_format(format_name: str, state: dict):
     set_state(state, "override_llm_format", llm_formats[format_name])
     return gr.update(value=format_name), state
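
select_dolphin_lora is shaped like the existing model-selection handler, so it can be wired to a dropdown the same way. A hypothetical wiring sketch (component names are illustrative, not from this commit):

import gradio as gr
from llmdolphin import select_dolphin_lora, get_dolphin_loras

with gr.Blocks() as demo:
    state = gr.State({})
    lora_dd = gr.Dropdown(choices=get_dolphin_loras(), value="", label="LoRA")
    # selecting an entry downloads the lora and refreshes the dropdown choices
    lora_dd.change(select_dolphin_lora, inputs=[lora_dd, state], outputs=[lora_dd, state])
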
 
 
 def get_dolphin_models():
+    return update_llm_model_list()
+
+
+def get_dolphin_loras():
+    return update_llm_lora_list()
 
 
 def get_llm_formats():
 
 def add_dolphin_models(query: str, format_name: str):
     global llm_models
     try:
+        add_models = {}
+        format = llm_formats[format_name]
+        filename = ""
+        repo = ""
+        query = get_gguf_url(query)
+        if hf_url_exists(query):
+            s = list(re.findall(r'^https?://huggingface.co/(.+?/.+?)/(?:blob|resolve)/main/(.+.gguf)(?:\?download=true)?$', query)[0])
+            if len(s) == 2:
+                repo = s[0]
+                filename = s[1]
+                add_models[filename] = [repo, format]
         else: return gr.update()
     except Exception as e:
         print(e)
         return gr.update()
     llm_models = (llm_models | add_models).copy()
+    update_llm_model_list()
     choices = get_dolphin_models()
+    return gr.update(choices=choices, value=choices[-1])
+
+
+def add_dolphin_loras(query: str):
+    global llm_loras
+    try:
+        add_loras = {}
+        query = get_gguf_url(query)
+        if hf_url_exists(query): add_loras[Path(query).name] = query
+    except Exception as e:
+        print(e)
+        return gr.update()
+    llm_loras = (llm_loras | add_loras).copy()
+    update_llm_lora_list()
+    choices = get_dolphin_loras()
+    return gr.update(choices=choices, value=choices[-1])
 
 
 def get_dolphin_sysprompt(state: dict={}):
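
The regex in add_dolphin_models splits a Hub blob/resolve URL into a repo id and a GGUF filename. A worked example (the URL is a made-up placeholder):

import re

url = "https://huggingface.co/author/Model-GGUF/blob/main/Model.Q4_K_M.gguf"
pattern = r'^https?://huggingface.co/(.+?/.+?)/(?:blob|resolve)/main/(.+.gguf)(?:\?download=true)?$'
print(list(re.findall(pattern, url)[0]))  # ['author/Model-GGUF', 'Model.Q4_K_M.gguf']
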
     return re.sub(r'[*/:_"#\n]', ' ', ", ".join(m)).lower() if m else ""
 
 
+# https://llama-cpp-python.readthedocs.io/en/latest/api-reference/
 @torch.inference_mode()
 @spaces.GPU(duration=59)
 def dolphin_respond(
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    lora: str = "",
+    lora_scale: float = 1.0,
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
         if override_llm_format: chat_template = override_llm_format
         else: chat_template = llm_models[model][1]
 
+        kwargs = {}
+        if lora:
+            kwargs["lora_path"] = str(Path(f"{llm_loras_dir}/{lora}"))
+            kwargs["lora_scale"] = lora_scale
+        else:
+            kwargs["flash_attn"] = True
         llm = Llama(
             model_path=str(model_path),
             n_gpu_layers=81, # 81
             n_batch=1024,
             n_ctx=8192, #8192
+            **kwargs,
         )
         provider = LlamaCppPythonProvider(llm)
 
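
The same kwargs pattern repeats in the other respond handlers below: when a lora is selected it is passed to llama-cpp-python via lora_path/lora_scale, and flash_attn is only enabled when no lora is loaded. A condensed sketch of the resulting constructor call (paths are placeholders; lora_path, lora_scale and flash_attn are Llama() parameters):

from llama_cpp import Llama

llm = Llama(
    model_path="./llm_models/example.Q4_K_M.gguf",  # placeholder
    n_gpu_layers=81,
    n_batch=1024,
    n_ctx=8192,
    lora_path="./llm_loras/example-adapter.gguf",   # only set when a lora is chosen
    lora_scale=1.0,                                  # otherwise flash_attn=True is passed instead
)
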
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    lora: str = "",
+    lora_scale: float = 1.0,
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
         if override_llm_format: chat_template = override_llm_format
         else: chat_template = llm_models[model][1]
 
+        kwargs = {}
+        if lora:
+            kwargs["lora_path"] = str(Path(f"{llm_loras_dir}/{lora}"))
+            kwargs["lora_scale"] = lora_scale
+        else:
+            kwargs["flash_attn"] = True
         llm = Llama(
             model_path=str(model_path),
             n_gpu_layers=81, # 81
             n_batch=1024,
             n_ctx=8192, #8192
+            **kwargs,
         )
         provider = LlamaCppPythonProvider(llm)
 
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    lora: str = "",
+    lora_scale: float = 1.0,
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
         if override_llm_format: chat_template = override_llm_format
         else: chat_template = llm_models[model][1]
 
+        kwargs = {}
+        if lora:
+            kwargs["lora_path"] = str(Path(f"{llm_loras_dir}/{lora}"))
+            kwargs["lora_scale"] = lora_scale
+        else:
+            kwargs["flash_attn"] = True
         llm = Llama(
             model_path=str(model_path),
             n_gpu_layers=81, # 81
             n_batch=1024,
             n_ctx=8192, #8192
+            **kwargs,
         )
         provider = LlamaCppPythonProvider(llm)
 
    	
        llmenv.py
    CHANGED
    
@@ -1,5 +1,7 @@
 from llama_cpp_agent import MessagesFormatterType
 from formatter import mistral_v1_formatter, mistral_v2_formatter, mistral_v3_tekken_formatter
+from pathlib import Path
+
 
 llm_models = {
     #"": ["", MessagesFormatterType.LLAMA_3],
@@ -96,6 +98,20 @@ llm_models = {
     #"": ["", MessagesFormatterType.OPEN_CHAT],
     #"": ["", MessagesFormatterType.CHATML],
     #"": ["", MessagesFormatterType.PHI_3],
+    "MN-12B-solracht-EXPERIMENTAL-011425.Q4_K_M.gguf": ["mradermacher/MN-12B-solracht-EXPERIMENTAL-011425-GGUF", MessagesFormatterType.MISTRAL],
+    "Llamaverse-3.1-8B-Instruct.Q5_K_M.gguf": ["mradermacher/Llamaverse-3.1-8B-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
+    "Morphing-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/Morphing-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "Not_Even_My_Final_Form-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/Not_Even_My_Final_Form-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-7B-sft-ultrachat.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-sft-ultrachat-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Kosmos-EVAA-Franken-Immersive-v40-8B.Q5_K_M.gguf": ["mradermacher/Kosmos-EVAA-Franken-Immersive-v40-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "light-7b-beta.Q5_K_M.gguf": ["mradermacher/light-7b-beta-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "light-3B-beta.Q5_K_M.gguf": ["mradermacher/light-3B-beta-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Magnolia-v4-12B.Q4_K_M.gguf": ["mradermacher/Magnolia-v4-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "Darkest-muse-v1-lorablated-v2.i1-Q4_K_M.gguf": ["mradermacher/Darkest-muse-v1-lorablated-v2-i1-GGUF", MessagesFormatterType.ALPACA],
+    "Eunoia-Gemma-9B-o1-Indo.Q4_K_M.gguf": ["mradermacher/Eunoia-Gemma-9B-o1-Indo-GGUF", MessagesFormatterType.ALPACA],
+    "VISION-1.Q5_K_M.gguf": ["mradermacher/VISION-1-GGUF", MessagesFormatterType.LLAMA_3],
+    "RigoChat-7b-v2.i1-Q5_K_M.gguf": ["mradermacher/RigoChat-7b-v2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "italy-10b-q5_k_m.gguf": ["ClaudioItaly/Italy-10B-Q5_K_M-GGUF", MessagesFormatterType.ALPACA],
     "SJT-14B.Q4_K_M.gguf": ["mradermacher/SJT-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Hermes-Llama-3.2-CoT-Summary.Q5_K_M.gguf": ["mradermacher/Hermes-Llama-3.2-CoT-Summary-GGUF", MessagesFormatterType.LLAMA_3],
     "Rombo-LLM-V2.5-Qwen-7b.Q5_K_M.gguf": ["mradermacher/Rombo-LLM-V2.5-Qwen-7b-GGUF", MessagesFormatterType.OPEN_CHAT],
@@ -2080,7 +2096,14 @@ llm_models = {
 }
 
 
+llm_loras_urls = [
+    "https://huggingface.co/ggml-org/LoRA-Qwen2.5-32B-Instruct-abliterated-F16-GGUF/blob/main/LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf",
+]
+llm_loras = {str(Path(u).name): u for u in llm_loras_urls}
+
+
 llm_models_dir = "./llm_models"
+llm_loras_dir = "./llm_loras"
 
 
 llm_formats = {
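
The new llm_loras dict keys each URL by its filename, which is what the lora dropdown and download_llm_lora expect. A worked example using the single entry above:

from pathlib import Path

url = "https://huggingface.co/ggml-org/LoRA-Qwen2.5-32B-Instruct-abliterated-F16-GGUF/blob/main/LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf"
print(Path(url).name)
# -> LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf
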