Oleg Shulyakov committed
Commit d10d482 · 1 Parent(s): 5a54419

Update README.md

Files changed (1): app.py (+43, -54)
app.py CHANGED
@@ -11,8 +11,9 @@ from pathlib import Path
 from textwrap import dedent
 from apscheduler.schedulers.background import BackgroundScheduler
 
-# used for restarting the space
-SPACE_ID = os.environ.get("SPACE_ID")
+# Space parameters
+SPACE_ID = os.environ.get("SPACE_ID") if os.environ.get("SPACE_ID") else ""
+SPACE_URL = "https://" + SPACE_ID.replace("/", "-") + ".hf.space/" if SPACE_ID else "http://localhost:7860/"
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
 # Folder
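The new `SPACE_URL` relies on the hf.space hosting convention, where a Space `owner/name` is served from the subdomain `owner-name`. A quick sketch (editor's illustration, not part of the commit) of what the expression evaluates to, using a made-up Space ID:

```python
# Hypothetical illustration of the SPACE_URL derivation above.
space_id = "some-user/gguf-tool"  # made-up example; app.py reads this from the SPACE_ID env var

# Spaces are served at https://<owner>-<name>.hf.space/, so "/" maps to "-";
# outside a Space (empty SPACE_ID), fall back to Gradio's default local address.
space_url = "https://" + space_id.replace("/", "-") + ".hf.space/" if space_id else "http://localhost:7860/"
print(space_url)  # https://some-user-gguf-tool.hf.space/
```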
@@ -44,6 +45,9 @@ def escape(s: str) -> str:
     s = s.replace("\n", "<br/>")
     return s
 
+def get_model_creator(model_id: str):
+    return model_id.split('/')[0]
+
 def get_model_name(model_id: str):
     return model_id.split('/')[-1]
 
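Both helpers split a `creator/name` repo ID on the slash; a minimal check (editor's example, not from the commit) with a made-up ID:

```python
# Made-up model ID to show what the two helpers return.
model_id = "example-org/example-7b"

creator = model_id.split('/')[0]      # get_model_creator -> "example-org"
model_name = model_id.split('/')[-1]  # get_model_name -> "example-7b"
assert (creator, model_name) == ("example-org", "example-7b")
```

Note that for an ID with no slash, both helpers return the input string unchanged.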
@@ -251,6 +255,9 @@ def quantize_model(
     return quantized_gguf
 
 def generate_readme(outdir: tempfile.TemporaryDirectory, token: str, model_id: str, new_repo_id: str, gguf_name: str):
+    creator = get_model_creator(model_id)
+    model_name = get_model_name(model_id)
+
     try:
         card = ModelCard.load(model_id, token=token)
     except:
@@ -264,57 +271,39 @@ def generate_readme(outdir: tempfile.TemporaryDirectory, token: str, model_id: s
     card.data.base_model = model_id
     card.text = dedent(
         f"""
-        # {new_repo_id}
-        This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
-        Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.
-
-        ## Use with ollama
-        Install ollama from the [official website](https://ollama.com/).
-
-        Run the model on the CLI.
-        ```sh
-        ollama run hf.co/{model_id}
-        ```
-
-        ## Use with llama.cpp
-        Install llama.cpp through brew (works on Mac and Linux)
-
-        ```bash
-        brew install llama.cpp
-
-        ```
-        Invoke the llama.cpp server or the CLI.
-
-        ### CLI:
-        ```bash
-        llama-cli --hf-repo {new_repo_id} --hf-file {gguf_name} -p "The meaning to life and the universe is"
-        ```
-
-        ### Server:
-        ```bash
-        llama-server --hf-repo {new_repo_id} --hf-file {gguf_name} -c 2048
-        ```
-
-        Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.
-
-        Step 1: Clone llama.cpp from GitHub.
-        ```
-        git clone https://github.com/ggerganov/llama.cpp
-        ```
-
-        Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (for example, LLAMA_CUDA=1 for Nvidia GPUs on Linux).
-        ```
-        cd llama.cpp && LLAMA_CURL=1 make
-        ```
-
-        Step 3: Run inference through the main binary.
-        ```
-        ./llama-cli --hf-repo {new_repo_id} --hf-file {gguf_name} -p "The meaning to life and the universe is"
-        ```
-        or
-        ```
-        ./llama-server --hf-repo {new_repo_id} --hf-file {gguf_name} -c 2048
-        ```
+        # {model_name}
+
+        **Model creator:** [{creator}](https://huggingface.co/{creator})
+        **Original model:** [{model_id}](https://huggingface.co/{model_id})
+        **GGUF quantization:** provided by the [{SPACE_ID}](https://huggingface.co/spaces/{SPACE_ID}) team using `llama.cpp`
+
+        ## Special thanks
+
+        🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) for making all of this possible.
+
+        ## Use with Ollama
+
+        ```bash
+        ollama run hf.co/{new_repo_id}:<quantization>
+        ```
+
+        ## Use with LM Studio
+
+        ```bash
+        lms load {new_repo_id}
+        ```
+
+        ## Use with llama.cpp CLI
+
+        ```bash
+        llama-cli --hf-repo {new_repo_id} --hf-file {gguf_name} -p "The meaning to life and the universe is"
+        ```
+
+        ## Use with llama.cpp Server
+
+        ```bash
+        llama-server --hf-repo {new_repo_id} --hf-file {gguf_name} -c 4096
+        ```
         """
     )
     readme_path = Path(outdir)/"README.md"
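For context, `generate_readme` builds on the `ModelCard` API from `huggingface_hub`; a minimal sketch of the surrounding flow, with hypothetical placeholder values standing in for the function's arguments:

```python
# Sketch of the generate_readme flow around this template (placeholder values).
from pathlib import Path
from textwrap import dedent

from huggingface_hub import ModelCard

model_id = "example-org/example-7b"        # hypothetical source repo
new_repo_id = "some-user/example-7b-GGUF"  # hypothetical quant repo

card = ModelCard.load(model_id)            # app.py falls back to an empty card if this raises
card.data.base_model = model_id            # point the quant back at its source model
card.text = dedent(f"""
    # {new_repo_id}

    (template body as in the diff above)
    """)
card.save(Path("README.md"))               # app.py writes into its temporary outdir instead
```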
@@ -602,7 +591,7 @@ with gr.Blocks(css=css) as demo:
     gr.LoginButton(min_width=250)
 
     gr.HTML("<h1 style=\"text-aling:center;\">Create your own GGUF Quants!</h1>")
-    gr.Markdown("The space takes an HF repo as an input, quantizes it and creates a Public repo containing the selected quant under your HF user namespace.")
+    gr.Markdown(f"The space takes an HF repo as an input, quantizes it and creates a Public repo containing the selected quant under your HF user namespace. Use via {SPACE_URL}")
 
     with gr.Row():
         with gr.Column() as inputs: