chansung committed on
Commit
8888fc4
·
1 Parent(s): aa6543c

upload v1687507574 model

Browse files
.ipynb_checkpoints/README-checkpoint.md CHANGED
@@ -10,4 +10,4 @@ pinned: false
10
  license: apache-2.0
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
10
  license: apache-2.0
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference---
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -8,7 +8,7 @@ from huggingface_hub import Repository
8
 
9
  local_path = "hf_model"
10
 
11
- model_version = "v1687488717"
12
  model_repo_id = "chansung/kerasnlp-gpt2-alpaca-pipeline"
13
  model_repo_url = f"https://huggingface.co/{model_repo_id}"
14
 
@@ -23,7 +23,7 @@ _ = _clone_and_checkout(model_repo_url, local_path, model_version)
23
  model = tf.saved_model.load(local_path, tags=[tag_constants.SERVING])
24
  gpt_lm_predict_fn = model.signatures["serving_default"]
25
 
26
- def gen_text(prompt, max_length=512):
27
  prompt = tf.constant(f"### Instruction:\n{prompt}\n\n### Response:\n")
28
  max_length = tf.constant(max_length, dtype="int64")
29
 
 
8
 
9
  local_path = "hf_model"
10
 
11
+ model_version = "v1687507574"
12
  model_repo_id = "chansung/kerasnlp-gpt2-alpaca-pipeline"
13
  model_repo_url = f"https://huggingface.co/{model_repo_id}"
14
 
 
23
  model = tf.saved_model.load(local_path, tags=[tag_constants.SERVING])
24
  gpt_lm_predict_fn = model.signatures["serving_default"]
25
 
26
+ def gen_text(prompt, max_length=256):
27
  prompt = tf.constant(f"### Instruction:\n{prompt}\n\n### Response:\n")
28
  max_length = tf.constant(max_length, dtype="int64")
29
 
README.md CHANGED
@@ -11,15 +11,3 @@ license: apache-2.0
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference---
14
- title: KerasNLP GPT2 Alpaca
15
- emoji: 📊
16
- colorFrom: red
17
- colorTo: yellow
18
- sdk: gradio
19
- sdk_version: 3.35.2
20
- app_file: app.py
21
- pinned: false
22
- license: apache-2.0
23
- ---
24
-
25
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference---
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -8,7 +8,7 @@ from huggingface_hub import Repository
8
 
9
  local_path = "hf_model"
10
 
11
- model_version = "v1687488717"
12
  model_repo_id = "chansung/kerasnlp-gpt2-alpaca-pipeline"
13
  model_repo_url = f"https://huggingface.co/{model_repo_id}"
14
 
@@ -23,7 +23,7 @@ _ = _clone_and_checkout(model_repo_url, local_path, model_version)
23
  model = tf.saved_model.load(local_path, tags=[tag_constants.SERVING])
24
  gpt_lm_predict_fn = model.signatures["serving_default"]
25
 
26
- def gen_text(prompt, max_length=512):
27
  prompt = tf.constant(f"### Instruction:\n{prompt}\n\n### Response:\n")
28
  max_length = tf.constant(max_length, dtype="int64")
29
 
 
8
 
9
  local_path = "hf_model"
10
 
11
+ model_version = "v1687507574"
12
  model_repo_id = "chansung/kerasnlp-gpt2-alpaca-pipeline"
13
  model_repo_url = f"https://huggingface.co/{model_repo_id}"
14
 
 
23
  model = tf.saved_model.load(local_path, tags=[tag_constants.SERVING])
24
  gpt_lm_predict_fn = model.signatures["serving_default"]
25
 
26
+ def gen_text(prompt, max_length=256):
27
  prompt = tf.constant(f"### Instruction:\n{prompt}\n\n### Response:\n")
28
  max_length = tf.constant(max_length, dtype="int64")
29