Commit aa6543c · 1 Parent(s): 44724ed
chansung committed: upload v1687488717 model
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: KerasNLP GPT2 Alpaca
+ emoji: 📊
+ colorFrom: red
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
.ipynb_checkpoints/app-checkpoint.py ADDED
@@ -0,0 +1,46 @@
+ from typing import Text, Any, Dict, Optional
+
+ import gradio as gr
+ import tensorflow as tf
+ import tensorflow_text  # imported for its side effect: registers the TF Text ops the SavedModel needs
+ from tensorflow.python.saved_model import tag_constants
+ from huggingface_hub import Repository
+
+ local_path = "hf_model"
+
+ model_version = "v1687488717"
+ model_repo_id = "chansung/kerasnlp-gpt2-alpaca-pipeline"
+ model_repo_url = f"https://huggingface.co/{model_repo_id}"
+
+ def _clone_and_checkout(repo_url: str, local_path: str, version: str) -> Repository:
+     # Clone the model repo and pin the working tree to the given revision (tag).
+     repository = Repository(
+         local_dir=local_path, clone_from=repo_url
+     )
+     repository.git_checkout(revision=version)
+     return repository
+
+ _ = _clone_and_checkout(model_repo_url, local_path, model_version)
+ model = tf.saved_model.load(local_path, tags=[tag_constants.SERVING])
+ gpt_lm_predict_fn = model.signatures["serving_default"]
+
+ def gen_text(prompt, max_length=512):
+     # Wrap the user input in the Alpaca instruction template the model was tuned on.
+     prompt = tf.constant(f"### Instruction:\n{prompt}\n\n### Response:\n")
+     max_length = tf.constant(max_length, dtype="int64")
+
+     result = gpt_lm_predict_fn(
+         prompt=prompt,
+         max_length=max_length,
+     )
+
+     # The signature returns the full generated text; keep only the response part.
+     return result['result'].numpy().decode('UTF-8').split("### Response:")[-1].strip()
+
+ with gr.Blocks() as demo:
+     instruction = gr.Textbox("Instruction")  # positional arg is the initial value, not the label
+     output = gr.Textbox("Output", lines=5)
+
+     instruction.submit(
+         lambda prompt: gen_text(prompt),
+         instruction, output
+     )
+
+ demo.launch()
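For context, `Repository` is huggingface_hub's older git-based API. The same pin-to-a-tag download could be done without a git clone via `snapshot_download` — a minimal sketch, not part of this commit, reusing the repo id and tag from the file above:

```python
from huggingface_hub import snapshot_download

# Downloads the repo at the pinned tag into the local HF cache and returns
# that path; no git binary or working-tree checkout is needed.
local_path = snapshot_download(
    repo_id="chansung/kerasnlp-gpt2-alpaca-pipeline",
    revision="v1687488717",
)
```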
.ipynb_checkpoints/requirements-checkpoint.txt ADDED
@@ -0,0 +1,3 @@
+ tensorflow
+ tensorflow_text
+ huggingface_hub
README.md CHANGED
@@ -1,12 +1,25 @@
  ---
- title: Kerasnlp Gpt2 Alpaca Pipeline
- emoji: 🦀
- colorFrom: green
- colorTo: gray
+ title: KerasNLP GPT2 Alpaca
+ emoji: 📊
+ colorFrom: red
+ colorTo: yellow
  sdk: gradio
  sdk_version: 3.35.2
  app_file: app.py
  pinned: false
+ license: apache-2.0
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference---
+ title: KerasNLP GPT2 Alpaca
+ emoji: 📊
+ colorFrom: red
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,46 @@
+ from typing import Text, Any, Dict, Optional
+
+ import gradio as gr
+ import tensorflow as tf
+ import tensorflow_text  # imported for its side effect: registers the TF Text ops the SavedModel needs
+ from tensorflow.python.saved_model import tag_constants
+ from huggingface_hub import Repository
+
+ local_path = "hf_model"
+
+ model_version = "v1687488717"
+ model_repo_id = "chansung/kerasnlp-gpt2-alpaca-pipeline"
+ model_repo_url = f"https://huggingface.co/{model_repo_id}"
+
+ def _clone_and_checkout(repo_url: str, local_path: str, version: str) -> Repository:
+     # Clone the model repo and pin the working tree to the given revision (tag).
+     repository = Repository(
+         local_dir=local_path, clone_from=repo_url
+     )
+     repository.git_checkout(revision=version)
+     return repository
+
+ _ = _clone_and_checkout(model_repo_url, local_path, model_version)
+ model = tf.saved_model.load(local_path, tags=[tag_constants.SERVING])
+ gpt_lm_predict_fn = model.signatures["serving_default"]
+
+ def gen_text(prompt, max_length=512):
+     # Wrap the user input in the Alpaca instruction template the model was tuned on.
+     prompt = tf.constant(f"### Instruction:\n{prompt}\n\n### Response:\n")
+     max_length = tf.constant(max_length, dtype="int64")
+
+     result = gpt_lm_predict_fn(
+         prompt=prompt,
+         max_length=max_length,
+     )
+
+     # The signature returns the full generated text; keep only the response part.
+     return result['result'].numpy().decode('UTF-8').split("### Response:")[-1].strip()
+
+ with gr.Blocks() as demo:
+     instruction = gr.Textbox("Instruction")  # positional arg is the initial value, not the label
+     output = gr.Textbox("Output", lines=5)
+
+     instruction.submit(
+         lambda prompt: gen_text(prompt),
+         instruction, output
+     )
+
+ demo.launch()
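The app only consumes the SavedModel's `serving_default` signature (scalar string `prompt`, int64 `max_length`, a string under the `result` key). A rough sketch of how such a signature could have been exported — the `GenerationPipeline` wrapper, preset name, and `export_dir` below are hypothetical, assuming a KerasNLP `GPT2CausalLM`; only the input/output names come from the code above:

```python
import tensorflow as tf
import keras_nlp

class GenerationPipeline(tf.Module):
    """Hypothetical export wrapper; not taken from this repo."""

    def __init__(self, causal_lm):
        super().__init__()
        self.causal_lm = causal_lm

    @tf.function(input_signature=[
        tf.TensorSpec(shape=[], dtype=tf.string, name="prompt"),
        tf.TensorSpec(shape=[], dtype=tf.int64, name="max_length"),
    ])
    def serve(self, prompt, max_length):
        # generate() tokenizes internally with TF Text ops, which is why the
        # consuming app must `import tensorflow_text` before loading the model.
        return {"result": self.causal_lm.generate(prompt, max_length=max_length)}

gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en")
pipeline = GenerationPipeline(gpt2_lm)
tf.saved_model.save(
    pipeline, "export_dir", signatures={"serving_default": pipeline.serve}
)
```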
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ tensorflow
+ tensorflow_text
+ huggingface_hub
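After `pip install -r requirements.txt`, the pinned model can be smoke-tested outside Gradio. This snippet just replays the calls `app.py` already makes, assuming the `hf_model` checkout from the app already exists locally:

```python
import tensorflow as tf
import tensorflow_text  # noqa: F401 -- registers ops the SavedModel needs
from tensorflow.python.saved_model import tag_constants

model = tf.saved_model.load("hf_model", tags=[tag_constants.SERVING])
fn = model.signatures["serving_default"]
out = fn(
    prompt=tf.constant("### Instruction:\nSay hello.\n\n### Response:\n"),
    max_length=tf.constant(64, dtype="int64"),
)
print(out["result"].numpy().decode("UTF-8"))
```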