freddyaboulton HF Staff committed on
Commit
b4f0710
·
verified ·
1 Parent(s): 22e42c8

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. requirements.txt +2 -2
  2. run.ipynb +1 -1
  3. run.py +1 -1
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- gradio-client @ git+https://github.com/gradio-app/gradio@4ba7b238e22ac042de14a6b69aa9d61536ddffba#subdirectory=client/python
2
- https://gradio-builds.s3.amazonaws.com/4ba7b238e22ac042de14a6b69aa9d61536ddffba/gradio-4.40.0-py3-none-any.whl
3
  torch
4
  transformers
 
1
+ gradio-client @ git+https://github.com/gradio-app/gradio@890bae3942cc19f2b9040cfb6792adaa3cd478b0#subdirectory=client/python
2
+ https://gradio-builds.s3.amazonaws.com/890bae3942cc19f2b9040cfb6792adaa3cd478b0/gradio-4.40.0-py3-none-any.whl
3
  torch
4
  transformers
run.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: unified_demo_text_generation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model = 'gpt2')\n", "\n", "def generate_text(text_prompt):\n", " response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n", " return response[0]['generated_text'] #type: ignore\n", "\n", "textbox = gr.Textbox()\n", "\n", "demo = gr.Interface(generate_text, textbox, textbox)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: unified_demo_text_generation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model = 'gpt2')\n", "\n", "def generate_text(text_prompt):\n", " response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n", " return response[0]['generated_text'] # type: ignore\n", "\n", "textbox = gr.Textbox()\n", "\n", "demo = gr.Interface(generate_text, textbox, textbox)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -5,7 +5,7 @@ generator = pipeline('text-generation', model = 'gpt2')
5
 
6
  def generate_text(text_prompt):
7
  response = generator(text_prompt, max_length = 30, num_return_sequences=5)
8
- return response[0]['generated_text'] #type: ignore
9
 
10
  textbox = gr.Textbox()
11
 
 
5
 
6
  def generate_text(text_prompt):
7
  response = generator(text_prompt, max_length = 30, num_return_sequences=5)
8
+ return response[0]['generated_text'] # type: ignore
9
 
10
  textbox = gr.Textbox()
11