File size: 3,448 Bytes
63a2ee7
 
 
cc7663c
f5bc984
2aa3700
cc7663c
 
 
 
63a2ee7
 
f5bc984
9286286
 
 
 
512ab02
63a2ee7
 
 
 
9286286
 
7c89aa5
51831b1
7c89aa5
011a828
7c89aa5
63a2ee7
 
 
 
 
 
6cbeedf
da63718
6cbeedf
0e43f0b
37f6179
a39c367
37f6179
c8691ae
c0a6979
37f6179
0e43f0b
6cbeedf
a5041f8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
921294b
f5bc984
dcac4d1
 
 
068f083
0ab8c6f
f5bc984
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import numpy as np
import pandas as pd
import requests
import os
import gradio as gr
import json
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())


# Databricks personal access token, read from the environment (populated by the
# load_dotenv(find_dotenv()) call above; expects DATABRICKS_TOKEN in .env).
databricks_token  = os.getenv('DATABRICKS_TOKEN')
# Invocation URL of the Databricks model-serving endpoint ("Mpt-7b-tester" deployment).
model_uri  = "https://dbc-eb788f31-6c73.cloud.databricks.com/serving-endpoints/Mpt-7b-tester/invocations"

def score_model(model_uri, databricks_token, prompt):
  """POST `prompt` to a Databricks model-serving endpoint and return the parsed JSON reply.

  Args:
      model_uri: Full invocation URL of the Databricks serving endpoint.
      databricks_token: Personal access token sent as a Bearer credential.
      prompt: Text prompt forwarded to the model.

  Returns:
      The decoded JSON body of the endpoint's response.

  Raises:
      Exception: If the endpoint responds with a non-200 status code.
      requests.RequestException: On network-level failures or timeout.
  """
  # Single-row frame: the serving endpoint expects a 'dataframe_split' payload.
  # Sampling parameters are hard-coded per row alongside the prompt.
  dataset = pd.DataFrame({
      "prompt": [prompt],
      "temperature": [0.5],
      "max_tokens": [1500],
  })
  headers = {
    "Authorization": f"Bearer {databricks_token}",
    "Content-Type": "application/json",
  }
  # `dataset` is always a DataFrame here, so the original dead fallback to the
  # undefined create_tf_serving_json() (a NameError if ever reached) is removed.
  ds_dict = {'dataframe_split': dataset.to_dict(orient='split')}
  data_json = json.dumps(ds_dict, allow_nan=True)
  # timeout added so a stalled endpoint cannot hang the Gradio handler forever.
  response = requests.post(model_uri, headers=headers, data=data_json, timeout=120)
  if response.status_code != 200:
      raise Exception(f"Request failed with status {response.status_code}, {response.text}")
  return response.json()

def get_completion(prompt):
    """Score `prompt` against the module-level endpoint with the configured token."""
    result = score_model(model_uri, databricks_token, prompt)
    return result

def greet(input):
	"""Gradio handler: wrap an elevator pitch in an Alpaca-style prompt and return the model reply.

	Args:
		input: Elevator-pitch text from the Gradio textbox. (The name shadows
			the builtin `input`; kept unchanged for interface stability.)

	Returns:
		The endpoint's response serialized to a JSON string for the Gradio output.
	"""
	# Persona injected into the Instruction section of the template below.
	sys_msg="You are demanding customer."

	# NOTE(review): "\\n" inside these strings yields a literal backslash-n
	# (not a newline) and the trailing "\" suppresses the real newline, so the
	# rendered prompt contains two-character "\n" sequences. Preserved as-is;
	# confirm whether real newlines were intended before changing.
	instruction = """\\n\
Determine the product or solution, the problem being solved, features, target customer that are being discussed in the \
following user prompt. State if you would use this product and elaborate on why. Also state if you would pay for it and elaborate on why.\
Finally, state if you would invest in it and elaborate on why.\\n\
\\n\
Give a score for the product. Format your response as a JSON object with \
'solution', 'problem', 'features', 'target_customer', 'fg_will_use', 'reason_to_use', 'fg_will_pay', 'reason_to_pay', 'fg_will_invest', 'reason_to_invest', 'score' as the keys.\\n\
	"""

	# Alpaca-style Instruction/Input/Response template actually sent to the model.
	# (An earlier `prompt` string was built here but never used — dead code removed;
	# only `prompt_template` was ever passed to the endpoint.)
	prompt_template = f"""\\n\
Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\\n\
\\n\
### Instruction:\\n\
{sys_msg}\\n\
{instruction}\\n\
\\n\
### Input:\\n\
{input}\\n\
\\n\
### Response:\\n\
	"""

	response = get_completion(prompt_template)
	return json.dumps(response)

# Build the Gradio UI: a 3-line textbox feeding `greet`, rendered as JSON output.
# (Stale commented-out prototype interfaces were removed.)
iface = gr.Interface(fn=greet, inputs=[gr.Textbox(label="Elevator pitch", lines=3)], outputs="json")
iface.launch()