arjunanand13 committed
Commit 666ffb4 (verified)
1 Parent(s): 6a264b5

Update app.py

Files changed (1): app.py (+14, -1)
app.py CHANGED
@@ -22,6 +22,19 @@ def generate(
         temperature = 1e-2
     top_p = float(top_p)
 
+    template1 = '''Given below are the different type of main video classes
+    {main_categories}
+    You are a text classifier that catergorises the transcript and captions into one main class whose context match with one main class and only generate main class name no need of sub classe or explanation.
+    Give more importance to Transcript while classifying .
+    Transcript: {transcript}
+    Captions: {captions}
+    Return only the answer chosen from list and nothing else
+    Main-class => '''
+    main_categories="x"
+    transcript="y"
+    captions="z"
+    prompt1 = PromptTemplate(template=template1, input_variables=['main_categories', 'transcript', 'captions'])
+
     generate_kwargs = dict(
         temperature=temperature,
         max_new_tokens=max_new_tokens,
@@ -33,7 +46,7 @@ def generate(
 
     formatted_prompt = format_prompt(prompt, history)
 
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    stream = client.text_generation(template1, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
     for response in stream:
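As committed, the stream call passes template1 itself, so the {main_categories}, {transcript} and {captions} placeholders reach the model unrendered, and formatted_prompt is still built but no longer used. Below is a minimal, self-contained sketch of how the template could be rendered before streaming, assuming client is a huggingface_hub.InferenceClient and PromptTemplate comes from LangChain (neither import is visible in this hunk); the model id and max_new_tokens value are placeholders, not taken from the commit.

from huggingface_hub import InferenceClient
from langchain.prompts import PromptTemplate

# Placeholder model id: the commit does not show how `client` is constructed.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

template1 = '''Given below are the different type of main video classes
{main_categories}
You are a text classifier that categorises the transcript and captions into one main class and generates only that main class name, with no sub-classes or explanation.
Give more importance to the Transcript while classifying.
Transcript: {transcript}
Captions: {captions}
Return only the answer chosen from the list and nothing else
Main-class => '''

prompt1 = PromptTemplate(template=template1,
                         input_variables=['main_categories', 'transcript', 'captions'])

# Render the placeholders before calling the endpoint; streaming the raw
# template (as the commit does) sends the literal "{main_categories}" text
# to the model instead of the actual category list, transcript and captions.
rendered = prompt1.format(main_categories="x", transcript="y", captions="z")

stream = client.text_generation(rendered, temperature=1e-2, max_new_tokens=256,
                                stream=True, details=True, return_full_text=False)
output = ""
for response in stream:
    # With details=True each streamed item exposes the generated token text.
    output += response.token.text
print(output)

In the app itself, the rendered string could simply replace template1 in the text_generation call (or be combined with format_prompt(prompt, history)) without touching the rest of the streaming loop.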