File size: 1,482 Bytes
c834a26
c1d1caf
 
 
 
 
 
 
 
 
c834a26
c1d1caf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c834a26
c1d1caf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c834a26
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import gradio as gr
import os
import google.generativeai as genai
# Generation settings shared by every GenerativeModel we build.
generation_config = dict(
    temperature=1,                      # default creative sampling
    top_p=0.95,
    top_k=40,
    max_output_tokens=8192,
    response_mime_type="text/plain",    # plain-text replies only
)

def create_chat_session():
    """Start a fresh chat session on the globally configured model.

    The session is seeded with a short Arabic greeting exchange so the
    model opens the conversation in a consistent tone.

    Returns:
        The chat session object produced by ``Model.start_chat``.
    """
    seed_history = [
        {
            "role": "user",
            "parts": [
                "ุงู„ุณู„ุงู… ุนู„ูŠูƒู… ูƒูŠู ุงู„ุญุงู„\n",
            ],
        },
        {
            "role": "model",
            "parts": [
                "ูˆุนู„ูŠูƒู… ุงู„ุณู„ุงู… ูˆุฑุญู…ุฉ ุงู„ู„ู‡ ูˆุจุฑูƒุงุชู‡.  ุจุฎูŠุฑุŒ ูˆุงู„ุญุงู„ ู…ุนูƒุŸ\n",
            ],
        },
    ]
    return Model.start_chat(history=seed_history)


def get_answer_ai(text):
    """Send *text* to the active chat session and return the reply text.

    If sending fails (e.g. ``AI`` is still ``None`` or the session has
    gone stale), a fresh session is created and the message is retried
    once.

    Args:
        text: The user's message.

    Returns:
        The model's reply as a string.
    """
    global AI
    try:
        response = AI.send_message(text)
    # Original code used a bare ``except:``, which also swallows
    # KeyboardInterrupt/SystemExit; catch only real errors and retry once
    # with a new session.
    except Exception:
        AI = create_chat_session()
        response = AI.send_message(text)
    return response.text
# Module-level state shared by greet() / get_answer_ai().
AI = None        # active chat session; created once an API key is supplied
Model = None     # configured GenerativeModel instance
isActive = ""    # last API key used to configure the model
# NOTE(fix): removed ``print(response.text)`` — ``response`` was undefined at
# module level, so importing/running the script raised NameError immediately.
def greet(text, key):
    """Gradio handler: answer *text* with Gemini, configuring it first if
    an API key is provided.

    Args:
        text: The user's message.
        key: Gemini API key; when non-empty, (re)configures the model and
            starts a fresh chat session before answering.

    Returns:
        The model's reply as a string.
    """
    # FIX: the original declared ``global key`` while ``key`` is also a
    # parameter, which is a SyntaxError in Python. Only the true module
    # globals are declared here; ``isActive`` is included so the key is
    # actually recorded at module level instead of in a dead local.
    global Model, AI, isActive
    if key != "":
        genai.configure(api_key=key)
        Model = genai.GenerativeModel(
            model_name="gemini-1.5-flash-8b",
            generation_config=generation_config,
        )
        isActive = key          # remember which key configured the model
        AI = create_chat_session()
    return get_answer_ai(text)

# Wire the chat handler into a simple Gradio UI:
# two text inputs (user message, Gemini API key) -> one text output.
demo = gr.Interface(
    fn=greet,
    inputs=["text", "text"],
    outputs="text",
)
demo.launch()