shukdevdatta123 committed
Commit dbcdb5d · verified · 1 Parent(s): c6e9269

Delete abc.txt

Files changed (1)
  1. abc.txt +0 -118
abc.txt DELETED
@@ -1,118 +0,0 @@
- import gradio as gr
- import openai
- import base64
- from PIL import Image
- import io
-
- # Function to send the request to OpenAI API with an image or text input
- def generate_response(input_text, image, openai_api_key, reasoning_effort="medium", model_choice="o1"):
-     if not openai_api_key:
-         return "Error: No API key provided."
-
-     openai.api_key = openai_api_key
-
-     # Process the input depending on whether it's text or an image
-     if image:
-         # Convert the image to base64 string
-         image_info = get_base64_string_from_image(image)
-         input_text = f"data:image/png;base64,{image_info}"
-
-     # Prepare the messages for OpenAI API
-     if model_choice == "o1":
-         messages = [
-             {"role": "user", "content": [{"type": "image_url", "image_url": {"url": input_text}}]}
-         ]
-     elif model_choice == "o3-mini":
-         messages = [
-             {"role": "user", "content": [{"type": "text", "text": input_text}]}
-         ]
-
-     try:
-         # Call OpenAI API with the selected model
-         response = openai.ChatCompletion.create(
-             model=model_choice,  # Dynamically choose the model (o1 or o3-mini)
-             messages=messages,
-             reasoning_effort=reasoning_effort,  # Set reasoning_effort for the response
-             max_completion_tokens=2000  # Limit response tokens to 2000
-         )
-
-         return response["choices"][0]["message"]["content"]
-     except Exception as e:
-         return f"Error calling OpenAI API: {str(e)}"
-
- # Function to convert an uploaded image to a base64 string
- def get_base64_string_from_image(pil_image):
-     # Convert PIL Image to bytes
-     buffered = io.BytesIO()
-     pil_image.save(buffered, format="PNG")
-     img_bytes = buffered.getvalue()
-     base64_str = base64.b64encode(img_bytes).decode("utf-8")
-     return base64_str
-
- # The function that will be used by Gradio interface
- def chatbot(input_text, image, openai_api_key, reasoning_effort, model_choice, history=[]):
-     response = generate_response(input_text, image, openai_api_key, reasoning_effort, model_choice)
-
-     # Append the response to the history
-     history.append((f"User: {input_text}", f"Assistant: {response}"))
-
-     return "", history
-
- # Function to clear the chat history
- def clear_history():
-     return "", []
-
- # Gradio interface setup
- def create_interface():
-     with gr.Blocks() as demo:
-         gr.Markdown("# Multimodal Chatbot (Text + Image)")
-
-         # Add a description after the title
-         gr.Markdown("""
-         ### Description:
-         This is a multimodal chatbot that can handle both text and image inputs.
-         - You can ask questions or provide text, and the assistant will respond.
-         - You can also upload an image, and the assistant will process it and answer questions about the image.
-         - Enter your OpenAI API key to start interacting with the model.
-         - You can use the 'Clear History' button to remove the conversation history.
-         - "o1" is for image chat and "o3-mini" is for text chat.
-         ### Reasoning Effort:
-         The reasoning effort controls how complex or detailed the assistant's answers should be.
-         - **Low**: Provides quick, concise answers with minimal reasoning or details.
-         - **Medium**: Offers a balanced response with a reasonable level of detail and thought.
-         - **High**: Produces more detailed, analytical, or thoughtful responses, requiring deeper reasoning.
-         """)
-
-         with gr.Row():
-             openai_api_key = gr.Textbox(label="Enter OpenAI API Key", type="password", placeholder="sk-...", interactive=True)
-
-         with gr.Row():
-             image_input = gr.Image(label="Upload an Image", type="pil")  # Image upload input
-             input_text = gr.Textbox(label="Enter Text Question", placeholder="Ask a question or provide text", lines=2)
-
-         with gr.Row():
-             reasoning_effort = gr.Dropdown(
-                 label="Reasoning Effort",
-                 choices=["low", "medium", "high"],
-                 value="medium"
-             )
-             model_choice = gr.Dropdown(
-                 label="Select Model",
-                 choices=["o1", "o3-mini"],
-                 value="o1"  # Default to 'o1' for image-related tasks
-             )
-             submit_btn = gr.Button("Send")
-             clear_btn = gr.Button("Clear History")
-
-         chat_history = gr.Chatbot()
-
-         # Button interactions
-         submit_btn.click(fn=chatbot, inputs=[input_text, image_input, openai_api_key, reasoning_effort, model_choice, chat_history], outputs=[input_text, chat_history])
-         clear_btn.click(fn=clear_history, inputs=[], outputs=[chat_history, chat_history])
-
-     return demo
-
- # Run the interface
- if __name__ == "__main__":
-     demo = create_interface()
-     demo.launch()
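
For reference, the image-chat path in the deleted script hinges on one step: encoding the uploaded PIL image as a base64 PNG and embedding it in a data URL passed as an image_url content part. A minimal standalone sketch of that step follows; the 1x1 test image is a hypothetical stand-in for a Gradio upload, not part of the original file.

# Minimal sketch of the data-URL encoding step used in the deleted abc.txt.
# Assumes Pillow is installed; the 1x1 white image is a hypothetical stand-in
# for an image a user would upload through the Gradio interface.
import base64
import io

from PIL import Image

pil_image = Image.new("RGB", (1, 1), color="white")

# Serialize the PIL image to PNG bytes, then base64-encode them.
buffered = io.BytesIO()
pil_image.save(buffered, format="PNG")
base64_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

# abc.txt placed this string in a data URL and sent it as the image_url content part.
data_url = f"data:image/png;base64,{base64_str}"
print(data_url[:60], "...")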