from langchain.llms import OpenAI
from dotenv import load_dotenv
import streamlit as st
import os

load_dotenv()  # take environment variables from .env

# Function to load the OpenAI model and return its response
def get_openai_response(question: str):
    # Initialize the LLM with the API key from the environment
    llm = OpenAI(
        temperature=0.6,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        model="gpt-3.5-turbo-instruct",
    )
    response = llm(question)
    return response

# Initialize the Streamlit app
st.set_page_config(page_title="Q&A Demo")
st.header("My 1st LangChain Application")

user_input = st.text_input("Input:", key="input")
submit = st.button("Ask the question")

# Only call the model once the button is clicked
if submit:
    response = get_openai_response(user_input)
    st.subheader("The response is:")
    st.write(response)