import os
from langchain.chains import RetrievalQA
from langchain.llms import AzureOpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
import streamlit as st
from PIL import Image
import time
import random
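# NOTE: these imports use the classic single-package LangChain layout; newer
# releases move most of these modules into langchain_community, so running this
# script as written may require pinning an older langchain release.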

@st.cache_data
def findanswer(Nand_url, Nand_question, randomnumber):
    """Index the PDF at Nand_url and answer Nand_question against it.

    randomnumber only exists so callers can vary it to sidestep the
    st.cache_data cache when a fresh answer is wanted.
    """
    if not Nand_url or not Nand_question:
        return None

    # Load the PDF and embed its chunks with the Azure embedding deployment.
    loader1 = PyPDFLoader(Nand_url)
    langchainembeddings = OpenAIEmbeddings(deployment="textembedding", chunk_size=1)

    index = VectorstoreIndexCreator(
            # split the documents into chunks
            text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),
            # select which embeddings we want to use
            embedding=langchainembeddings,
            # use Chroma as the vector store to index and search embeddings
            vectorstore_cls=Chroma
        ).from_loaders([loader1])
    # st.write("indexed PDF...AI finding answer....please wait")

    # Answer the question that was passed in, not the module-level one.
    answer = index.query(llm=llmgpt3, question=Nand_question, chain_type="map_reduce")
    return answer


image = Image.open('Wipro logo.png')
#st.image(image, width=100)

st.write("Learn best practices in Data Centre Sustainability")




os.environ['OPENAI_API_TYPE'] = 'azure'
os.environ['OPENAI_API_VERSION'] = '2023-03-15-preview'
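# The Azure endpoint and key (OPENAI_API_BASE / OPENAI_API_KEY) are assumed to
# be supplied outside this script, e.g. via the shell environment or Streamlit
# secrets; only the API type and version are pinned here.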

llmgpt3 = AzureOpenAI(deployment_name="testdavanci", model_name="text-davinci-003")
#llmchatgpt = AzureOpenAI(deployment_name="esujnand", model_name="gpt-35-turbo")
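
# A minimal alternative sketch (not wired into the UI): a similar answer could
# be produced with load_qa_chain, which is imported above but otherwise unused.
# The helper name is illustrative; it reuses the llmgpt3 deployment defined
# just above.
def findanswer_with_qa_chain(pdf_url, question):
    # Split the PDF into the same 1,000-character chunks used by findanswer.
    docs = PyPDFLoader(pdf_url).load_and_split(
        CharacterTextSplitter(chunk_size=1000, chunk_overlap=0))
    # Build a map-reduce QA chain over the chunks and run the question.
    chain = load_qa_chain(llmgpt3, chain_type="map_reduce")
    return chain.run(input_documents=docs, question=question)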

samplequestions = ["What is  Energy Star 4.0 Standard?", "What is RoHS Directive?", "What is Green IT?", "Benefits of greening IT?", "Holistic Approach to Green IT",
                   "Using IT: Environmentally Sound Practices", "Designing Green Computers", "Epeat" ]


with st.form("my_form"):

   myurl = st.text_input("What is the URL?", "https://sites.pitt.edu/~dtipper/2011/GreenPaper.pdf")

   yourquestion = st.selectbox(
    'Select',  samplequestions    )    

   # Every form must have a submit button.
   submitted = st.form_submit_button("Ask question")
   if submitted:
      #st.write("AI is looking for the answer...It will take atleast 2 mintutes... Answers will appear below....")
      randomnumber = random.randint(0, 1)
      Nandanswer = findanswer(myurl, yourquestion , randomnumber  )
      st.write(Nandanswer)
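
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py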