File size: 2,893 Bytes
1de8870
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# -*- coding: utf-8 -*-
"""mohanism.195

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1AvIdAQmhCWUUe6rT9sck2gBGkecNCjEc
"""

!pip install dotenv

from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv())

# Module is `langchain.llms` — the original `langchain.llns` raised ImportError.
from langchain.llms import OpenAI

# Completion-style LLM wrapper; calling it with a plain string returns the
# model's text completion.
llm = OpenAI(model_name="text-davinci-003")
llm("explain large language models in one sentence")

from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)
from langchain.chat_models import ChatOpenAI

# Chat-style model: takes a list of role-tagged messages instead of a string.
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.3)
messages = [
    SystemMessage(content="You are an expert data scientist"),
    HumanMessage(content="Write a Python script that trains a neural network on simulated data ")
]
response = chat(messages)

# `print` takes `end=`, not `ends=` — the original raised TypeError.
print(response.content, end="\n")

from langchain import PromptTemplate

# Prompt with a single {concept} placeholder, filled in via .format() below.
template = """You are an expert data scientist with an expertise in building deep learning models,
Explain the concept of {concept} in a couple of lines
"""

# Keyword is `input_variables` (plural) — the original singular form raised
# a validation error at construction time.
prompt = PromptTemplate(
    input_variables=["concept"],
    template=template,
)

prompt

llm(prompt.format(concept="autoencoder"))

from langchain.chains import LLMChain
# Fixed typos: class is `LLMChain` (not `LLMchain`) and the model variable is
# `llm` (not the undefined `lln`).
chain = LLMChain(llm=llm, prompt=prompt)

# Second chain: expand the first chain's output into a 500-word ELI5 answer.
second_prompt = PromptTemplate(
    input_variables=["ml_concept"],
    template="Turn the concept description of {ml_concept} and explain it to me like I'm five in 500 words",
)
chain_two = LLMChain(llm=llm, prompt=second_prompt)

# Class is `SimpleSequentialChain` — the original misspelling raised ImportError.
from langchain.chains import SimpleSequentialChain
overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)

# Run both chains in sequence: concept -> description -> ELI5 explanation.
explanation = overall_chain.run("autoencoder")
print(explanation)

# The original was missing the space after `import`, a SyntaxError.
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split the explanation into ~100-character chunks with no overlap.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=0,
)

# Named `texts` (plural): every later use in this script — the embedding query
# and Pinecone indexing — refers to `texts`, which the original never defined.
texts = text_splitter.create_documents([explanation])

texts[0].page_content

from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model_name="ada")

# Embed the first chunk's content. The chunk list is `texts` — the original
# script stored it as `text`, making this line a NameError.
query_result = embeddings.embed_query(texts[0].page_content)
query_result

import os
# Package is `pinecone` — the original `pinecome` raised ImportError.
import pinecone
# Module is `langchain.vectorstores` (the original `langchain.vectors` does
# not exist).
from langchain.vectorstores import Pinecone

# Initialize the Pinecone client. `os.getenv` is a function and must be
# called, not subscripted; the original also had a stray `(` that made this
# statement a SyntaxError.
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),
    environment=os.getenv("PINECONE_ENV"),
)

# Embed the chunks and upsert them into the named index; classmethod is
# `from_documents` (original typo: `form_documents`).
index_name = "langchain-quickstart"
search = Pinecone.from_documents(texts, embeddings, index_name=index_name)

# Semantic search over the indexed chunks.
query = "What is magical about an autoencoder?"
result = search.similarity_search(query)

result

# Correct module path is `langchain.agents.agent_toolkits` — the original had
# three typos (`langhain`, `agent`, `agent_toolkets`) and raised ImportError.
from langchain.agents.agent_toolkits import create_python_agent
from langchain.tools.python.tool import PythonREPLTool
from langchain.python import PythonREPL
from langchain.llms.openai import OpenAI

# Build an agent that can write and run Python in a REPL to answer questions.
# Fixes: `max_tokens` belongs inside the OpenAI(...) call (a misplaced `)` made
# this a SyntaxError), and `create_python_agent` requires the `tool=` argument,
# which is why PythonREPLTool is imported above.
agent_executor = create_python_agent(
    llm=OpenAI(temperature=0, max_tokens=1000),
    tool=PythonREPLTool(),
    verbose=True
)

# The original prompt's math was garbled ("3 * x==2 + 2** - 1"); the intended
# quadratic is 3*x**2 + 2*x - 1.
agent_executor.run("Find the roots (zeros) of the quadratic function 3 * x**2 + 2*x - 1")