ReyDev committed
Commit 42cdc8f · unverified · 1 Parent(s): 125586f

🔧 refactor(ai.py): reorder imports and improve code readability


🔧 refactor(Dockerfile): replace poetry build and pip install with poetry install for simplicity
🔧 refactor(.dockerignore, .gitignore): add newline at end of file for POSIX compliance
✨ feat(.flake8, .pre-commit-config.yaml): add flake8 and pre-commit configuration files for better code quality control

🎨 style(app.py, const.py, makefile): improve code readability by reformatting code and removing unnecessary whitespace
🔥 remove(anthropic): remove unused import in const.py for cleaner code

🔧 chore(pyproject.toml): add flake8, pre-commit to dev-dependencies for better code quality control
🔧 chore(pyproject.toml): configure isort and black for consistent code formatting
🎨 style(settings.py): improve code formatting to adhere to PEP8 standards

Files changed (12)
  1. .dockerignore +1 -1
  2. .flake8 +5 -0
  3. .gitignore +1 -1
  4. .pre-commit-config.yaml +52 -0
  5. Dockerfile +1 -3
  6. ai.py +18 -12
  7. app.py +122 -35
  8. const.py +7 -9
  9. makefile +1 -1
  10. poetry.lock +54 -1
  11. pyproject.toml +29 -0
  12. settings.py +3 -3
.dockerignore CHANGED
@@ -16,4 +16,4 @@ __pycache__
 flagged
 gradio_cached_examples
 
-.venv
\ No newline at end of file
+.venv
.flake8 ADDED
@@ -0,0 +1,5 @@
+[flake8]
+max-line-length = 88
+select = C,E,F,W,B,B9
+ignore = E203, E501, W503, E712, E301, F403, F405, E711
+exclude = .tox,.git,venv,__init__.py
.gitignore CHANGED
@@ -16,4 +16,4 @@ __pycache__
 flagged
 gradio_cached_examples
 
-.venv
\ No newline at end of file
+.venv
.pre-commit-config.yaml ADDED
@@ -0,0 +1,52 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.0.1
+    hooks:
+      - id: trailing-whitespace
+        exclude: ^(.github/|.art/)
+      - id: end-of-file-fixer
+        exclude: ^(.github/|.art/)
+      - id: debug-statements
+
+  - repo: https://github.com/myint/autoflake
+    rev: v1.4
+    hooks:
+      - id: autoflake
+        args:
+          [
+            "--in-place",
+            "--remove-unused-variable",
+            "--ignore-init-module-imports",
+            "--remove-all-unused-imports",
+          ]
+
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.1.0
+    hooks:
+      - id: pyupgrade
+        args: ["--py3-plus"]
+
+  # - repo: https://github.com/asottile/seed-isort-config
+  #   rev: v2.2.0
+  #   hooks:
+  #     - id: seed-isort-config
+
+  - repo: https://github.com/pre-commit/mirrors-isort
+    rev: v5.9.1
+    hooks:
+      - id: isort
+
+  - repo: https://github.com/ambv/black
+    rev: 22.3.0
+    hooks:
+      - id: black
+        language_version: python3.10
+
+  - repo: local
+    hooks:
+      - id: flake8
+        name: flake8
+        types: [python]
+        language: system
+        entry: poetry run flake8 --config .flake8
+        exclude: run.py
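
With this config committed, running pre-commit install once per clone registers the hooks with Git, and pre-commit run --all-files applies them to the whole tree. Note that flake8 is declared as a local hook with language: system, so it goes through poetry run and uses the flake8 version pinned in the lockfile rather than a hook-managed environment.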
Dockerfile CHANGED
@@ -39,9 +39,7 @@ RUN python -m venv /venv
 RUN /venv/bin/pip install --upgrade pip wheel setuptools setuptools_rust
 COPY . .
 
-RUN poetry build && \
-    /venv/bin/pip install --upgrade pip wheel setuptools && \
-    /venv/bin/pip install dist/*.whl
+RUN poetry install
 
 COPY . .
 CMD ["python", "app.py"]
ai.py CHANGED
@@ -1,25 +1,28 @@
-import anthropic
 import os
+
+import anthropic
 from dotenv import load_dotenv
 
+from settings import settings
+
 load_dotenv()
 
-from settings import settings
-
-syncClient = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY,
-                                 timeout=5)
-asyncClient = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY,
-                                       timeout=60)
-class AnthropicCustom():
+
+syncClient = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY, timeout=5)
+asyncClient = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY, timeout=60)
+
+
+class AnthropicCustom:
     def __init__(self, api_key, model, max_tokens=1000, prompt=""):
         self.api_key = api_key
         self.model = model
         self.max_tokens = max_tokens
         self.prompt = prompt
-        if os.environ.get('ANTHROPIC_API_KEY') is not None:
-            api_key = os.environ.get('ANTHROPIC_API_KEY')
+        if os.environ.get("ANTHROPIC_API_KEY") is not None:
+            api_key = os.environ.get("ANTHROPIC_API_KEY")
         else:
-            os.environ['ANTHROPIC_API_KEY'] = api_key
+            os.environ["ANTHROPIC_API_KEY"] = api_key
+
     def get_anthropic_response(self):
         response = syncClient.completions.create(
             prompt=self.prompt,
@@ -27,12 +30,15 @@ class AnthropicCustom():
             max_tokens_to_sample=self.max_tokens,
         )
         return response.completion
+
     async def get_anthropic_response_async(self):
         async for line in await asyncClient.completions.create(
             prompt=self.prompt,
             model=self.model,
             max_tokens_to_sample=self.max_tokens,
-            stop_sequences=[anthropic.HUMAN_PROMPT,],
+            stop_sequences=[
+                anthropic.HUMAN_PROMPT,
+            ],
             stream=True,
         ):
-            yield line.completion
\ No newline at end of file
+            yield line.completion
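
For reference, a minimal usage sketch of the refactored class (assuming a valid ANTHROPIC_API_KEY in the environment or .env, since the module-level clients are built from it at import time; the model name and question are placeholders):

import asyncio

import anthropic

from ai import AnthropicCustom
from settings import settings


async def main():
    # Prompt in the legacy completions format expected by anthropic 0.3.x.
    prompt = f"{anthropic.HUMAN_PROMPT} Say hello.{anthropic.AI_PROMPT}"
    claude = AnthropicCustom(
        api_key=settings.ANTHROPIC_API_KEY,
        model="claude-instant-1",
        max_tokens=100,
        prompt=prompt,
    )
    # Stream completion chunks as they arrive from the shared async client.
    async for chunk in claude.get_anthropic_response_async():
        print(chunk, end="", flush=True)


asyncio.run(main())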
app.py CHANGED
@@ -1,37 +1,58 @@
+import re
+
 import anthropic
 import gradio as gr
-from gradio.components import Dropdown, Checkbox,Textbox,IOComponent
-import re
-from ai import AnthropicCustom
-from const import ClaudeModels,ModelTokenLength,Prompts
+from gradio.components import Checkbox, Dropdown, IOComponent, Textbox
 
+from ai import AnthropicCustom
+from const import ClaudeModels, ModelTokenLength, Prompts
+
 # Define a global variable for the conversation history
 conversation_history = ""
 
-async def interact_with_ai(user_question,token, model, token_length, prompt, prompt_input, memory):
+
+async def interact_with_ai(
+    user_question, token, model, token_length, prompt, prompt_input, memory
+):
     global conversation_history
 
     if memory:
-        prompt = Prompts[prompt].value.format(memory=conversation_history, question=user_question)
+        prompt = Prompts[prompt].value.format(
+            memory=conversation_history, question=user_question
+        )
     else:
         prompt = Prompts[prompt].value.format(memory="", question=user_question)
 
-    if prompt_input != re.search(r'Human: (.*?) \n\nConversations:', prompt).group(1):
-        prompt = re.sub(r'Human: (.*?) \n\nConversations:', f'Human: {prompt_input} \n\nConversations:', prompt)
-
+    if prompt_input != re.search(r"Human: (.*?) \n\nConversations:", prompt).group(1):
+        prompt = re.sub(
+            r"Human: (.*?) \n\nConversations:",
+            f"Human: {prompt_input} \n\nConversations:",
+            prompt,
+        )
+
     # Create an instance of the custom class
-    anth = AnthropicCustom(api_key=token, model=model, max_tokens=token_length, prompt= prompt)
-
+    anth = AnthropicCustom(
+        api_key=token, model=model, max_tokens=token_length, prompt=prompt
+    )
+
     # Create a generator to stream the response
     response_accumulated = ""
     async for response in anth.get_anthropic_response_async():
         response_accumulated += response
         conversation_history = f"{conversation_history} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
         yield response_accumulated
-
 
-async def chat_with_ai(message, history, token,model, token_length, prompt, prompt_input, memory,):
+
+async def chat_with_ai(
+    message,
+    history,
+    token,
+    model,
+    token_length,
+    prompt,
+    prompt_input,
+    memory,
+):
     global conversation_history
     if memory:
         for conversation in history:
@@ -41,39 +62,105 @@ async def chat_with_ai(message, history, token,model, token_length, prompt, prom
     else:
         prompt = Prompts[prompt].value.format(memory="", question=message)
 
-    if prompt_input != re.search(r'Human: (.*?) \n\nConversations:', prompt).group(1):
-        prompt = re.sub(r'Human: (.*?) \n\nConversations:', f'Human: {prompt_input} \n\nConversations:', prompt)
-
+    if prompt_input != re.search(r"Human: (.*?) \n\nConversations:", prompt).group(1):
+        prompt = re.sub(
+            r"Human: (.*?) \n\nConversations:",
+            f"Human: {prompt_input} \n\nConversations:",
+            prompt,
+        )
+
     # Create an instance of the custom class
-    anth = AnthropicCustom(api_key=token, model=model, max_tokens=token_length, prompt= prompt)
-
+    anth = AnthropicCustom(
+        api_key=token, model=model, max_tokens=token_length, prompt=prompt
+    )
+
     # Create a generator to stream the response
     response_accumulated = ""
     async for response in anth.get_anthropic_response_async():
         response_accumulated += response
         yield response_accumulated
 
-promptDropdown:IOComponent = Dropdown(choices=list(Prompts.__members__.keys()),label="Prompt",value=list(Prompts.__members__.keys())[0])
-prompt_input :IOComponent = Textbox(label="Custom Prompt", placeholder="Enter a custom prompt here", lines=3, value=re.search(r'Human: (.*?) \n\nConversations:', Prompts[promptDropdown.value].value).group(1), )
-
 
-iface = gr.Interface(fn=interact_with_ai,
-                     flagging_options=["Inappropriate", "Disrespectful", "Spam"],
-                     allow_flagging='auto',
-                     title="Claude Space",
-                     inputs=[Textbox(label="Question", placeholder="Enter a question here"),Textbox(label="Token", placeholder="Enter a token here",type='password'),Dropdown(choices=[model.value for model in ClaudeModels],label="Model",value=[model.value for model in ClaudeModels][0]),Dropdown(choices=[token.value for token in ModelTokenLength],label="Token Length",value= [token.value for token in ModelTokenLength][0]),promptDropdown,prompt_input,Checkbox(label="Memory", value=False)],
-                     outputs="markdown",
-                     cache_examples=True,
-                     )
+promptDropdown: IOComponent = Dropdown(
+    choices=list(Prompts.__members__.keys()),
+    label="Prompt",
+    value=list(Prompts.__members__.keys())[0],
+)
+prompt_input: IOComponent = Textbox(
+    label="Custom Prompt",
+    placeholder="Enter a custom prompt here",
+    lines=3,
+    value=re.search(
+        r"Human: (.*?) \n\nConversations:", Prompts[promptDropdown.value].value
+    ).group(1),
+)
 
-promptDropdown:IOComponent = Dropdown(choices=list(Prompts.__members__.keys()),label="Prompt",value=list(Prompts.__members__.keys())[0])
-prompt_input :IOComponent = Textbox(label="Custom Prompt", placeholder="Enter a custom prompt here", lines=3, value=re.search(r'Human: (.*?) \n\nConversations:', Prompts[promptDropdown.value].value).group(1), )
 
-cface = gr.ChatInterface(fn=chat_with_ai,additional_inputs=[Textbox(label="Token", placeholder="Enter a token here",type='password'),Dropdown(choices=[model.value for model in ClaudeModels],label="Model",value=[model.value for model in ClaudeModels][0]),Dropdown(choices=[token.value for token in ModelTokenLength],label="Token Length",value= [token.value for token in ModelTokenLength][0]),promptDropdown,prompt_input,Checkbox(label="Memory", value=True)])
+iface = gr.Interface(
+    fn=interact_with_ai,
+    flagging_options=["Inappropriate", "Disrespectful", "Spam"],
+    allow_flagging="auto",
+    title="Claude Space",
+    inputs=[
+        Textbox(label="Question", placeholder="Enter a question here"),
+        Textbox(label="Token", placeholder="Enter a token here", type="password"),
+        Dropdown(
+            choices=[model.value for model in ClaudeModels],
+            label="Model",
+            value=[model.value for model in ClaudeModels][0],
+        ),
+        Dropdown(
+            choices=[token.value for token in ModelTokenLength],
+            label="Token Length",
+            value=[token.value for token in ModelTokenLength][0],
+        ),
+        promptDropdown,
+        prompt_input,
+        Checkbox(label="Memory", value=False),
+    ],
+    outputs="markdown",
+    cache_examples=True,
+)
 
+promptDropdown: IOComponent = Dropdown(
+    choices=list(Prompts.__members__.keys()),
+    label="Prompt",
+    value=list(Prompts.__members__.keys())[0],
+)
+prompt_input: IOComponent = Textbox(
+    label="Custom Prompt",
+    placeholder="Enter a custom prompt here",
+    lines=3,
+    value=re.search(
+        r"Human: (.*?) \n\nConversations:", Prompts[promptDropdown.value].value
+    ).group(1),
+)
 
+cface = gr.ChatInterface(
+    fn=chat_with_ai,
+    additional_inputs=[
+        Textbox(label="Token", placeholder="Enter a token here", type="password"),
+        Dropdown(
+            choices=[model.value for model in ClaudeModels],
+            label="Model",
+            value=[model.value for model in ClaudeModels][0],
+        ),
+        Dropdown(
+            choices=[token.value for token in ModelTokenLength],
+            label="Token Length",
+            value=[token.value for token in ModelTokenLength][0],
+        ),
+        promptDropdown,
+        prompt_input,
+        Checkbox(label="Memory", value=True),
+    ],
+)
 
 
 if __name__ == "__main__":
-    gd = gr.TabbedInterface([iface, cface], tab_names=["Claude Space", "Claude Chat"],title="Claude Space")
-    gd.queue(concurrency_count=75, max_size=100).launch(debug=True, share=False,server_name='0.0.0.0', server_port=7864)
+    gd = gr.TabbedInterface(
+        [iface, cface], tab_names=["Claude Space", "Claude Chat"], title="Claude Space"
+    )
+    gd.queue(concurrency_count=75, max_size=100).launch(
+        debug=True, share=False, server_name="0.0.0.0", server_port=7864
+    )
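
Worth noting: the prompt Dropdown/Textbox pair is built twice, once per interface, presumably because a single Gradio component instance cannot be shared between gr.Interface and gr.ChatInterface; concurrency_count on the queue is a Gradio 3.x option.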
const.py CHANGED
@@ -1,16 +1,15 @@
 import enum
-import anthropic
 
 
 class ClaudeModels(str, enum.Enum):
     Inatant1_1: str = "claude-instant-1"
     Instant1_2: str = "claude-instant-1.2"
-    Instant1_3: str = "claude-instant-1.3"
-    Claude2: str = "claude-2"
-
-
+    Instant1_3: str = "claude-instant-1.3"
+    Claude2: str = "claude-2"
+
+
 class ModelTokenLength(str, enum.Enum):
-    ten : int = 10
+    ten: int = 10
     twenty_five: int = 25
     fifty: int = 50
     hundred: int = 100
@@ -21,8 +20,7 @@ class ModelTokenLength(str, enum.Enum):
     twenty_k: int = 20000
     fifty_k: int = 50000
     hundred_k: int = 100000
-
-
+
+
 class Prompts(str, enum.Enum):
     general: str = "\n\nHuman: You're a AI bot who loves to gossip. Listin user's query and answer in markdown using Conversations. \n\nConversations: {memory} \n\nQuery: {question} \n\nAssistant:"
-
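
A quick sketch of how these str-based enums are consumed elsewhere in the repo (values mirror the definitions above; the question text is a placeholder):

from const import ClaudeModels, ModelTokenLength, Prompts

# The str mixin means .value is always a string, which is what the
# Gradio dropdowns in app.py display and pass along.
print(ClaudeModels.Claude2.value)        # "claude-2"
print(ModelTokenLength.hundred_k.value)  # "100000" (int literal coerced to str)
print(Prompts["general"].value.format(memory="", question="What's new?"))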
 
makefile CHANGED
@@ -1,2 +1,2 @@
 start:
-	python app.py
\ No newline at end of file
+	python app.py
poetry.lock CHANGED
@@ -378,6 +378,23 @@ files = [
 docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"]
 testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"]
 
+[[package]]
+name = "flake8"
+version = "6.1.0"
+description = "the modular source code checker: pep8 pyflakes and co"
+category = "main"
+optional = false
+python-versions = ">=3.8.1"
+files = [
+    {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"},
+    {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"},
+]
+
+[package.dependencies]
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.11.0,<2.12.0"
+pyflakes = ">=3.1.0,<3.2.0"
+
 [[package]]
 name = "fonttools"
 version = "4.42.1"
@@ -945,6 +962,18 @@ pillow = ">=6.2.0"
 pyparsing = ">=2.3.1,<3.1"
 python-dateutil = ">=2.7"
 
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+    {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
 [[package]]
 name = "numpy"
 version = "1.25.2"
@@ -1199,6 +1228,18 @@ files = [
 docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
 tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
 
+[[package]]
+name = "pycodestyle"
+version = "2.11.0"
+description = "Python style guide checker"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pycodestyle-2.11.0-py2.py3-none-any.whl", hash = "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8"},
+    {file = "pycodestyle-2.11.0.tar.gz", hash = "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0"},
+]
+
 [[package]]
 name = "pydantic"
 version = "2.3.0"
@@ -1350,6 +1391,18 @@ files = [
     {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"},
 ]
 
+[[package]]
+name = "pyflakes"
+version = "3.1.0"
+description = "passive checker of Python programs"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"},
+    {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"},
+]
+
 [[package]]
 name = "pyparsing"
 version = "3.0.9"
@@ -1908,4 +1961,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "3cfcceb51ce4dd46c33225e911d15ec8c8a2e209b8beccea43e726362e00795e"
+content-hash = "041957f123ae41db1e7ffaeaee6d89e51646954137366587e4ba343dca3b005e"
pyproject.toml CHANGED
@@ -13,8 +13,37 @@ requests = "^2.31.0"
 gradio = "^3.41.2"
 anthropic = "^0.3.10"
 python-dotenv = "^1.0.0"
+flake8 = "^6.1.0"
 
+[tool.poetry.dev-dependencies]
+pre-commit = "^2.15.0"
+
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.isort]
+profile = "black"
+known_third_party = ["anthropic", "gradio", "dotenv"]
+
+[tool.black]
+line-length = 88
+include = '\.pyi?$'
+exclude = '''
+(
+  /(
+      \.eggs  # exclude a few common directories in the
+    | \.git   # root of the project
+    | \.hg
+    | \.mypy_cache
+    | \.tox
+    | \.venv
+    | _build
+    | buck-out
+    | build
+    | dist
+  )/
+  | foo.py  # also separately exclude a file named foo.py in
+            # the root of the project
+)
+'''
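
These tool sections agree with the rest of the commit: black's line-length of 88 matches max-line-length in .flake8 (which also ignores E203 and W503, the two codes black's formatting routinely trips), and profile = "black" keeps isort's import ordering compatible with black. Note also that pre-commit lands under dev-dependencies while flake8 sits in the main group, which is what lets the local pre-commit hook invoke poetry run flake8.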
settings.py CHANGED
@@ -3,7 +3,7 @@ import os
 
 class Settings:
 
-    ANTHROPIC_API_KEY:str=os.environ.get('ANTHROPIC_API_KEY')
-
+    ANTHROPIC_API_KEY: str = os.environ.get("ANTHROPIC_API_KEY")
 
-settings = Settings()
+
+settings = Settings()