MH0386 committed · Commit 2a28594 (verified) · Parent(s): 6e5e02b

Upload folder using huggingface_hub
.dockerignore ADDED
@@ -0,0 +1,9 @@
+ **/__pycache__/
+ .deepsource.toml
+ .env
+ .github/
+ .idea/
+ .mypy_cache/
+ .ruff_cache/
+ .venv/
+ .vscode/
.gitattributes CHANGED
@@ -1,35 +1,132 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Apply override to all files in the directory
+ *.md linguist-detectable
+
+ # Basic .gitattributes for a python repo.
+
+ # Source files
+ # ============
+ *.pxd text diff=python
+ *.py text diff=python
+ *.py3 text diff=python
+ *.pyw text diff=python
+ *.pyx text diff=python
+ *.pyz text diff=python
+ *.pyi text diff=python
+
+ # Binary files
+ # ============
+ *.db binary
+ *.p binary
+ *.pkl binary
+ *.pickle binary
+ *.pyc binary export-ignore
+ *.pyo binary export-ignore
+ *.pyd binary
+
+ # Jupyter notebook
+ *.ipynb text eol=lf
+
+ # Note: .db, .p, and .pkl files are associated
+ # with the python modules ``pickle``, ``dbm.*``,
+ # ``shelve``, ``marshal``, ``anydbm``, & ``bsddb``
+ # (among others).
+
+ # Common settings that generally should always be used with your language specific settings
+
+ # Auto detect text files and perform LF normalization
+ * text=auto
+
+ #
+ # The above will handle all files NOT found below
+ #
+
+ # Documents
+ *.bibtex text diff=bibtex
+ *.doc diff=astextplain
+ *.DOC diff=astextplain
+ *.docx diff=astextplain
+ *.DOCX diff=astextplain
+ *.dot diff=astextplain
+ *.DOT diff=astextplain
+ *.pdf diff=astextplain
+ *.PDF diff=astextplain
+ *.rtf diff=astextplain
+ *.RTF diff=astextplain
+ *.md text diff=markdown
+ *.mdx text diff=markdown
+ *.tex text diff=tex
+ *.adoc text
+ *.textile text
+ *.mustache text
+ *.csv text eol=crlf
+ *.tab text
+ *.tsv text
+ *.txt text
+ *.sql text
+ *.epub diff=astextplain
+
+ # Graphics
+ *.png binary
+ *.jpg binary
+ *.jpeg binary
+ *.gif binary
+ *.tif binary
+ *.tiff binary
+ *.ico binary
+ # SVG treated as text by default.
+ *.svg text
+ # If you want to treat it as binary,
+ # use the following line instead.
+ # *.svg binary
+ *.eps binary
+
+ # Scripts
+ *.bash text eol=lf
+ *.fish text eol=lf
+ *.ksh text eol=lf
+ *.sh text eol=lf
+ *.zsh text eol=lf
+ # These are explicitly windows files and should use crlf
+ *.bat text eol=crlf
+ *.cmd text eol=crlf
+ *.ps1 text eol=crlf
+
+ # Serialisation
+ *.json text eol=lf
+ *.toml text eol=lf
+ *.xml text eol=lf
+ *.yaml text eol=lf
+ *.yml text eol=lf
+
+ # Archives
+ *.7z binary
+ *.bz binary
+ *.bz2 binary
+ *.bzip2 binary
+ *.gz binary
+ *.lz binary
+ *.lzma binary
+ *.rar binary
+ *.tar binary
+ *.taz binary
+ *.tbz binary
+ *.tbz2 binary
+ *.tgz binary
+ *.tlz binary
+ *.txz binary
+ *.xz binary
+ *.Z binary
+ *.zip binary
+ *.zst binary
+
+ # Text files where line endings should be preserved
+ *.patch -text
+
+ #
+ # Exclude files from exporting
+ #
+
+ .gitattributes export-ignore
+ .gitignore export-ignore
+ .gitkeep export-ignore
+ assets/image/Einstein.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,14 @@
+ **/__pycache__/
+ .env
+ .mypy_cache/
+ .ruff_cache/
+ .venv/
+ logs/
+ results/
+ .github/
+ .trunk/
+ .idea/
+ renovate.json
+ .deepsource.toml
+ .pre-commit-config.yaml
+ compose.yaml
.python-version ADDED
@@ -0,0 +1 @@
+ 3.13
.sonarlint/connectedMode.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "sonarCloudOrganization": "alphaspheredotai",
+   "projectKey": "AlphaSphereDotAI_chatacter_backend_app",
+   "region": "EU"
+ }
Dockerfile ADDED
@@ -0,0 +1,47 @@
+ FROM python:3.13@sha256:5f69d22a88dd4cc4ee1576def19aef48c8faa1b566054c44291183831cbad13b AS builder
+
+ SHELL ["/bin/bash", "-c"]
+
+ ENV UV_LINK_MODE=copy \
+     UV_COMPILE_BYTECODE=1 \
+     UV_PYTHON_DOWNLOADS=0
+
+ COPY --from=ghcr.io/astral-sh/uv:latest@sha256:68a26194ea8da0dbb014e8ae1d8ab08a469ee3ba0f4e2ac07b8bb66c0f8185c1 \
+     /uv /uvx /bin/
+
+ WORKDIR /app
+
+ RUN --mount=type=cache,target=/root/.cache/uv \
+     --mount=type=bind,source=uv.lock,target=uv.lock \
+     --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+     --mount=type=bind,source=README.md,target=README.md \
+     uv sync --no-install-project --no-dev --locked --no-editable
+
+ COPY . /app
+
+ RUN --mount=type=cache,target=/root/.cache/uv \
+     uv sync --no-dev --locked --no-editable
+
+ FROM python:3.13-slim@sha256:f2fdaec50160418e0c2867ba3e254755edd067171725886d5d303fd7057bbf81 AS production
+
+ SHELL ["/bin/bash", "-c"]
+
+ ENV GRADIO_SERVER_PORT=7860 \
+     GRADIO_SERVER_NAME=0.0.0.0
+
+ RUN groupadd app && \
+     useradd -m -g app -s /bin/bash app && \
+     apt-get update > /dev/null && \
+     apt-get install -y --no-install-recommends curl > /dev/null && \
+     apt-get clean > /dev/null && \
+     rm -rf /var/lib/apt/lists/*
+
+ WORKDIR /home/app
+
+ COPY --from=builder --chown=app:app --chmod=555 /app/.venv /app/.venv
+
+ USER app
+
+ EXPOSE ${GRADIO_SERVER_PORT}
+
+ CMD ["/app/.venv/bin/chattr"]
README.md CHANGED
@@ -1,12 +1,11 @@
  ---
  title: Chattr
- emoji: 🔥
+ emoji: 💬
  colorFrom: gray
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.36.2
- app_file: app.py
- pinned: false
+ colorTo: blue
+ sdk: docker
+ app_port: 7860
+ short_description: Chat with Characters
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # **Chattr**: App part of the Chatacter Backend
assets/image/Einstein.jpg ADDED

Git LFS Details
  • SHA256: 4c97e39f0682557315535871d13878aef8b89cd16edfd657c9c528a30ba25eb4
  • Pointer size: 131 bytes
  • Size of remote file: 161 kB
assets/image/Napoleon.jpg ADDED
pyproject.toml ADDED
@@ -0,0 +1,28 @@
+ [project]
+ name = "chattr"
+ version = "0.1.0"
+ description = "App part of the Chatacter Backend"
+ readme = "README.md"
+ authors = [
+     { name = "Mohamed Hisham Abdelzaher", email = "[email protected]" },
+ ]
+ requires-python = ">=3.12"
+ dependencies = [
+     "gradio>=5.36.2",
+     "langchain>=0.3.26",
+     "langchain-mcp-adapters>=0.1.9",
+     "langchain-openai>=0.3.27",
+     "langgraph>=0.5.2",
+     "loguru>=0.7.3",
+     "python-dotenv>=1.1.1",
+ ]
+
+ [project.scripts]
+ chattr = "chattr.__main__:main"
+
+ [build-system]
+ requires = ["uv_build"]
+ build-backend = "uv_build"
+
+ [dependency-groups]
+ dev = ["ruff>=0.12.2", "ty>=0.0.1a12"]
src/chattr/__init__.py ADDED
@@ -0,0 +1,81 @@
+ from datetime import datetime
+ from os import getenv
+ from pathlib import Path
+
+ from dotenv import load_dotenv
+ from loguru import logger
+ from requests import RequestException, get
+
+ load_dotenv()
+
+ SERVER_URL: str = getenv(key="SERVER_URL", default="127.0.0.1")
+ SERVER_PORT: int = int(getenv(key="SERVER_PORT", default="7860"))
+ CURRENT_DATE: str = datetime.now().strftime(format="%Y-%m-%d_%H-%M-%S")
+ MCP_VOICE_GENERATOR: str = getenv(
+     key="MCP_VOICE_GENERATOR", default="http://localhost:8001/"
+ )
+ MCP_VIDEO_GENERATOR: str = getenv(
+     key="MCP_VIDEO_GENERATOR", default="http://localhost:8002/"
+ )
+ VECTOR_DATABASE_NAME: str = getenv(key="VECTOR_DATABASE_NAME", default="chattr")
+ DOCKER_MODEL_RUNNER_URL: str = getenv(
+     key="DOCKER_MODEL_RUNNER_URL", default="http://127.0.0.1:12434/engines/v1"
+ )
+ DOCKER_MODEL_RUNNER_MODEL_NAME: str = getenv(
+     key="DOCKER_MODEL_RUNNER_MODEL_NAME",
+     default="ai/qwen3:0.6B-Q4_0",
+ )
+ GROQ_URL: str = getenv(key="MODEL_URL", default="https://api.groq.com/openai/v1")
+ GROQ_MODEL_NAME: str = getenv(key="GROQ_MODEL_NAME", default="llama3-70b-8192")
+
+ BASE_DIR: Path = Path.cwd()
+ ASSETS_DIR: Path = BASE_DIR / "assets"
+ LOG_DIR: Path = BASE_DIR / "logs"
+ IMAGE_DIR: Path = ASSETS_DIR / "image"
+ AUDIO_DIR: Path = ASSETS_DIR / "audio"
+ VIDEO_DIR: Path = ASSETS_DIR / "video"
+
+ LOG_FILE_PATH: Path = LOG_DIR / f"{CURRENT_DATE}.log"
+ AUDIO_FILE_PATH: Path = AUDIO_DIR / f"{CURRENT_DATE}.wav"
+ VIDEO_FILE_PATH: Path = VIDEO_DIR / f"{CURRENT_DATE}.mp4"
+
+ ASSETS_DIR.mkdir(exist_ok=True)
+ IMAGE_DIR.mkdir(exist_ok=True)
+ AUDIO_DIR.mkdir(exist_ok=True)
+ VIDEO_DIR.mkdir(exist_ok=True)
+ LOG_DIR.mkdir(exist_ok=True)
+
+ # requests.get raises (e.g. ConnectionError) when the local runner is down,
+ # so the probe must be guarded for the Groq fallback to actually trigger.
+ try:
+     _runner_up: bool = get(DOCKER_MODEL_RUNNER_URL, timeout=5).status_code == 200
+ except RequestException:
+     _runner_up = False
+
+ MODEL_URL: str = DOCKER_MODEL_RUNNER_URL if _runner_up else GROQ_URL
+ MODEL_NAME: str = (
+     DOCKER_MODEL_RUNNER_MODEL_NAME
+     if MODEL_URL == DOCKER_MODEL_RUNNER_URL
+     else GROQ_MODEL_NAME
+ )
+ MODEL_API_KEY: str = (
+     "not-needed"
+     if MODEL_URL == DOCKER_MODEL_RUNNER_URL
+     else getenv(key="GROQ_API_KEY", default="")
+ )
+ MODEL_TEMPERATURE: float = float(getenv(key="MODEL_TEMPERATURE", default="0.0"))
+
+ logger.add(
+     sink=LOG_FILE_PATH,
+     format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
+     colorize=True,
+ )
+ logger.info(f"Current date: {CURRENT_DATE}")
+ logger.info(f"Base directory: {BASE_DIR}")
+ logger.info(f"Assets directory: {ASSETS_DIR}")
+ logger.info(f"Log directory: {LOG_DIR}")
+ logger.info(f"Audio file path: {AUDIO_FILE_PATH}")
+ logger.info(f"Log file path: {LOG_FILE_PATH}")
+ logger.info(f"Model URL to be used: {MODEL_URL}")
+ logger.info(f"Model name to be used: {MODEL_NAME}")
+ logger.info(f"Model temperature to be used: {MODEL_TEMPERATURE}")
src/chattr/__main__.py ADDED
@@ -0,0 +1,24 @@
+ from gradio import Blocks
+
+ from chattr import SERVER_PORT, SERVER_URL
+ from chattr.gui import app_block
+
+
+ def main() -> None:
+     """
+     Initializes and launches the Gradio-based Chattr application server with API access, monitoring, and PWA support enabled.
+     """
+     app: Blocks = app_block()
+     app.queue(api_open=True).launch(
+         server_name=SERVER_URL,
+         server_port=SERVER_PORT,
+         debug=True,
+         show_api=True,
+         enable_monitoring=True,
+         show_error=True,
+         pwa=True,
+     )
+
+
+ if __name__ == "__main__":
+     main()
src/chattr/graph.py ADDED
@@ -0,0 +1,96 @@
+ from langchain_core.messages import HumanMessage, SystemMessage
+ from langchain_core.tools import BaseTool
+ from langchain_mcp_adapters.client import MultiServerMCPClient
+ from langchain_openai import ChatOpenAI
+ from langgraph.graph import START, StateGraph
+ from langgraph.graph.message import MessagesState
+ from langgraph.graph.state import CompiledStateGraph
+ from langgraph.prebuilt import ToolNode, tools_condition
+
+ from chattr import (
+     ASSETS_DIR,
+     MODEL_API_KEY,
+     MODEL_NAME,
+     MODEL_TEMPERATURE,
+     MODEL_URL,
+ )
+
+ SYSTEM_MESSAGE: SystemMessage = SystemMessage(
+     content="You are a helpful assistant that can answer questions about the time."
+ )
+
+
+ async def create_graph() -> CompiledStateGraph:
+     """
+     Asynchronously creates and compiles a conversational state graph for a time-answering assistant with integrated external tools.
+
+     Returns:
+         CompiledStateGraph: The compiled state graph ready for execution, with nodes for agent responses and tool invocation.
+     """
+     _mcp_client = MultiServerMCPClient(
+         {
+             "time": {
+                 "command": "docker",
+                 "args": ["run", "-i", "--rm", "mcp/time"],
+                 "transport": "stdio",
+             }
+         }
+     )
+     _tools: list[BaseTool] = await _mcp_client.get_tools()
+     try:
+         _model: ChatOpenAI = ChatOpenAI(
+             base_url=MODEL_URL,
+             model=MODEL_NAME,
+             api_key=MODEL_API_KEY,
+             temperature=MODEL_TEMPERATURE,
+         )
+         _model = _model.bind_tools(_tools, parallel_tool_calls=False)
+     except Exception as e:
+         raise RuntimeError(f"Failed to initialize ChatOpenAI model: {e}") from e
+
+     def call_model(state: MessagesState) -> MessagesState:
+         """
+         Generate a new message state by invoking the chat model with the system message prepended to the current messages.
+
+         Parameters:
+             state (MessagesState): The current state containing a list of messages.
+
+         Returns:
+             MessagesState: A new state with the model's response appended to the messages.
+         """
+         return {"messages": [_model.invoke([SYSTEM_MESSAGE] + state["messages"])]}
+
+     _builder: StateGraph = StateGraph(MessagesState)
+     _builder.add_node("agent", call_model)
+     _builder.add_node("tools", ToolNode(_tools))
+     _builder.add_edge(START, "agent")
+     _builder.add_conditional_edges("agent", tools_condition)
+     _builder.add_edge("tools", "agent")
+     graph: CompiledStateGraph = _builder.compile()
+     return graph
+
+
+ def draw_graph(graph: CompiledStateGraph) -> None:
+     """
+     Render the compiled state graph as a Mermaid PNG image and save it to the assets directory.
+     """
+     graph.get_graph().draw_mermaid_png(output_file_path=ASSETS_DIR / "graph.png")
+
+
+ if __name__ == "__main__":
+     import asyncio
+
+     async def test_graph():
+         """
+         Asynchronously creates and tests the conversational state graph by sending a time-related query and printing the resulting messages.
+         """
+         g: CompiledStateGraph = await create_graph()
+
+         messages = await g.ainvoke(
+             {"messages": [HumanMessage(content="What is the time?")]}
+         )
+
+         for m in messages["messages"]:
+             m.pretty_print()
+
+     asyncio.run(test_graph())
src/chattr/gui.py ADDED
@@ -0,0 +1,55 @@
+ import gradio
+ from gradio import Blocks, Button, Chatbot, ChatMessage, Row, Textbox
+
+
+ def generate_response(history: list[ChatMessage], thread_id: str) -> list[ChatMessage]:
+     """
+     Appends an assistant message about a quarterly sales plot to the chat history for the specified thread ID.
+
+     If the thread ID is empty, raises a Gradio error prompting for a valid thread ID.
+
+     Returns:
+         The updated chat history including the new assistant message.
+     """
+     if not thread_id:
+         raise gradio.Error("Please enter a thread ID.")
+     history.append(
+         ChatMessage(
+             role="assistant",
+             content=f"Here is the plot of quarterly sales for {thread_id}.",
+             metadata={
+                 "title": "🛠️ Used tool Weather API",
+             },
+         )
+     )
+     return history
+
+
+ def app_block() -> Blocks:
+     """
+     Constructs and returns the main Gradio chat application interface with a thread ID input, chatbot display, and control buttons.
+
+     Returns:
+         Blocks: The complete Gradio Blocks interface for the chat application.
+     """
+
+     history = [
+         ChatMessage(role="assistant", content="How can I help you?"),
+         ChatMessage(role="user", content="Can you make me a plot of quarterly sales?"),
+         ChatMessage(
+             role="assistant",
+             content="I am happy to provide you that report and plot.",
+         ),
+     ]
+     with Blocks() as app:
+         with Row():
+             thread_id: Textbox = Textbox(label="Thread ID", info="Enter Thread ID")
+
+         chatbot: Chatbot = Chatbot(history, type="messages")
+
+         with Row():
+             generate_btn: Button = Button(value="Generate", variant="primary")
+             stop_btn: Button = Button(value="Stop", variant="stop")
+         _event = generate_btn.click(generate_response, [chatbot, thread_id], chatbot)
+         stop_btn.click(cancels=[_event])
+     return app
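Editor's note: generate_response above is a placeholder that returns a canned reply. A minimal sketch of routing it through the compiled LangGraph from src/chattr/graph.py instead (hypothetical wiring, not part of this commit):

    # Hypothetical callback: send the last user message through the state graph.
    import asyncio

    from gradio import ChatMessage
    from langchain_core.messages import HumanMessage

    from chattr.graph import create_graph


    async def _ask_graph(text: str) -> str:
        """Compile the graph and return its final reply to one message."""
        graph = await create_graph()
        result = await graph.ainvoke({"messages": [HumanMessage(content=text)]})
        return result["messages"][-1].content


    def generate_response(history: list[ChatMessage], thread_id: str) -> list[ChatMessage]:
        """Append the graph's answer to the latest user message."""
        reply: str = asyncio.run(_ask_graph(history[-1].content))
        history.append(ChatMessage(role="assistant", content=reply))
        return history

Rebuilding the graph on every click is wasteful; caching the compiled graph would be the obvious next step.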
src/chattr/tools.py ADDED
@@ -0,0 +1,25 @@
+ from typing import Literal
+
+ from langchain_core.tools import tool
+
+
+ @tool
+ def get_weather(city: Literal["nyc", "sf"]) -> str:
+     """
+     Returns a weather description for the specified city.
+
+     Parameters:
+         city (Literal["nyc", "sf"]): The city for which to retrieve weather information.
+
+     Returns:
+         str: A message describing the weather in the specified city.
+
+     Raises:
+         AssertionError: If the city is not "nyc" or "sf".
+     """
+     if city == "nyc":
+         return "It might be cloudy in nyc"
+     elif city == "sf":
+         return "It's always sunny in sf"
+     else:
+         raise AssertionError("Unknown city")
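Editor's note: a LangChain @tool wrapper like get_weather is invoked with a dict payload rather than positional arguments. A quick sanity check (assuming the package is installed, e.g. after uv sync):

    # Minimal check of the @tool wrapper from chattr.tools.
    from chattr.tools import get_weather

    print(get_weather.name)                     # "get_weather"
    print(get_weather.invoke({"city": "nyc"}))  # "It might be cloudy in nyc"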
uv.lock ADDED
The diff for this file is too large to render.