darabos committed on
Commit
f940d73
·
1 Parent(s): 95f0558

Update to final LynxScribe package names.

Browse files
lynxkite-lynxscribe/src/lynxkite_lynxscribe/agentic.py CHANGED
@@ -5,12 +5,12 @@ import os
5
  import typing
6
 
7
  from lynxkite_core import ops
8
- from lynxscribe.components.tool_use import LLM
 
9
  from lynxscribe.components.mcp_client import MCPClient
10
- from lynxscribe.components.rag.rag_chatbot import RAGChatbotResponse
11
  from lynxscribe.core.llm.base import get_llm_engine
12
  from lynxscribe.core.models.prompts import Function, Tool, ChatCompletionPrompt
13
- from lynxscribe.core import router
14
 
15
  if typing.TYPE_CHECKING:
16
  import fastapi
@@ -34,7 +34,7 @@ def gradio_chat(agent: dict):
34
  async def respond(message, chat_history):
35
  await ag.init()
36
  response = await ag.ask([*chat_history, {"role": "user", "content": message}])
37
- async for messages in rag_chatbot_response_to_gradio(response):
38
  yield messages
39
 
40
  ag = agent_from_dict(agent, default_model={"name": "openai", "model_name": "gpt-4.1-nano"})
@@ -207,7 +207,7 @@ class Agent:
207
 
208
  return {"error": "Not found"}
209
 
210
- async def ask(self, messages: list[str], **kwargs) -> RAGChatbotResponse:
211
  await self.init()
212
  res = await self.llm.ask([self.prompt, *messages], **kwargs)
213
  return res
@@ -244,7 +244,7 @@ class Agent:
244
  return ask
245
 
246
 
247
- async def rag_chatbot_response_to_gradio(response: RAGChatbotResponse):
248
  """The Gradio chatbot interface expects a list of ChatMessage objects to be yielded.
249
  We can keep updating the messages to stream the response.
250
  """
 
5
  import typing
6
 
7
  from lynxkite_core import ops
8
+ from lynxscribe.common import router
9
+ from lynxscribe.core.llm import LLM
10
  from lynxscribe.components.mcp_client import MCPClient
11
+ from lynxscribe.core.models.streaming_response import StreamingResponse
12
  from lynxscribe.core.llm.base import get_llm_engine
13
  from lynxscribe.core.models.prompts import Function, Tool, ChatCompletionPrompt
 
14
 
15
  if typing.TYPE_CHECKING:
16
  import fastapi
 
34
  async def respond(message, chat_history):
35
  await ag.init()
36
  response = await ag.ask([*chat_history, {"role": "user", "content": message}])
37
+ async for messages in lynxscribe_response_to_gradio(response):
38
  yield messages
39
 
40
  ag = agent_from_dict(agent, default_model={"name": "openai", "model_name": "gpt-4.1-nano"})
 
207
 
208
  return {"error": "Not found"}
209
 
210
+ async def ask(self, messages: list[str], **kwargs) -> StreamingResponse:
211
  await self.init()
212
  res = await self.llm.ask([self.prompt, *messages], **kwargs)
213
  return res
 
244
  return ask
245
 
246
 
247
+ async def lynxscribe_response_to_gradio(response: StreamingResponse):
248
  """The Gradio chatbot interface expects a list of ChatMessage objects to be yielded.
249
  We can keep updating the messages to stream the response.
250
  """
uv.lock CHANGED
The diff for this file is too large to render. See raw diff