Tesvia committed on
Commit
8cf4b42
·
verified ·
1 Parent(s): 40ad9f8

Upload tools.py

Browse files
Files changed (1) hide show
  1. tools.py +4 -4
tools.py CHANGED
@@ -16,7 +16,7 @@ class PythonRunTool(Tool):
16
  inputs = {
17
  "code": {"type": str, "description": "Python code to execute", "required": True}
18
  }
19
- output_type = "str"
20
 
21
  def forward(self, code: str) -> str:
22
  buf, ns = io.StringIO(), {}
@@ -72,7 +72,7 @@ class YouTubeTranscriptTool(Tool):
72
  "url": {"type": str, "description": "YouTube URL", "required": True},
73
  "lang": {"type": str, "description": "Transcript language (default: en)", "required": False, "default": "en"}
74
  }
75
- output_type = "str"
76
 
77
  def forward(self, url: str, lang: str = "en") -> str:
78
  from urllib.parse import urlparse, parse_qs
@@ -91,7 +91,7 @@ class AudioTranscriptionTool(Tool):
91
  "path": {"type": str, "description": "Path to audio file", "required": True},
92
  "model": {"type": str, "description": "Model name for transcription (default: whisper-1)", "required": False, "default": "whisper-1"}
93
  }
94
- output_type = "str"
95
 
96
  def forward(self, path: str, model: str = "whisper-1") -> str:
97
  import openai
@@ -111,7 +111,7 @@ class SimpleOCRTool(Tool):
111
  inputs = {
112
  "path": {"type": str, "description": "Path to image file", "required": True}
113
  }
114
- output_type = "str"
115
 
116
  def forward(self, path: str) -> str:
117
  from PIL import Image
 
16
  inputs = {
17
  "code": {"type": str, "description": "Python code to execute", "required": True}
18
  }
19
+ output_type = "string"
20
 
21
  def forward(self, code: str) -> str:
22
  buf, ns = io.StringIO(), {}
 
72
  "url": {"type": str, "description": "YouTube URL", "required": True},
73
  "lang": {"type": str, "description": "Transcript language (default: en)", "required": False, "default": "en"}
74
  }
75
+ output_type = "string"
76
 
77
  def forward(self, url: str, lang: str = "en") -> str:
78
  from urllib.parse import urlparse, parse_qs
 
91
  "path": {"type": str, "description": "Path to audio file", "required": True},
92
  "model": {"type": str, "description": "Model name for transcription (default: whisper-1)", "required": False, "default": "whisper-1"}
93
  }
94
+ output_type = "string"
95
 
96
  def forward(self, path: str, model: str = "whisper-1") -> str:
97
  import openai
 
111
  inputs = {
112
  "path": {"type": str, "description": "Path to image file", "required": True}
113
  }
114
+ output_type = "string"
115
 
116
  def forward(self, path: str) -> str:
117
  from PIL import Image