Dataset columns (type, min .. max):
  index      int64             0 .. 731k
  package    string (lengths)  2 .. 98
  name       string (lengths)  1 .. 76
  docstring  string (lengths)  0 .. 281k
  code       string (lengths)  4 .. 1.07M
  signature  string (lengths)  2 .. 42.8k
54,072
teradatasql
DataError
null
class DataError(DatabaseError): # Required by DBAPI 2.0
    pass
null
54,073
teradatasql
DatabaseError
null
class DatabaseError(Error): # Required by DBAPI 2.0
    pass
null
54,075
teradatasql
Error
null
class Error(Exception): # Required by DBAPI 2.0
    pass
null
54,076
teradatasql
IntegrityError
null
class IntegrityError(DatabaseError): # Required by DBAPI 2.0
    pass
null
54,077
teradatasql
InterfaceError
null
class InterfaceError(Error): # Required by DBAPI 2.0
    pass
null
54,078
teradatasql
InternalError
null
class InternalError(DatabaseError): # Required by DBAPI 2.0
    pass
null
54,079
teradatasql
NotSupportedError
null
class NotSupportedError(DatabaseError): # Required by DBAPI 2.0
    pass
null
54,080
teradatasql
OperationalError
null
class OperationalError(DatabaseError): # Required by DBAPI 2.0
    pass
null
54,081
teradatasql
ProgrammingError
null
class ProgrammingError(DatabaseError): # Required by DBAPI 2.0
    pass
null
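Taken together, the rows above form the PEP 249 exception hierarchy: Error derives from Exception, DatabaseError derives from Error, the specific errors derive from DatabaseError, and InterfaceError derives directly from Error. A minimal sketch of catching them from most to least specific; the host, user, password, and query are placeholders:

import teradatasql

try:
    with teradatasql.connect (host="whomooz", user="guest", password="please") as con:
        with con.cursor () as cur:
            cur.execute ("select InfoData from DBC.DBCInfoV")
            print (cur.fetchall ())
except teradatasql.OperationalError as ex:
    print ("database or network problem:", ex)  # most runtime failures surface here
except teradatasql.Error as ex:
    print ("other driver error:", ex)  # catches InterfaceError and the remaining DatabaseError subclasses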
54,083
teradatasql
TeradataConnection
null
class TeradataConnection:

    def __init__ (self, sConnectParams=None, **kwargs):
        if ctypes.sizeof (ctypes.c_voidp) < 8:
            raise ImportError ("This package requires 64-bit Python. 32-bit Python is not supported.")
        self.uLog = 0
        self.bTraceLog = False
        self.bDebugLog = False
        self.bTimingLog = False
        self.uConnHandle = None # needed by __repr__
        if not sConnectParams:
            sConnectParams = '{}'
        for sKey, oValue in kwargs.items ():
            if isinstance (oValue, bool):
                kwargs [sKey] = str (oValue).lower () # use lowercase words true and false
            else:
                kwargs [sKey] = str (oValue)

        # Compose a streamlined stack trace of script file names and package names
        listFrames = []
        sPackagesDir = os.path.dirname (os.path.dirname (__file__)).replace (os.sep, "/") + "/"
        for fr in traceback.extract_stack ():
            sFrame = fr [0].replace (os.sep, "/")
            if sFrame.startswith (sPackagesDir):
                sFrame = sFrame [len (sPackagesDir) : ].split ("/") [0] # remove the packages dir prefix and take the first directory, which is the package name
            else:
                sFrame = sFrame.split ("/") [-1] # take the last element, which is the Python script file name
            if not sFrame.startswith ("<") and sFrame not in listFrames: # omit <string>, omit <template>, omit repeated entries
                listFrames += [ sFrame ]

        kwargs ['client_kind'  ] = 'P' # G = Go, P = Python, R = R, S = Node.js
        kwargs ['client_vmname'] = 'Python ' + sys.version
        kwargs ['client_osname'] = platform.platform () + ' ' + platform.machine ()
        kwargs ['client_stack' ] = " ".join (listFrames)
        kwargs ['client_extra' ] = 'PYTHON=' + platform.python_version () + ';' # must be semicolon-terminated
        try:
            kwargs ['client_extra'] += 'TZ=' + datetime.datetime.now (tz=datetime.timezone.utc).astimezone ().strftime ('%Z %z') + ';' # must be semicolon-terminated
        except: # astimezone() can fail when the TZ environment variable is set to an unexpected format
            pass

        sConnectArgs = json.dumps (kwargs)

        global bInitDone, goside # assigned-to variables are local unless marked as global
        try:
            lockInit.acquire()
            if not bInitDone:
                bInitDone = True
                if osType == "Windows":
                    sExtension = "dll"
                elif osType == "Darwin":
                    sExtension = "dylib"
                elif bARM:
                    sExtension = "arm.so"
                else:
                    sExtension = "so"
                sLibPathName = os.path.join(os.path.dirname(__file__), "teradatasql." + sExtension)
                goside = ctypes.cdll.LoadLibrary(sLibPathName)
                prototype (None, goside.goCombineJSON     , ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
                prototype (None, goside.goParseParams     , ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64))
                prototype (None, goside.goCreateConnection, ctypes.c_uint64, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64))
                prototype (None, goside.goCloseConnection , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
                prototype (None, goside.goCancelRequest   , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
                prototype (None, goside.goCreateRows      , ctypes.c_uint64, ctypes.c_uint64, ctypes.c_char_p, ctypes.c_uint64, ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64))
                prototype (None, goside.goResultMetaData  , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64), ctypes.POINTER (ctypes.c_uint16), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_int32), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
                prototype (None, goside.goFetchRow        , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_int32), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
                prototype (None, goside.goNextResult      , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_char))
                prototype (None, goside.goCloseRows       , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
                prototype (None, goside.goFreePointer     , ctypes.c_uint64, ctypes.POINTER (ctypes.c_char))
        finally:
            lockInit.release()

        pcError = ctypes.POINTER (ctypes.c_char) ()
        pcCombined = ctypes.POINTER (ctypes.c_char) ()
        goside.goCombineJSON (sConnectParams.encode ('utf-8'), sConnectArgs.encode ('utf-8'), ctypes.byref (pcError), ctypes.byref (pcCombined))
        if pcError:
            sErr = ctypes.string_at (pcError).decode ('utf-8')
            goside.goFreePointer (self.uLog, pcError)
            raise OperationalError (sErr)
        sConnectParams = ctypes.string_at (pcCombined).decode ('utf-8')
        goside.goFreePointer (self.uLog, pcCombined)

        pcError = ctypes.POINTER (ctypes.c_char) ()
        uLog = ctypes.c_uint64 ()
        goside.goParseParams (sConnectParams.encode ('utf-8'), ctypes.byref (pcError), ctypes.byref (uLog))
        if pcError:
            sErr = ctypes.string_at (pcError).decode ('utf-8')
            goside.goFreePointer (self.uLog, pcError)
            raise OperationalError (sErr)
        self.uLog = uLog.value
        self.bTraceLog = (self.uLog & 1) != 0
        self.bDebugLog = (self.uLog & 2) != 0
        self.bTimingLog = (self.uLog & 8) != 0

        if self.bTraceLog:
            traceLog ("> enter __init__ {}".format (sConnectParams))
        try:
            pcError = ctypes.POINTER (ctypes.c_char)()
            uConnHandle = ctypes.c_uint64()
            goside.goCreateConnection (self.uLog, vernumber.sVersionNumber.encode ('utf-8'), sConnectParams.encode ('utf-8'), ctypes.byref (pcError), ctypes.byref (uConnHandle))
            if pcError:
                sErr = ctypes.string_at(pcError).decode('utf-8')
                goside.goFreePointer (self.uLog, pcError)
                raise OperationalError(sErr)
            self.uConnHandle = uConnHandle.value
        finally:
            if self.bTraceLog:
                traceLog ("< leave __init__ {}".format (self))
    # end __init__

    def close(self): # Required by DBAPI 2.0
        if self.bTraceLog:
            traceLog ("> enter close {}".format (self))
        try:
            pcError = ctypes.POINTER (ctypes.c_char)()
            goside.goCloseConnection (self.uLog, self.uConnHandle, ctypes.byref (pcError))
            if pcError:
                sErr = ctypes.string_at(pcError).decode('utf-8')
                goside.goFreePointer (self.uLog, pcError)
                raise OperationalError(sErr)
        finally:
            if self.bTraceLog:
                traceLog ("< leave close {}".format (self))
    # end close

    def cancel(self):
        if self.bTraceLog:
            traceLog ("> enter cancel {}".format (self))
        try:
            pcError = ctypes.POINTER (ctypes.c_char)()
            goside.goCancelRequest (self.uLog, self.uConnHandle, ctypes.byref (pcError))
            if pcError:
                sErr = ctypes.string_at(pcError).decode('utf-8')
                goside.goFreePointer (self.uLog, pcError)
                raise OperationalError(sErr)
        finally:
            if self.bTraceLog:
                traceLog ("< leave cancel {}".format (self))
    # end cancel

    def commit(self): # Required by DBAPI 2.0
        if self.bTraceLog:
            traceLog ("> enter commit {}".format (self))
        try:
            with self.cursor () as cur:
                cur.execute ("{fn teradata_commit}")
        finally:
            if self.bTraceLog:
                traceLog ("< leave commit {}".format (self))
    # end commit

    def rollback(self): # Required by DBAPI 2.0
        if self.bTraceLog:
            traceLog ("> enter rollback {}".format (self))
        try:
            with self.cursor () as cur:
                cur.execute ("{fn teradata_rollback}")
        finally:
            if self.bTraceLog:
                traceLog ("< leave rollback {}".format (self))
    # end rollback

    def cursor(self): # Required by DBAPI 2.0
        return TeradataCursor(self)

    @property
    def autocommit(self): # Required by SQLAlchemy 2.0
        if self.bTraceLog:
            traceLog ("> enter autocommit getter {}".format (self))
        try:
            with self.cursor () as cur:
                cur.execute ("{fn teradata_nativesql}{fn teradata_autocommit}") # teradata_nativesql never produces a fake result set
                return cur.fetchone () [0] == "true"
        finally:
            if self.bTraceLog:
                traceLog ("< leave autocommit getter {}".format (self))
    # end autocommit getter

    @autocommit.setter
    def autocommit(self, value): # Required by SQLAlchemy 2.0
        if self.bTraceLog:
            traceLog ("> enter autocommit setter {}".format (self))
        if type (value) != bool:
            raise TypeError ("value unexpected type {}".format (type (value)))
        try:
            with self.cursor () as cur:
                s = "on" if value else "off"
                cur.execute ("{fn teradata_nativesql}{fn teradata_autocommit_" + s + "}")
        finally:
            if self.bTraceLog:
                traceLog ("< leave autocommit setter {}".format (self))

    def __enter__(self): # Implements with-statement context manager
        return self

    def __exit__(self, t, value, traceback): # Implements with-statement context manager
        if self.bTraceLog:
            traceLog ("> enter __exit__ {}".format (self))
        try:
            self.close()
        finally:
            if self.bTraceLog:
                traceLog ("< leave __exit__ {}".format (self))
    # end __exit__

    def __repr__(self): # Equivalent to the toString method in Java or the String method in Go
        return("{} uConnHandle={}".format(self.__class__.__name__, self.uConnHandle))

# end class TeradataConnection
(sConnectParams=None, **kwargs)
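A minimal usage sketch for the class above, assuming a reachable database; the host, user, and password values are placeholders, and applications would normally go through the package's connect function, which wraps this constructor. The constructor merges an optional JSON parameter string with the keyword arguments (booleans are lowercased to "true"/"false" before the merge), loads the platform-specific Go library once per process, and asks the Go side for a connection handle:

import json
import teradatasql

con = teradatasql.TeradataConnection (json.dumps ({"host": "whomooz"}), user="guest", password="please")
with con:  # __enter__/__exit__ close the connection automatically
    with con.cursor () as cur:
        cur.execute ("select current_timestamp")
        print (cur.fetchone ())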
54,084
teradatasql
__enter__
null
def __enter__(self): # Implements with-statement context manager
    return self
(self)
54,085
teradatasql
__exit__
null
def __exit__(self, t, value, traceback): # Implements with-statement context manager
    if self.bTraceLog:
        traceLog ("> enter __exit__ {}".format (self))
    try:
        self.close()
    finally:
        if self.bTraceLog:
            traceLog ("< leave __exit__ {}".format (self))
# end __exit__
(self, t, value, traceback)
54,086
teradatasql
__init__
null
def __init__ (self, sConnectParams=None, **kwargs):
    if ctypes.sizeof (ctypes.c_voidp) < 8:
        raise ImportError ("This package requires 64-bit Python. 32-bit Python is not supported.")
    self.uLog = 0
    self.bTraceLog = False
    self.bDebugLog = False
    self.bTimingLog = False
    self.uConnHandle = None # needed by __repr__
    if not sConnectParams:
        sConnectParams = '{}'
    for sKey, oValue in kwargs.items ():
        if isinstance (oValue, bool):
            kwargs [sKey] = str (oValue).lower () # use lowercase words true and false
        else:
            kwargs [sKey] = str (oValue)

    # Compose a streamlined stack trace of script file names and package names
    listFrames = []
    sPackagesDir = os.path.dirname (os.path.dirname (__file__)).replace (os.sep, "/") + "/"
    for fr in traceback.extract_stack ():
        sFrame = fr [0].replace (os.sep, "/")
        if sFrame.startswith (sPackagesDir):
            sFrame = sFrame [len (sPackagesDir) : ].split ("/") [0] # remove the packages dir prefix and take the first directory, which is the package name
        else:
            sFrame = sFrame.split ("/") [-1] # take the last element, which is the Python script file name
        if not sFrame.startswith ("<") and sFrame not in listFrames: # omit <string>, omit <template>, omit repeated entries
            listFrames += [ sFrame ]

    kwargs ['client_kind'  ] = 'P' # G = Go, P = Python, R = R, S = Node.js
    kwargs ['client_vmname'] = 'Python ' + sys.version
    kwargs ['client_osname'] = platform.platform () + ' ' + platform.machine ()
    kwargs ['client_stack' ] = " ".join (listFrames)
    kwargs ['client_extra' ] = 'PYTHON=' + platform.python_version () + ';' # must be semicolon-terminated
    try:
        kwargs ['client_extra'] += 'TZ=' + datetime.datetime.now (tz=datetime.timezone.utc).astimezone ().strftime ('%Z %z') + ';' # must be semicolon-terminated
    except: # astimezone() can fail when the TZ environment variable is set to an unexpected format
        pass

    sConnectArgs = json.dumps (kwargs)

    global bInitDone, goside # assigned-to variables are local unless marked as global
    try:
        lockInit.acquire()
        if not bInitDone:
            bInitDone = True
            if osType == "Windows":
                sExtension = "dll"
            elif osType == "Darwin":
                sExtension = "dylib"
            elif bARM:
                sExtension = "arm.so"
            else:
                sExtension = "so"
            sLibPathName = os.path.join(os.path.dirname(__file__), "teradatasql." + sExtension)
            goside = ctypes.cdll.LoadLibrary(sLibPathName)
            prototype (None, goside.goCombineJSON     , ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
            prototype (None, goside.goParseParams     , ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64))
            prototype (None, goside.goCreateConnection, ctypes.c_uint64, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64))
            prototype (None, goside.goCloseConnection , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
            prototype (None, goside.goCancelRequest   , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
            prototype (None, goside.goCreateRows      , ctypes.c_uint64, ctypes.c_uint64, ctypes.c_char_p, ctypes.c_uint64, ctypes.c_char_p, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64))
            prototype (None, goside.goResultMetaData  , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_uint64), ctypes.POINTER (ctypes.c_uint16), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_int32), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
            prototype (None, goside.goFetchRow        , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_int32), ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
            prototype (None, goside.goNextResult      , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)), ctypes.POINTER (ctypes.c_char))
            prototype (None, goside.goCloseRows       , ctypes.c_uint64, ctypes.c_uint64, ctypes.POINTER (ctypes.POINTER (ctypes.c_char)))
            prototype (None, goside.goFreePointer     , ctypes.c_uint64, ctypes.POINTER (ctypes.c_char))
    finally:
        lockInit.release()

    pcError = ctypes.POINTER (ctypes.c_char) ()
    pcCombined = ctypes.POINTER (ctypes.c_char) ()
    goside.goCombineJSON (sConnectParams.encode ('utf-8'), sConnectArgs.encode ('utf-8'), ctypes.byref (pcError), ctypes.byref (pcCombined))
    if pcError:
        sErr = ctypes.string_at (pcError).decode ('utf-8')
        goside.goFreePointer (self.uLog, pcError)
        raise OperationalError (sErr)
    sConnectParams = ctypes.string_at (pcCombined).decode ('utf-8')
    goside.goFreePointer (self.uLog, pcCombined)

    pcError = ctypes.POINTER (ctypes.c_char) ()
    uLog = ctypes.c_uint64 ()
    goside.goParseParams (sConnectParams.encode ('utf-8'), ctypes.byref (pcError), ctypes.byref (uLog))
    if pcError:
        sErr = ctypes.string_at (pcError).decode ('utf-8')
        goside.goFreePointer (self.uLog, pcError)
        raise OperationalError (sErr)
    self.uLog = uLog.value
    self.bTraceLog = (self.uLog & 1) != 0
    self.bDebugLog = (self.uLog & 2) != 0
    self.bTimingLog = (self.uLog & 8) != 0

    if self.bTraceLog:
        traceLog ("> enter __init__ {}".format (sConnectParams))
    try:
        pcError = ctypes.POINTER (ctypes.c_char)()
        uConnHandle = ctypes.c_uint64()
        goside.goCreateConnection (self.uLog, vernumber.sVersionNumber.encode ('utf-8'), sConnectParams.encode ('utf-8'), ctypes.byref (pcError), ctypes.byref (uConnHandle))
        if pcError:
            sErr = ctypes.string_at(pcError).decode('utf-8')
            goside.goFreePointer (self.uLog, pcError)
            raise OperationalError(sErr)
        self.uConnHandle = uConnHandle.value
    finally:
        if self.bTraceLog:
            traceLog ("< leave __init__ {}".format (self))
# end __init__
(self, sConnectParams=None, **kwargs)
54,087
teradatasql
__repr__
null
def __repr__(self): # Equivalent to the toString method in Java or the String method in Go
    return("{} uConnHandle={}".format(self.__class__.__name__, self.uConnHandle))
(self)
54,088
teradatasql
cancel
null
def cancel(self):
    if self.bTraceLog:
        traceLog ("> enter cancel {}".format (self))
    try:
        pcError = ctypes.POINTER (ctypes.c_char)()
        goside.goCancelRequest (self.uLog, self.uConnHandle, ctypes.byref (pcError))
        if pcError:
            sErr = ctypes.string_at(pcError).decode('utf-8')
            goside.goFreePointer (self.uLog, pcError)
            raise OperationalError(sErr)
    finally:
        if self.bTraceLog:
            traceLog ("< leave cancel {}".format (self))
# end cancel
(self)
54,089
teradatasql
close
null
def close(self): # Required by DBAPI 2.0
    if self.bTraceLog:
        traceLog ("> enter close {}".format (self))
    try:
        pcError = ctypes.POINTER (ctypes.c_char)()
        goside.goCloseConnection (self.uLog, self.uConnHandle, ctypes.byref (pcError))
        if pcError:
            sErr = ctypes.string_at(pcError).decode('utf-8')
            goside.goFreePointer (self.uLog, pcError)
            raise OperationalError(sErr)
    finally:
        if self.bTraceLog:
            traceLog ("< leave close {}".format (self))
# end close
(self)
54,090
teradatasql
commit
null
def commit(self): # Required by DBAPI 2.0
    if self.bTraceLog:
        traceLog ("> enter commit {}".format (self))
    try:
        with self.cursor () as cur:
            cur.execute ("{fn teradata_commit}")
    finally:
        if self.bTraceLog:
            traceLog ("< leave commit {}".format (self))
# end commit
(self)
54,091
teradatasql
cursor
null
def cursor(self): # Required by DBAPI 2.0
    return TeradataCursor(self)
(self)
54,092
teradatasql
rollback
null
def rollback(self): # Required by DBAPI 2.0
    if self.bTraceLog:
        traceLog ("> enter rollback {}".format (self))
    try:
        with self.cursor () as cur:
            cur.execute ("{fn teradata_rollback}")
    finally:
        if self.bTraceLog:
            traceLog ("< leave rollback {}".format (self))
# end rollback
(self)
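commit and rollback above are thin wrappers over the driver's escape functions, and the autocommit property shown in the class body toggles {fn teradata_autocommit_on}/{fn teradata_autocommit_off}. A sketch of explicit transaction control under those assumptions; con is an open connection and mytab is a hypothetical table:

import teradatasql

con.autocommit = False                                 # {fn teradata_nativesql}{fn teradata_autocommit_off}
try:
    with con.cursor () as cur:
        cur.execute ("insert into mytab (c1) values (?)", [1])
        cur.execute ("insert into mytab (c1) values (?)", [2])
    con.commit ()                                      # {fn teradata_commit}
except teradatasql.Error:
    con.rollback ()                                    # {fn teradata_rollback}
    raise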
54,093
teradatasql
TeradataCursor
null
class TeradataCursor:

    def __init__(self, con):
        self.description = None # Required by DBAPI 2.0
        self.rowcount = -1 # Required by DBAPI 2.0
        self.activitytype = None
        self.activityname = None
        self.arraysize = 1 # Required by DBAPI 2.0
        self.rownumber = None # Optional by DBAPI 2.0
        self.connection = con # Optional by DBAPI 2.0
        self.uRowsHandle = None
        self.bClosed = False
    # end __init__

    def callproc(self, sProcName, params=None): # Required by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter callproc {}".format (self))
        try:
            sCall = "{call " + sProcName
            if params:
                sCall += " (" + ", ".join (["?"] * len (params)) + ")"
            sCall += "}"
            self.execute (sCall, params)
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave callproc {}".format (self))
    # end callproc

    def close(self): # Required by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter close {}".format (self))
        try:
            if not self.bClosed:
                self.bClosed = True
                self._closeRows ()
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave close {}".format (self))
    # end close

    def _stopIfClosed (self):
        if self.connection.bTraceLog:
            traceLog ("> enter _stopIfClosed {}".format (self))
        try:
            if self.bClosed:
                raise ProgrammingError ("Cursor is closed")
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave _stopIfClosed {}".format (self))
    # end _stopIfClosed

    def _closeRows (self):
        if self.connection.bTraceLog:
            traceLog ("> enter _closeRows {}".format (self))
        try:
            if self.uRowsHandle:
                pcError = ctypes.POINTER (ctypes.c_char)()
                goside.goCloseRows (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError))
                self.uRowsHandle = None
                if pcError:
                    sErr = ctypes.string_at(pcError).decode('utf-8')
                    goside.goFreePointer (self.connection.uLog, pcError)
                    raise OperationalError(sErr)
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave _closeRows {}".format (self))
    # end _closeRows

    def execute (self, sOperation, params = None, ignoreErrors = None): # Required by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter execute {}".format (self))
        try:
            if params and type (params) not in [list, tuple]:
                raise TypeError ("params unexpected type {}".format (type (params)))
            if not params:
                self.executemany (sOperation, None, ignoreErrors)
            elif type (params [0]) in [list, tuple]:
                # Excerpt from PEP 249 DBAPI documentation:
                # The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single
                # operation, but this kind of usage is deprecated: .executemany() should be used instead.
                self.executemany (sOperation, params, ignoreErrors)
            else:
                self.executemany (sOperation, [params, ], ignoreErrors)
            return self
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave execute {}".format (self))
    # end execute

    def _obtainResultMetaData (self):
        if self.connection.bTraceLog:
            traceLog ("> enter _obtainResultMetaData {}".format (self))
        try:
            pcError = ctypes.POINTER (ctypes.c_char) ()
            uActivityCount = ctypes.c_uint64 ()
            uActivityType = ctypes.c_uint16 ()
            pcActivityName = ctypes.POINTER (ctypes.c_char) ()
            pcColumnMetaData = ctypes.POINTER (ctypes.c_char) ()
            goside.goResultMetaData (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError), ctypes.byref (uActivityCount), ctypes.byref (uActivityType), ctypes.byref (pcActivityName), None, ctypes.byref (pcColumnMetaData))
            if pcError:
                sErr = ctypes.string_at (pcError).decode ('utf-8')
                goside.goFreePointer (self.connection.uLog, pcError)
                raise OperationalError (sErr)
            self.rowcount = uActivityCount.value
            self.activitytype = uActivityType.value
            if pcActivityName:
                self.activityname = ctypes.string_at (pcActivityName).decode ('utf-8')
                goside.goFreePointer (self.connection.uLog, pcActivityName)
            if pcColumnMetaData:
                self.description = []
                i = 0
                while pcColumnMetaData [i] != b'Z': # Z=terminator
                    columnDesc = []
                    # (1) Column name
                    i = _deserializeString (pcColumnMetaData, i, columnDesc)
                    i = _deserializeString (pcColumnMetaData, i, None) # discard Type name
                    # (2) Type code
                    i = _deserializeString (pcColumnMetaData, i, columnDesc)
                    if columnDesc [-1] == 'b': # typeCode b=bytes
                        columnDesc [-1] = BINARY
                    elif columnDesc [-1] == 'd': # typeCode d=double
                        columnDesc [-1] = float
                    elif columnDesc [-1] in ('i', 'l'): # typeCode i=integer (int32), l=long (int64)
                        columnDesc [-1] = int
                    elif columnDesc [-1] == 'm': # typeCode m=number
                        columnDesc [-1] = decimal.Decimal
                    elif columnDesc [-1] == 's': # typeCode s=string
                        columnDesc [-1] = STRING
                    elif columnDesc [-1] == 'u': # typeCode u=date
                        columnDesc [-1] = datetime.date
                    elif columnDesc [-1] in ('v', 'w'): # typeCode v=time, w=time with time zone
                        columnDesc [-1] = datetime.time
                    elif columnDesc [-1] in ('x', 'y'): # typeCode x=timestamp, y=timestamp with time zone
                        columnDesc [-1] = datetime.datetime
                    # (3) Display size
                    columnDesc.append (None) # not provided
                    # (4) Max byte count
                    i = _deserializeLong (pcColumnMetaData, i, columnDesc)
                    # (5) Precision
                    i = _deserializeLong (pcColumnMetaData, i, columnDesc)
                    # (6) Scale
                    i = _deserializeLong (pcColumnMetaData, i, columnDesc)
                    # (7) Nullable
                    i = _deserializeBool (pcColumnMetaData, i, columnDesc)
                    self.description.append (columnDesc)
                # end while
                goside.goFreePointer (self.connection.uLog, pcColumnMetaData)
            # end if pcColumnMetaData
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave _obtainResultMetaData {}".format (self))
    # end _obtainResultMetaData

    def executemany (self, sOperation, seqOfParams, ignoreErrors = None): # Required by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter executemany {}".format (self))
        try:
            self._stopIfClosed ()
            self._closeRows ()
            if ignoreErrors:
                if type (ignoreErrors) == int:
                    ignoreErrors = [ignoreErrors]
                if type (ignoreErrors) not in [list, tuple]:
                    raise TypeError ("ignoreErrors unexpected type {}".format (type (ignoreErrors)))
                for i in range (0, len (ignoreErrors)):
                    if type (ignoreErrors [i]) != int:
                        raise TypeError ("ignoreErrors[{}] unexpected type {}".format (i, type (ignoreErrors [i])))
                setIgnoreErrorCodes = set (ignoreErrors)
            else:
                setIgnoreErrorCodes = set () # empty set
            dStartTime = time.time ()
            with io.BytesIO (b'') as osBindValues:
                if seqOfParams:
                    if type (seqOfParams) not in [list, tuple]:
                        raise TypeError ("seqOfParams unexpected type {}".format (type (seqOfParams)))
                    for i in range (0, len (seqOfParams)):
                        aoRowValues = seqOfParams [i]
                        if type (aoRowValues) not in [list, tuple]:
                            raise TypeError ("seqOfParams[{}] unexpected type {}".format (i, type (aoRowValues)))
                        if len (aoRowValues) == 0:
                            raise ValueError ("seqOfParams[{}] is zero length".format (i))
                        for j in range (0, len (aoRowValues)):
                            oValue = aoRowValues [j]
                            if isinstance (oValue, str):
                                aby = oValue.encode ("utf-8")
                                osBindValues.write (b'S')
                                osBindValues.write (struct.pack (">Q", len (aby)))
                                osBindValues.write (aby)
                                continue
                            if isinstance (oValue, int):
                                osBindValues.write (b'L')
                                osBindValues.write (struct.pack (">q", oValue))
                                continue
                            if oValue is None:
                                osBindValues.write (b'N')
                                continue
                            if isinstance (oValue, float):
                                osBindValues.write (b'D')
                                osBindValues.write (struct.pack (">d", oValue))
                                continue
                            if isinstance (oValue, decimal.Decimal):
                                aby = "{:f}".format (oValue).encode ("utf-8") # avoid exponential notation
                                osBindValues.write (b'M')
                                osBindValues.write (struct.pack (">Q", len (aby)))
                                osBindValues.write (aby)
                                continue
                            if isinstance (oValue, datetime.datetime): # check first because datetime is a subclass of date
                                aby = oValue.isoformat (" ").encode ("utf-8")
                                osBindValues.write (b'Y' if oValue.tzinfo else b'X')
                                osBindValues.write (struct.pack (">Q", len (aby)))
                                osBindValues.write (aby)
                                continue
                            if isinstance (oValue, datetime.date):
                                aby = oValue.isoformat ().encode ("utf-8")
                                osBindValues.write (b'U')
                                osBindValues.write (struct.pack (">Q", len (aby)))
                                osBindValues.write (aby)
                                continue
                            if isinstance (oValue, datetime.time):
                                aby = oValue.isoformat ().encode ("utf-8")
                                osBindValues.write (b'W' if oValue.tzinfo else b'V')
                                osBindValues.write (struct.pack (">Q", len (aby)))
                                osBindValues.write (aby)
                                continue
                            if isinstance (oValue, datetime.timedelta):
                                aby = _formatTimedelta (oValue).encode ("utf-8")
                                osBindValues.write (b'S') # serialized as string
                                osBindValues.write (struct.pack (">Q", len (aby)))
                                osBindValues.write (aby)
                                continue
                            if isinstance (oValue, bytes) or isinstance (oValue, bytearray):
                                osBindValues.write (b'B')
                                osBindValues.write (struct.pack (">Q", len (oValue)))
                                osBindValues.write (oValue)
                                continue
                            raise TypeError ("seqOfParams[{}][{}] unexpected type {}".format (i, j, type (oValue)))
                        # end for j
                        osBindValues.write (b'Z') # end of row terminator
                    # end for i
                # end if seqOfParams
                osBindValues.write (b'Z') # end of all rows terminator
                abyBindValues = osBindValues.getvalue ()
            # end with osBindValues
            if self.connection.bTimingLog:
                timingLog ("executemany serialize bind values took {} ms and produced {} bytes".format ((time.time () - dStartTime) * 1000.0, len (abyBindValues)))
            dStartTime = time.time ()
            pcError = ctypes.POINTER (ctypes.c_char) ()
            uRowsHandle = ctypes.c_uint64 ()
            goside.goCreateRows (self.connection.uLog, self.connection.uConnHandle, sOperation.encode ('utf-8'), len (abyBindValues), abyBindValues, ctypes.byref (pcError), ctypes.byref (uRowsHandle))
            if pcError:
                sErr = ctypes.string_at (pcError).decode ('utf-8')
                goside.goFreePointer (self.connection.uLog, pcError)
                setErrorCodes = { int (s) for s in re.findall ("\\[Error (\\d+)\\]", sErr) }
                setIntersection = setErrorCodes & setIgnoreErrorCodes
                bIgnore = len (setIntersection) > 0 # ignore when intersection is non-empty
                if self.connection.bDebugLog:
                    debugLog ("executemany bIgnore={} setIntersection={} setErrorCodes={} setIgnoreErrorCodes={}".format (bIgnore, setIntersection, setErrorCodes, setIgnoreErrorCodes))
                if bIgnore:
                    return
                raise OperationalError (sErr)
            if self.connection.bTimingLog:
                timingLog ("executemany call to goCreateRows took {} ms".format ((time.time () - dStartTime) * 1000.0))
            self.uRowsHandle = uRowsHandle.value
            self._obtainResultMetaData ()
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave executemany {}".format (self))
    # end executemany

    def fetchone(self): # Required by DBAPI 2.0
        try:
            return next(self)
        except StopIteration:
            return None
    # end fetchone

    def fetchmany(self, nDesiredRowCount=None): # Required by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter fetchmany {}".format (self))
        try:
            if nDesiredRowCount is None:
                nDesiredRowCount = self.arraysize
            rows = []
            nObservedRowCount = 0
            for row in self:
                rows.append(row)
                nObservedRowCount += 1
                if nObservedRowCount == nDesiredRowCount:
                    break
            return rows
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave fetchmany {}".format (self))
    # end fetchmany

    def fetchall(self): # Required by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter fetchall {}".format (self))
        try:
            rows = []
            for row in self:
                rows.append(row)
            return rows
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave fetchall {}".format (self))
    # end fetchall

    def nextset(self): # Optional by DBAPI 2.0
        if self.connection.bTraceLog:
            traceLog ("> enter nextset {}".format (self))
        try:
            self._stopIfClosed ()
            if self.uRowsHandle:
                pcError = ctypes.POINTER (ctypes.c_char)()
                cAvail = ctypes.c_char()
                goside.goNextResult (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError), ctypes.byref (cAvail))
                if pcError:
                    sErr = ctypes.string_at(pcError).decode('utf-8')
                    goside.goFreePointer (self.connection.uLog, pcError)
                    raise OperationalError(sErr)
                if cAvail.value == b'Y':
                    self._obtainResultMetaData ()
                else:
                    self.description = None
                    self.rowcount = -1
                    self.activitytype = None
                    self.activityname = None
                return cAvail.value == b'Y'
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave nextset {}".format (self))
    # end nextset

    def setinputsizes(self, sizes): # Required by DBAPI 2.0
        self._stopIfClosed ()

    def setoutputsize(self, size, column=None): # Required by DBAPI 2.0
        self._stopIfClosed ()

    def __iter__(self): # Implements iterable
        # Optional by DBAPI 2.0
        return self

    def __next__(self): # Implements Python 3 iterator
        if self.connection.bTraceLog:
            traceLog ("> enter __next__ {}".format (self))
        try:
            self._stopIfClosed ()
            if self.uRowsHandle:
                pcError = ctypes.POINTER (ctypes.c_char)()
                nColumnValuesByteCount = ctypes.c_int32 ()
                pcColumnValues = ctypes.POINTER (ctypes.c_char)()
                goside.goFetchRow (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError), ctypes.byref (nColumnValuesByteCount), ctypes.byref (pcColumnValues))
                if pcError:
                    sErr = ctypes.string_at (pcError).decode ('utf-8')
                    goside.goFreePointer (self.connection.uLog, pcError)
                    raise OperationalError (sErr)
                if pcColumnValues:
                    if self.connection.bDebugLog and nColumnValuesByteCount:
                        debugLog ("__next__ nColumnValuesByteCount={}\n{}".format (nColumnValuesByteCount.value, _hexDump (ctypes.string_at (pcColumnValues, nColumnValuesByteCount))))
                    row = []
                    i = 0
                    while pcColumnValues [i] != b'Z': # Z=terminator
                        if pcColumnValues [i] == b'N': # N=null
                            iNew = _deserializeNull (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'B': # B=bytes
                            iNew = _deserializeBytes (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'D': # D=double
                            iNew = _deserializeDouble (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'I': # I=integer
                            iNew = _deserializeInt (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'L': # L=long
                            iNew = _deserializeLong (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'M': # M=number
                            iNew = _deserializeNumber (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'S': # S=string
                            iNew = _deserializeString (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'U': # U=date
                            iNew = _deserializeDate (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'V': # V=time
                            iNew = _deserializeTime (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'W': # W=time with time zone
                            iNew = _deserializeTimeWithTimeZone (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'X': # X=timestamp
                            iNew = _deserializeTimestamp (pcColumnValues, i, row)
                        elif pcColumnValues [i] == b'Y': # Y=timestamp with time zone
                            iNew = _deserializeTimestampWithTimeZone (pcColumnValues, i, row)
                        else:
                            raise OperationalError ('Unrecognized column type {} at byte offset {}'.format (pcColumnValues [i], i))
                        if self.connection.bDebugLog:
                            debugLog ("__next__ row[{}] typeCode={} type={} value={}".format (len (row) - 1, pcColumnValues [i], type (row [-1]), row [-1]))
                        i = iNew
                    # end while
                    goside.goFreePointer (self.connection.uLog, pcColumnValues)
                    return row
                # end if pcColumnValues
            # end if self.uRowsHandle
            raise StopIteration ()
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave __next__ {}".format (self))
    # end __next__

    def next(self): # Implements Python 2 iterator
        # Optional by DBAPI 2.0
        return self.__next__()

    def __enter__(self): # Implements with-statement context manager
        return self

    def __exit__(self, t, value, traceback): # Implements with-statement context manager
        if self.connection.bTraceLog:
            traceLog ("> enter __exit__ {}".format (self))
        try:
            self.close()
        finally:
            if self.connection.bTraceLog:
                traceLog ("< leave __exit__ {}".format (self))
    # end __exit__

    def __repr__(self): # Equivalent to the toString method in Java or the String method in Go
        return "{} uRowsHandle={} bClosed={}".format (self.__class__.__name__, self.uRowsHandle, self.bClosed)

# end class TeradataCursor
(con)
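Because TeradataCursor implements __iter__/__next__, all three fetch methods are built on plain iteration, and execute returns the cursor itself so it can be iterated directly. A sketch, assuming cur is an open cursor and mytab a hypothetical table:

cur.execute ("select c1, c2 from mytab order by c1")
row = cur.fetchone ()        # next (cur) under the hood; None when exhausted
batch = cur.fetchmany (100)  # up to 100 rows; defaults to cur.arraysize (1)
rest = cur.fetchall ()       # all remaining rows, each row a list of column values

for row in cur.execute ("select c1 from mytab"):  # execute returns self
    print (row [0])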
54,095
teradatasql
__exit__
null
def __exit__(self, t, value, traceback): # Implements with-statement context manager
    if self.connection.bTraceLog:
        traceLog ("> enter __exit__ {}".format (self))
    try:
        self.close()
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave __exit__ {}".format (self))
# end __exit__
(self, t, value, traceback)
54,096
teradatasql
__init__
null
def __init__(self, con):
    self.description = None # Required by DBAPI 2.0
    self.rowcount = -1 # Required by DBAPI 2.0
    self.activitytype = None
    self.activityname = None
    self.arraysize = 1 # Required by DBAPI 2.0
    self.rownumber = None # Optional by DBAPI 2.0
    self.connection = con # Optional by DBAPI 2.0
    self.uRowsHandle = None
    self.bClosed = False
# end __init__
(self, con)
54,097
teradatasql
__iter__
null
def __iter__(self): # Implements iterable
    # Optional by DBAPI 2.0
    return self
(self)
54,098
teradatasql
__next__
null
def __next__(self): # Implements Python 3 iterator
    if self.connection.bTraceLog:
        traceLog ("> enter __next__ {}".format (self))
    try:
        self._stopIfClosed ()
        if self.uRowsHandle:
            pcError = ctypes.POINTER (ctypes.c_char)()
            nColumnValuesByteCount = ctypes.c_int32 ()
            pcColumnValues = ctypes.POINTER (ctypes.c_char)()
            goside.goFetchRow (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError), ctypes.byref (nColumnValuesByteCount), ctypes.byref (pcColumnValues))
            if pcError:
                sErr = ctypes.string_at (pcError).decode ('utf-8')
                goside.goFreePointer (self.connection.uLog, pcError)
                raise OperationalError (sErr)
            if pcColumnValues:
                if self.connection.bDebugLog and nColumnValuesByteCount:
                    debugLog ("__next__ nColumnValuesByteCount={}\n{}".format (nColumnValuesByteCount.value, _hexDump (ctypes.string_at (pcColumnValues, nColumnValuesByteCount))))
                row = []
                i = 0
                while pcColumnValues [i] != b'Z': # Z=terminator
                    if pcColumnValues [i] == b'N': # N=null
                        iNew = _deserializeNull (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'B': # B=bytes
                        iNew = _deserializeBytes (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'D': # D=double
                        iNew = _deserializeDouble (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'I': # I=integer
                        iNew = _deserializeInt (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'L': # L=long
                        iNew = _deserializeLong (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'M': # M=number
                        iNew = _deserializeNumber (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'S': # S=string
                        iNew = _deserializeString (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'U': # U=date
                        iNew = _deserializeDate (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'V': # V=time
                        iNew = _deserializeTime (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'W': # W=time with time zone
                        iNew = _deserializeTimeWithTimeZone (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'X': # X=timestamp
                        iNew = _deserializeTimestamp (pcColumnValues, i, row)
                    elif pcColumnValues [i] == b'Y': # Y=timestamp with time zone
                        iNew = _deserializeTimestampWithTimeZone (pcColumnValues, i, row)
                    else:
                        raise OperationalError ('Unrecognized column type {} at byte offset {}'.format (pcColumnValues [i], i))
                    if self.connection.bDebugLog:
                        debugLog ("__next__ row[{}] typeCode={} type={} value={}".format (len (row) - 1, pcColumnValues [i], type (row [-1]), row [-1]))
                    i = iNew
                # end while
                goside.goFreePointer (self.connection.uLog, pcColumnValues)
                return row
            # end if pcColumnValues
        # end if self.uRowsHandle
        raise StopIteration ()
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave __next__ {}".format (self))
# end __next__
(self)
54,099
teradatasql
__repr__
null
def __repr__(self): # Equivalent to the toString method in Java or the String method in Go
    return "{} uRowsHandle={} bClosed={}".format (self.__class__.__name__, self.uRowsHandle, self.bClosed)
(self)
54,100
teradatasql
_closeRows
null
def _closeRows (self):
    if self.connection.bTraceLog:
        traceLog ("> enter _closeRows {}".format (self))
    try:
        if self.uRowsHandle:
            pcError = ctypes.POINTER (ctypes.c_char)()
            goside.goCloseRows (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError))
            self.uRowsHandle = None
            if pcError:
                sErr = ctypes.string_at(pcError).decode('utf-8')
                goside.goFreePointer (self.connection.uLog, pcError)
                raise OperationalError(sErr)
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave _closeRows {}".format (self))
# end _closeRows
(self)
54,101
teradatasql
_obtainResultMetaData
null
def _obtainResultMetaData (self):
    if self.connection.bTraceLog:
        traceLog ("> enter _obtainResultMetaData {}".format (self))
    try:
        pcError = ctypes.POINTER (ctypes.c_char) ()
        uActivityCount = ctypes.c_uint64 ()
        uActivityType = ctypes.c_uint16 ()
        pcActivityName = ctypes.POINTER (ctypes.c_char) ()
        pcColumnMetaData = ctypes.POINTER (ctypes.c_char) ()
        goside.goResultMetaData (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError), ctypes.byref (uActivityCount), ctypes.byref (uActivityType), ctypes.byref (pcActivityName), None, ctypes.byref (pcColumnMetaData))
        if pcError:
            sErr = ctypes.string_at (pcError).decode ('utf-8')
            goside.goFreePointer (self.connection.uLog, pcError)
            raise OperationalError (sErr)
        self.rowcount = uActivityCount.value
        self.activitytype = uActivityType.value
        if pcActivityName:
            self.activityname = ctypes.string_at (pcActivityName).decode ('utf-8')
            goside.goFreePointer (self.connection.uLog, pcActivityName)
        if pcColumnMetaData:
            self.description = []
            i = 0
            while pcColumnMetaData [i] != b'Z': # Z=terminator
                columnDesc = []
                # (1) Column name
                i = _deserializeString (pcColumnMetaData, i, columnDesc)
                i = _deserializeString (pcColumnMetaData, i, None) # discard Type name
                # (2) Type code
                i = _deserializeString (pcColumnMetaData, i, columnDesc)
                if columnDesc [-1] == 'b': # typeCode b=bytes
                    columnDesc [-1] = BINARY
                elif columnDesc [-1] == 'd': # typeCode d=double
                    columnDesc [-1] = float
                elif columnDesc [-1] in ('i', 'l'): # typeCode i=integer (int32), l=long (int64)
                    columnDesc [-1] = int
                elif columnDesc [-1] == 'm': # typeCode m=number
                    columnDesc [-1] = decimal.Decimal
                elif columnDesc [-1] == 's': # typeCode s=string
                    columnDesc [-1] = STRING
                elif columnDesc [-1] == 'u': # typeCode u=date
                    columnDesc [-1] = datetime.date
                elif columnDesc [-1] in ('v', 'w'): # typeCode v=time, w=time with time zone
                    columnDesc [-1] = datetime.time
                elif columnDesc [-1] in ('x', 'y'): # typeCode x=timestamp, y=timestamp with time zone
                    columnDesc [-1] = datetime.datetime
                # (3) Display size
                columnDesc.append (None) # not provided
                # (4) Max byte count
                i = _deserializeLong (pcColumnMetaData, i, columnDesc)
                # (5) Precision
                i = _deserializeLong (pcColumnMetaData, i, columnDesc)
                # (6) Scale
                i = _deserializeLong (pcColumnMetaData, i, columnDesc)
                # (7) Nullable
                i = _deserializeBool (pcColumnMetaData, i, columnDesc)
                self.description.append (columnDesc)
            # end while
            goside.goFreePointer (self.connection.uLog, pcColumnMetaData)
        # end if pcColumnMetaData
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave _obtainResultMetaData {}".format (self))
# end _obtainResultMetaData
(self)
54,102
teradatasql
_stopIfClosed
null
def _stopIfClosed (self):
    if self.connection.bTraceLog:
        traceLog ("> enter _stopIfClosed {}".format (self))
    try:
        if self.bClosed:
            raise ProgrammingError ("Cursor is closed")
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave _stopIfClosed {}".format (self))
# end _stopIfClosed
(self)
54,103
teradatasql
callproc
null
def callproc(self, sProcName, params=None): # Required by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter callproc {}".format (self))
    try:
        sCall = "{call " + sProcName
        if params:
            sCall += " (" + ", ".join (["?"] * len (params)) + ")"
        sCall += "}"
        self.execute (sCall, params)
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave callproc {}".format (self))
# end callproc
(self, sProcName, params=None)
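callproc composes the ODBC-style call escape from the procedure name and the number of bound parameters, then delegates to execute. A sketch assuming cur is an open cursor and the procedure names are hypothetical:

cur.callproc ("myproc", ["abc", 123])  # executes {call myproc (?, ?)}
cur.callproc ("noargproc")             # executes {call noargproc}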
54,104
teradatasql
close
null
def close(self): # Required by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter close {}".format (self))
    try:
        if not self.bClosed:
            self.bClosed = True
            self._closeRows ()
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave close {}".format (self))
# end close
(self)
54,105
teradatasql
execute
null
def execute (self, sOperation, params = None, ignoreErrors = None): # Required by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter execute {}".format (self))
    try:
        if params and type (params) not in [list, tuple]:
            raise TypeError ("params unexpected type {}".format (type (params)))
        if not params:
            self.executemany (sOperation, None, ignoreErrors)
        elif type (params [0]) in [list, tuple]:
            # Excerpt from PEP 249 DBAPI documentation:
            # The parameters may also be specified as list of tuples to e.g. insert multiple rows in a single
            # operation, but this kind of usage is deprecated: .executemany() should be used instead.
            self.executemany (sOperation, params, ignoreErrors)
        else:
            self.executemany (sOperation, [params, ], ignoreErrors)
        return self
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave execute {}".format (self))
# end execute
(self, sOperation, params=None, ignoreErrors=None)
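execute normalizes its params argument and delegates to executemany: no params means a parameterless request, a flat sequence is wrapped as a single row of bind values, and a sequence of sequences is passed through unchanged (a usage PEP 249 deprecates in favor of calling executemany directly). A sketch with a hypothetical table:

cur.execute ("insert into mytab (c1, c2) values (?, ?)", [1, "abc"])
# is equivalent to
cur.executemany ("insert into mytab (c1, c2) values (?, ?)", [[1, "abc"]])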
54,106
teradatasql
executemany
null
def executemany (self, sOperation, seqOfParams, ignoreErrors = None): # Required by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter executemany {}".format (self))
    try:
        self._stopIfClosed ()
        self._closeRows ()
        if ignoreErrors:
            if type (ignoreErrors) == int:
                ignoreErrors = [ignoreErrors]
            if type (ignoreErrors) not in [list, tuple]:
                raise TypeError ("ignoreErrors unexpected type {}".format (type (ignoreErrors)))
            for i in range (0, len (ignoreErrors)):
                if type (ignoreErrors [i]) != int:
                    raise TypeError ("ignoreErrors[{}] unexpected type {}".format (i, type (ignoreErrors [i])))
            setIgnoreErrorCodes = set (ignoreErrors)
        else:
            setIgnoreErrorCodes = set () # empty set
        dStartTime = time.time ()
        with io.BytesIO (b'') as osBindValues:
            if seqOfParams:
                if type (seqOfParams) not in [list, tuple]:
                    raise TypeError ("seqOfParams unexpected type {}".format (type (seqOfParams)))
                for i in range (0, len (seqOfParams)):
                    aoRowValues = seqOfParams [i]
                    if type (aoRowValues) not in [list, tuple]:
                        raise TypeError ("seqOfParams[{}] unexpected type {}".format (i, type (aoRowValues)))
                    if len (aoRowValues) == 0:
                        raise ValueError ("seqOfParams[{}] is zero length".format (i))
                    for j in range (0, len (aoRowValues)):
                        oValue = aoRowValues [j]
                        if isinstance (oValue, str):
                            aby = oValue.encode ("utf-8")
                            osBindValues.write (b'S')
                            osBindValues.write (struct.pack (">Q", len (aby)))
                            osBindValues.write (aby)
                            continue
                        if isinstance (oValue, int):
                            osBindValues.write (b'L')
                            osBindValues.write (struct.pack (">q", oValue))
                            continue
                        if oValue is None:
                            osBindValues.write (b'N')
                            continue
                        if isinstance (oValue, float):
                            osBindValues.write (b'D')
                            osBindValues.write (struct.pack (">d", oValue))
                            continue
                        if isinstance (oValue, decimal.Decimal):
                            aby = "{:f}".format (oValue).encode ("utf-8") # avoid exponential notation
                            osBindValues.write (b'M')
                            osBindValues.write (struct.pack (">Q", len (aby)))
                            osBindValues.write (aby)
                            continue
                        if isinstance (oValue, datetime.datetime): # check first because datetime is a subclass of date
                            aby = oValue.isoformat (" ").encode ("utf-8")
                            osBindValues.write (b'Y' if oValue.tzinfo else b'X')
                            osBindValues.write (struct.pack (">Q", len (aby)))
                            osBindValues.write (aby)
                            continue
                        if isinstance (oValue, datetime.date):
                            aby = oValue.isoformat ().encode ("utf-8")
                            osBindValues.write (b'U')
                            osBindValues.write (struct.pack (">Q", len (aby)))
                            osBindValues.write (aby)
                            continue
                        if isinstance (oValue, datetime.time):
                            aby = oValue.isoformat ().encode ("utf-8")
                            osBindValues.write (b'W' if oValue.tzinfo else b'V')
                            osBindValues.write (struct.pack (">Q", len (aby)))
                            osBindValues.write (aby)
                            continue
                        if isinstance (oValue, datetime.timedelta):
                            aby = _formatTimedelta (oValue).encode ("utf-8")
                            osBindValues.write (b'S') # serialized as string
                            osBindValues.write (struct.pack (">Q", len (aby)))
                            osBindValues.write (aby)
                            continue
                        if isinstance (oValue, bytes) or isinstance (oValue, bytearray):
                            osBindValues.write (b'B')
                            osBindValues.write (struct.pack (">Q", len (oValue)))
                            osBindValues.write (oValue)
                            continue
                        raise TypeError ("seqOfParams[{}][{}] unexpected type {}".format (i, j, type (oValue)))
                    # end for j
                    osBindValues.write (b'Z') # end of row terminator
                # end for i
            # end if seqOfParams
            osBindValues.write (b'Z') # end of all rows terminator
            abyBindValues = osBindValues.getvalue ()
        # end with osBindValues
        if self.connection.bTimingLog:
            timingLog ("executemany serialize bind values took {} ms and produced {} bytes".format ((time.time () - dStartTime) * 1000.0, len (abyBindValues)))
        dStartTime = time.time ()
        pcError = ctypes.POINTER (ctypes.c_char) ()
        uRowsHandle = ctypes.c_uint64 ()
        goside.goCreateRows (self.connection.uLog, self.connection.uConnHandle, sOperation.encode ('utf-8'), len (abyBindValues), abyBindValues, ctypes.byref (pcError), ctypes.byref (uRowsHandle))
        if pcError:
            sErr = ctypes.string_at (pcError).decode ('utf-8')
            goside.goFreePointer (self.connection.uLog, pcError)
            setErrorCodes = { int (s) for s in re.findall ("\\[Error (\\d+)\\]", sErr) }
            setIntersection = setErrorCodes & setIgnoreErrorCodes
            bIgnore = len (setIntersection) > 0 # ignore when intersection is non-empty
            if self.connection.bDebugLog:
                debugLog ("executemany bIgnore={} setIntersection={} setErrorCodes={} setIgnoreErrorCodes={}".format (bIgnore, setIntersection, setErrorCodes, setIgnoreErrorCodes))
            if bIgnore:
                return
            raise OperationalError (sErr)
        if self.connection.bTimingLog:
            timingLog ("executemany call to goCreateRows took {} ms".format ((time.time () - dStartTime) * 1000.0))
        self.uRowsHandle = uRowsHandle.value
        self._obtainResultMetaData ()
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave executemany {}".format (self))
# end executemany
(self, sOperation, seqOfParams, ignoreErrors=None)
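executemany serializes every bind value into a tagged byte stream before handing the batch to goCreateRows: a one-byte type code, then for variable-width types a big-endian uint64 byte count and the payload, with b'Z' terminating each row and the whole batch. A standalone sketch that reproduces, by hand, the bytes the method builds for one row holding the string "abc" and the integer 5:

import struct

abyRow = (b'S' + struct.pack (">Q", 3) + b"abc"  # S=string: length-prefixed UTF-8
        + b'L' + struct.pack (">q", 5)           # L=long: fixed-width big-endian int64, no length prefix
        + b'Z'                                   # end of row
        + b'Z')                                  # end of all rows
print (abyRow.hex ())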
54,107
teradatasql
fetchall
null
def fetchall(self): # Required by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter fetchall {}".format (self))
    try:
        rows = []
        for row in self:
            rows.append(row)
        return rows
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave fetchall {}".format (self))
# end fetchall
(self)
54,108
teradatasql
fetchmany
null
def fetchmany(self, nDesiredRowCount=None): # Required by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter fetchmany {}".format (self))
    try:
        if nDesiredRowCount is None:
            nDesiredRowCount = self.arraysize
        rows = []
        nObservedRowCount = 0
        for row in self:
            rows.append(row)
            nObservedRowCount += 1
            if nObservedRowCount == nDesiredRowCount:
                break
        return rows
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave fetchmany {}".format (self))
# end fetchmany
(self, nDesiredRowCount=None)
54,109
teradatasql
fetchone
null
def fetchone(self): # Required by DBAPI 2.0
    try:
        return next(self)
    except StopIteration:
        return None
# end fetchone
(self)
54,110
teradatasql
next
null
def next(self): # Implements Python 2 iterator
    # Optional by DBAPI 2.0
    return self.__next__()
(self)
54,111
teradatasql
nextset
null
def nextset(self): # Optional by DBAPI 2.0
    if self.connection.bTraceLog:
        traceLog ("> enter nextset {}".format (self))
    try:
        self._stopIfClosed ()
        if self.uRowsHandle:
            pcError = ctypes.POINTER (ctypes.c_char)()
            cAvail = ctypes.c_char()
            goside.goNextResult (self.connection.uLog, self.uRowsHandle, ctypes.byref (pcError), ctypes.byref (cAvail))
            if pcError:
                sErr = ctypes.string_at(pcError).decode('utf-8')
                goside.goFreePointer (self.connection.uLog, pcError)
                raise OperationalError(sErr)
            if cAvail.value == b'Y':
                self._obtainResultMetaData ()
            else:
                self.description = None
                self.rowcount = -1
                self.activitytype = None
                self.activityname = None
            return cAvail.value == b'Y'
    finally:
        if self.connection.bTraceLog:
            traceLog ("< leave nextset {}".format (self))
# end nextset
(self)
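nextset advances to the next result set of a request, refreshing description, rowcount, activitytype, and activityname when one is available and clearing them otherwise. A sketch assuming cur is an open cursor; Teradata multi-statement requests separate statements with semicolons:

cur.execute ("select 1 ; select 2, 3")
print (cur.fetchall ())  # rows of the first statement
while cur.nextset ():    # True while another result set is available
    print (cur.fetchall ())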
54,112
teradatasql
setinputsizes
null
def setinputsizes(self, sizes): # Required by DBAPI 2.0
    self._stopIfClosed ()
(self, sizes)
54,113
teradatasql
setoutputsize
null
def setoutputsize(self, size, column=None): # Required by DBAPI 2.0
    self._stopIfClosed ()
(self, size, column=None)
54,115
teradatasql
TimeFromTicks
null
def TimeFromTicks (x): # Required by DBAPI 2.0
    return datetime.datetime.fromtimestamp (x).time ()
(x)
54,117
teradatasql
Warning
null
class Warning(Exception): # Required by DBAPI 2.0
    pass
null
54,118
teradatasql
_deserializeBool
null
def _deserializeBool (pc, i, row):
    if pc [i] in (b'T', b'F'): # T=true, F=false
        if row is not None:
            row.append (pc [i] == b'T')
        return i + 1
    elif pc [i] == b'N': # N=null
        return _deserializeNull (pc, i, row)
    else:
        raise OperationalError ('Expected column type T/F/N but got {} at byte offset {}'.format (pc [i], i))
# end _deserializeBool
(pc, i, row)
54,119
teradatasql
_deserializeBytes
null
def _deserializeBytes (pc, i, row):
    if pc [i] == b'B': # B=bytes
        i += 1
        uByteCount = struct.unpack (">Q", pc [i : i + 8]) [0] # uint64
        i += 8
        abyValue = pc [i : i + uByteCount]
        i += uByteCount
        if row is not None:
            row.append (abyValue)
        return i
    elif pc [i] == b'N': # N=null
        return _deserializeNull (pc, i, row)
    else:
        raise OperationalError ('Expected column type B/N but got {} at byte offset {}'.format (pc [i], i))
# end _deserializeBytes
(pc, i, row)
54,120
teradatasql
_deserializeCharacterValue
null
def _deserializeCharacterValue (abyTypeCode, pc, i, row):
    if pc [i] == abyTypeCode:
        i += 1
        uByteCount = struct.unpack (">Q", pc [i : i + 8]) [0] # uint64
        i += 8
        sValue = pc [i : i + uByteCount].decode ('utf-8')
        i += uByteCount
        if row is not None:
            # Accommodate optional fractional seconds for V=time, W=time with time zone, X=timestamp, Y=timestamp with time zone
            sFormatSuffix = '.%f' if abyTypeCode in (b'V', b'W', b'X', b'Y') and '.' in sValue else ''
            if abyTypeCode in (b'W', b'Y'): # W=time with time zone, Y=timestamp with time zone
                sValue = sValue [ : -3] + sValue [-2 : ] # remove colon from time zone value for compatibility with strptime
                sFormatSuffix += '%z'
            if abyTypeCode == b'U': # U=date
                row.append (datetime.datetime.strptime (sValue, '%Y-%m-%d').date ())
            elif abyTypeCode in (b'V', b'W'): # V=time, W=time with time zone
                row.append (datetime.datetime.strptime (sValue, '%H:%M:%S' + sFormatSuffix).timetz ())
            elif abyTypeCode in (b'X', b'Y'): # X=timestamp, Y=timestamp with time zone
                row.append (datetime.datetime.strptime (sValue, '%Y-%m-%d %H:%M:%S' + sFormatSuffix))
            elif abyTypeCode == b'M': # M=number
                row.append (decimal.Decimal (sValue))
            else: # S=string
                row.append (sValue)
        # end if row is not None
        return i
    elif pc [i] == b'N': # N=null
        return _deserializeNull (pc, i, row)
    else:
        raise OperationalError ('Expected column type {}/N but got {} at byte offset {}'.format (abyTypeCode, pc [i], i))
# end _deserializeCharacterValue
(abyTypeCode, pc, i, row)
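The character-valued types all travel as length-prefixed UTF-8 text and are parsed with strptime; the helper also strips the colon from a +HH:MM time zone offset because %z historically expects +HHMM. A standalone sketch of the encoding it consumes, driving the date wrapper defined in the next row; the buffer is built by hand here:

import ctypes
import struct

aby = b'U' + struct.pack (">Q", 10) + b"2023-01-15"  # U=date, 10-byte payload
pc = ctypes.cast (ctypes.create_string_buffer (aby), ctypes.POINTER (ctypes.c_char))
row = []
i = _deserializeDate (pc, 0, row)  # dispatches to _deserializeCharacterValue (b'U', ...)
print (row)  # [datetime.date(2023, 1, 15)]
print (i)    # 19 = 1 type byte + 8 length bytes + 10 payload bytes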
54,121
teradatasql
_deserializeDate
null
def _deserializeDate (pc, i, row): return _deserializeCharacterValue (b'U', pc, i, row)
(pc, i, row)
54,122
teradatasql
_deserializeDouble
null
def _deserializeDouble (pc, i, row):
    if pc [i] == b'D': # D=double
        i += 1
        dValue = struct.unpack (">d", pc [i : i + 8]) [0] # float64
        i += 8
        if row is not None:
            row.append (dValue)
        return i
    elif pc [i] == b'N': # N=null
        return _deserializeNull (pc, i, row)
    else:
        raise OperationalError ('Expected column type D/N but got {} at byte offset {}'.format (pc [i], i))
# end _deserializeDouble
(pc, i, row)
54,123
teradatasql
_deserializeInt
null
def _deserializeInt (pc, i, row):
    if pc [i] == b'I': # I=integer
        i += 1
        nValue = struct.unpack (">i", pc [i : i + 4]) [0] # int32
        i += 4
        if row is not None:
            row.append (nValue)
        return i
    elif pc [i] == b'N': # N=null
        return _deserializeNull (pc, i, row)
    else:
        raise OperationalError ('Expected column type I/N but got {} at byte offset {}'.format (pc [i], i))
# end _deserializeInt
(pc, i, row)
54,124
teradatasql
_deserializeLong
null
def _deserializeLong (pc, i, row):
    if pc [i] == b'L': # L=long
        i += 1
        nValue = struct.unpack (">q", pc [i : i + 8]) [0] # int64
        i += 8
        if row is not None:
            row.append (nValue)
        return i
    elif pc [i] == b'N': # N=null
        return _deserializeNull (pc, i, row)
    else:
        raise OperationalError ('Expected column type L/N but got {} at byte offset {}'.format (pc [i], i))
# end _deserializeLong
(pc, i, row)
54,125
teradatasql
_deserializeNull
null
def _deserializeNull (pc, i, row):
    if pc [i] == b'N': # N=null
        if row is not None:
            row.append (None)
        return i + 1
    else:
        raise OperationalError ('Expected column type N but got {} at byte offset {}'.format (pc [i], i))
# end _deserializeNull
(pc, i, row)
54,126
teradatasql
_deserializeNumber
null
def _deserializeNumber (pc, i, row): return _deserializeCharacterValue (b'M', pc, i, row)
(pc, i, row)
54,127
teradatasql
_deserializeString
null
def _deserializeString (pc, i, row): return _deserializeCharacterValue (b'S', pc, i, row)
(pc, i, row)
54,128
teradatasql
_deserializeTime
null
def _deserializeTime (pc, i, row): return _deserializeCharacterValue (b'V', pc, i, row)
(pc, i, row)
54,129
teradatasql
_deserializeTimeWithTimeZone
null
def _deserializeTimeWithTimeZone (pc, i, row): return _deserializeCharacterValue (b'W', pc, i, row)
(pc, i, row)
54,130
teradatasql
_deserializeTimestamp
null
def _deserializeTimestamp (pc, i, row): return _deserializeCharacterValue (b'X', pc, i, row)
(pc, i, row)
54,131
teradatasql
_deserializeTimestampWithTimeZone
null
def _deserializeTimestampWithTimeZone (pc, i, row): return _deserializeCharacterValue (b'Y', pc, i, row)
(pc, i, row)
54,132
teradatasql
_formatTimedelta
null
def _formatTimedelta (tdelta):
    # Output format matches VARCHAR values accepted by the Teradata Database for implicit conversion to INTERVAL DAY TO SECOND.
    # positive: 1234 12:34:56.123456
    # negative: -1234 12:34:56.123456
    nMM, nSS = divmod (tdelta.seconds, 60)
    nHH, nMM = divmod (nMM, 60)
    # Prepend a space character for a positive days value.
    return '{: d} {:02d}:{:02d}:{:02d}.{:06d}'.format (tdelta.days, nHH, nMM, nSS, tdelta.microseconds)
# end _formatTimedelta
(tdelta)
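A quick check of the format; the leading space comes from the '{: d}' conversion of a positive days value. Note that Python normalizes a negative timedelta to a negative days value with non-negative seconds and microseconds, so negative intervals render with those normalized fields:

import datetime

td = datetime.timedelta (days=1234, hours=12, minutes=34, seconds=56, microseconds=123456)
print (repr (_formatTimedelta (td)))  # ' 1234 12:34:56.123456'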
54,133
teradatasql
_hexDump
null
def _hexDump (aby):
    asLines = []
    for iOffset in range (0, len (aby), 16):
        abySegment = aby [iOffset : min (iOffset + 16, len (aby))]
        sHexDigits = binascii.hexlify (abySegment).decode ('ascii')
        asHexDigits = [ sHexDigits [i : i + 2] for i in range (0, len (sHexDigits), 2) ]
        sSpacedHexDigits = " ".join (asHexDigits)
        abyPrintable = b''
        for i in range (0, len (abySegment)):
            if abySegment [i] in range (32, 126): # printable chars are 32 space through 126 ~ tilde
                abyPrintable += abySegment [i : i + 1]
            else:
                abyPrintable += b'.'
        sPrintable = abyPrintable.decode ('ascii')
        asLines += [ "{:08x} {:<47} |{}|".format (iOffset, sSpacedHexDigits, sPrintable) ]
    return "\n".join (asLines)
# end _hexDump
(aby)
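A full 16-byte segment fills the hex column exactly (16 two-digit groups plus 15 separating spaces = 47 characters), and a shorter final segment is padded on the right by the '{:<47}' conversion. A sketch of the output under the reconstructed format string above:

print (_hexDump (b"Hello, Teradata!"))
# 00000000 48 65 6c 6c 6f 2c 20 54 65 72 61 64 61 74 61 21 |Hello, Teradata!|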
54,134
teradatasql
_serializeBool
null
def _serializeBool (b): return b'T' if b else b'F'
(b)
54,135
teradatasql
_serializeBytes
null
def _serializeBytes (aby): return b'B' + struct.pack (">Q", len (aby)) + aby
(aby)
54,136
teradatasql
_serializeCharacterValue
null
def _serializeCharacterValue (abyTypeCode, s):
    aby = s.encode ('utf-8')
    return abyTypeCode + struct.pack (">Q", len (aby)) + aby
(abyTypeCode, s)
54,137
teradatasql
_serializeDate
null
def _serializeDate (da): return _serializeCharacterValue (b'U', da.isoformat ())
(da)
54,138
teradatasql
_serializeDouble
null
def _serializeDouble (d): return b'D' + struct.pack (">d", d)
(d)
54,139
teradatasql
_serializeInt
null
def _serializeInt (n): return b'I' + struct.pack (">i", n)
(n)
54,140
teradatasql
_serializeLong
null
def _serializeLong (n): return b'L' + struct.pack (">q", n)
(n)
54,141
teradatasql
_serializeNull
null
def _serializeNull (): return b'N'
()
54,142
teradatasql
_serializeNumber
null
def _serializeNumber (dec): return _serializeCharacterValue (b'M', '{:f}'.format (dec)) # avoid exponential notation
(dec)
54,143
teradatasql
_serializeString
null
def _serializeString (s): return _serializeCharacterValue (b'S', s)
(s)
54,144
teradatasql
_serializeTime
null
def _serializeTime (ti): return _serializeCharacterValue (b'W' if ti.tzinfo else b'V', ti.isoformat ())
(ti)
54,145
teradatasql
_serializeTimestamp
null
def _serializeTimestamp (ts): return _serializeCharacterValue (b'Y' if ts.tzinfo else b'X', ts.isoformat (' '))
(ts)
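As with _serializeTime, the type code is chosen from the presence of tzinfo: X for TIMESTAMP, Y for TIMESTAMP WITH TIME ZONE, with isoformat (' ') giving the space-separated form. A quick check, assuming the function above is in scope:

import datetime

naive = datetime.datetime (2024, 1, 2, 3, 4, 5, 123456)
aware = naive.replace (tzinfo=datetime.timezone.utc)
print (_serializeTimestamp (naive) [ : 1]) # b'X'
print (_serializeTimestamp (aware) [ : 1]) # b'Y'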
54,159
teradatasql
debugLog
null
def debugLog (s): logMsg ("DEBUG", s)
(s)
54,163
teradatasql
logMsg
null
def logMsg (sCategory, s):
    print ("{:.23} [{}] PYDBAPI-{} {}".format (datetime.datetime.now ().strftime ("%Y-%m-%d.%H:%M:%S.%f"), threading.current_thread ().name, sCategory, s))
    sys.stdout.flush ()
(sCategory, s)
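The {:.23} field truncates the 26-character microsecond timestamp to millisecond precision. Assuming the function above is in scope, a call such as:

logMsg ("TRACE", "connection opened")

would print something like: 2024-01-02.03:04:05.678 [MainThread] PYDBAPI-TRACE connection opened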
54,166
teradatasql
prototype
null
def prototype(rtype, func, *args):
    func.restype = rtype
    func.argtypes = args
(rtype, func, *args)
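This helper just sets the ctypes return and argument types in one call. A hypothetical illustration against libc's strlen (Unix-like systems only; find_library may fail elsewhere):

import ctypes, ctypes.util

libc = ctypes.CDLL (ctypes.util.find_library ("c"))
prototype (ctypes.c_size_t, libc.strlen, ctypes.c_char_p)
print (libc.strlen (b"teradata")) # 8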
54,172
teradatasql
timingLog
null
def timingLog (s): logMsg ("TIMING", s)
(s)
54,173
teradatasql
traceLog
null
def traceLog (s): logMsg ("TRACE", s)
(s)
54,176
nbconvert.exporters.asciidoc
ASCIIDocExporter
Exports to an ASCIIDoc document (.asciidoc)
class ASCIIDocExporter(TemplateExporter):
    """
    Exports to an ASCIIDoc document (.asciidoc)
    """

    @default("file_extension")
    def _file_extension_default(self):
        return ".asciidoc"

    @default("template_name")
    def _template_name_default(self):
        return "asciidoc"

    output_mimetype = "text/asciidoc"
    export_from_notebook = "AsciiDoc"

    @default("raw_mimetypes")
    def _raw_mimetypes_default(self):
        return ["text/asciidoc/", "text/markdown", "text/html", ""]

    @property
    def default_config(self):
        c = Config(
            {
                "NbConvertBase": {
                    "display_data_priority": [
                        "text/html",
                        "text/markdown",
                        "image/svg+xml",
                        "image/png",
                        "image/jpeg",
                        "text/plain",
                        "text/latex",
                    ]
                },
                "ExtractOutputPreprocessor": {"enabled": True},
                "HighlightMagicsPreprocessor": {"enabled": True},
            }
        )
        if super().default_config:
            c2 = super().default_config.copy()
            c2.merge(c)
            c = c2
        return c
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
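Typical usage is the standard exporter pattern, here with a hypothetical notebook path:

from nbconvert import ASCIIDocExporter

exporter = ASCIIDocExporter()
body, resources = exporter.from_filename("example.ipynb")  # hypothetical notebook path
with open("example" + exporter.file_extension, "w", encoding="utf-8") as f:
    f.write(body)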
54,178
nbconvert.exporters.templateexporter
__init__
Public constructor Parameters ---------- config : config User configuration instance. extra_loaders : list[of Jinja Loaders] ordered list of Jinja loader to find templates. Will be tried in order before the default FileSystem ones. template_file : str (optional, kw arg) Template to use when exporting.
def __init__(self, config=None, **kw):
    """
    Public constructor

    Parameters
    ----------
    config : config
        User configuration instance.
    extra_loaders : list[of Jinja Loaders]
        ordered list of Jinja loader to find templates. Will be tried in order
        before the default FileSystem ones.
    template_file : str (optional, kw arg)
        Template to use when exporting.
    """
    super().__init__(config=config, **kw)

    self.observe(
        self._invalidate_environment_cache, list(self.traits(affects_environment=True))
    )
    self.observe(self._invalidate_template_cache, list(self.traits(affects_template=True)))
(self, config=None, **kw)
54,182
nbconvert.exporters.templateexporter
_create_environment
Create the Jinja templating environment.
def _create_environment(self):
    """
    Create the Jinja templating environment.
    """
    paths = self.template_paths
    self.log.debug("Template paths:\n\t%s", "\n\t".join(paths))

    loaders = [
        *self.extra_loaders,
        ExtensionTolerantLoader(FileSystemLoader(paths), self.template_extension),
        DictLoader({self._raw_template_key: self.raw_template}),
    ]
    environment = Environment(  # noqa: S701
        loader=ChoiceLoader(loaders),
        extensions=JINJA_EXTENSIONS,
        enable_async=self.enable_async,
    )

    environment.globals["uuid4"] = uuid.uuid4

    # Add default filters to the Jinja2 environment
    for key, value in self.default_filters():
        self._register_filter(environment, key, value)

    # Load user filters. Overwrite existing filters if need be.
    if self.filters:
        for key, user_filter in self.filters.items():
            self._register_filter(environment, key, user_filter)

    return environment
(self)
54,184
nbconvert.exporters.templateexporter
_get_conf
null
def _get_conf(self):
    conf: dict[str, t.Any] = {}  # the configuration once all conf files are merged
    for path in map(Path, self.template_paths):
        conf_path = path / "conf.json"
        if conf_path.exists():
            with conf_path.open() as f:
                conf = recursive_update(conf, json.load(f))
    return conf
(self)
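recursive_update is an nbconvert-internal helper; an illustrative stand-in below shows the deep-merge semantics this method relies on, where later conf.json files override or extend earlier ones key by key (deep_merge is my name, not nbconvert's):

def deep_merge(target, new):  # illustrative stand-in for nbconvert's recursive_update
    for key, value in new.items():
        if isinstance(value, dict) and isinstance(target.get(key), dict):
            deep_merge(target[key], value)
        else:
            target[key] = value
    return target

base = {"preprocessors": {"100-clear": {"type": "nbconvert.preprocessors.ClearOutputPreprocessor"}}}
override = {"preprocessors": {"100-clear": None}}  # a later conf.json can disable an entry this way
print(deep_merge(base, override))  # {'preprocessors': {'100-clear': None}}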
54,187
nbconvert.exporters.templateexporter
_init_preprocessors
null
def _init_preprocessors(self):
    super()._init_preprocessors()

    conf = self._get_conf()
    preprocessors = conf.get("preprocessors", {})
    # preprocessors is a dict for three reasons
    #  * We rely on recursive_update, which can only merge dicts, lists will be overwritten
    #  * We can use the key with numerical prefixing to guarantee ordering (/etc/*.d/XY-file style)
    #  * We can disable preprocessors by overwriting the value with None
    for _, preprocessor in sorted(preprocessors.items(), key=lambda x: x[0]):
        if preprocessor is not None:
            kwargs = preprocessor.copy()
            preprocessor_cls = kwargs.pop("type")
            preprocessor_cls = import_item(preprocessor_cls)
            if preprocessor_cls.__name__ in self.config:
                kwargs.update(self.config[preprocessor_cls.__name__])
            preprocessor = preprocessor_cls(**kwargs)  # noqa: PLW2901
            self.register_preprocessor(preprocessor)
(self)
54,188
nbconvert.exporters.templateexporter
_init_resources
null
def _init_resources(self, resources):
    resources = super()._init_resources(resources)
    resources["deprecated"] = deprecated
    return resources
(self, resources)
54,189
nbconvert.exporters.templateexporter
_invalidate_environment_cache
null
def _invalidate_environment_cache(self, change=None):
    self._environment_cached = None
    self._invalidate_template_cache()
(self, change=None)
54,190
nbconvert.exporters.templateexporter
_invalidate_template_cache
null
def _invalidate_template_cache(self, change=None): self._template_cached = None
(self, change=None)
54,192
nbconvert.exporters.templateexporter
_load_template
Load the Jinja template object from the template file This is triggered by various trait changes that would change the template.
def _load_template(self):
    """Load the Jinja template object from the template file

    This is triggered by various trait changes that would change the template.
    """
    # this gives precedence to a raw_template if present
    with self.hold_trait_notifications():
        if self.template_file and (self.template_file != self._raw_template_key):
            self._last_template_file = self.template_file
        if self.raw_template:
            self.template_file = self._raw_template_key

    if not self.template_file:
        msg = "No template_file specified!"
        raise ValueError(msg)

    # First try to load the
    # template by name with extension added, then try loading the template
    # as if the name is explicitly specified.
    template_file = self.template_file
    self.log.debug("Attempting to load template %s", template_file)
    self.log.debug("    template_paths: %s", os.pathsep.join(self.template_paths))
    return self.environment.get_template(template_file)
(self)
54,195
nbconvert.exporters.exporter
_preprocess
Preprocess the notebook before passing it into the Jinja engine. To preprocess the notebook is to successively apply all the enabled preprocessors. Output from each preprocessor is passed along to the next one. Parameters ---------- nb : notebook node notebook that is being exported. resources : a dict of additional resources that can be accessed read/write by preprocessors
def _preprocess(self, nb, resources):
    """
    Preprocess the notebook before passing it into the Jinja engine.
    To preprocess the notebook is to successively apply all the
    enabled preprocessors. Output from each preprocessor is passed
    along to the next one.

    Parameters
    ----------
    nb : notebook node
        notebook that is being exported.
    resources : a dict of additional resources that
        can be accessed read/write by preprocessors
    """
    # Do a copy.deepcopy first,
    # we are never safe enough with what the preprocessors could do.
    nbc = copy.deepcopy(nb)
    resc = copy.deepcopy(resources)

    if hasattr(validator, "normalize"):
        _, nbc = validator.normalize(nbc)

    # Run each preprocessor on the notebook. Carry the output along
    # to each preprocessor
    for preprocessor in self._preprocessors:
        nbc, resc = preprocessor(nbc, resc)
        if not self.optimistic_validation:
            self._validate_preprocessor(nbc, preprocessor)

    if self.optimistic_validation:
        self._validate_preprocessor(nbc, preprocessor)

    return nbc, resc
(self, nb, resources)
54,196
nbconvert.exporters.templateexporter
_register_filter
Register a filter. A filter is a function that accepts and acts on one string. The filters are accessible within the Jinja templating engine. Parameters ---------- name : str name to give the filter in the Jinja engine filter : filter
def _register_filter(self, environ, name, jinja_filter):
    """
    Register a filter.
    A filter is a function that accepts and acts on one string.
    The filters are accessible within the Jinja templating engine.

    Parameters
    ----------
    name : str
        name to give the filter in the Jinja engine
    filter : filter
    """
    if jinja_filter is None:
        msg = "filter"
        raise TypeError(msg)
    isclass = isinstance(jinja_filter, type)
    constructed = not isclass

    # Handle filter's registration based on its type
    if constructed and isinstance(jinja_filter, (str,)):
        # filter is a string, import the namespace and recursively call
        # this register_filter method
        filter_cls = import_item(jinja_filter)
        return self._register_filter(environ, name, filter_cls)

    if constructed and callable(jinja_filter):
        # filter is a function, no need to construct it.
        environ.filters[name] = jinja_filter
        return jinja_filter

    if isclass and issubclass(jinja_filter, HasTraits):
        # filter is configurable. Make sure to pass in new default for
        # the enabled flag if one was specified.
        filter_instance = jinja_filter(parent=self)
        self._register_filter(environ, name, filter_instance)
        return None

    if isclass:
        # filter is not configurable, construct it
        filter_instance = jinja_filter()
        self._register_filter(environ, name, filter_instance)
        return None

    # filter is an instance of something without a __call__
    # attribute.
    msg = "filter"
    raise TypeError(msg)
(self, environ, name, jinja_filter)
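The dispatch above accepts a filter in several forms. A sketch via the public register_filter wrapper, with ASCIIDocExporter standing in for any TemplateExporter subclass:

from nbconvert import ASCIIDocExporter

exporter = ASCIIDocExporter()
exporter.register_filter("shout", lambda text: text.upper())  # a plain callable
exporter.register_filter("wrap", "textwrap.fill")             # a dotted name, resolved via import_item
# a HasTraits subclass would be constructed with parent=self before registration;
# once registered, a filter is usable in templates, e.g. {{ cell.source | shout }}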
54,199
nbconvert.exporters.exporter
_validate_preprocessor
null
def _validate_preprocessor(self, nbc, preprocessor):
    try:
        nbformat.validate(nbc, relax_add_props=True)
    except nbformat.ValidationError:
        self.log.error("Notebook is invalid after preprocessor %s", preprocessor)
        raise
(self, nbc, preprocessor)
54,201
nbconvert.exporters.templateexporter
default_filters
Override in subclasses to provide extra filters. This should return an iterable of 2-tuples: (name, class-or-function). You should call the method on the parent class and include the filters it provides. If a name is repeated, the last filter provided wins. Filters from user-supplied config win over filters provided by classes.
def default_filters(self):
    """Override in subclasses to provide extra filters.

    This should return an iterable of 2-tuples: (name, class-or-function).
    You should call the method on the parent class and include the filters it provides.

    If a name is repeated, the last filter provided wins. Filters from
    user-supplied config win over filters provided by classes.
    """
    return default_filters.items()
(self)
54,202
nbconvert.exporters.templateexporter
from_file
Convert a notebook from a file.
def from_file(  # type:ignore[override]
    self, file_stream: t.Any, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[str, dict[str, t.Any]]:
    """Convert a notebook from a file."""
    return super().from_file(file_stream, resources, **kw)  # type:ignore[return-value]
(self, file_stream: Any, resources: Optional[dict[str, Any]] = None, **kw: Any) -> tuple[str, dict[str, typing.Any]]
54,203
nbconvert.exporters.templateexporter
from_filename
Convert a notebook from a filename.
def from_filename(  # type:ignore[override]
    self, filename: str, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[str, dict[str, t.Any]]:
    """Convert a notebook from a filename."""
    return super().from_filename(filename, resources, **kw)  # type:ignore[return-value]
(self, filename: str, resources: Optional[dict[str, Any]] = None, **kw: Any) -> tuple[str, dict[str, typing.Any]]
54,204
nbconvert.exporters.templateexporter
from_notebook_node
Convert a notebook from a notebook node instance. Parameters ---------- nb : :class:`~nbformat.NotebookNode` Notebook node resources : dict Additional resources that can be accessed read/write by preprocessors and filters.
def from_notebook_node(  # type:ignore[explicit-override, override]
    self, nb: NotebookNode, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[str, dict[str, t.Any]]:
    """
    Convert a notebook from a notebook node instance.

    Parameters
    ----------
    nb : :class:`~nbformat.NotebookNode`
        Notebook node
    resources : dict
        Additional resources that can be accessed read/write by
        preprocessors and filters.
    """
    nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
    resources.setdefault("raw_mimetypes", self.raw_mimetypes)
    resources["global_content_filter"] = {
        "include_code": not self.exclude_code_cell,
        "include_markdown": not self.exclude_markdown,
        "include_raw": not self.exclude_raw,
        "include_unknown": not self.exclude_unknown,
        "include_input": not self.exclude_input,
        "include_output": not self.exclude_output,
        "include_output_stdin": not self.exclude_output_stdin,
        "include_input_prompt": not self.exclude_input_prompt,
        "include_output_prompt": not self.exclude_output_prompt,
        "no_prompt": self.exclude_input_prompt and self.exclude_output_prompt,
    }

    # Top level variables are passed to the template_exporter here.
    output = self.template.render(nb=nb_copy, resources=resources)
    output = output.lstrip("\r\n")
    return output, resources
(self, nb: nbformat.notebooknode.NotebookNode, resources: Optional[dict[str, Any]] = None, **kw: Any) -> tuple[str, dict[str, typing.Any]]
54,205
nbconvert.exporters.templateexporter
get_prefix_root_dirs
Get the prefix root dirs.
def get_prefix_root_dirs(self):
    """Get the prefix root dirs."""
    # We look at the usual jupyter locations, and for development purposes also
    # relative to the package directory (first entry, meaning with highest precedence)
    root_dirs = []
    if DEV_MODE:
        root_dirs.append(os.path.abspath(os.path.join(ROOT, "..", "..", "share", "jupyter")))
    root_dirs.extend(jupyter_path())
    return root_dirs
(self)
54,206
nbconvert.exporters.templateexporter
get_template_names
Finds a list of template names where each successive template name is the base template
def get_template_names(self):
    """Finds a list of template names where each successive template name is the base template"""
    template_names = []
    root_dirs = self.get_prefix_root_dirs()
    base_template: str | None = self.template_name
    merged_conf: dict[str, t.Any] = {}  # the configuration once all conf files are merged
    while base_template is not None:
        template_names.append(base_template)
        conf: dict[str, t.Any] = {}
        found_at_least_one = False
        for base_dir in self.extra_template_basedirs:
            template_dir = os.path.join(base_dir, base_template)
            if os.path.exists(template_dir):
                found_at_least_one = True
                conf_file = os.path.join(template_dir, "conf.json")
                if os.path.exists(conf_file):
                    with open(conf_file) as f:
                        conf = recursive_update(json.load(f), conf)
        for root_dir in root_dirs:
            template_dir = os.path.join(root_dir, "nbconvert", "templates", base_template)
            if os.path.exists(template_dir):
                found_at_least_one = True
                conf_file = os.path.join(template_dir, "conf.json")
                if os.path.exists(conf_file):
                    with open(conf_file) as f:
                        conf = recursive_update(json.load(f), conf)

        if not found_at_least_one:
            # Check for backwards compatibility template names
            for root_dir in root_dirs:
                compatibility_file = base_template + ".tpl"
                compatibility_path = os.path.join(
                    root_dir, "nbconvert", "templates", "compatibility", compatibility_file
                )
                if os.path.exists(compatibility_path):
                    found_at_least_one = True
                    warnings.warn(
                        f"5.x template name passed '{self.template_name}'. Use 'lab' or 'classic' for new template usage.",
                        DeprecationWarning,
                        stacklevel=2,
                    )
                    self.template_file = compatibility_file
                    conf = self.get_compatibility_base_template_conf(base_template)
                    self.template_name = t.cast(str, conf.get("base_template"))
                    break

            if not found_at_least_one:
                paths = "\n\t".join(root_dirs)
                msg = f"No template sub-directory with name {base_template!r} found in the following paths:\n\t{paths}"
                raise ValueError(msg)
        merged_conf = recursive_update(dict(conf), merged_conf)
        base_template = t.cast(t.Any, conf.get("base_template"))

    conf = merged_conf
    mimetypes = [mimetype for mimetype, enabled in conf.get("mimetypes", {}).items() if enabled]
    if self.output_mimetype and self.output_mimetype not in mimetypes and mimetypes:
        supported_mimetypes = "\n\t".join(mimetypes)
        msg = f"Unsupported mimetype {self.output_mimetype!r} for template {self.template_name!r}, mimetypes supported are: \n\t{supported_mimetypes}"
        raise ValueError(msg)

    return template_names
(self)
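The inheritance chain is driven by the base_template key in each template directory's conf.json, walked until a template declares no base. A sketch of what the method returns (the exact chain depends on the installed templates, so the output shown is only indicative):

from nbconvert import ASCIIDocExporter

exporter = ASCIIDocExporter()
print(exporter.get_template_names())
# e.g. ['asciidoc', 'base'], if the asciidoc template's conf.json names "base" as its base_template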
54,212
nbconvert.exporters.templateexporter
register_filter
Register a filter. A filter is a function that accepts and acts on one string. The filters are accessible within the Jinja templating engine. Parameters ---------- name : str name to give the filter in the Jinja engine filter : filter
def register_filter(self, name, jinja_filter):
    """
    Register a filter.
    A filter is a function that accepts and acts on one string.
    The filters are accessible within the Jinja templating engine.

    Parameters
    ----------
    name : str
        name to give the filter in the Jinja engine
    filter : filter
    """
    return self._register_filter(self.environment, name, jinja_filter)
(self, name, jinja_filter)
54,213
nbconvert.exporters.exporter
register_preprocessor
Register a preprocessor. Preprocessors are classes that act upon the notebook before it is passed into the Jinja templating engine. Preprocessors are also capable of passing additional information to the Jinja templating engine. Parameters ---------- preprocessor : `nbconvert.preprocessors.Preprocessor` A dotted module name, a type, or an instance enabled : bool Mark the preprocessor as enabled
def register_preprocessor(self, preprocessor, enabled=False):
    """
    Register a preprocessor.
    Preprocessors are classes that act upon the notebook before it is
    passed into the Jinja templating engine. Preprocessors are also
    capable of passing additional information to the Jinja
    templating engine.

    Parameters
    ----------
    preprocessor : `nbconvert.preprocessors.Preprocessor`
        A dotted module name, a type, or an instance
    enabled : bool
        Mark the preprocessor as enabled
    """
    if preprocessor is None:
        msg = "preprocessor must not be None"
        raise TypeError(msg)
    isclass = isinstance(preprocessor, type)
    constructed = not isclass

    # Handle preprocessor's registration based on its type
    if constructed and isinstance(
        preprocessor,
        str,
    ):
        # Preprocessor is a string, import the namespace and recursively call
        # this register_preprocessor method
        preprocessor_cls = import_item(preprocessor)
        return self.register_preprocessor(preprocessor_cls, enabled)

    if constructed and callable(preprocessor):
        # Preprocessor is a function, no need to construct it.
        # Register and return the preprocessor.
        if enabled:
            preprocessor.enabled = True
        self._preprocessors.append(preprocessor)
        return preprocessor

    if isclass and issubclass(preprocessor, HasTraits):
        # Preprocessor is configurable. Make sure to pass in new default for
        # the enabled flag if one was specified.
        self.register_preprocessor(preprocessor(parent=self), enabled)
        return None

    if isclass:
        # Preprocessor is not configurable, construct it
        self.register_preprocessor(preprocessor(), enabled)
        return None

    # Preprocessor is an instance of something without a __call__
    # attribute.
    raise TypeError(
        "preprocessor must be callable or an importable constructor, got %r" % preprocessor
    )
(self, preprocessor, enabled=False)
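Either accepted form ends up as a constructed, registered instance; a sketch using a built-in preprocessor (register only one of the two, they are equivalent here):

from nbconvert import ASCIIDocExporter
from nbconvert.preprocessors import ClearOutputPreprocessor

exporter = ASCIIDocExporter()
exporter.register_preprocessor("nbconvert.preprocessors.ClearOutputPreprocessor", enabled=True)  # dotted name
exporter.register_preprocessor(ClearOutputPreprocessor(parent=exporter), enabled=True)           # instance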
54,225
nbconvert.exporters.exporter
Exporter
Class containing methods that sequentially run a list of preprocessors on a NotebookNode object and then return the modified NotebookNode object and accompanying resources dict.
class Exporter(LoggingConfigurable):
    """
    Class containing methods that sequentially run a list of preprocessors on a
    NotebookNode object and then return the modified NotebookNode object and
    accompanying resources dict.
    """

    enabled = Bool(True, help="Disable this exporter (and any exporters inherited from it).").tag(
        config=True
    )

    file_extension = FilenameExtension(
        help="Extension of the file that should be written to disk"
    ).tag(config=True)

    optimistic_validation = Bool(
        False,
        help="Reduces the number of validation steps so that it only occurs after all preprocesors have run.",
    ).tag(config=True)

    # MIME type of the result file, for HTTP response headers.
    # This is *not* a traitlet, because we want to be able to access it from
    # the class, not just on instances.
    output_mimetype = ""

    # Should this converter be accessible from the notebook front-end?
    # If so, should be a friendly name to display (and possibly translated).
    export_from_notebook: str = None  # type:ignore[assignment]

    # Configurability, allows the user to easily add filters and preprocessors.
    preprocessors: List[t.Any] = List(
        help="""List of preprocessors, by name or namespace, to enable."""
    ).tag(config=True)

    _preprocessors: List[t.Any] = List()

    default_preprocessors: List[t.Any] = List(
        [
            "nbconvert.preprocessors.TagRemovePreprocessor",
            "nbconvert.preprocessors.RegexRemovePreprocessor",
            "nbconvert.preprocessors.ClearOutputPreprocessor",
            "nbconvert.preprocessors.CoalesceStreamsPreprocessor",
            "nbconvert.preprocessors.ExecutePreprocessor",
            "nbconvert.preprocessors.SVG2PDFPreprocessor",
            "nbconvert.preprocessors.LatexPreprocessor",
            "nbconvert.preprocessors.HighlightMagicsPreprocessor",
            "nbconvert.preprocessors.ExtractOutputPreprocessor",
            "nbconvert.preprocessors.ExtractAttachmentsPreprocessor",
            "nbconvert.preprocessors.ClearMetadataPreprocessor",
        ],
        help="""List of preprocessors available by default, by name, namespace,
        instance, or type.""",
    ).tag(config=True)

    def __init__(self, config=None, **kw):
        """
        Public constructor

        Parameters
        ----------
        config : ``traitlets.config.Config``
            User configuration instance.
        `**kw`
            Additional keyword arguments passed to parent __init__
        """
        with_default_config = self.default_config
        if config:
            with_default_config.merge(config)

        super().__init__(config=with_default_config, **kw)
        self._init_preprocessors()
        self._nb_metadata = {}

    @property
    def default_config(self):
        return Config()

    def from_notebook_node(
        self, nb: NotebookNode, resources: t.Any | None = None, **kw: t.Any
    ) -> tuple[NotebookNode, dict[str, t.Any]]:
        """
        Convert a notebook from a notebook node instance.

        Parameters
        ----------
        nb : :class:`~nbformat.NotebookNode`
            Notebook node (dict-like with attr-access)
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.
        `**kw`
            Ignored
        """
        nb_copy = copy.deepcopy(nb)
        resources = self._init_resources(resources)

        if "language" in nb["metadata"]:
            resources["language"] = nb["metadata"]["language"].lower()

        # Preprocess
        nb_copy, resources = self._preprocess(nb_copy, resources)

        notebook_name = ""
        if resources is not None:
            name = resources.get("metadata", {}).get("name", "")
            path = resources.get("metadata", {}).get("path", "")
            notebook_name = os.path.join(path, name)
        self._nb_metadata[notebook_name] = nb_copy.metadata

        return nb_copy, resources

    def from_filename(
        self, filename: str, resources: dict[str, t.Any] | None = None, **kw: t.Any
    ) -> tuple[NotebookNode, dict[str, t.Any]]:
        """
        Convert a notebook from a notebook file.

        Parameters
        ----------
        filename : str
            Full filename of the notebook file to open and convert.
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.
        `**kw`
            Ignored
        """
        # Pull the metadata from the filesystem.
        if resources is None:
            resources = ResourcesDict()
        if "metadata" not in resources or resources["metadata"] == "":
            resources["metadata"] = ResourcesDict()
        path, basename = os.path.split(filename)
        notebook_name = os.path.splitext(basename)[0]
        resources["metadata"]["name"] = notebook_name
        resources["metadata"]["path"] = path

        modified_date = datetime.datetime.fromtimestamp(
            os.path.getmtime(filename), tz=datetime.timezone.utc
        )
        # datetime.strftime date format for ipython
        if sys.platform == "win32":
            date_format = "%B %d, %Y"
        else:
            date_format = "%B %-d, %Y"
        resources["metadata"]["modified_date"] = modified_date.strftime(date_format)

        with open(filename, encoding="utf-8") as f:
            return self.from_file(f, resources=resources, **kw)

    def from_file(
        self, file_stream: t.Any, resources: dict[str, t.Any] | None = None, **kw: t.Any
    ) -> tuple[NotebookNode, dict[str, t.Any]]:
        """
        Convert a notebook from a notebook file.

        Parameters
        ----------
        file_stream : file-like object
            Notebook file-like object to convert.
        resources : dict
            Additional resources that can be accessed read/write by
            preprocessors and filters.
        `**kw`
            Ignored
        """
        return self.from_notebook_node(
            nbformat.read(file_stream, as_version=4), resources=resources, **kw
        )

    def register_preprocessor(self, preprocessor, enabled=False):
        """
        Register a preprocessor.
        Preprocessors are classes that act upon the notebook before it is
        passed into the Jinja templating engine. Preprocessors are also
        capable of passing additional information to the Jinja
        templating engine.

        Parameters
        ----------
        preprocessor : `nbconvert.preprocessors.Preprocessor`
            A dotted module name, a type, or an instance
        enabled : bool
            Mark the preprocessor as enabled
        """
        if preprocessor is None:
            msg = "preprocessor must not be None"
            raise TypeError(msg)
        isclass = isinstance(preprocessor, type)
        constructed = not isclass

        # Handle preprocessor's registration based on its type
        if constructed and isinstance(
            preprocessor,
            str,
        ):
            # Preprocessor is a string, import the namespace and recursively call
            # this register_preprocessor method
            preprocessor_cls = import_item(preprocessor)
            return self.register_preprocessor(preprocessor_cls, enabled)

        if constructed and callable(preprocessor):
            # Preprocessor is a function, no need to construct it.
            # Register and return the preprocessor.
            if enabled:
                preprocessor.enabled = True
            self._preprocessors.append(preprocessor)
            return preprocessor

        if isclass and issubclass(preprocessor, HasTraits):
            # Preprocessor is configurable. Make sure to pass in new default for
            # the enabled flag if one was specified.
            self.register_preprocessor(preprocessor(parent=self), enabled)
            return None

        if isclass:
            # Preprocessor is not configurable, construct it
            self.register_preprocessor(preprocessor(), enabled)
            return None

        # Preprocessor is an instance of something without a __call__
        # attribute.
        raise TypeError(
            "preprocessor must be callable or an importable constructor, got %r" % preprocessor
        )

    def _init_preprocessors(self):
        """
        Register all of the preprocessors needed for this exporter, disabled
        unless specified explicitly.
        """
        self._preprocessors = []

        # Load default preprocessors (not necessarily enabled by default).
        for preprocessor in self.default_preprocessors:
            self.register_preprocessor(preprocessor)

        # Load user-specified preprocessors. Enable by default.
        for preprocessor in self.preprocessors:
            self.register_preprocessor(preprocessor, enabled=True)

    def _init_resources(self, resources):
        # Make sure the resources dict is of ResourcesDict type.
        if resources is None:
            resources = ResourcesDict()
        if not isinstance(resources, ResourcesDict):
            new_resources = ResourcesDict()
            new_resources.update(resources)
            resources = new_resources

        # Make sure the metadata extension exists in resources
        if "metadata" in resources:
            if not isinstance(resources["metadata"], ResourcesDict):
                new_metadata = ResourcesDict()
                new_metadata.update(resources["metadata"])
                resources["metadata"] = new_metadata
        else:
            resources["metadata"] = ResourcesDict()
        if not resources["metadata"]["name"]:
            resources["metadata"]["name"] = "Notebook"

        # Set the output extension
        resources["output_extension"] = self.file_extension
        return resources

    def _validate_preprocessor(self, nbc, preprocessor):
        try:
            nbformat.validate(nbc, relax_add_props=True)
        except nbformat.ValidationError:
            self.log.error("Notebook is invalid after preprocessor %s", preprocessor)
            raise

    def _preprocess(self, nb, resources):
        """
        Preprocess the notebook before passing it into the Jinja engine.
        To preprocess the notebook is to successively apply all the
        enabled preprocessors. Output from each preprocessor is passed
        along to the next one.

        Parameters
        ----------
        nb : notebook node
            notebook that is being exported.
        resources : a dict of additional resources that
            can be accessed read/write by preprocessors
        """
        # Do a copy.deepcopy first,
        # we are never safe enough with what the preprocessors could do.
        nbc = copy.deepcopy(nb)
        resc = copy.deepcopy(resources)

        if hasattr(validator, "normalize"):
            _, nbc = validator.normalize(nbc)

        # Run each preprocessor on the notebook. Carry the output along
        # to each preprocessor
        for preprocessor in self._preprocessors:
            nbc, resc = preprocessor(nbc, resc)
            if not self.optimistic_validation:
                self._validate_preprocessor(nbc, preprocessor)

        if self.optimistic_validation:
            self._validate_preprocessor(nbc, preprocessor)

        return nbc, resc
(config=None, **kw)
54,227
nbconvert.exporters.exporter
__init__
Public constructor Parameters ---------- config : ``traitlets.config.Config`` User configuration instance. `**kw` Additional keyword arguments passed to parent __init__
def __init__(self, config=None, **kw):
    """
    Public constructor

    Parameters
    ----------
    config : ``traitlets.config.Config``
        User configuration instance.
    `**kw`
        Additional keyword arguments passed to parent __init__
    """
    with_default_config = self.default_config
    if config:
        with_default_config.merge(config)

    super().__init__(config=with_default_config, **kw)
    self._init_preprocessors()
    self._nb_metadata = {}
(self, config=None, **kw)
54,234
nbconvert.exporters.exporter
_init_preprocessors
Register all of the preprocessors needed for this exporter, disabled unless specified explicitly.
def _init_preprocessors(self):
    """
    Register all of the preprocessors needed for this exporter, disabled
    unless specified explicitly.
    """
    self._preprocessors = []

    # Load default preprocessors (not necessarily enabled by default).
    for preprocessor in self.default_preprocessors:
        self.register_preprocessor(preprocessor)

    # Load user-specified preprocessors. Enable by default.
    for preprocessor in self.preprocessors:
        self.register_preprocessor(preprocessor, enabled=True)
(self)
54,235
nbconvert.exporters.exporter
_init_resources
null
def _init_resources(self, resources):
    # Make sure the resources dict is of ResourcesDict type.
    if resources is None:
        resources = ResourcesDict()
    if not isinstance(resources, ResourcesDict):
        new_resources = ResourcesDict()
        new_resources.update(resources)
        resources = new_resources

    # Make sure the metadata extension exists in resources
    if "metadata" in resources:
        if not isinstance(resources["metadata"], ResourcesDict):
            new_metadata = ResourcesDict()
            new_metadata.update(resources["metadata"])
            resources["metadata"] = new_metadata
    else:
        resources["metadata"] = ResourcesDict()
    if not resources["metadata"]["name"]:
        resources["metadata"]["name"] = "Notebook"

    # Set the output extension
    resources["output_extension"] = self.file_extension
    return resources
(self, resources)
54,244
nbconvert.exporters.exporter
from_file
Convert a notebook from a notebook file. Parameters ---------- file_stream : file-like object Notebook file-like object to convert. resources : dict Additional resources that can be accessed read/write by preprocessors and filters. `**kw` Ignored
def from_file(
    self, file_stream: t.Any, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[NotebookNode, dict[str, t.Any]]:
    """
    Convert a notebook from a notebook file.

    Parameters
    ----------
    file_stream : file-like object
        Notebook file-like object to convert.
    resources : dict
        Additional resources that can be accessed read/write by
        preprocessors and filters.
    `**kw`
        Ignored
    """
    return self.from_notebook_node(
        nbformat.read(file_stream, as_version=4), resources=resources, **kw
    )
(self, file_stream: Any, resources: Optional[dict[str, Any]] = None, **kw: Any) -> tuple[nbformat.notebooknode.NotebookNode, dict[str, typing.Any]]
54,245
nbconvert.exporters.exporter
from_filename
Convert a notebook from a notebook file. Parameters ---------- filename : str Full filename of the notebook file to open and convert. resources : dict Additional resources that can be accessed read/write by preprocessors and filters. `**kw` Ignored
def from_filename(
    self, filename: str, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[NotebookNode, dict[str, t.Any]]:
    """
    Convert a notebook from a notebook file.

    Parameters
    ----------
    filename : str
        Full filename of the notebook file to open and convert.
    resources : dict
        Additional resources that can be accessed read/write by
        preprocessors and filters.
    `**kw`
        Ignored
    """
    # Pull the metadata from the filesystem.
    if resources is None:
        resources = ResourcesDict()
    if "metadata" not in resources or resources["metadata"] == "":
        resources["metadata"] = ResourcesDict()
    path, basename = os.path.split(filename)
    notebook_name = os.path.splitext(basename)[0]
    resources["metadata"]["name"] = notebook_name
    resources["metadata"]["path"] = path

    modified_date = datetime.datetime.fromtimestamp(
        os.path.getmtime(filename), tz=datetime.timezone.utc
    )
    # datetime.strftime date format for ipython
    if sys.platform == "win32":
        date_format = "%B %d, %Y"
    else:
        date_format = "%B %-d, %Y"
    resources["metadata"]["modified_date"] = modified_date.strftime(date_format)

    with open(filename, encoding="utf-8") as f:
        return self.from_file(f, resources=resources, **kw)
(self, filename: str, resources: Optional[dict[str, Any]] = None, **kw: Any) -> tuple[nbformat.notebooknode.NotebookNode, dict[str, typing.Any]]
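This method derives the notebook name, path, and modified date from the filesystem before delegating to from_file. A sketch with a hypothetical path (ASCIIDocExporter stands in for any concrete exporter):

from nbconvert import ASCIIDocExporter

exporter = ASCIIDocExporter()
output, resources = exporter.from_filename("reports/example.ipynb")  # hypothetical path
print(resources["metadata"]["name"])           # 'example'
print(resources["metadata"]["path"])           # 'reports'
print(resources["metadata"]["modified_date"])  # e.g. 'January 2, 2024'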