code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
file_path = self._path_with_dir_fd(file_path, self.open, dir_fd)
if mode is None:
if self.filesystem.is_windows_fs:
mode = 0o666
else:
mode = 0o777 & ~self._umask()
open_modes = _OpenModes(
must_exist=not flags & os.O_CREAT,
can_read=not flags & os.O_WRONLY,
can_write=flags & (os.O_RDWR | os.O_WRONLY),
truncate=flags & os.O_TRUNC,
append=flags & os.O_APPEND,
must_not_exist=flags & os.O_EXCL
)
if open_modes.must_not_exist and open_modes.must_exist:
raise NotImplementedError(
'O_EXCL without O_CREAT mode is not supported')
if (not self.filesystem.is_windows_fs and
self.filesystem.exists(file_path)):
# handle opening directory - only allowed under Posix
# with read-only mode
obj = self.filesystem.resolve(file_path)
if isinstance(obj, FakeDirectory):
if ((not open_modes.must_exist and
not self.filesystem.is_macos)
or open_modes.can_write):
self.filesystem.raise_os_error(errno.EISDIR, file_path)
dir_wrapper = FakeDirWrapper(obj, file_path, self.filesystem)
file_des = self.filesystem._add_open_file(dir_wrapper)
dir_wrapper.filedes = file_des
return file_des
# low level open is always binary
str_flags = 'b'
delete_on_close = False
if hasattr(os, 'O_TEMPORARY'):
delete_on_close = flags & os.O_TEMPORARY == os.O_TEMPORARY
fake_file = FakeFileOpen(
self.filesystem, delete_on_close=delete_on_close, raw_io=True)(
file_path, str_flags, open_modes=open_modes)
if fake_file.file_object != self.filesystem.dev_null:
self.chmod(file_path, mode)
return fake_file.fileno() | def open(self, file_path, flags, mode=None, dir_fd=None) | Return the file descriptor for a FakeFile.
Args:
file_path: the path to the file
flags: low-level bits to indicate io operation
mode: bits to define default permissions
Note: only basic modes are supported, OS-specific modes are
ignored
dir_fd: If not `None`, the file descriptor of a directory,
with `file_path` being relative to this directory.
New in Python 3.3.
Returns:
A file descriptor.
Raises:
IOError: if the path cannot be found
ValueError: if invalid mode is given
NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT` | 3.663715 | 3.605502 | 1.016146 |
file_handle = self.filesystem.get_open_file(file_des)
file_handle.close() | def close(self, file_des) | Close a file descriptor.
Args:
file_des: An integer file descriptor for the file object requested.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer. | 4.667246 | 7.731921 | 0.603633 |
file_handle = self.filesystem.get_open_file(file_des)
file_handle.raw_io = True
return file_handle.read(num_bytes) | def read(self, file_des, num_bytes) | Read number of bytes from a file descriptor, returns bytes read.
Args:
file_des: An integer file descriptor for the file object requested.
num_bytes: Number of bytes to read from file.
Returns:
Bytes read from file.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer. | 5.063514 | 7.356327 | 0.688321 |
file_handle = self.filesystem.get_open_file(file_des)
if isinstance(file_handle, FakeDirWrapper):
self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)
if isinstance(file_handle, FakePipeWrapper):
return file_handle.write(contents)
file_handle.raw_io = True
file_handle._sync_io()
file_handle.update_flush_pos()
file_handle.write(contents)
file_handle.flush()
return len(contents) | def write(self, file_des, contents) | Write string to file descriptor, returns number of bytes written.
Args:
file_des: An integer file descriptor for the file object requested.
contents: String of bytes to write to file.
Returns:
Number of bytes written.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer. | 4.19074 | 4.492935 | 0.93274 |
# stat should return the tuple representing return value of os.stat
file_object = self.filesystem.get_open_file(file_des).get_object()
return file_object.stat_result.copy() | def fstat(self, file_des) | Return the os.stat-like tuple for the FakeFile object of file_des.
Args:
file_des: The file descriptor of filesystem object to retrieve.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist. | 9.250723 | 9.839747 | 0.940138 |
if not is_int_type(new_mask):
raise TypeError('an integer is required')
old_umask = self.filesystem.umask
self.filesystem.umask = new_mask
return old_umask | def umask(self, new_mask) | Change the current umask.
Args:
new_mask: (int) The new umask value.
Returns:
The old umask.
Raises:
TypeError: if new_mask is of an invalid type. | 3.654445 | 4.209515 | 0.868139 |
target_directory = self.filesystem.resolve_path(
target_directory, allow_fd=True)
self.filesystem.confirmdir(target_directory)
directory = self.filesystem.resolve(target_directory)
# A full implementation would check permissions all the way
# up the tree.
if not is_root() and not directory.st_mode | PERM_EXE:
self.filesystem.raise_os_error(errno.EACCES, directory)
self.filesystem.cwd = target_directory | def chdir(self, target_directory) | Change current working directory to target directory.
Args:
target_directory: The path to new current working directory.
Raises:
OSError: if user lacks permission to enter the argument directory
or if the target is not a directory. | 6.718758 | 7.321107 | 0.917724 |
path = self._path_with_dir_fd(path, self.readlink, dir_fd)
return self.filesystem.readlink(path) | def readlink(self, path, dir_fd=None) | Read the target of a symlink.
Args:
path: Symlink to read the target of.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
Returns:
the string representing the path to which the symbolic link points.
Raises:
TypeError: if `path` is None
OSError: (with errno=ENOENT) if path is not a valid path, or
(with errno=EINVAL) if path is valid, but is not a symlink | 6.07905 | 9.417669 | 0.645494 |
if follow_symlinks is None:
follow_symlinks = True
elif sys.version_info < (3, 3):
raise TypeError(
"stat() got an unexpected keyword argument 'follow_symlinks'")
entry_path = self._path_with_dir_fd(entry_path, self.stat, dir_fd)
return self.filesystem.stat(entry_path, follow_symlinks) | def stat(self, entry_path, dir_fd=None, follow_symlinks=None) | Return the os.stat-like tuple for the FakeFile object of entry_path.
Args:
entry_path: path to filesystem object to retrieve.
dir_fd: (int) If not `None`, the file descriptor of a directory,
with `entry_path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and `entry_path` points to a
symlink, the link itself is changed instead of the linked
object.
New in Python 3.3.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist. | 3.004302 | 3.52161 | 0.853105 |
# stat should return the tuple representing return value of os.stat
entry_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)
return self.filesystem.stat(entry_path, follow_symlinks=False) | def lstat(self, entry_path, dir_fd=None) | Return the os.stat-like tuple for entry_path, not following symlinks.
Args:
entry_path: path to filesystem object to retrieve.
dir_fd: If not `None`, the file descriptor of a directory, with
`entry_path` being relative to this directory.
New in Python 3.3.
Returns:
the FakeStatResult object corresponding to `entry_path`.
Raises:
OSError: if the filesystem object doesn't exist. | 6.306667 | 8.252961 | 0.76417 |
path = self._path_with_dir_fd(path, self.remove, dir_fd)
self.filesystem.remove(path) | def remove(self, path, dir_fd=None) | Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed. | 6.763379 | 10.008952 | 0.675733 |
old_file_path = self._path_with_dir_fd(
old_file_path, self.rename, dir_fd)
self.filesystem.rename(old_file_path, new_file_path) | def rename(self, old_file_path, new_file_path, dir_fd=None) | Rename a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Also replaces existing new_file_path object, if one existed
(Unix only).
Args:
old_file_path: Path to filesystem object to rename.
new_file_path: Path to where the filesystem object will live
after this call.
dir_fd: If not `None`, the file descriptor of a directory,
with `old_file_path` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if old_file_path does not exist.
OSError: if new_file_path is an existing directory.
OSError: if new_file_path is an existing file (Windows only)
OSError: if new_file_path is an existing file and could not
be removed (Unix)
OSError: if `dirname(new_file)` does not exist
OSError: if the file would be moved to another filesystem
(e.g. mount point) | 4.021221 | 5.078963 | 0.791741 |
target_directory = self._path_with_dir_fd(
target_directory, self.rmdir, dir_fd)
self.filesystem.rmdir(target_directory) | def rmdir(self, target_directory, dir_fd=None) | Remove a leaf Fake directory.
Args:
target_directory: (str) Name of directory to remove.
dir_fd: If not `None`, the file descriptor of a directory,
with `target_directory` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if target_directory does not exist or is not a directory,
or as per FakeFilesystem.remove_object. Cannot remove '.'. | 4.514528 | 8.402496 | 0.537284 |
target_directory = self.filesystem.absnormpath(target_directory)
directory = self.filesystem.confirmdir(target_directory)
if directory.contents:
self.filesystem.raise_os_error(
errno.ENOTEMPTY, self.path.basename(target_directory))
else:
self.rmdir(target_directory)
head, tail = self.path.split(target_directory)
if not tail:
head, tail = self.path.split(head)
while head and tail:
head_dir = self.filesystem.confirmdir(head)
if head_dir.contents:
break
# only the top-level dir may not be a symlink
self.filesystem.rmdir(head, allow_symlink=True)
head, tail = self.path.split(head) | def removedirs(self, target_directory) | Remove a leaf fake directory and all empty intermediate ones.
Args:
target_directory: the directory to be removed.
Raises:
OSError: if target_directory does not exist or is not a directory.
OSError: if target_directory is not empty. | 3.308984 | 3.587685 | 0.922317 |
dir_name = self._path_with_dir_fd(dir_name, self.mkdir, dir_fd)
try:
self.filesystem.makedir(dir_name, mode)
except IOError as e:
if e.errno == errno.EACCES:
self.filesystem.raise_os_error(e.errno, dir_name)
raise | def mkdir(self, dir_name, mode=PERM_DEF, dir_fd=None) | Create a leaf Fake directory.
Args:
dir_name: (str) Name of directory to create.
Relative paths are assumed to be relative to '/'.
mode: (int) Mode to create directory with. This argument defaults
to 0o777. The umask is applied to this mode.
dir_fd: If not `None`, the file descriptor of a directory,
with `dir_name` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if the directory name is invalid or parent directory is
read only or as per FakeFilesystem.add_object. | 3.335253 | 4.359632 | 0.765031 |
if exist_ok is None:
exist_ok = False
elif sys.version_info < (3, 2):
raise TypeError("makedir() got an unexpected "
"keyword argument 'exist_ok'")
self.filesystem.makedirs(dir_name, mode, exist_ok) | def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=None) | Create a leaf Fake directory + create any non-existent parent dirs.
Args:
dir_name: (str) Name of directory to create.
mode: (int) Mode to create directory (and any necessary parent
directories) with. This argument defaults to 0o777.
The umask is applied to this mode.
exist_ok: (boolean) If exist_ok is False (the default), an OSError
is raised if the target directory already exists.
New in Python 3.2.
Raises:
OSError: if the directory already exists and exist_ok=False, or as
per :py:meth:`FakeFilesystem.create_dir`. | 3.190747 | 3.657912 | 0.872286 |
if dir_fd is not None:
if sys.version_info < (3, 3):
raise TypeError("%s() got an unexpected keyword "
"argument 'dir_fd'" % fct.__name__)
# check if fd is supported for the built-in real function
real_fct = getattr(os, fct.__name__)
if real_fct not in self.supports_dir_fd:
raise NotImplementedError(
'dir_fd unavailable on this platform')
if isinstance(path, int):
raise ValueError("%s: Can't specify dir_fd without "
"matching path" % fct.__name__)
if not self.path.isabs(path):
return self.path.join(
self.filesystem.get_open_file(
dir_fd).get_object().path, path)
return path | def _path_with_dir_fd(self, path, fct, dir_fd) | Return the path considering dir_fd. Raise on invalid parameters. | 4.44441 | 4.226499 | 1.051558 |
if follow_symlinks is not None and sys.version_info < (3, 3):
raise TypeError("access() got an unexpected "
"keyword argument 'follow_symlinks'")
path = self._path_with_dir_fd(path, self.access, dir_fd)
try:
stat_result = self.stat(path, follow_symlinks=follow_symlinks)
except OSError as os_error:
if os_error.errno == errno.ENOENT:
return False
raise
if is_root():
mode &= ~os.W_OK
return (mode & ((stat_result.st_mode >> 6) & 7)) == mode | def access(self, path, mode, dir_fd=None, follow_symlinks=None) | Check if a file exists and has the specified permissions.
Args:
path: (str) Path to the file.
mode: (int) Permissions represented as a bitwise-OR combination of
os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
New in Python 3.3.
Returns:
bool, `True` if file is accessible, `False` otherwise. | 3.212349 | 3.188976 | 1.007329 |
if follow_symlinks is None:
follow_symlinks = True
elif sys.version_info < (3, 3):
raise TypeError(
"chmod() got an unexpected keyword argument 'follow_symlinks'")
path = self._path_with_dir_fd(path, self.chmod, dir_fd)
self.filesystem.chmod(path, mode, follow_symlinks) | def chmod(self, path, mode, dir_fd=None, follow_symlinks=None) | Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
New in Python 3.3. | 3.294095 | 3.548901 | 0.928201 |
if self.filesystem.is_windows_fs:
raise (NameError, "name 'lchmod' is not defined")
self.filesystem.chmod(path, mode, follow_symlinks=False) | def lchmod(self, path, mode) | Change the permissions of a file as encoded in integer mode.
If the file is a link, the permissions of the link are changed.
Args:
path: (str) Path to the file.
mode: (int) Permissions. | 5.319731 | 6.652598 | 0.799647 |
if follow_symlinks is None:
follow_symlinks = True
elif sys.version_info < (3, 3):
raise TypeError(
"utime() got an unexpected keyword argument 'follow_symlinks'")
path = self._path_with_dir_fd(path, self.utime, dir_fd)
if ns is not None and sys.version_info < (3, 3):
raise TypeError("utime() got an unexpected keyword argument 'ns'")
self.filesystem.utime(path, times, ns, follow_symlinks) | def utime(self, path, times=None, ns=None,
dir_fd=None, follow_symlinks=None) | Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If None, both times are set to the current time.
New in Python 3.3.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
New in Python 3.3.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified. | 2.729196 | 2.996614 | 0.91076 |
if follow_symlinks is None:
follow_symlinks = True
elif sys.version_info < (3, 3):
raise TypeError(
"chown() got an unexpected keyword argument 'follow_symlinks'")
path = self._path_with_dir_fd(path, self.chown, dir_fd)
try:
file_object = self.filesystem.resolve(
path, follow_symlinks, allow_fd=True)
except IOError as io_error:
if io_error.errno == errno.ENOENT:
self.filesystem.raise_os_error(errno.ENOENT, path)
raise
if not ((is_int_type(uid) or uid is None) and
(is_int_type(gid) or gid is None)):
raise TypeError("An integer is required")
if uid != -1:
file_object.st_uid = uid
if gid != -1:
file_object.st_gid = gid | def chown(self, path, uid, gid, dir_fd=None, follow_symlinks=None) | Set ownership of a faked file.
Args:
path: (str) Path to the file or directory.
uid: (int) Numeric uid to set the file or directory to.
gid: (int) Numeric gid to set the file or directory to.
dir_fd: (int) If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and path points to a symlink,
the link itself is changed instead of the linked object.
New in Python 3.3.
Raises:
OSError: if path does not exist.
`None` is also allowed for `uid` and `gid`. This permits `os.rename`
to use `os.chown` even when the source file `uid` and `gid` are
`None` (unset). | 2.765583 | 2.843746 | 0.972514 |
if self.filesystem.is_windows_fs:
raise(AttributeError, "module 'os' has no attribute 'mknode'")
if mode is None:
# note that a default value of 0o600 without a device type is
# documented - this is not how it seems to work
mode = S_IFREG | 0o600
if device or not mode & S_IFREG and not is_root():
self.filesystem.raise_os_error(errno.EPERM)
filename = self._path_with_dir_fd(filename, self.mknod, dir_fd)
head, tail = self.path.split(filename)
if not tail:
if self.filesystem.exists(head, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, filename)
self.filesystem.raise_os_error(errno.ENOENT, filename)
if tail in (b'.', u'.', b'..', u'..'):
self.filesystem.raise_os_error(errno.ENOENT, filename)
if self.filesystem.exists(filename, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, filename)
try:
self.filesystem.add_object(head, FakeFile(
tail, mode & ~self.filesystem.umask,
filesystem=self.filesystem))
except IOError as e:
self.filesystem.raise_os_error(e.errno, filename) | def mknod(self, filename, mode=None, device=None, dir_fd=None) | Create a filesystem node named 'filename'.
Does not support device special files or named pipes as the real os
module does.
Args:
filename: (str) Name of the file to create
mode: (int) Permissions to use and type of file to be created.
Default permissions are 0o666. Only the stat.S_IFREG file type
is supported by the fake implementation. The umask is applied
to this mode.
device: not supported in fake implementation
dir_fd: If not `None`, the file descriptor of a directory,
with `filename` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if called with unsupported options or the file can not be
created. | 3.47098 | 3.54119 | 0.980173 |
link_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)
self.filesystem.create_symlink(
path, link_target, create_missing_dirs=False) | def symlink(self, link_target, path, dir_fd=None) | Creates the specified symlink, pointed at the specified link target.
Args:
link_target: The target of the symlink.
path: Path to the symlink to create.
dir_fd: If not `None`, the file descriptor of a directory,
with `link_target` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if the file already exists. | 5.26994 | 6.846777 | 0.769696 |
oldpath = self._path_with_dir_fd(oldpath, self.link, dir_fd)
self.filesystem.link(oldpath, newpath) | def link(self, oldpath, newpath, dir_fd=None) | Create a hard link at new_path, pointing at old_path.
Args:
oldpath: An existing link to the target file.
newpath: The destination path to create a new link at.
dir_fd: If not `None`, the file descriptor of a directory,
with `oldpath` being relative to this directory.
New in Python 3.3.
Returns:
The FakeFile object referred to by `oldpath`.
Raises:
OSError: if something already exists at new_path.
OSError: if the parent directory doesn't exist.
OSError: if on Windows before Python 3.2. | 5.745142 | 9.112447 | 0.630472 |
# Throw an error if file_des isn't valid
if 0 <= file_des < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(file_des)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path) | def fsync(self, file_des) | Perform fsync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer. | 4.627587 | 4.983043 | 0.928667 |
# Throw an error if file_des isn't valid
if self.filesystem.is_windows_fs or self.filesystem.is_macos:
raise AttributeError("module 'os' has no attribute 'fdatasync'")
if 0 <= file_des < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
self.filesystem.get_open_file(file_des) | def fdatasync(self, file_des) | Perform fdatasync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer. | 5.772142 | 6.717361 | 0.859287 |
if opener is not None and sys.version_info < (3, 3):
raise TypeError(
"open() got an unexpected keyword argument 'opener'")
fake_open = FakeFileOpen(self.filesystem, use_io=True)
return fake_open(file, mode, buffering, encoding, errors,
newline, closefd, opener) | def open(self, file, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None) | Redirect the call to FakeFileOpen.
See FakeFileOpen.call() for description. | 3.523296 | 2.947876 | 1.195198 |
# ignore closing a closed file
if not self._is_open():
return
# for raw io, all writes are flushed immediately
if self.allow_update and not self.raw_io:
self.flush()
if self._filesystem.is_windows_fs and self._changed:
self.file_object.st_mtime = time.time()
if self._closefd:
self._filesystem._close_open_file(self.filedes)
else:
self._filesystem.open_files[self.filedes].remove(self)
if self.delete_on_close:
self._filesystem.remove_object(self.get_object().path) | def close(self) | Close the file. | 6.690878 | 6.498428 | 1.029615 |
self._check_open_file()
if self.allow_update and not self.is_stream:
contents = self._io.getvalue()
if self._append:
self._sync_io()
old_contents = (self.file_object.byte_contents
if is_byte_string(contents) else
self.file_object.contents)
contents = old_contents + contents[self._flush_pos:]
self._set_stream_contents(contents)
self.update_flush_pos()
else:
self._io.flush()
if self.file_object.set_contents(contents, self._encoding):
if self._filesystem.is_windows_fs:
self._changed = True
else:
current_time = time.time()
self.file_object.st_ctime = current_time
self.file_object.st_mtime = current_time
self._file_epoch = self.file_object.epoch
if not self.is_stream:
self._flush_related_files() | def flush(self) | Flush file contents to 'disk'. | 4.604208 | 4.439532 | 1.037093 |
self._check_open_file()
if not self._append:
self._io.seek(offset, whence)
else:
self._read_seek = offset
self._read_whence = whence
if not self.is_stream:
self.flush() | def seek(self, offset, whence=0) | Move read/write pointer in 'file'. | 4.331897 | 4.13154 | 1.048495 |
self._check_open_file()
if self._flushes_after_tell():
self.flush()
if not self._append:
return self._io.tell()
if self._read_whence:
write_seek = self._io.tell()
self._io.seek(self._read_seek, self._read_whence)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(write_seek)
return self._read_seek | def tell(self) | Return the file's current position.
Returns:
int, file's current position in bytes. | 3.50516 | 3.608811 | 0.971278 |
if self._file_epoch == self.file_object.epoch:
return
if self._io.binary:
contents = self.file_object.byte_contents
else:
contents = self.file_object.contents
self._set_stream_contents(contents)
self._file_epoch = self.file_object.epoch | def _sync_io(self) | Update the stream with changes to the file object contents. | 4.499038 | 3.447618 | 1.30497 |
io_attr = getattr(self._io, name)
def read_wrapper(*args, **kwargs):
self._io.seek(self._read_seek, self._read_whence)
ret_value = io_attr(*args, **kwargs)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(0, 2)
return ret_value
return read_wrapper | def _read_wrappers(self, name) | Wrap a stream attribute in a read wrapper.
Returns a read_wrapper which tracks our own read pointer since the
stream object has no concept of a different read and write pointer.
Args:
name: The name of the attribute to wrap. Should be a read call.
Returns:
The read_wrapper function. | 2.923246 | 2.653636 | 1.1016 |
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0
if not writing or not IS_PY2:
return ret_value
return other_wrapper | def _other_wrapper(self, name, writing) | Wrap a stream attribute in an other_wrapper.
Args:
name: the name of the stream attribute to wrap.
Returns:
other_wrapper which is described below. | 4.346694 | 4.550016 | 0.955314 |
io_attr = getattr(self._io, 'truncate')
def truncate_wrapper(*args, **kwargs):
if self._append:
self._io.seek(self._read_seek, self._read_whence)
size = io_attr(*args, **kwargs)
self.flush()
if not self.is_stream:
self.file_object.size = size
buffer_size = len(self._io.getvalue())
if buffer_size < size:
self._io.seek(buffer_size)
self._io.write('\0' * (size - buffer_size))
self.file_object.set_contents(
self._io.getvalue(), self._encoding)
self._flush_pos = size
if self._filesystem.is_macos or sys.version_info[0] > 2:
self._adapt_size_for_related_files(size - buffer_size)
self.flush()
if not IS_PY2:
return size
return truncate_wrapper | def _truncate_wrapper(self) | Wrap truncate() to allow flush after truncate.
Returns:
Wrapper which is described below. | 4.706283 | 4.622874 | 1.018043 |
io_attr = getattr(self._io, name)
def write_wrapper(*args, **kwargs):
ret_value = io_attr(*args, **kwargs)
if not IS_PY2:
return ret_value
return write_wrapper | def _write_wrapper(self, name) | Wrap write() to adapt return value for Python 2.
Returns:
Wrapper which is described below. | 5.261494 | 4.705904 | 1.118062 |
self._filesystem.open_files[self.filedes].remove(self)
os.close(self.fd) | def close(self) | Close the pipe descriptor. | 14.18351 | 11.686082 | 1.21371 |
# Backwards compatibility, mode arg used to be named flags
mode = flags or mode
return self.call(file_path, mode, buffering, open_modes=open_modes) | def _call_ver2(self, file_path, mode='r', buffering=-1, flags=None,
open_modes=None) | Limits args of open() or file() for Python 2.x versions. | 6.088551 | 6.189052 | 0.983761 |
try: # Python 2
return isinstance(val, (int, long))
except NameError: # Python 3
return isinstance(val, int) | def is_int_type(val) | Return True if `val` is of integer type. | 2.893933 | 2.759283 | 1.048799 |
stat_result = copy(self)
stat_result.use_float = self.use_float
return stat_result | def copy(self) | Return a copy where the float usage is hard-coded to mimic the
behavior of the real os.stat_result. | 8.03033 | 4.507236 | 1.781653 |
self.st_mode = stat_result.st_mode
self.st_uid = stat_result.st_uid
self.st_gid = stat_result.st_gid
self._st_size = stat_result.st_size
if sys.version_info < (3, 3):
self._st_atime_ns = self.long_type(stat_result.st_atime * 1e9)
self._st_mtime_ns = self.long_type(stat_result.st_mtime * 1e9)
self._st_ctime_ns = self.long_type(stat_result.st_ctime * 1e9)
else:
self._st_atime_ns = stat_result.st_atime_ns
self._st_mtime_ns = stat_result.st_mtime_ns
self._st_ctime_ns = stat_result.st_ctime_ns | def set_from_stat_result(self, stat_result) | Set values from a real os.stat_result.
Note: values that are controlled by the fake filesystem are not set.
This includes st_ino, st_dev and st_nlink. | 1.523674 | 1.444983 | 1.054458 |
if newvalue is not None:
cls._stat_float_times = bool(newvalue)
return cls._stat_float_times | def stat_float_times(cls, newvalue=None) | Determine whether a file's time stamps are reported as floats
or ints.
Calling without arguments returns the current value.
The value is shared by all instances of FakeOsModule.
Args:
newvalue: If `True`, mtime, ctime, atime are reported as floats.
Otherwise, they are returned as ints (rounding down). | 2.706464 | 3.420793 | 0.79118 |
ctime = self._st_ctime_ns / 1e9
return ctime if self.use_float else int(ctime) | def st_ctime(self) | Return the creation time in seconds. | 6.363762 | 5.717019 | 1.113126 |
atime = self._st_atime_ns / 1e9
return atime if self.use_float else int(atime) | def st_atime(self) | Return the access time in seconds. | 5.729389 | 5.345877 | 1.07174 |
mtime = self._st_mtime_ns / 1e9
return mtime if self.use_float else int(mtime) | def st_mtime(self) | Return the modification time in seconds. | 6.679174 | 5.668794 | 1.178235 |
sid = str(sid)
if not sid.isnumeric():
if sid.startswith('2-s2.0-'):
id_type = 'eid'
elif '/' in sid:
id_type = 'doi'
elif 16 <= len(sid) <= 17:
id_type = 'pii'
elif sid.isnumeric():
if len(sid) < 10:
id_type = 'pubmed_id'
else:
id_type = 'scopus_id'
else:
raise ValueError('ID type detection failed for \'{}\'.'.format(sid))
return id_type | def detect_id_type(sid) | Method that tries to infer the type of abstract ID.
Parameters
----------
sid : str
The ID of an abstract on Scopus.
Raises
------
ValueError
If the ID type cannot be inferred.
Notes
-----
PII usually has 17 chars, but in Scopus there are valid cases with only
16 for old converted articles.
Scopus ID contains only digits, but it can have leading zeros. If ID
with leading zeros is treated as a number, SyntaxError can occur, or the
ID will be rendered invalid and the type will be misinterpreted. | 3.24931 | 2.897245 | 1.121517 |
# Value check
accepted = ("json", "xml", "atom+xml")
if accept.lower() not in accepted:
raise ValueError('accept parameter must be one of ' +
', '.join(accepted))
# Get credentials
key = config.get('Authentication', 'APIKey')
header = {'X-ELS-APIKey': key}
if config.has_option('Authentication', 'InstToken'):
token = config.get('Authentication', 'InstToken')
header.update({'X-ELS-APIKey': key, 'X-ELS-Insttoken': token})
header.update({'Accept': 'application/{}'.format(accept)})
# Perform request
params.update(**kwds)
resp = requests.get(url, headers=header, params=params)
# Raise error if necessary
try:
reason = resp.reason.upper() + " for url: " + url
raise errors[resp.status_code](reason)
except KeyError: # Exception not specified in scopus
resp.raise_for_status() # Will pass when everything is ok
return resp | def download(url, params=None, accept="xml", **kwds) | Helper function to download a file and return its content.
Parameters
----------
url : string
The URL to be parsed.
params : dict (optional)
Dictionary containing query parameters. For required keys
and accepted values see e.g.
https://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl
accept : str (optional, default=xml)
mime type of the file to be downloaded. Accepted values are json,
atom+xml, xml.
kwds : key-value parings, optional
Keywords passed on to as query parameters. Must contain fields
and values specified in the respective API specification.
Raises
------
ScopusHtmlError
If the status of the response is not ok.
ValueError
If the accept parameter is not one of the accepted values.
Returns
-------
resp : byte-like object
The content of the file, which needs to be serialized. | 4.041761 | 3.705871 | 1.090637 |
if not refresh and os.path.exists(qfile):
with open(qfile, 'rb') as f:
content = f.read()
else:
content = download(*args, **kwds).text.encode('utf-8')
with open(qfile, 'wb') as f:
f.write(content)
return content | def get_content(qfile, refresh, *args, **kwds) | Helper function to read file content as xml. The file is cached
in a subfolder of ~/.scopus/.
Parameters
----------
qfile : string
The name of the file to be created.
refresh : bool
Whether the file content should be refreshed if it exists.
*args, **kwds :
Arguments and keywords to be passed on to download().
Returns
-------
content : str
The content of the file. | 1.866575 | 1.925411 | 0.969442 |
out = []
variant = namedtuple('Variant', 'name doc_count')
for var in chained_get(self._json, ['name-variants', 'name-variant'], []):
new = variant(name=var['$'], doc_count=var.get('@doc-count'))
out.append(new)
return out | def name_variants(self) | A list of namedtuples representing variants of the affiliation name
with number of documents referring to this variant. | 6.612649 | 5.027443 | 1.315311 |
date_created = self.xml.find('institution-profile/date-created')
if date_created is not None:
date_created = (int(date_created.attrib['year']),
int(date_created.attrib['month']),
int(date_created.attrib['day']))
else:
date_created = (None, None, None)
return date_created | def date_created(self) | Date the Scopus record was created. | 2.442849 | 2.208386 | 1.106169 |
url = self.xml.find('coredata/link[@rel="scopus-affiliation"]')
if url is not None:
url = url.get('href')
return url | def url(self) | URL to the affiliation's profile page. | 6.904174 | 5.160128 | 1.337985 |
for key in path:
try:
container = container[key]
except (AttributeError, KeyError, TypeError):
return default
return container | def chained_get(container, path, default=None) | Helper function to perform a series of .get() methods on a dictionary
and return a default object type in the end.
Parameters
----------
container : dict
The dictionary on which the .get() methods should be performed.
path : list or tuple
The list of keys that should be searched for.
default : any (optional, default=None)
The object type that should be returned if the search yields
no result. | 2.55536 | 3.405404 | 0.750384 |
date = dct['date-created']
if date:
return (int(date['@year']), int(date['@month']), int(date['@day']))
else:
return (None, None, None) | def parse_date_created(dct) | Helper function to parse date-created from profile. | 2.967463 | 2.646963 | 1.121082 |
affs = self._json.get('affiliation-history', {}).get('affiliation')
try:
return [d['@id'] for d in affs]
except TypeError: # No affiliation history
return None | def affiliation_history(self) | Unordered list of IDs of all affiliations the author was
affiliated with acccording to Scopus. | 5.450538 | 4.557802 | 1.19587 |
path = ['author-profile', 'classificationgroup', 'classifications',
'classification']
out = [(item['$'], item['@frequency']) for item in
listify(chained_get(self._json, path, []))]
return out or None | def classificationgroup(self) | List with (subject group ID, number of documents)-tuples. | 15.814574 | 13.623629 | 1.16082 |
hist = chained_get(self._json, ["coredata", 'historical-identifier'], [])
return [d['$'].split(":")[-1] for d in hist] or None | def historical_identifier(self) | Scopus IDs of previous profiles now compromising this profile. | 15.289807 | 10.904834 | 1.402113 |
ident = self._json['coredata']['dc:identifier'].split(":")[-1]
if ident != self._id:
text = "Profile with ID {} has been merged and the new ID is "\
"{}. Please update your records manually. Files have "\
"been cached with the old ID.".format(self._id, ident)
warn(text, UserWarning)
return ident | def identifier(self) | The author's ID. Might differ from the one provided. | 9.574554 | 7.85587 | 1.218777 |
jour = namedtuple('Journal', 'sourcetitle abbreviation type issn')
path = ['author-profile', 'journal-history', 'journal']
hist = [jour(sourcetitle=pub['sourcetitle'], issn=pub.get('issn'),
abbreviation=pub.get('sourcetitle-abbrev'),
type=pub['@type'])
for pub in listify(chained_get(self._json, path, []))]
return hist or None | def journal_history(self) | List of named tuples of authored publications in the form
(sourcetitle, abbreviation, type, issn). issn is only given
for journals. abbreviation and issn may be None. | 7.263731 | 4.747949 | 1.529867 |
fields = 'indexed_name initials surname given_name doc_count'
variant = namedtuple('Variant', fields)
path = ['author-profile', 'name-variant']
out = [variant(indexed_name=var['indexed-name'], surname=var['surname'],
doc_count=var.get('@doc-count'), initials=var['initials'],
given_name=var.get('given-name'))
for var in listify(chained_get(self._json, path, []))]
return out or None | def name_variants(self) | List of named tuples containing variants of the author name with
number of documents published with that variant. | 6.815444 | 5.638871 | 1.208654 |
r = self._json['author-profile']['publication-range']
return (r['@start'], r['@end'])
return self._json['coredata'].get('orcid') | def publication_range(self) | Tuple containing years of first and last publication. | 12.265336 | 9.896713 | 1.239334 |
path = ['subject-areas', 'subject-area']
area = namedtuple('Subjectarea', 'area abbreviation code')
areas = [area(area=item['$'], code=item['@code'],
abbreviation=item['@abbrev'])
for item in chained_get(self._json, path, [])]
return areas or None | def subject_areas(self) | List of named tuples of subject areas in the form
(area, abbreviation, code) of author's publication. | 7.116665 | 5.865143 | 1.213383 |
# Get number of authors to search for
res = download(url=self.coauthor_link, accept='json')
data = loads(res.text)['search-results']
N = int(data.get('opensearch:totalResults', 0))
# Store information in namedtuples
fields = 'surname given_name id areas affiliation_id name city country'
coauth = namedtuple('Coauthor', fields)
coauthors = []
# Iterate over search results in chunks of 25 results
count = 0
while count < N:
params = {'start': count, 'count': 25}
res = download(url=self.coauthor_link, params=params, accept='json')
data = loads(res.text)['search-results'].get('entry', [])
# Extract information for each coauthor
for entry in data:
aff = entry.get('affiliation-current', {})
try:
areas = [a['$'] for a in entry.get('subject-area', [])]
except TypeError: # Only one subject area given
areas = [entry['subject-area']['$']]
new = coauth(surname=entry['preferred-name']['surname'],
given_name=entry['preferred-name'].get('given-name'),
id=entry['dc:identifier'].split(':')[-1],
areas='; '.join(areas), name=aff.get('affiliation-name'),
affiliation_id=aff.get('affiliation-id'),
city=aff.get('affiliation-city'),
country=aff.get('affiliation-country'))
coauthors.append(new)
count += 25
return coauthors | def get_coauthors(self) | Retrieves basic information about co-authors as a list of
namedtuples in the form
(surname, given_name, id, areas, affiliation_id, name, city, country),
where areas is a list of subject area codes joined by "; ".
Note: These information will not be cached and are slow for large
coauthor groups. | 3.153931 | 2.651851 | 1.189332 |
search = ScopusSearch('au-id({})'.format(self.identifier), refresh)
if subtypes:
return [p for p in search.results if p.subtype in subtypes]
else:
return search.results | def get_documents(self, subtypes=None, refresh=False) | Return list of author's publications using ScopusSearch, which
fit a specified set of document subtypes. | 5.711597 | 3.9603 | 1.442213 |
out = []
for i in lst:
if i not in out:
out.append(i)
return out | def _deduplicate(lst) | Auxiliary function to deduplicate lst. | 2.545218 | 2.171598 | 1.172048 |
return sep.join([d[key] for d in lst if d[key]]) | def _join(lst, key, sep=";") | Auxiliary function to join same elements of a list of dictionaries if
the elements are not None. | 4.653528 | 4.367124 | 1.065582 |
authors = self.xml.find('authors', ns)
try:
return [_ScopusAuthor(author) for author in authors]
except TypeError:
return None | def authors(self) | A list of scopus_api._ScopusAuthor objects. | 6.107326 | 3.999389 | 1.527065 |
cite_link = self.coredata.find('link[@rel="scopus-citedby"]', ns)
try:
return cite_link.get('href')
except AttributeError: # cite_link is None
return None | def citedby_url(self) | URL to Scopus page listing citing papers. | 5.537208 | 4.59269 | 1.205657 |
refs = self.items.find('bibrecord/tail/bibliography', ns)
try:
return refs.attrib['refcount']
except AttributeError: # refs is None
return None | def refcount(self) | Number of references of an article.
Note: Requires the FULL view of the article. | 13.3496 | 11.246605 | 1.186989 |
refs = self.items.find('bibrecord/tail/bibliography', ns)
if refs is not None:
eids = [r.find("ref-info/refd-itemidlist/itemid", ns).text for r
in refs.findall("reference", ns)]
return ["2-s2.0-" + eid for eid in eids]
else:
return None | def references(self) | Return EIDs of references of an article.
Note: Requires the FULL view of the article. | 8.792287 | 7.341005 | 1.197695 |
subjectAreas = self.xml.find('subject-areas', ns)
try:
return [a.text for a in subjectAreas]
except:
return None | def subjectAreas(self) | List of subject areas of article.
Note: Requires the FULL view of the article. | 4.861883 | 4.658132 | 1.043741 |
scopus_url = self.coredata.find('link[@rel="scopus"]', ns)
try:
return scopus_url.get('href')
except AttributeError: # scopus_url is None
return None | def scopus_url(self) | URL to the abstract page on Scopus. | 4.388094 | 4.034966 | 1.087517 |
resp = requests.get(self.scopus_url)
from lxml import html
parsed_doc = html.fromstring(resp.content)
for div in parsed_doc.body.xpath('.//div'):
for a in div.xpath('a'):
if '/cdn-cgi/l/email-protection' not in a.get('href', ''):
continue
encoded_text = a.attrib['href'].replace('/cdn-cgi/l/email-protection#', '')
key = int(encoded_text[0:2], 16)
email = ''.join([chr(int('0x{}'.format(x), 16) ^ key)
for x in
map(''.join, zip(*[iter(encoded_text[2:])]*2))])
for aa in div.xpath('a'):
if 'http://www.scopus.com/authid/detail.url' in aa.get('href', ''):
scopus_url = aa.attrib['href']
name = aa.text
else:
scopus_url, name = None, None
return (scopus_url, name, email) | def get_corresponding_author_info(self) | Try to get corresponding author information.
Returns (scopus-id, name, email). | 3.092516 | 2.931439 | 1.054948 |
s = ('{authors}, \\textit{{{title}}}, {journal}, {volissue}, '
'{pages}, ({date}). {doi}, {scopus_url}.')
if len(self.authors) > 1:
authors = ', '.join([str(a.given_name) +
' ' + str(a.surname)
for a in self.authors[0:-1]])
authors += (' and ' +
str(self.authors[-1].given_name) +
' ' + str(self.authors[-1].surname))
else:
a = self.authors[0]
authors = str(a.given_name) + ' ' + str(a.surname)
title = self.title
journal = self.publicationName
volume = self.volume
issue = self.issueIdentifier
if volume and issue:
volissue = '\\textbf{{{0}({1})}}'.format(volume, issue)
elif volume:
volissue = '\\textbf{{0}}'.format(volume)
else:
volissue = 'no volume'
date = self.coverDate
if self.pageRange:
pages = 'p. {0}'.format(self.pageRange)
elif self.startingPage:
pages = 'p. {self.startingPage}'.format(self)
elif self.article_number:
pages = 'Art. No. {self.article_number}, '.format(self)
else:
pages = '(no pages found)'
doi = '\\href{{https://doi.org/{0}}}{{doi:{0}}}'.format(self.doi)
scopus_url = '\\href{{{0}}}{{scopus:{1}}}'.format(self.scopus_url,
self.eid)
return s.format(**locals()) | def latex(self) | Return LaTeX representation of the abstract. | 2.751095 | 2.718321 | 1.012057 |
s = (u'{authors}, {title}, {journal}, {volissue}, {pages}, '
'({date}). {doi}.')
au_link = ('<a href="https://www.scopus.com/authid/detail.url'
'?origin=AuthorProfile&authorId={0}">{1}</a>')
if len(self.authors) > 1:
authors = u', '.join([au_link.format(a.auid,
(str(a.given_name) +
' ' + str(a.surname)))
for a in self.authors[0:-1]])
authors += (u' and ' +
au_link.format(self.authors[-1].auid,
(str(self.authors[-1].given_name) +
' ' +
str(self.authors[-1].surname))))
else:
a = self.authors[0]
authors = au_link.format(a.auid,
str(a.given_name) + ' ' + str(a.surname))
title = u'<a href="{link}">{title}</a>'.format(link=self.scopus_url,
title=self.title)
jname = self.publicationName
sid = self.source_id
jlink = ('<a href="https://www.scopus.com/source/sourceInfo.url'
'?sourceId={sid}">{journal}</a>')
journal = jlink.format(sid=sid, journal=jname)
volume = self.volume
issue = self.issueIdentifier
if volume and issue:
volissue = u'<b>{0}({1})</b>'.format(volume, issue)
elif volume:
volissue = u'<b>{0}</b>'.format(volume)
else:
volissue = 'no volume'
date = self.coverDate
if self.pageRange:
pages = u'p. {0}'.format(self.pageRange)
elif self.startingPage:
pages = u'p. {self.startingPage}'.format(self=self)
elif self.article_number:
pages = u'Art. No. {self.article_number}, '.format(self=self)
else:
pages = '(no pages found)'
doi = '<a href="https://doi.org/{0}">doi:{0}</a>'.format(self.doi)
html = s.format(**locals())
return html.replace('None', '') | def html(self) | Returns an HTML citation. | 2.598191 | 2.525263 | 1.02888 |
if self.aggregationType != 'Journal':
raise ValueError('Only Journal articles supported.')
template = u'''@article{{{key},
author = {{{author}}},
title = {{{title}}},
journal = {{{journal}}},
year = {{{year}}},
volume = {{{volume}}},
number = {{{number}}},
pages = {{{pages}}},
doi = {{{doi}}}
}}
'''
if self.pageRange:
pages = self.pageRange
elif self.startingPage:
pages = self.startingPage
elif self.article_number:
pages = self.article_number
else:
pages = 'no pages found'
year = self.coverDate[0:4]
first = self.title.split()[0].title()
last = self.title.split()[-1].title()
key = ''.join([self.authors[0].surname, year, first, last])
authors = ' and '.join(["{} {}".format(a.given_name, a.surname)
for a in self.authors])
bibtex = template.format(
key=key, author=authors, title=self.title,
journal=self.publicationName, year=year, volume=self.volume,
number=self.issueIdentifier, pages=pages, doi=self.doi)
return bibtex | def bibtex(self) | Bibliographic entry in BibTeX format.
Returns
-------
bibtex : str
A string representing a bibtex entry for the item.
Raises
------
ValueError : If the item's aggregationType is not Journal. | 2.688665 | 2.445076 | 1.099624 |
if self.aggregationType != 'Journal':
raise ValueError('Only Journal articles supported.')
template = u'''TY - JOUR
TI - {title}
JO - {journal}
VL - {volume}
DA - {date}
SP - {pages}
PY - {year}
DO - {doi}
UR - https://doi.org/{doi}
'''
ris = template.format(
title=self.title, journal=self.publicationName,
volume=self.volume, date=self.coverDate, pages=self.pageRange,
year=self.coverDate[0:4], doi=self.doi)
for au in self.authors:
ris += 'AU - {}\n'.format(au.indexed_name)
if self.issueIdentifier is not None:
ris += 'IS - {}\n'.format(self.issueIdentifier)
ris += 'ER - \n\n'
return ris | def ris(self) | Bibliographic entry in RIS (Research Information System Format)
format.
Returns
-------
ris : str
The RIS string representing an item.
Raises
------
ValueError : If the item's aggregationType is not Journal. | 3.337282 | 2.792299 | 1.195174 |
out = []
order = 'name surname initials id url'
auth = namedtuple('Author', order)
for author in self._citeInfoMatrix.get('author'):
author = {k.split(":", 1)[-1]: v for k, v in author.items()}
new = auth(name=author.get('index-name'), id=author.get('authid'),
surname=author.get('surname'),
initials=author.get('initials'),
url=author.get('author-url'))
out.append(new)
return out or None | def authors(self) | A list of namedtuples storing author information,
where each namedtuple corresponds to one author.
The information in each namedtuple is (name surname initials id url).
All entries are strings. | 4.876801 | 4.08707 | 1.193226 |
_years = range(self._start, self._end+1)
try:
return list(zip(_years, [d.get('$') for d in self._citeInfoMatrix['cc']]))
except AttributeError: # No citations
return list(zip(_years, [0]*len(_years))) | def cc(self) | List of tuples of yearly number of citations
for specified years. | 8.706602 | 6.405389 | 1.359262 |
out = []
order = 'eid name variant documents city country parent'
aff = namedtuple('Affiliation', order)
for item in self._json:
name = item.get('affiliation-name')
variants = [d.get('$', "") for d in item.get('name-variant', [])
if d.get('$', "") != name]
new = aff(eid=item['eid'], variant=";".join(variants),
documents=item.get('document-count', '0'), name=name,
city=item.get('city'), country=item.get('country'),
parent=item.get('parent-affiliation-id'))
out.append(new)
return out or None | def affiliations(self) | A list of namedtuples storing affiliation information,
where each namedtuple corresponds to one affiliation.
The information in each namedtuple is (eid name variant documents city
country parent).
All entries are strings or None. variant combines variants of names
with a semicolon. | 4.738722 | 3.460664 | 1.36931 |
aff_ids = [e.attrib.get('affiliation-id') for e in
self.xml.findall('author-profile/affiliation-history/affiliation')
if e is not None and len(list(e.find("ip-doc").iter())) > 1]
return [ScopusAffiliation(aff_id) for aff_id in aff_ids] | def affiliation_history(self) | List of ScopusAffiliation objects representing former
affiliations of the author. Only affiliations with more than one
publication are considered. | 5.762571 | 4.116124 | 1.399999 |
date_created = self.xml.find('author-profile/date-created', ns)
try:
return (int(date_created.attrib['year']),
int(date_created.attrib['month']),
int(date_created.attrib['day']))
except AttributeError: # date_created is None
return (None, None, None) | def date_created(self) | Date the Scopus record was created. | 3.297194 | 2.818805 | 1.169714 |
areas = self.xml.findall('subject-areas/subject-area')
freqs = self.xml.findall('author-profile/classificationgroup/'
'classifications[@type="ASJC"]/classification')
c = {int(cls.text): int(cls.attrib['frequency']) for cls in freqs}
cats = [(a.text, c[int(a.get("code"))], a.get("abbrev"), a.get("code"))
for a in areas]
cats.sort(reverse=True, key=itemgetter(1))
return cats | def subject_areas(self) | List of tuples of author subject areas in the form
(area, frequency, abbreviation, code), where frequency is the
number of publications in this subject area. | 5.986228 | 4.780307 | 1.252268 |
pub_hist = self.xml.findall('author-profile/journal-history/')
hist = []
for pub in pub_hist:
try:
issn = pub.find("issn").text
except AttributeError:
issn = None
try:
abbr = pub.find("sourcetitle-abbrev").text
except AttributeError:
abbr = None
hist.append((pub.find("sourcetitle").text, abbr, pub.get("type"), issn))
return hist | def publication_history(self) | List of tuples of authored publications in the form
(title, abbreviation, type, issn), where issn is only given
for journals. abbreviation and issn may be None. | 3.811138 | 2.824451 | 1.349337 |
url = self.xml.find('coredata/link[@rel="coauthor-search"]').get('href')
xml = download(url=url).text.encode('utf-8')
xml = ET.fromstring(xml)
coauthors = []
N = int(get_encoded_text(xml, 'opensearch:totalResults') or 0)
AUTHOR = namedtuple('Author',
['name', 'scopus_id', 'affiliation', 'categories'])
count = 0
while count < N:
params = {'start': count, 'count': 25}
xml = download(url=url, params=params).text.encode('utf-8')
xml = ET.fromstring(xml)
for entry in xml.findall('atom:entry', ns):
given_name = get_encoded_text(entry,
'atom:preferred-name/atom:given-name')
surname = get_encoded_text(entry,
'atom:preferred-name/atom:surname')
coauthor_name = u'{0} {1}'.format(given_name, surname)
scopus_id = get_encoded_text(entry,
'dc:identifier').replace('AUTHOR_ID:', '')
affiliation = get_encoded_text(entry,
'atom:affiliation-current/atom:affiliation-name')
# get categories for this author
s = u', '.join(['{0} ({1})'.format(subject.text,
subject.attrib['frequency'])
for subject in
entry.findall('atom:subject-area', ns)])
coauthors += [AUTHOR(coauthor_name, scopus_id, affiliation, s)]
count += 25
return coauthors | def get_coauthors(self) | Return list of coauthors, their scopus-id and research areas. | 3.034226 | 2.984488 | 1.016666 |
search = ScopusSearch('au-id({})'.format(self.author_id),
*args, **kwds)
return search.get_eids() | def get_document_eids(self, *args, **kwds) | Return list of EIDs for the author using ScopusSearch. | 7.978638 | 5.001359 | 1.595294 |
return [ScopusAbstract(eid, refresh=refresh)
for eid in self.get_document_eids(refresh=refresh)] | def get_abstracts(self, refresh=True) | Return a list of ScopusAbstract objects using ScopusSearch. | 7.099995 | 4.64673 | 1.527955 |
return [abstract for abstract in self.get_abstracts(refresh=refresh) if
abstract.aggregationType == 'Journal'] | def get_journal_abstracts(self, refresh=True) | Return a list of ScopusAbstract objects using ScopusSearch,
but only if belonging to a Journal. | 7.409416 | 5.162609 | 1.435208 |
abstracts = self.get_abstracts(refresh=refresh)
if cite_sort:
counts = [(a, int(a.citedby_count)) for a in abstracts]
counts.sort(reverse=True, key=itemgetter(1))
abstracts = [a[0] for a in counts]
if N is None:
N = len(abstracts)
s = [u'{0} of {1} documents'.format(N, len(abstracts))]
for i in range(N):
s += ['{0:2d}. {1}\n'.format(i + 1, str(abstracts[i]))]
return '\n'.join(s) | def get_document_summary(self, N=None, cite_sort=True, refresh=True) | Return a summary string of documents.
Parameters
----------
N : int or None (optional, default=None)
Maximum number of documents to include in the summary.
If None, return all documents.
cite_sort : bool (optional, default=True)
Whether to sort xml by number of citations, in decreasing order,
or not.
refresh : bool (optional, default=True)
Whether to refresh the cached abstract file (if it exists) or not.
Returns
-------
s : str
Text summarizing an author's documents. | 2.522751 | 2.745039 | 0.919022 |
scopus_abstracts = self.get_journal_abstracts(refresh=refresh)
cites = [int(ab.citedby_count) for ab in scopus_abstracts]
years = [int(ab.coverDate.split('-')[0]) for ab in scopus_abstracts]
data = zip(years, cites, scopus_abstracts)
data = sorted(data, key=itemgetter(1), reverse=True)
# now get aif papers for year-1 and year-2
aif_data = [tup for tup in data if tup[0] in (year - 1, year - 2)]
Ncites = sum([tup[1] for tup in aif_data])
if len(aif_data) > 0:
return (Ncites, len(aif_data), Ncites / float(len(aif_data)))
else:
return (Ncites, len(aif_data), 0) | def author_impact_factor(self, year=2014, refresh=True) | Get author_impact_factor for the .
Parameters
----------
year : int (optional, default=2014)
The year based for which the impact factor is to be calculated.
refresh : bool (optional, default=True)
Whether to refresh the cached search file (if it exists) or not.
Returns
-------
(ncites, npapers, aif) : tuple of integers
The citations count, publication count, and author impact factor. | 3.045485 | 2.971086 | 1.025041 |
first_authors = [1 for ab in self.get_journal_abstracts(refresh=refresh)
if ab.authors[0].scopusid == self.author_id]
return sum(first_authors) | def n_first_author_papers(self, refresh=True) | Return number of papers with author as the first author. | 6.050968 | 5.530639 | 1.094081 |
pub_years = [int(ab.coverDate.split('-')[0])
for ab in self.get_journal_abstracts(refresh=refresh)]
return Counter(pub_years) | def n_yearly_publications(self, refresh=True) | Number of journal publications in a given year. | 6.574074 | 5.467672 | 1.202353 |
try:
org = aff['organization']
if not isinstance(org, str):
try:
org = org['$']
except TypeError: # Multiple names given
org = ', '.join([d['$'] for d in org if d])
except KeyError: # Author group w/o affiliation
org = None
return org | def _get_org(aff) | Auxiliary function to extract org information from affiliation
for authorgroup. | 5.165172 | 4.312546 | 1.197708 |
authors = ', '.join([' '.join([a.given_name, a.surname]) for a in lst[0:-1]])
authors += ' and ' + ' '.join([lst[-1].given_name, lst[-1].surname])
return authors | def _list_authors(lst) | Format a list of authors (Surname, Firstname and Firstname Surname). | 2.758295 | 2.709682 | 1.017941 |
if self.pageRange:
pages = 'pp. {}'.format(self.pageRange)
elif self.startingPage:
pages = 'pp. {}-{}'.format(self.startingPage, self.endingPage)
else:
pages = '(no pages found)'
if unicode:
pages = u'{}'.format(pages)
return pages | def _parse_pages(self, unicode=False) | Auxiliary function to parse and format page range of a document. | 3.440181 | 2.921575 | 1.177509 |
out = []
aff = namedtuple('Affiliation', 'id name city country')
affs = listify(self._json.get('affiliation', []))
for item in affs:
new = aff(id=item.get('@id'), name=item.get('affilname'),
city=item.get('affiliation-city'),
country=item.get('affiliation-country'))
out.append(new)
return out or None | def affiliation(self) | A list of namedtuples representing listed affiliations in
the form (id, name, city, country).
Note: Might be empty. | 3.797028 | 3.476581 | 1.092173 |
keywords = self._json['authkeywords']
if keywords is None:
return None
else:
try:
return [d['$'] for d in keywords['author-keyword']]
except TypeError: # Singleton keyword
return [keywords['author-keyword']['$']] | def authkeywords(self) | List of author-provided keywords of the abstract. | 5.867673 | 5.062985 | 1.158936 |
def authorgroup(self):
    """A list of namedtuples representing the article's authors organized
    by affiliation, in the form (affiliation_id, dptid, organization,
    city, postalcode, addresspart, country, auid, indexed_name,
    surname, given_name).
    If "given_name" is not present, fall back to initials.
    Note: Affiliation information might be missing or mal-assigned even
    when it looks correct in the web view.  In this case please request
    a correction.
    """
    author_fields = 'affiliation_id dptid organization city postalcode '\
                    'addresspart country auid indexed_name surname given_name'
    record = namedtuple('Author', author_fields)
    groups = listify(self._head.get('author-group', []))
    collected = []
    for group in groups:
        # Affiliation information for this author group
        affiliation = group.get('affiliation', {})
        try:
            ids = listify(affiliation['affiliation-id'])
            joined_id = ", ".join([a["@afid"] for a in ids])
        except KeyError:
            joined_id = affiliation.get("@afid")
        organization = _get_org(affiliation)
        # Author information (might relate to collaborations)
        members = listify(group.get('author', group.get('collaboration', [])))
        for member in members:
            try:
                given_name = member.get('ce:given-name', member['ce:initials'])
            except KeyError:  # Collaboration entry without person names
                given_name = member.get('ce:text')
            collected.append(record(
                affiliation_id=joined_id,
                organization=organization,
                city=affiliation.get('city'),
                dptid=affiliation.get("@dptid"),
                postalcode=affiliation.get('postal-code'),
                addresspart=affiliation.get('address-part'),
                country=affiliation.get('country'),
                auid=member.get('@auid'),
                surname=member.get('ce:surname'),
                given_name=given_name,
                indexed_name=chained_get(member, ['preferred-name', 'ce:indexed-name'])))
    return collected or None
def authors(self):
    """A list of namedtuples representing the article's authors, in the
    form (auid, indexed_name, surname, given_name, affiliation).
    Note: The affiliation referred to here is what Scopus' algorithm
    determined as the main affiliation.  Property `authorgroup` provides
    all affiliations.
    """
    author = namedtuple('Author',
                        'auid indexed_name surname given_name affiliation')
    collected = []
    for entry in chained_get(self._json, ['authors', 'author'], []):
        # Keep only truthy affiliation entries; None when there are none
        affiliations = [a for a in listify(entry.get('affiliation')) if a]
        aff_ids = [a.get('@id') for a in affiliations] if affiliations else None
        collected.append(author(
            auid=entry['@auid'],
            surname=entry.get('ce:surname'),
            indexed_name=entry.get('ce:indexed-name'),
            affiliation=aff_ids,
            given_name=chained_get(entry, ['preferred-name', 'ce:given-name'])))
    return collected or None
def chemicals(self):
    """List of namedtuples representing chemical entities in the form
    (source, chemical_name, cas_registry_number).  In case multiple
    numbers are given, they are joined on ";".
    """
    chemical = namedtuple('Chemical',
                          'source chemical_name cas_registry_number')
    groups = listify(chained_get(
        self._head, ['enhancement', 'chemicalgroup', 'chemicals'], []))
    collected = []
    for group in groups:
        for entry in listify(group['chemical']):
            registry = entry.get('cas-registry-number')
            try:  # Multiple numbers given
                joined = ";".join([n['$'] for n in registry])
            except TypeError:  # single number (or None)
                joined = registry
            collected.append(chemical(source=group['@source'],
                                      cas_registry_number=joined,
                                      chemical_name=entry['chemical-name']))
    return collected or None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.