code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def linux_find_processes(self, names):
"""But what if a blacklisted process spawns after we call
this? We'd have to call this every time we do anything.
"""
pids = []
proc_pid_dirs = glob.glob('/proc/[0-9]*/')
comm_file = ''
for proc_pid_dir in proc_pid_dirs:
comm_file = os.path.join(proc_pid_dir, 'comm')
try:
with open(comm_file, 'r') as f:
comm = f.read().strip()
if comm in names:
pid = int(proc_pid_dir.split('/')[-2], 10)
pids.append(pid)
except IOError as e:
# Silently ignore
pass
return pids | But what if a blacklisted process spawns after we call
this? We'd have to call this every time we do anything.
| linux_find_processes | python | mandiant/flare-fakenet-ng | fakenet/diverters/linutil.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py | Apache-2.0 |
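The scan above walks `/proc/[pid]/comm` and collects matching PIDs. A minimal standalone sketch of the same idea, runnable outside the Diverter class (the function name and inline imports are illustrative, not part of FakeNet-NG's API):

```python
import glob
import os

def find_pids_by_comm(names):
    """Return PIDs whose /proc/<pid>/comm matches one of `names` (sketch)."""
    pids = []
    for proc_pid_dir in glob.glob('/proc/[0-9]*/'):
        try:
            with open(os.path.join(proc_pid_dir, 'comm')) as f:
                if f.read().strip() in names:
                    pids.append(int(proc_pid_dir.split('/')[-2], 10))
        except IOError:
            pass  # Process may have exited between glob and open; ignore
    return pids

# Example: print(find_pids_by_comm(['sshd', 'nginx']))
```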
def _linux_find_sock_by_endpoint_unsafe(self, ipver, proto_name, ip, port,
local=True):
"""Search /proc/net/tcp for a socket whose local (field 1, zero-based)
or remote (field 2) address matches ip:port and return the
corresponding inode (field 9).
Fields referenced above are zero-based.
Example contents of /proc/net/tcp (wrapped and double-spaced)
sl local_address rem_address st tx_queue rx_queue tr tm->when
retrnsmt uid timeout inode
0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 53320 1 0000000000000000 100 0 0 10 0
1: 00000000:021A 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 11125 1 0000000000000000 100 0 0 10 0
2: 00000000:1A0B 00000000:0000 0A 00000000:00000000 00:00000000
00000000 39 0 11175 1 0000000000000000 100 0 0 10 0
3: 0100007F:8071 0100007F:1F90 01 00000000:00000000 00:00000000
00000000 1000 0 58661 1 0000000000000000 20 0 0 10 -1
4: 0100007F:1F90 0100007F:8071 01 00000000:00000000 00:00000000
00000000 1000 0 58640 1 0000000000000000 20 4 30 10 -1
Returns inode
"""
INODE_COLUMN = 9
# IPv6 untested
suffix = '6' if (ipver == 6) else ''
procfs_path = '/proc/net/' + proto_name.lower() + suffix
inode = None
port_tag = self._port_for_proc_net_tcp(port)
match_column = 1 if local else 2
local_column = 1
remote_column = 2
try:
with open(procfs_path) as f:
f.readline() # Discard header
while True:
line = f.readline()
if not len(line):
break
fields = line.split()
# Local matches can be made based on port only
if local and fields[local_column].endswith(port_tag):
inode = int(fields[INODE_COLUMN], 10)
self.pdebug(DPROCFS, 'MATCHING CONNECTION: %s' %
(line.strip()))
break
# Untested: Remote matches must be more specific and
# include the IP address. Hence, an "endpoint tag" is
# constructed to match what would appear in
# /proc/net/{tcp,udp}{,6}
elif not local:
endpoint_tag = self._ip_port_for_proc_net_tcp(ipver,
ip, port)
if fields[remote_column] == endpoint_tag:
inode = int(fields[INODE_COLUMN], 10)
self.pdebug(DPROCFS, 'MATCHING CONNECTION: %s' %
(line.strip()))
except IOError as e:
self.logger.error('No such protocol/IP ver (%s) or error: %s' %
(procfs_path, str(e)))
return inode | Search /proc/net/tcp for a socket whose local (field 1, zero-based)
or remote (field 2) address matches ip:port and return the
corresponding inode (field 9).
Fields referenced above are zero-based.
Example contents of /proc/net/tcp (wrapped and double-spaced)
sl local_address rem_address st tx_queue rx_queue tr tm->when
retrnsmt uid timeout inode
0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 53320 1 0000000000000000 100 0 0 10 0
1: 00000000:021A 00000000:0000 0A 00000000:00000000 00:00000000
00000000 0 0 11125 1 0000000000000000 100 0 0 10 0
2: 00000000:1A0B 00000000:0000 0A 00000000:00000000 00:00000000
00000000 39 0 11175 1 0000000000000000 100 0 0 10 0
3: 0100007F:8071 0100007F:1F90 01 00000000:00000000 00:00000000
00000000 1000 0 58661 1 0000000000000000 20 0 0 10 -1
4: 0100007F:1F90 0100007F:8071 01 00000000:00000000 00:00000000
00000000 1000 0 58640 1 0000000000000000 20 4 30 10 -1
Returns inode
| _linux_find_sock_by_endpoint_unsafe | python | mandiant/flare-fakenet-ng | fakenet/diverters/linutil.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py | Apache-2.0 |
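The lookup depends on how `/proc/net/{tcp,udp}` encodes endpoints: the port is four uppercase hex digits and an IPv4 address is eight hex digits in little-endian byte order. The helpers below are illustrative analogues of the `_port_for_proc_net_tcp()` and `_ip_port_for_proc_net_tcp()` calls referenced above (names and exact formatting are assumptions); the output matches the sample rows in the docstring, e.g. 127.0.0.1:8080 appears as `0100007F:1F90`.

```python
import socket
import struct

def port_tag(port):
    # /proc/net/tcp stores the port as 4 uppercase hex digits, e.g. 631 -> ':0277'
    return ':%04X' % port

def ipv4_port_tag(ip, port):
    # IPv4 addresses appear as 8 hex digits with the network bytes reversed,
    # so 127.0.0.1 -> '0100007F'
    le = struct.unpack('<I', socket.inet_aton(ip))[0]
    return '%08X%s' % (le, port_tag(port))

print(port_tag(631))                     # :0277
print(ipv4_port_tag('127.0.0.1', 8080))  # 0100007F:1F90
```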
def linux_get_pid_comm_by_endpoint(self, ipver, proto_name, ip, port):
"""Obtain a pid and executable name associated with an endpoint.
NOTE: procfs does not allow us to answer questions like "who just
called send()?"; only questions like "who owns a socket associated with
this local port?" Since fork() etc. can result in multiple ownership,
the real answer may be that multiple processes actually own the socket.
This implementation stops at the first match and hence may not give a
perfectly accurate answer in those cases. In practice, this may be
adequate, or it may need to be revisited to return a list of (pid,comm)
tuples to take into account cases where multiple processes have the
same inode open.
"""
pid, comm = None, None
# 1. Find the inode number associated with this socket
inode = self.linux_find_sock_by_endpoint(ipver, proto_name, ip, port)
if inode:
# 2. Search for a /proc/<pid>/fd/<fd> that has this inode open.
proc_fds_glob = '/proc/[0-9]*/fd/*'
proc_fd_paths = glob.glob(proc_fds_glob)
for fd_path in proc_fd_paths:
candidate = self._linux_get_sk_ino_for_fd_file(fd_path)
if candidate and (candidate == inode):
# 3. Record the pid and executable name
try:
pid = int(fd_path.split('/')[-3], 10)
comm = self.linux_get_comm_by_pid(pid)
# Not interested in e.g.
except ValueError:
pass
return pid, comm | Obtain a pid and executable name associated with an endpoint.
NOTE: procfs does not allow us to answer questions like "who just
called send()?"; only questions like "who owns a socket associated with
this local port?" Since fork() etc. can result in multiple ownership,
the real answer may be that multiple processes actually own the socket.
This implementation stops at the first match and hence may not give a
perfectly accurate answer in those cases. In practice, this may be
adequate, or it may need to be revisited to return a list of (pid,comm)
tuples to take into account cases where multiple processes have the
same inode open.
| linux_get_pid_comm_by_endpoint | python | mandiant/flare-fakenet-ng | fakenet/diverters/linutil.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linutil.py | Apache-2.0 |
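Step 2 above works because each `/proc/<pid>/fd/<fd>` entry for a socket is a symlink of the form `socket:[<inode>]`. A standalone sketch of what a helper like `_linux_get_sk_ino_for_fd_file()` presumably does (the helper's real implementation is not shown here, so this is an assumption):

```python
import os
import re

def sk_inode_for_fd_path(fd_path):
    """Return the socket inode for an fd symlink like 'socket:[53320]', else None (sketch)."""
    try:
        target = os.readlink(fd_path)
    except OSError:
        return None
    m = re.match(r'socket:\[(\d+)\]$', target)
    return int(m.group(1)) if m else None

# Example: sk_inode_for_fd_path('/proc/self/fd/3')
```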
def handle_nonlocal(self, nfqpkt):
"""Handle comms sent to IP addresses that are not bound to any adapter.
This allows analysts to observe when malware is communicating with
hard-coded IP addresses in MultiHost mode.
"""
try:
pkt = LinuxPacketCtx('handle_nonlocal', nfqpkt)
self.handle_pkt(pkt, self.nonlocal_net_cbs, [])
if pkt.mangled:
nfqpkt.set_payload(pkt.octets)
# Catch-all exceptions are usually bad practice, agreed, but
# python-netfilterqueue has a catch-all that will not print enough
# information to troubleshoot with, so if there is going to be a
# catch-all exception handler anyway, it might as well be mine so that
# I can print out the stack trace before I lose access to this valuable
# debugging information.
except Exception:
self.logger.error('Exception: %s' % (traceback.format_exc()))
raise
nfqpkt.accept() | Handle comms sent to IP addresses that are not bound to any adapter.
This allows analysts to observe when malware is communicating with
hard-coded IP addresses in MultiHost mode.
| handle_nonlocal | python | mandiant/flare-fakenet-ng | fakenet/diverters/linux.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py | Apache-2.0 |
def handle_incoming(self, nfqpkt):
"""Incoming packet hook.
Specific to incoming packets:
5.) If SingleHost mode:
a.) Conditionally fix up source IPs to support IP forwarding for
otherwise foreign-destined packets
4.) Conditionally mangle destination ports to implement port forwarding
for unbound ports to point to the default listener
No return value.
"""
try:
pkt = LinuxPacketCtx('handle_incoming', nfqpkt)
self.handle_pkt(pkt, self.incoming_net_cbs,
self.incoming_trans_cbs)
if pkt.mangled:
nfqpkt.set_payload(pkt.octets)
except Exception:
self.logger.error('Exception: %s' % (traceback.format_exc()))
raise
nfqpkt.accept() | Incoming packet hook.
Specific to incoming packets:
5.) If SingleHost mode:
a.) Conditionally fix up source IPs to support IP forwarding for
otherwise foreign-destined packets
4.) Conditionally mangle destination ports to implement port forwarding
for unbound ports to point to the default listener
No return value.
| handle_incoming | python | mandiant/flare-fakenet-ng | fakenet/diverters/linux.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py | Apache-2.0 |
def handle_outgoing(self, nfqpkt):
"""Outgoing packet hook.
Specific to outgoing packets:
4.) If SingleHost mode:
a.) Conditionally log packets destined for foreign IP addresses
(the corresponding check for MultiHost mode is called by
handle_nonlocal())
b.) Conditionally mangle destination IPs for otherwise foreign-
destined packets to implement IP forwarding
5.) Conditionally fix up mangled source ports to support port
forwarding
No return value.
"""
try:
pkt = LinuxPacketCtx('handle_outgoing', nfqpkt)
self.handle_pkt(pkt, self.outgoing_net_cbs,
self.outgoing_trans_cbs)
if pkt.mangled:
nfqpkt.set_payload(pkt.octets)
except Exception:
self.logger.error('Exception: %s' % (traceback.format_exc()))
raise
nfqpkt.accept() | Outgoing packet hook.
Specific to outgoing packets:
4.) If SingleHost mode:
a.) Conditionally log packets destined for foreign IP addresses
(the corresponding check for MultiHost mode is called by
handle_nonlocal())
b.) Conditionally mangle destination IPs for otherwise foreign-
destined packets to implement IP forwarding
5.) Conditionally fix up mangled source ports to support port
forwarding
No return value.
| handle_outgoing | python | mandiant/flare-fakenet-ng | fakenet/diverters/linux.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py | Apache-2.0 |
def check_log_nonlocal(self, crit, pkt):
"""Conditionally log packets having a foreign destination.
Each foreign destination will be logged only once if the Linux
Diverter's internal log_nonlocal_only_once flag is set. Otherwise, any
foreign destination IP address will be logged each time it is observed.
"""
if pkt.dst_ip not in self.ip_addrs[pkt.ipver]:
self.pdebug(DNONLOC, 'Nonlocal %s' % pkt.hdrToStr())
first_sighting = (pkt.dst_ip not in self.nonlocal_ips_already_seen)
if first_sighting:
self.nonlocal_ips_already_seen.append(pkt.dst_ip)
# Log when a new IP is observed OR if we are not restricted to
# logging only the first occurrence of a given nonlocal IP.
if first_sighting or (not self.log_nonlocal_only_once):
self.logger.info(
'Received nonlocal IPv%d datagram destined for %s' %
(pkt.ipver, pkt.dst_ip))
return None | Conditionally log packets having a foreign destination.
Each foreign destination will be logged only once if the Linux
Diverter's internal log_nonlocal_only_once flag is set. Otherwise, any
foreign destination IP address will be logged each time it is observed.
| check_log_nonlocal | python | mandiant/flare-fakenet-ng | fakenet/diverters/linux.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/linux.py | Apache-2.0 |
def redirIcmpIpUnconditionally(self, crit, pkt):
"""Redirect ICMP to loopback or external IP if necessary.
On Windows, we can't conveniently use an iptables REDIRECT rule to get
ICMP packets sent back home for free, so here is some code.
"""
if (pkt.is_icmp and
pkt.icmp_id not in self.blacklist_ids["ICMP"] and
pkt.dst_ip not in [self.loopback_ip, self.external_ip]):
self.logger.info('Modifying ICMP packet (type %d, code %d):' %
(pkt.icmp_type, pkt.icmp_code))
self.logger.info(' from: %s' % (pkt.hdrToStr()))
pkt.dst_ip = self.getNewDestinationIp(pkt.src_ip)
self.logger.info(' to: %s' % (pkt.hdrToStr()))
return pkt | Redirect ICMP to loopback or external IP if necessary.
On Windows, we can't conveniently use an iptables REDIRECT rule to get
ICMP packets sent back home for free, so here is some code.
| redirIcmpIpUnconditionally | python | mandiant/flare-fakenet-ng | fakenet/diverters/windows.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/windows.py | Apache-2.0 |
def fix_gateway(self):
"""Check if there is a gateway configured on any of the Ethernet
interfaces. If that's not the case, then locate configured IP address
and set a gateway automatically. This is necessary for VMWare Host-Only
DHCP server which leaves default gateway empty.
"""
fixed = False
for adapter in self.get_adapters_info():
# Look for a DHCP interface with a set IP address but no gateway
# (Host-Only)
if self.check_ipaddresses_interface(adapter) and adapter.DhcpEnabled:
(ip_address, netmask) = next(self.get_ipaddresses_netmask(adapter))
# set the gateway ip address to be that of the virtual network adapter
# https://docs.vmware.com/en/VMware-Workstation-Pro/17/com.vmware.ws.using.doc/GUID-9831F49E-1A83-4881-BB8A-D4573F2C6D91.html
gw_address = ip_address[:ip_address.rfind('.')] + '.1'
interface_name = self.get_adapter_friendlyname(adapter.Index)
# Don't set gateway on loopback interfaces (e.g. Npcap Loopback
# Adapter)
if not "loopback" in interface_name.lower():
self.adapters_dhcp_restore.append(interface_name)
cmd_set_gw = "netsh interface ip set address name=\"%s\" static %s %s %s" % (
interface_name, ip_address, netmask, gw_address)
# Configure gateway
try:
subprocess.check_call(cmd_set_gw, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.logger.error(" Failed to set gateway %s on interface %s."
% (gw_address, interface_name))
else:
self.logger.info(" Setting gateway %s on interface %s"
% (gw_address, interface_name))
fixed = True
return fixed | Check if there is a gateway configured on any of the Ethernet
interfaces. If that's not the case, then locate the configured IP address
and set a gateway automatically. This is necessary for the VMware Host-Only
DHCP server, which leaves the default gateway empty.
| fix_gateway | python | mandiant/flare-fakenet-ng | fakenet/diverters/winutil.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/winutil.py | Apache-2.0 |
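The gateway address above is derived with plain string manipulation: the host portion of the adapter's IP is replaced with `.1`, the address VMware assigns to the host-only virtual adapter. A quick check of the convention (the address is illustrative):

```python
ip_address = '192.168.56.130'
gw_address = ip_address[:ip_address.rfind('.')] + '.1'
print(gw_address)  # 192.168.56.1 -- the virtual network adapter's address by convention
```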
def fix_dns(self):
"""Check if there is a DNS server on any of the Ethernet interfaces. If
that's not the case, then locate the configured IP address and set a DNS
server automatically.
"""
fixed = False
for adapter in self.get_adapters_info():
if self.check_ipaddresses_interface(adapter):
ip_address = next(self.get_ipaddresses(adapter))
dns_address = ip_address
interface_name = self.get_adapter_friendlyname(adapter.Index)
# Don't set DNS on loopback interfaces (e.g. Npcap Loopback
# Adapter)
if not "loopback" in interface_name.lower():
self.adapters_dns_restore.append(interface_name)
cmd_set_dns = "netsh interface ip set dns name=\"%s\" static %s" % (
interface_name, dns_address)
# Configure DNS server
try:
subprocess.check_output(cmd_set_dns,
shell=True,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.logger.error(" Failed to set DNS %s on interface %s."
% (dns_address, interface_name))
self.logger.error(" netsh failed with error: %s"
% (e.output))
else:
self.logger.info(" Setting DNS %s on interface %s"
% (dns_address, interface_name))
fixed = True
return fixed | Check if there is a DNS server on any of the Ethernet interfaces. If
that's not the case, then locate the configured IP address and set a DNS
server automatically.
| fix_dns | python | mandiant/flare-fakenet-ng | fakenet/diverters/winutil.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/diverters/winutil.py | Apache-2.0 |
def failEarly(self):
"""Raise exceptions upon construction rather than later."""
# Test generating banner
banner_generated = str(self)
# Test generating and getting length of banner
banner_generated_len = len(self)
return banner_generated, banner_generated_len | Raise exceptions upon construction rather than later. | failEarly | python | mandiant/flare-fakenet-ng | fakenet/listeners/BannerFactory.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/BannerFactory.py | Apache-2.0 |
def __len__(self):
"""Needed for pyftpdlib.
If the length changes between the time when the caller obtains the
length and the time when the caller obtains the latest generated
string, then there is not much that could reasonably be done. It would
be possible to cache the formatted banner with a short expiry so that
temporally clustered __len__() and __repr__() call sequences would view
consistent and coherent string contents, however this seems like
overkill since the use case is really just allowing pyftpdlib to
determine which way to send the response (directly versus push() if the
length exceeds a threshold of 75 characters). In this case, if the
banner string length and contents are inconsistent, it appears that the
only effect will be to erroneously send the message differently. Test
code has been left in place for easy repro in case this proves to be an
issue on some future/other platform.
"""
# Test path: simulate length of 75 but actual string of length 76 (part
# 1/2) to test pyftpdlib/handlers.py:1321
if self.test_pyftpdlib_handler_banner_threshold75:
return self.len_75
# Normal path: return the length of the banner generated by self.fmt()
return len(self.fmt()) | Needed for pyftpdlib.
If the length changes between the time when the caller obtains the
length and the time when the caller obtains the latest generated
string, then there is not much that could reasonably be done. It would
be possible to cache the formatted banner with a short expiry so that
temporally clustered __len__() and __repr__() call sequences would view
consistent and coherent string contents, however this seems like
overkill since the use case is really just allowing pyftpdlib to
determine which way to send the response (directly versus push() if the
length exceeds a threshold of 75 characters). In this case, if the
banner string length and contents are inconsistent, it appears that the
only effect will be to erroneously send the message differently. Test
code has been left in place for easy repro in case this proves to be an
issue on some future/other platform.
| __len__ | python | mandiant/flare-fakenet-ng | fakenet/listeners/BannerFactory.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/BannerFactory.py | Apache-2.0 |
def genBanner(self, config, bannerdict, defaultbannerkey='!generic'):
"""Select and initialize a banner.
Supported banner escapes:
!<key> - Use the banner whose key in bannerdict is <key>
!random - Use a random banner from bannerdict
!generic - Every listener supporting banners must have a generic
Banners can include literal '\n' or '\t' tokens (slash followed by the
letter n or t) to indicate that a newline or tab should be inserted.
Banners can include {servername} or {tz} to insert the servername or
time zone (hard-coded to 'UTC' as of this writing).
If the user does not specify a banner, then '!generic' is used by
default, resulting in bannerdict['generic'] being used. If the user
specifies a bang escape e.g. '!iis-6', then the banner keyed by that
name will be used. If the user specifies '!random' then a random banner
will be chosen from bannerdict.
Because some banners include the servername as an insertion string,
this method also retrieves the configuration value for ServerName and
incorporates a couple of similar escape sequences:
!random - Randomized servername with random length between 1-15
!gethostname - Use the real hostname
"""
banner = config.get('banner', defaultbannerkey)
servername = config.get('servername', 'localhost')
if servername.startswith('!'):
servername = servername[1:]
if servername.lower() == 'random':
servername = self.randomizeHostname()
elif servername.lower() == 'gethostname':
servername = socket.gethostname()
else:
raise ValueError('ServerName config invalid escape: !%s' %
(servername))
if banner.startswith('!'):
banner = banner[1:]
if banner.lower() == 'random':
banner = random.choice(list(bannerdict.keys()))
elif banner not in bannerdict:
raise ValueError(
'Banner config escape !%s not a valid banner key' %
(banner))
banner = bannerdict[banner]
insertions = {'servername': servername, 'tz': 'UTC'}
return Banner(banner, insertions) | Select and initialize a banner.
Supported banner escapes:
!<key> - Use the banner whose key in bannerdict is <key>
!random - Use a random banner from bannerdict
!generic - Every listener supporting banners must have a generic
Banners can include literal '\n' or '\t' tokens (slash followed by the
letter n or t) to indicate that a newline or tab should be inserted.
Banners can include {servername} or {tz} to insert the servername or
time zone (hard-coded to 'UTC' as of this writing).
If the user does not specify a banner, then '!generic' is used by
default, resulting in bannerdict['generic'] being used. If the user
specifies a bang escape e.g. '!iis-6', then the banner keyed by that
name will be used. If the user specifies '!random' then a random banner
will be chosen from bannerdict.
Because some banners include the servername as an insertion string,
this method also retrieves the configuration value for ServerName and
incorporates a couple of similar escape sequences:
!random - Randomized servername with random length between 1-15
!gethostname - Use the real hostname
| genBanner | python | mandiant/flare-fakenet-ng | fakenet/listeners/BannerFactory.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/BannerFactory.py | Apache-2.0 |
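A usage sketch of the escape handling described above. The class name, banner strings, and printed result are assumptions for illustration; how the returned Banner renders (via `str()` or another accessor) depends on the Banner class, so the last line is indicative only.

```python
# Assumes running from the flare-fakenet-ng source tree (illustrative usage).
from fakenet.listeners.BannerFactory import BannerFactory

bannerdict = {
    'generic': 'Welcome to {servername} ({tz})',
    'iis-6': 'Microsoft-IIS/6.0 on {servername}',
}
config = {'banner': '!random', 'servername': '!gethostname'}

banner = BannerFactory().genBanner(config, bannerdict)
print(str(banner))  # e.g. 'Microsoft-IIS/6.0 on myhost'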
def log_message(self, log_level, is_process_blacklisted, message, *args):
"""The primary objective of this method is to control the log messages
generated for requests from blacklisted processes.
In a case where the DNS server is the same as the local machine, the DNS
requests from a blacklisted process will reach the DNS listener (which
listens on port 53 locally) nevertheless. As a user may not wish to see
logs from a blacklisted process, messages are logged with level DEBUG.
Executing FakeNet in the verbose mode will print these logs.
"""
if is_process_blacklisted:
self.server.logger.log(logging.DEBUG, message, *args)
else:
self.server.logger.log(log_level, message, *args) | The primary objective of this method is to control the log messages
generated for requests from blacklisted processes.
In a case where the DNS server is the same as the local machine, the DNS
requests from a blacklisted process will reach the DNS listener (which
listens on port 53 locally) nevertheless. As a user may not wish to see
logs from a blacklisted process, messages are logged with level DEBUG.
Executing FakeNet in the verbose mode will print these logs.
| log_message | python | mandiant/flare-fakenet-ng | fakenet/listeners/DNSListener.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/DNSListener.py | Apache-2.0 |
def main():
"""
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.HTTPListener
"""
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '8443', 'usessl': 'Yes', 'webroot': 'fakenet/defaultFiles' }
listener = HTTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config) |
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.HTTPListener
| main | python | mandiant/flare-fakenet-ng | fakenet/listeners/HTTPListener.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/HTTPListener.py | Apache-2.0 |
def safe_join(root, path):
"""
Joins a path to a root path, even if path starts with '/', using os.sep
"""
# prepending a '/' ensures '..' does not traverse past the root
# of the path
if not path.startswith('/'):
path = '/' + path
normpath = os.path.normpath(path)
return root + normpath |
Joins a path to a root path, even if path starts with '/', using os.sep
| safe_join | python | mandiant/flare-fakenet-ng | fakenet/listeners/ListenerBase.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ListenerBase.py | Apache-2.0 |
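To show why prepending '/' matters before `normpath()`, here is the same function exercised with a traversal attempt (paths are illustrative):

```python
import os

def safe_join(root, path):
    if not path.startswith('/'):
        path = '/' + path
    return root + os.path.normpath(path)

print(safe_join('/srv/webroot', 'index.html'))        # /srv/webroot/index.html
print(safe_join('/srv/webroot', '../../etc/passwd'))  # /srv/webroot/etc/passwd -- '..' cannot escape the root
```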
def abs_config_path(path):
"""
Attempts to return the absolute path of a path from a configuration
setting.
First tries to take the abspath() of the parameter to see
if it exists relative to the current working directory. If that does
not exist, attempts to find it relative to the 'fakenet' package
directory. Returns None if neither exists.
"""
# Try absolute path first
abspath = os.path.abspath(path)
if os.path.exists(abspath):
return abspath
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
relpath = os.path.join(os.path.dirname(sys.executable), path)
else:
# Try to locate the location relative to application path
relpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), path)
if os.path.exists(relpath):
return os.path.abspath(relpath)
return None |
Attempts to return the absolute path of a path from a configuration
setting.
First tries to take the abspath() of the parameter to see
if it exists relative to the current working directory. If that does
not exist, attempts to find it relative to the 'fakenet' package
directory. Returns None if neither exists.
| abs_config_path | python | mandiant/flare-fakenet-ng | fakenet/listeners/ListenerBase.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ListenerBase.py | Apache-2.0 |
def main():
"""
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.TFTPListener
"""
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '69', 'protocol': 'udp', 'tftproot': 'defaultFiles'}
listener = TFTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
#test(config)
listener.stop() |
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.TFTPListener
| main | python | mandiant/flare-fakenet-ng | fakenet/listeners/TFTPListener.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/TFTPListener.py | Apache-2.0 |
def create_cert(self, cn, ca_cert=None, ca_key=None, cert_dir=None):
"""
Create a cert given the common name, a signing CA, CA private key and
the directory output.
return: tuple(None, None) on error
tuple(cert_file_path, key_file_path) on success
"""
f_selfsign = ca_cert is None or ca_key is None
if not cert_dir:
cert_dir = self.abs_config_path(self.config.get('cert_dir'))
else:
cert_dir = os.path.abspath(cert_dir)
cert_file = os.path.join(cert_dir, "%s.crt" % (cn))
key_file = os.path.join(cert_dir, "%s.key" % (cn))
if os.path.exists(cert_file) and os.path.exists(key_file):
return cert_file, key_file
if ca_cert is not None and ca_key is not None:
ca_cert_data = self._load_cert(ca_cert)
if ca_cert_data is None:
return None, None
ca_key_data = self._load_private_key(ca_key)
if ca_key_data is None:
return None, None
# generate crypto keys:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
# Create a cert
cert = crypto.X509()
# Setting certificate version to 3. This is required to use certificate
# extensions which have proven necessary when working with browsers
cert.set_version(2)
cert.get_subject().C = "US"
cert.get_subject().CN = cn
cert.set_serial_number(random.randint(1, 0x31337))
now = time.time() / 1000000
na = int(now + self.NOT_AFTER_DELTA_SECONDS)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(na)
cert.set_pubkey(key)
if f_selfsign:
extensions = [
crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE'),
]
cert.set_issuer(cert.get_subject())
cert.add_extensions(extensions)
cert.sign(key, "sha256")
else:
alt_name = b'DNS:' + cn.encode()
extensions = [
crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE'),
crypto.X509Extension(b'subjectAltName', False, alt_name)
]
cert.set_issuer(ca_cert_data.get_subject())
cert.add_extensions(extensions)
cert.sign(ca_key_data, "sha256")
try:
with open(cert_file, "wb") as cert_file_input:
cert_file_input.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, cert)
)
with open(key_file, "wb") as key_file_output:
key_file_output.write(crypto.dump_privatekey(
crypto.FILETYPE_PEM, key)
)
except IOError:
traceback.print_exc()
return None, None
return cert_file, key_file |
Create a cert given the common name, a signing CA, CA private key and
the directory output.
return: tuple(None, None) on error
tuple(cert_file_path, key_file_path) on success
| create_cert | python | mandiant/flare-fakenet-ng | fakenet/listeners/ssl_utils/__init__.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ssl_utils/__init__.py | Apache-2.0 |
def abs_config_path(self, path):
"""
Attempts to return the absolute path of a path from a configuration
setting.
"""
# Try absolute path first
abspath = os.path.abspath(path)
if os.path.exists(abspath):
return abspath
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
abspath = os.path.join(os.getcwd(), path)
else:
abspath = os.path.join(os.fspath(Path(__file__).parents[2]), path)
return abspath |
Attempts to return the absolute path of a path from a configuration
setting.
| abs_config_path | python | mandiant/flare-fakenet-ng | fakenet/listeners/ssl_utils/__init__.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/fakenet/listeners/ssl_utils/__init__.py | Apache-2.0 |
def HandleRequest(req, method, post_data=None):
"""Sample dynamic HTTP response handler.
Parameters
----------
req : BaseHTTPServer.BaseHTTPRequestHandler
The BaseHTTPRequestHandler that received the request
method: str
The HTTP method, either 'HEAD', 'GET', 'POST' as of this writing
post_data: str
The HTTP post data received by calling `rfile.read()` against the
BaseHTTPRequestHandler that received the request.
"""
response = b'Ahoy\r\n'
if method == 'GET':
req.send_response(200)
req.send_header('Content-Length', len(response))
req.end_headers()
req.wfile.write(response)
elif method == 'POST':
req.send_response(200)
req.send_header('Content-Length', len(response))
req.end_headers()
req.wfile.write(response)
elif method == 'HEAD':
req.send_response(200)
req.end_headers() | Sample dynamic HTTP response handler.
Parameters
----------
req : BaseHTTPServer.BaseHTTPRequestHandler
The BaseHTTPRequestHandler that received the request
method: str
The HTTP method, either 'HEAD', 'GET', 'POST' as of this writing
post_data: str
The HTTP post data received by calling `rfile.read()` against the
BaseHTTPRequestHandler that received the request.
| HandleRequest | python | mandiant/flare-fakenet-ng | test/CustomProviderExample.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/CustomProviderExample.py | Apache-2.0 |
def HandleTcp(sock):
"""Handle a TCP buffer.
Parameters
----------
sock : socket
The connected socket with which to recv and send data
"""
while True:
try:
data = None
data = sock.recv(1024)
except socket.timeout:
pass
if not data:
break
resp = b''.join([chr(c+1).encode() for c in data])
sock.sendall(resp) | Handle a TCP buffer.
Parameters
----------
sock : socket
The connected socket with which to recv and send data
| HandleTcp | python | mandiant/flare-fakenet-ng | test/CustomProviderExample.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/CustomProviderExample.py | Apache-2.0 |
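The handler above is a simple transform: every received byte is echoed back incremented by one. The transform itself is easy to verify in isolation:

```python
data = b'abc'
resp = b''.join([chr(c + 1).encode() for c in data])
print(resp)  # b'bcd'
```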
def HandleUdp(sock, data, addr):
"""Handle a UDP buffer.
Parameters
----------
sock : socket
The connected socket with which to recv and send data
data : str
The data received
addr : tuple
The host and port of the remote peer
"""
if data:
resp = b''.join([chr(c+1).encode() for c in data])
sock.sendto(resp, addr) | Handle a UDP buffer.
Parameters
----------
sock : socket
The connected socket with which to recv and send data
data : str
The data received
addr : tuple
The host and port of the remote peer
| HandleUdp | python | mandiant/flare-fakenet-ng | test/CustomProviderExample.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/CustomProviderExample.py | Apache-2.0 |
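The UDP handler applies the same byte-increment transform as the TCP one, but replies with `sendto()` to the peer address it received the datagram from, since UDP sockets are connectionless.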
def get_ips(ipvers):
"""Return IP addresses bound to local interfaces including loopbacks.
Parameters
----------
ipvers : list
IP versions desired (4, 6, or both); ensures the netifaces semantics
(e.g. netifaces.AF_INET) are localized to this function.
"""
specs = []
results = []
for ver in ipvers:
if ver == 4:
specs.append(netifaces.AF_INET)
elif ver == 6:
specs.append(netifaces.AF_INET6)
else:
raise ValueError('get_ips only supports IP versions 4 and 6')
for iface in netifaces.interfaces():
for spec in specs:
addrs = netifaces.ifaddresses(iface)
# If an interface only has an IPv4 or IPv6 address, then 6 or 4
# respectively will be absent from the keys in the interface
# addresses dictionary.
if spec in addrs:
for link in addrs[spec]:
if 'addr' in link:
results.append(link['addr'])
return results | Return IP addresses bound to local interfaces including loopbacks.
Parameters
----------
ipvers : list
IP versions desired (4, 6, or both); ensures the netifaces semantics
(e.g. netifaces.AF_INET) are localized to this function.
| get_ips | python | mandiant/flare-fakenet-ng | test/test.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py | Apache-2.0 |
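Typical usage of the helper above (requires the third-party `netifaces` package; the addresses shown are illustrative and depend on the host's interfaces):

```python
print(get_ips([4]))     # e.g. ['127.0.0.1', '192.168.1.10']
print(get_ips([4, 6]))  # IPv4 and IPv6 addresses across all interfaces, loopbacks included
```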
def _irc_evt_handler(self, srv, evt):
"""Check for each case and set the corresponding success flag."""
if evt.type == 'join':
if evt.target.startswith(self.join_chan):
self.join_ok = True
elif evt.type == 'welcome':
if evt.arguments[0].startswith('Welcome to IRC'):
self.welcome_ok = True
elif evt.type == 'privmsg':
if (evt.arguments[0].startswith(self.safehouse) and
evt.source.startswith(self.clouseau)):
self.privmsg_ok = True
elif evt.type == 'pubmsg':
if (evt.arguments[0].startswith(self.black_market) and
evt.target == self.pub_chan):
self.pubmsg_ok = True | Check for each case and set the corresponding success flag. | _irc_evt_handler | python | mandiant/flare-fakenet-ng | test/test.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py | Apache-2.0 |
def _irc_script(self, srv):
"""Callback manages individual test cases for IRC."""
# Clear success flags
self.welcome_ok = False
self.join_ok = False
self.privmsg_ok = False
self.pubmsg_ok = False
# This handler should set the success flags in success cases
srv.add_global_handler('join', self._irc_evt_handler)
srv.add_global_handler('welcome', self._irc_evt_handler)
srv.add_global_handler('privmsg', self._irc_evt_handler)
srv.add_global_handler('pubmsg', self._irc_evt_handler)
# Issue all commands, indirectly invoking the event handler for each
# flag
srv.join(self.join_chan)
srv.process_data()
srv.privmsg(self.pub_chan, self.black_market)
srv.process_data()
srv.privmsg(self.clouseau, self.safehouse)
srv.process_data()
srv.quit()
srv.process_data()
if not self.welcome_ok:
raise FakeNetTestException('Welcome test failed')
if not self.join_ok:
raise FakeNetTestException('Join test failed')
if not self.privmsg_ok:
raise FakeNetTestException('privmsg test failed')
if not self.pubmsg_ok:
raise FakeNetTestException('pubmsg test failed')
return all([
self.welcome_ok,
self.join_ok,
self.privmsg_ok,
self.pubmsg_ok
]) | Callback manages individual test cases for IRC. | _irc_script | python | mandiant/flare-fakenet-ng | test/test.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py | Apache-2.0 |
def _run_irc_script(self, nm, callback):
"""Connect to server and give control to callback."""
r = irc.client.Reactor()
srv = r.server()
srv.connect(self.hostname, self.port, self.nick)
retval = callback(srv)
srv.close()
return retval | Connect to server and give control to callback. | _run_irc_script | python | mandiant/flare-fakenet-ng | test/test.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py | Apache-2.0 |
def _filterMatchingTests(self, tests, matchspec):
"""Remove tests that match negative specifications (regexes preceded by
a minus sign) or do not match positive specifications (regexes not
preceded by a minus sign).
Modifies the contents of the tests dictionary.
"""
negatives = []
positives = []
if len(matchspec):
# If the user specifies a minus sign before a regular expression,
# match negatively (exclude any matching tests)
for spec in matchspec:
if spec.startswith('-'):
negatives.append(spec[1:])
else:
positives.append(spec)
# Iterating over tests first, match specifications second to
# preserve the order of the selected tests. Less efficient to
# compile every regex several times, but less confusing.
for testname, test in list(tests.items()):
# First determine if it is to be excluded, in which case,
# remove it and do not evaluate further match specifications.
exclude = False
for spec in negatives:
if bool(re.search(spec, testname)):
exclude = True
if exclude:
tests.pop(testname)
continue
# If the user ONLY specified negative match specifications,
# then admit all tests
if not len(positives):
continue
# Otherwise, only admit if it matches a positive spec
include = False
for spec in positives:
if bool(re.search(spec, testname)):
include = True
break
if not include:
tests.pop(testname)
return | Remove tests that match negative specifications (regexes preceded by
a minus sign) or do not match positive specifications (regexes not
preceded by a minus sign).
Modifies the contents of the tests dictionary.
| _filterMatchingTests | python | mandiant/flare-fakenet-ng | test/test.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py | Apache-2.0 |
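A standalone sketch of the same matchspec semantics, useful for seeing how positive and negative regexes combine (the test names and function name are made up for illustration):

```python
import re

def filter_tests(tests, matchspec):
    """Keep tests matching a positive spec and drop those matching a '-' spec (sketch)."""
    negatives = [s[1:] for s in matchspec if s.startswith('-')]
    positives = [s for s in matchspec if not s.startswith('-')]
    for name in list(tests):
        if any(re.search(s, name) for s in negatives):
            tests.pop(name)                 # excluded by a negative spec
        elif positives and not any(re.search(s, name) for s in positives):
            tests.pop(name)                 # no positive spec admitted it

tests = {'RawTCP': 1, 'DNS listener': 2, 'HTTP listener': 3}
filter_tests(tests, ['listener', '-HTTP'])
print(tests)  # {'DNS listener': 2}
```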
def _test_ftp(self, hostname, port=0):
"""Note that the FakeNet-NG Proxy listener won't know what to do with
this client if you point it at some random port, because the client
listens silently for the server 220 welcome message which doesn't give
the Proxy listener anything to work with to decide where to forward it.
"""
fullbuf = ''
m = hashlib.md5()
def update_hash(buf):
m.update(buf)
f = ftplib.FTP()
f.connect(hostname, port)
f.login()
f.set_pasv(False)
f.retrbinary('RETR FakeNet.gif', update_hash)
f.quit()
digest = m.digest()
expected = binascii.unhexlify('a6b78c4791dc8110dec6c55f8a756395')
return (digest == expected) | Note that the FakeNet-NG Proxy listener won't know what to do with
this client if you point it at some random port, because the client
listens silently for the server 220 welcome message which doesn't give
the Proxy listener anything to work with to decide where to forward it.
| _test_ftp | python | mandiant/flare-fakenet-ng | test/test.py | https://github.com/mandiant/flare-fakenet-ng/blob/master/test/test.py | Apache-2.0 |
def preprocess_input(audio_path, dim_ordering='default'):
'''Reads an audio file and outputs a Mel-spectrogram.
'''
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
if librosa_exists():
import librosa
else:
raise RuntimeError('Librosa is required to process audio files.\n' +
'Install it via `pip install librosa` \nor visit ' +
'http://librosa.github.io/librosa/ for details.')
# mel-spectrogram parameters
SR = 12000
N_FFT = 512
N_MELS = 96
HOP_LEN = 256
DURA = 29.12
src, sr = librosa.load(audio_path, sr=SR)
n_sample = src.shape[0]
n_sample_wanted = int(DURA * SR)
# trim the signal at the center
if n_sample < n_sample_wanted: # if too short
src = np.hstack((src, np.zeros((int(DURA * SR) - n_sample,))))
elif n_sample > n_sample_wanted: # if too long
src = src[(n_sample - n_sample_wanted) / 2:
(n_sample + n_sample_wanted) / 2]
logam = librosa.logamplitude
melgram = librosa.feature.melspectrogram
x = logam(melgram(y=src, sr=SR, hop_length=HOP_LEN,
n_fft=N_FFT, n_mels=N_MELS) ** 2,
ref_power=1.0)
if dim_ordering == 'th':
x = np.expand_dims(x, axis=0)
elif dim_ordering == 'tf':
x = np.expand_dims(x, axis=3)
return x | Reads an audio file and outputs a Mel-spectrogram.
| preprocess_input | python | fchollet/deep-learning-models | audio_conv_utils.py | https://github.com/fchollet/deep-learning-models/blob/master/audio_conv_utils.py | MIT |
def decode_predictions(preds, top_n=5):
'''Decode the output of a music tagger model.
# Arguments
preds: 2-dimensional numpy array
top_n: integer in [0, 50], number of items to show
'''
assert len(preds.shape) == 2 and preds.shape[1] == 50
results = []
for pred in preds:
result = zip(TAGS, pred)
result = sorted(result, key=lambda x: x[1], reverse=True)
results.append(result[:top_n])
return results | Decode the output of a music tagger model.
# Arguments
preds: 2-dimensional numpy array
top_n: integer in [0, 50], number of items to show
| decode_predictions | python | fchollet/deep-learning-models | audio_conv_utils.py | https://github.com/fchollet/deep-learning-models/blob/master/audio_conv_utils.py | MIT |
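A quick usage sketch; in real use `preds` would come from a MusicTaggerCRNN-style model, so the random values below are placeholders, and the module-level `TAGS` list is assumed to be in scope:

```python
import numpy as np

preds = np.random.rand(1, 50)  # placeholder for model.predict() output
for tag, score in decode_predictions(preds, top_n=3)[0]:
    print(tag, round(float(score), 3))
```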
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
This function applies the "Inception" preprocessing which converts
the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing
function is different from `imagenet_utils.preprocess_input()`.
# Arguments
x: a 4D numpy array consisting of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
x /= 255.
x -= 0.5
x *= 2.
return x | Preprocesses a numpy array encoding a batch of images.
This function applies the "Inception" preprocessing which converts
the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing
function is different from `imagenet_utils.preprocess_input()`.
# Arguments
x: a 4D numpy array consisting of RGB values within [0, 255].
# Returns
Preprocessed array.
| preprocess_input | python | fchollet/deep-learning-models | inception_resnet_v2.py | https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py | MIT |
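The [0, 255] to [-1, 1] mapping is easy to verify numerically (note that the function modifies its argument in place, hence the copy):

```python
import numpy as np

x = np.array([[[[0., 127.5, 255.]]]])  # shape (1, 1, 1, 3), RGB values in [0, 255]
print(preprocess_input(x.copy()))      # [[[[-1.  0.  1.]]]]
```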
def conv2d_bn(x,
filters,
kernel_size,
strides=1,
padding='same',
activation='relu',
use_bias=False,
name=None):
"""Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
x = Conv2D(filters,
kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name)(x)
if not use_bias:
bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
bn_name = None if name is None else name + '_bn'
x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
if activation is not None:
ac_name = None if name is None else name + '_ac'
x = Activation(activation, name=ac_name)(x)
return x | Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
| conv2d_bn | python | fchollet/deep-learning-models | inception_resnet_v2.py | https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py | MIT |
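A small usage sketch of the helper above, building a single conv + BN + ReLU stage into a Keras model; it assumes the same Keras 2 / TensorFlow channels_last setup this file targets, and the input size is illustrative:

```python
from keras.layers import Input
from keras.models import Model

inp = Input(shape=(35, 35, 192))
out = conv2d_bn(inp, 32, 3, strides=2, padding='valid', name='example')
Model(inp, out).summary()  # output shape: (None, 17, 17, 32)
```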
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
"""Adds a Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
# Arguments
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of
passing `x` through an inception module) before adding them
to the shortcut branch. Let `r` be the output from the residual branch,
the output of this block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines
the network structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
are repeated many times in this network. We use `block_idx` to identify
each of the repetitions. For example, the first Inception-ResNet-A block
will have `block_type='block35', block_idx=0`, and the layer names will have
a common prefix `'block35_0'`.
activation: activation function to use at the end of the block
(see [activations](keras./activations.md)).
When `activation=None`, no activation is applied
(i.e., "linear" activation: `a(x) = x`).
# Returns
Output tensor for the block.
# Raises
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
"""
if block_type == 'block35':
branch_0 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(branch_1, 32, 3)
branch_2 = conv2d_bn(x, 32, 1)
branch_2 = conv2d_bn(branch_2, 48, 3)
branch_2 = conv2d_bn(branch_2, 64, 3)
branches = [branch_0, branch_1, branch_2]
elif block_type == 'block17':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 128, 1)
branch_1 = conv2d_bn(branch_1, 160, [1, 7])
branch_1 = conv2d_bn(branch_1, 192, [7, 1])
branches = [branch_0, branch_1]
elif block_type == 'block8':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(branch_1, 224, [1, 3])
branch_1 = conv2d_bn(branch_1, 256, [3, 1])
branches = [branch_0, branch_1]
else:
raise ValueError('Unknown Inception-ResNet block type. '
'Expects "block35", "block17" or "block8", '
'but got: ' + str(block_type))
block_name = block_type + '_' + str(block_idx)
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)
up = conv2d_bn(mixed,
K.int_shape(x)[channel_axis],
1,
activation=None,
use_bias=True,
name=block_name + '_conv')
x = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=K.int_shape(x)[1:],
arguments={'scale': scale},
name=block_name)([x, up])
if activation is not None:
x = Activation(activation, name=block_name + '_ac')(x)
return x | Adds a Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
# Arguments
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of
passing `x` through an inception module) before adding them
to the shortcut branch. Let `r` be the output from the residual branch,
the output of this block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines
the network structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
are repeated many times in this network. We use `block_idx` to identify
each of the repetitions. For example, the first Inception-ResNet-A block
will have `block_type='block35', block_idx=0`, and the layer names will have
a common prefix `'block35_0'`.
activation: activation function to use at the end of the block
(see [activations](keras./activations.md)).
When `activation=None`, no activation is applied
(i.e., "linear" activation: `a(x) = x`).
# Returns
Output tensor for the block.
# Raises
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
| inception_resnet_block | python | fchollet/deep-learning-models | inception_resnet_v2.py | https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py | MIT |
def InceptionResNetV2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Inception-ResNet v2 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that when using TensorFlow, for best performance you should
set `"image_data_format": "channels_last"` in your Keras config
at `~/.keras/keras.json`.
The model and the weights are compatible with both TensorFlow and Theano
backends (but not CNTK). The data format convention used by the model is
the one specified in your Keras config file.
Note that the default input image size for this model is 299x299, instead
of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
function is different (i.e., do not use `imagenet_utils.preprocess_input()`
with this model. Use `preprocess_input()` defined in this module instead).
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or `'imagenet'` (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)` (with `'channels_last'` data format)
or `(3, 299, 299)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional layer.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
# Returns
A Keras `Model` instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with an unsupported backend.
"""
if K.backend() in {'cntk'}:
raise RuntimeError(K.backend() + ' backend is currently unsupported for this model.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=139,
data_format=K.image_data_format(),
require_flatten=False,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Stem block: 35 x 35 x 192
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
x = conv2d_bn(x, 32, 3, padding='valid')
x = conv2d_bn(x, 64, 3)
x = MaxPooling2D(3, strides=2)(x)
x = conv2d_bn(x, 80, 1, padding='valid')
x = conv2d_bn(x, 192, 3, padding='valid')
x = MaxPooling2D(3, strides=2)(x)
# Mixed 5b (Inception-A block): 35 x 35 x 320
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
# 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
for block_idx in range(1, 11):
x = inception_resnet_block(x,
scale=0.17,
block_type='block35',
block_idx=block_idx)
# Mixed 6a (Reduction-A block): 17 x 17 x 1088
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
# 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
for block_idx in range(1, 21):
x = inception_resnet_block(x,
scale=0.1,
block_type='block17',
block_idx=block_idx)
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
# 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
for block_idx in range(1, 10):
x = inception_resnet_block(x,
scale=0.2,
block_type='block8',
block_idx=block_idx)
x = inception_resnet_block(x,
scale=1.,
activation=None,
block_type='block8',
block_idx=10)
# Final convolution block: 8 x 8 x 1536
x = conv2d_bn(x, 1536, 1, name='conv_7b')
if include_top:
# Classification block
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model
model = Model(inputs, x, name='inception_resnet_v2')
# Load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_filename = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = get_file(weights_filename,
BASE_WEIGHT_URL + weights_filename,
cache_subdir='models',
md5_hash='e693bd0210a403b3192acc6073ad2e96')
else:
weights_filename = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file(weights_filename,
BASE_WEIGHT_URL + weights_filename,
cache_subdir='models',
md5_hash='d19885ff4a710c122648d3b5c3b684e4')
model.load_weights(weights_path)
return model | Instantiates the Inception-ResNet v2 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that when using TensorFlow, for best performance you should
set `"image_data_format": "channels_last"` in your Keras config
at `~/.keras/keras.json`.
The model and the weights are compatible with both TensorFlow and Theano
backends (but not CNTK). The data format convention used by the model is
the one specified in your Keras config file.
Note that the default input image size for this model is 299x299, instead
of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
function is different (i.e., do not use `imagenet_utils.preprocess_input()`
with this model. Use `preprocess_input()` defined in this module instead).
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or `'imagenet'` (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)` (with `'channels_last'` data format)
or `(3, 299, 299)` (with `'channels_first'` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional layer.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
# Returns
A Keras `Model` instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with an unsupported backend.
| InceptionResNetV2 | python | fchollet/deep-learning-models | inception_resnet_v2.py | https://github.com/fchollet/deep-learning-models/blob/master/inception_resnet_v2.py | MIT |
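A minimal usage sketch for the record above (an illustration only, not part of the repo): it assumes a TensorFlow backend, that inception_resnet_v2.py from this repo is importable, and that the module-level preprocess_input mentioned in the docstring is used; a random array stands in for a real image batch.
import numpy as np
from inception_resnet_v2 import InceptionResNetV2, preprocess_input

# Feature extractor: no classification head, global average pooling on top.
model = InceptionResNetV2(weights='imagenet', include_top=False, pooling='avg')
x = preprocess_input(255. * np.random.rand(1, 299, 299, 3))  # stand-in for a real image batch
features = model.predict(x)
print(features.shape)  # (1, 1536)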
def conv2d_bn(x,
filters,
num_row,
num_col,
padding='same',
strides=(1, 1),
name=None):
"""Utility function to apply conv + BN.
Arguments:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if K.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = Conv2D(
filters, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = Activation('relu', name=name)(x)
return x | Utility function to apply conv + BN.
Arguments:
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
| conv2d_bn | python | fchollet/deep-learning-models | inception_v3.py | https://github.com/fchollet/deep-learning-models/blob/master/inception_v3.py | MIT |
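A minimal sketch of the helper above, assuming inception_v3.py from this repo is importable; the layer names in the comment follow the `name` argument described in the docstring.
from keras.layers import Input
from keras.models import Model
from inception_v3 import conv2d_bn

inp = Input(shape=(64, 64, 3))
# 32 filters, 3x3 kernel, stride 2, no padding -> 31 x 31 x 32 output
out = conv2d_bn(inp, 32, 3, 3, strides=(2, 2), padding='valid', name='demo')
Model(inp, out).summary()  # layers: demo_conv -> demo_bn -> demo (ReLU)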
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=139,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='inception_v3')
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model | Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
| InceptionV3 | python | fchollet/deep-learning-models | inception_v3.py | https://github.com/fchollet/deep-learning-models/blob/master/inception_v3.py | MIT |
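A minimal classification sketch (an illustration only): it assumes inception_v3.py and imagenet_utils.py from this repo are importable, that inception_v3.py also exposes the Inception-style preprocess_input, and that elephant.jpg is a hypothetical local test image.
import numpy as np
from keras.preprocessing import image
from inception_v3 import InceptionV3, preprocess_input
from imagenet_utils import decode_predictions

model = InceptionV3(weights='imagenet')
img = image.load_img('elephant.jpg', target_size=(299, 299))  # hypothetical test image
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print('Predicted:', decode_predictions(model.predict(x)))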
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only Tensorflow backend is currently supported, '
'as other backends do not support '
'depthwise convolution.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape.
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=K.image_data_format(),
include_top=include_top or weights)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
raise ValueError('If imagenet weights are being loaded, '
'input must have a static square shape (one of '
'(128,128), (160,160), (192,192), or (224, 224)).'
' Input shape provided = %s' % (input_shape,))
if K.image_data_format() != 'channels_last':
warnings.warn('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1),
padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
            raise ValueError('Weights for "channels_first" format '
'are not available.')
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(model_name,
weigh_path,
cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH + model_name
weights_path = get_file(model_name,
weigh_path,
cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model | Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
| MobileNet | python | fchollet/deep-learning-models | mobilenet.py | https://github.com/fchollet/deep-learning-models/blob/master/mobilenet.py | MIT |
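A minimal sketch of the save/reload point made in the docstring above, assuming a TensorFlow backend and that mobilenet.py from this repo is importable; the .h5 path is arbitrary.
from keras.models import load_model
import mobilenet

model = mobilenet.MobileNet(alpha=0.5, weights='imagenet')  # width multiplier 0.5, 224x224 input
model.save('mobilenet_0_5_224.h5')

# Reloading needs the custom objects named in the docstring.
reloaded = load_model('mobilenet_0_5_224.h5',
                      custom_objects={'relu6': mobilenet.relu6,
                                      'DepthwiseConv2D': mobilenet.DepthwiseConv2D})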
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
# Arguments
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = Conv2D(filters, kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(inputs)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return Activation(relu6, name='conv1_relu')(x) | Adds an initial convolution layer (with batch normalization and relu6).
# Arguments
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
| _conv_block | python | fchollet/deep-learning-models | mobilenet.py | https://github.com/fchollet/deep-learning-models/blob/master/mobilenet.py | MIT |
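A minimal sketch of the width multiplier described above, assuming mobilenet.py from this repo is importable and a TensorFlow backend.
from keras.layers import Input
from keras.models import Model
from mobilenet import _conv_block

inp = Input(shape=(224, 224, 3))
out = _conv_block(inp, 32, alpha=0.25, strides=(2, 2))  # int(32 * 0.25) = 8 filters
Model(inp, out).summary()                               # output: 112 x 112 x 8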
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
        block_id: Integer, a unique identifier for the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
x = DepthwiseConv2D((3, 3),
padding='same',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(inputs)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x) | Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
        block_id: Integer, a unique identifier for the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
| _depthwise_conv_block | python | fchollet/deep-learning-models | mobilenet.py | https://github.com/fchollet/deep-learning-models/blob/master/mobilenet.py | MIT |
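A minimal sketch stacking the block above after the stem, assuming mobilenet.py from this repo is importable and a TensorFlow backend (required for the depthwise convolution).
from keras.layers import Input
from keras.models import Model
from mobilenet import _conv_block, _depthwise_conv_block

inp = Input(shape=(128, 128, 3))
x = _conv_block(inp, 32, alpha=1.0, strides=(2, 2))                       # 64 x 64 x 32
x = _depthwise_conv_block(x, 64, alpha=1.0, block_id=1)                   # 64 x 64 x 64
x = _depthwise_conv_block(x, 128, alpha=1.0, strides=(2, 2), block_id=2)  # 32 x 32 x 128
Model(inp, x).summary()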
def MusicTaggerCRNN(weights='msd', input_tensor=None,
include_top=True):
'''Instantiate the MusicTaggerCRNN architecture,
optionally loading weights pre-trained
on Million Song Dataset. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
For preparing mel-spectrogram input, see
`audio_conv_utils.py` in [applications](https://github.com/fchollet/keras/tree/master/keras/applications).
You will need to install [Librosa](http://librosa.github.io/librosa/)
to use it.
# Arguments
weights: one of `None` (random initialization)
            or "msd" (pre-training on the Million Song Dataset).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
include_top: whether to include the 1 fully-connected
layer (output layer) at the top of the network.
If False, the network outputs 32-dim features.
# Returns
A Keras model instance.
'''
if weights not in {'msd', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `msd` '
'(pre-training on Million Song Dataset).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
input_shape = (1, 96, 1366)
else:
input_shape = (96, 1366, 1)
if input_tensor is None:
melgram_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
melgram_input = Input(tensor=input_tensor, shape=input_shape)
else:
melgram_input = input_tensor
# Determine input axis
if K.image_dim_ordering() == 'th':
channel_axis = 1
freq_axis = 2
time_axis = 3
else:
channel_axis = 3
freq_axis = 1
time_axis = 2
# Input block
x = ZeroPadding2D(padding=(0, 37))(melgram_input)
x = BatchNormalization(axis=time_axis, name='bn_0_freq')(x)
# Conv block 1
x = Convolution2D(64, 3, 3, border_mode='same', name='conv1')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn1')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
# Conv block 2
x = Convolution2D(128, 3, 3, border_mode='same', name='conv2')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
# Conv block 3
x = Convolution2D(128, 3, 3, border_mode='same', name='conv3')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
# Conv block 4
x = Convolution2D(128, 3, 3, border_mode='same', name='conv4')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn4')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
# reshaping
if K.image_dim_ordering() == 'th':
x = Permute((3, 1, 2))(x)
x = Reshape((15, 128))(x)
# GRU block 1, 2, output
x = GRU(32, return_sequences=True, name='gru1')(x)
x = GRU(32, return_sequences=False, name='gru2')(x)
if include_top:
x = Dense(50, activation='sigmoid', name='output')(x)
# Create model
model = Model(melgram_input, x)
if weights is None:
return model
else:
# Load weights
if K.image_dim_ordering() == 'tf':
weights_path = get_file('music_tagger_crnn_weights_tf_kernels_tf_dim_ordering.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('music_tagger_crnn_weights_tf_kernels_th_dim_ordering.h5',
TH_WEIGHTS_PATH,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model | Instantiate the MusicTaggerCRNN architecture,
optionally loading weights pre-trained
on Million Song Dataset. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
For preparing mel-spectrogram input, see
`audio_conv_utils.py` in [applications](https://github.com/fchollet/keras/tree/master/keras/applications).
You will need to install [Librosa](http://librosa.github.io/librosa/)
to use it.
# Arguments
weights: one of `None` (random initialization)
            or "msd" (pre-training on the Million Song Dataset).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
include_top: whether to include the 1 fully-connected
layer (output layer) at the top of the network.
If False, the network outputs 32-dim features.
# Returns
A Keras model instance.
| MusicTaggerCRNN | python | fchollet/deep-learning-models | music_tagger_crnn.py | https://github.com/fchollet/deep-learning-models/blob/master/music_tagger_crnn.py | MIT |
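A minimal feature-extraction sketch, assuming the older Keras 1-style API this module uses (with 'tf' dim ordering), that music_tagger_crnn.py from this repo is importable, and that a real mel-spectrogram would be prepared with the audio_conv_utils.py helpers mentioned in the docstring; a zero array stands in for it here.
import numpy as np
from music_tagger_crnn import MusicTaggerCRNN

model = MusicTaggerCRNN(weights='msd', include_top=False)  # 32-dim feature output
melgram = np.zeros((1, 96, 1366, 1))                       # placeholder mel-spectrogram batch
features = model.predict(melgram)
print(features.shape)                                      # (1, 32)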
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x | The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
| identity_block | python | fchollet/deep-learning-models | resnet50.py | https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py | MIT |
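A minimal sketch of the block above, assuming resnet50.py from this repo is importable; because the shortcut is the raw input, the input channel count must equal filters[2].
from keras.layers import Input
from keras.models import Model
from resnet50 import identity_block

inp = Input(shape=(56, 56, 256))  # channels must match filters[2] for the add
out = identity_block(inp, 3, [64, 64, 256], stage=2, block='demo')
Model(inp, out).summary()          # output keeps the 56 x 56 x 256 shape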
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
    Note that from stage 3, the first conv layer at the main path uses strides=(2, 2),
    and the shortcut should have strides=(2, 2) as well.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x | conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
    Note that from stage 3, the first conv layer at the main path uses strides=(2, 2),
    and the shortcut should have strides=(2, 2) as well.
| conv_block | python | fchollet/deep-learning-models | resnet50.py | https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py | MIT |
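A minimal sketch of the block above, assuming resnet50.py from this repo is importable; with the default strides=(2, 2) both the main path and the projection shortcut downsample.
from keras.layers import Input
from keras.models import Model
from resnet50 import conv_block

inp = Input(shape=(56, 56, 256))
out = conv_block(inp, 3, [128, 128, 512], stage=3, block='demo')  # default strides=(2, 2)
Model(inp, out).summary()                                         # output: 28 x 28 x 512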
def ResNet50(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 197.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=197,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='resnet50')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='avg_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1000')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model | Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 197.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
| ResNet50 | python | fchollet/deep-learning-models | resnet50.py | https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py | MIT |
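A minimal classification sketch in the style of this repo's README, assuming resnet50.py and imagenet_utils.py are importable and that elephant.jpg (a hypothetical file name) is a local test image.
import numpy as np
from keras.preprocessing import image
from resnet50 import ResNet50
from imagenet_utils import preprocess_input, decode_predictions

model = ResNet50(weights='imagenet')
img = image.load_img('elephant.jpg', target_size=(224, 224))  # hypothetical test image
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print('Predicted:', decode_predictions(model.predict(x)))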
def VGG16(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model | Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
| VGG16 | python | fchollet/deep-learning-models | vgg16.py | https://github.com/fchollet/deep-learning-models/blob/master/vgg16.py | MIT |
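A minimal feature-extraction sketch, assuming vgg16.py and imagenet_utils.py from this repo are importable and that cat.jpg (a hypothetical file name) is a local test image.
import numpy as np
from keras.preprocessing import image
from vgg16 import VGG16
from imagenet_utils import preprocess_input

model = VGG16(weights='imagenet', include_top=False, pooling='avg')
img = image.load_img('cat.jpg', target_size=(224, 224))  # hypothetical test image
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = model.predict(x)
print(features.shape)  # (1, 512)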
def VGG19(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG19 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg19')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model | Instantiates the VGG19 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
| VGG19 | python | fchollet/deep-learning-models | vgg19.py | https://github.com/fchollet/deep-learning-models/blob/master/vgg19.py | MIT |
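
Feature-extraction sketch for the no-top variant described above (assumed usage, not from the repository): with include_top=False and pooling='avg', the model returns one 512-dimensional descriptor per image. The (200, 200, 3) shape is just an example of a valid non-default input size.

import numpy as np
from vgg19 import VGG19

extractor = VGG19(include_top=False, weights='imagenet',
                  input_shape=(200, 200, 3), pooling='avg')

batch = np.random.random((4, 200, 200, 3)).astype('float32')  # stand-in for preprocessed images
features = extractor.predict(batch)
print(features.shape)  # (4, 512): global-average-pooled block5 features
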
def Xception(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
        has to be `(299, 299, 3)`).
        It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='xception')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model | Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
        has to be `(299, 299, 3)`).
        It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
| Xception | python | fchollet/deep-learning-models | xception.py | https://github.com/fchollet/deep-learning-models/blob/master/xception.py | MIT |
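
Usage sketch under the same assumptions as the VGG examples (TensorFlow backend, placeholder image path). Note the 299x299 default size and that xception.py ships its own preprocess_input, which (as assumed here) rescales pixels to [-1, 1] instead of subtracting the ImageNet mean.

import numpy as np
from keras.preprocessing import image

from xception import Xception, preprocess_input
from imagenet_utils import decode_predictions

model = Xception(include_top=True, weights='imagenet')  # TensorFlow backend only

img = image.load_img('elephant.jpg', target_size=(299, 299))  # placeholder path
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x)  # scales pixels into [-1, 1]

print(decode_predictions(model.predict(x)))
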
def beam_search_generator(sess, net, initial_state, initial_sample,
early_term_token, beam_width, forward_model_fn, forward_args):
'''Run beam search! Yield consensus tokens sequentially, as a generator;
return when reaching early_term_token (newline).
Args:
sess: tensorflow session reference
net: tensorflow net graph (must be compatible with the forward_net function)
initial_state: initial hidden state of the net
initial_sample: single token (excluding any seed/priming material)
to start the generation
early_term_token: stop when the beam reaches consensus on this token
(but do not return this token).
beam_width: how many beams to track
forward_model_fn: function to forward the model, must be of the form:
probability_output, beam_state =
forward_model_fn(sess, net, beam_state, beam_sample, forward_args)
(Note: probability_output has to be a valid probability distribution!)
    forward_args: an extra argument passed through unchanged to every
        forward_model_fn call.
Returns: a generator to yield a sequence of beam-sampled tokens.'''
# Store state, outputs and probabilities for up to args.beam_width beams.
# Initialize with just the one starting entry; it will branch to fill the beam
# in the first step.
beam_states = [initial_state] # Stores the best activation states
beam_outputs = [[initial_sample]] # Stores the best generated output sequences so far.
beam_probs = [1.] # Stores the cumulative normalized probabilities of the beams so far.
while True:
# Keep a running list of the best beam branches for next step.
# Don't actually copy any big data structures yet, just keep references
# to existing beam state entries, and then clone them as necessary
# at the end of the generation step.
new_beam_indices = []
new_beam_probs = []
new_beam_samples = []
# Iterate through the beam entries.
for beam_index, beam_state in enumerate(beam_states):
beam_prob = beam_probs[beam_index]
beam_sample = beam_outputs[beam_index][-1]
# Forward the model.
prediction, beam_states[beam_index] = forward_model_fn(
sess, net, beam_state, beam_sample, forward_args)
# Sample best_tokens from the probability distribution.
            # Sample beam_width choices from the probability distribution
            # (but not more than the number of positive probabilities in prediction).
count = min(beam_width, sum(1 if p > 0. else 0 for p in prediction))
best_tokens = np.random.choice(len(prediction), size=count,
replace=False, p=prediction)
for token in best_tokens:
prob = prediction[token] * beam_prob
if len(new_beam_indices) < beam_width:
# If we don't have enough new_beam_indices, we automatically qualify.
new_beam_indices.append(beam_index)
new_beam_probs.append(prob)
new_beam_samples.append(token)
else:
# Sample a low-probability beam to possibly replace.
np_new_beam_probs = np.array(new_beam_probs)
inverse_probs = -np_new_beam_probs + max(np_new_beam_probs) + min(np_new_beam_probs)
inverse_probs = inverse_probs / sum(inverse_probs)
sampled_beam_index = np.random.choice(beam_width, p=inverse_probs)
if new_beam_probs[sampled_beam_index] <= prob:
# Replace it.
new_beam_indices[sampled_beam_index] = beam_index
new_beam_probs[sampled_beam_index] = prob
new_beam_samples[sampled_beam_index] = token
# Replace the old states with the new states, first by referencing and then by copying.
already_referenced = [False] * beam_width
new_beam_states = []
new_beam_outputs = []
for i, new_index in enumerate(new_beam_indices):
if already_referenced[new_index]:
new_beam = copy.deepcopy(beam_states[new_index])
else:
new_beam = beam_states[new_index]
already_referenced[new_index] = True
new_beam_states.append(new_beam)
new_beam_outputs.append(beam_outputs[new_index] + [new_beam_samples[i]])
# Normalize the beam probabilities so they don't drop to zero
beam_probs = new_beam_probs / sum(new_beam_probs)
beam_states = new_beam_states
beam_outputs = new_beam_outputs
# Prune the agreed portions of the outputs
# and yield the tokens on which the beam has reached consensus.
l, early_term = consensus_length(beam_outputs, early_term_token)
if l > 0:
for token in beam_outputs[0][:l]: yield token
beam_outputs = [output[l:] for output in beam_outputs]
if early_term: return | Run beam search! Yield consensus tokens sequentially, as a generator;
return when reaching early_term_token (newline).
Args:
sess: tensorflow session reference
net: tensorflow net graph (must be compatible with the forward_net function)
initial_state: initial hidden state of the net
initial_sample: single token (excluding any seed/priming material)
to start the generation
early_term_token: stop when the beam reaches consensus on this token
(but do not return this token).
beam_width: how many beams to track
forward_model_fn: function to forward the model, must be of the form:
probability_output, beam_state =
forward_model_fn(sess, net, beam_state, beam_sample, forward_args)
(Note: probability_output has to be a valid probability distribution!)
    forward_args: an extra argument passed through unchanged to every
    forward_model_fn call.
Returns: a generator to yield a sequence of beam-sampled tokens. | beam_search_generator | python | pender/chatbot-rnn | chatbot.py | https://github.com/pender/chatbot-rnn/blob/master/chatbot.py | MIT |
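
Toy sketch of the forward_model_fn contract and of consuming the generator. Everything numeric here (vocabulary size, token ids, probabilities) is invented for illustration, and the import assumes chatbot.py and its dependencies are importable; the real model forwards an RNN instead of returning a fixed distribution. Such a run usually reaches consensus on the termination token within a few steps.

import numpy as np
from chatbot import beam_search_generator  # chatbot.py from this repository

NEWLINE = 0  # toy early-termination token id

def toy_forward_fn(sess, net, state, sample, args):
    # Contract: return (valid probability distribution over tokens, new state).
    probs = np.array([0.4, 0.15, 0.15, 0.15, 0.15])
    return probs / probs.sum(), state

tokens = beam_search_generator(sess=None, net=None, initial_state=None,
                               initial_sample=3, early_term_token=NEWLINE,
                               beam_width=2, forward_model_fn=toy_forward_fn,
                               forward_args=None)
print(list(tokens))  # consensus tokens, ending before the NEWLINE token
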
def __init__(self, cell_fn, partition_size=128, partitions=1, layers=2):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cell_fn: reference to RNNCell function to create each partition in each layer.
partition_size: how many horizontal cells to include in each partition.
partitions: how many horizontal partitions to include in each layer.
layers: how many layers to include in the net.
"""
super(PartitionedMultiRNNCell, self).__init__()
self._cells = []
for i in range(layers):
self._cells.append([cell_fn(partition_size) for _ in range(partitions)])
self._partitions = partitions | Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cell_fn: reference to RNNCell function to create each partition in each layer.
partition_size: how many horizontal cells to include in each partition.
partitions: how many horizontal partitions to include in each layer.
layers: how many layers to include in the net.
| __init__ | python | pender/chatbot-rnn | model.py | https://github.com/pender/chatbot-rnn/blob/master/model.py | MIT |
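
Construction sketch (TF1-era API, matching this repository): cell_fn is any RNNCell constructor that takes a unit count, so a layer of four 128-unit GRU partitions behaves like a 512-wide layer split into independent blocks.

import tensorflow as tf                    # TF1-style API assumed, as in model.py
from model import PartitionedMultiRNNCell  # model.py from this repository

cell = PartitionedMultiRNNCell(cell_fn=tf.contrib.rnn.GRUCell,
                               partition_size=128, partitions=4, layers=3)
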
def _rnn_state_placeholders(state):
"""Convert RNN state tensors to placeholders, reflecting the same nested tuple structure."""
# Adapted from @carlthome's comment:
# https://github.com/tensorflow/tensorflow/issues/2838#issuecomment-302019188
if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
c, h = state
c = tf.placeholder(c.dtype, c.shape, c.op.name)
h = tf.placeholder(h.dtype, h.shape, h.op.name)
return tf.contrib.rnn.LSTMStateTuple(c, h)
elif isinstance(state, tf.Tensor):
h = state
h = tf.placeholder(h.dtype, h.shape, h.op.name)
return h
else:
structure = [_rnn_state_placeholders(x) for x in state]
return tuple(structure) | Convert RNN state tensors to placeholders, reflecting the same nested tuple structure. | _rnn_state_placeholders | python | pender/chatbot-rnn | model.py | https://github.com/pender/chatbot-rnn/blob/master/model.py | MIT |
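
Sketch of how the helper can be used (assumed usage, TF1 API): build a nested zero state and mirror it with feedable placeholders of the same structure.

import tensorflow as tf                     # TF1-style API assumed
from model import _rnn_state_placeholders   # model.py from this repository

multi_cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.LSTMCell(64) for _ in range(2)])
zero_state = multi_cell.zero_state(batch_size=1, dtype=tf.float32)

# Same nested (LSTMStateTuple(c, h), ...) structure, but every tensor is now a
# placeholder that can be fed with numpy arrays between session.run() calls.
state_placeholders = _rnn_state_placeholders(zero_state)
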
def forward_model(self, sess, state, input_sample):
'''Run a forward pass. Return the updated hidden state and the output probabilities.'''
shaped_input = np.array([[input_sample]], np.float32)
inputs = {self.input_data: shaped_input}
self.add_state_to_feed_dict(inputs, state)
[probs, state] = sess.run([self.probs, self.final_state], feed_dict=inputs)
return probs[0], state | Run a forward pass. Return the updated hidden state and the output probabilities. | forward_model | python | pender/chatbot-rnn | model.py | https://github.com/pender/chatbot-rnn/blob/master/model.py | MIT |
def check_container_exec_instances(context, num):
"""Modern docker versions remove ExecIDs after they finished, but older
docker versions leave ExecIDs behind. This test is for asserting that
the ExecIDs are cleaned up one way or another"""
container_info = context.docker_client.inspect_container(
context.running_container_id
)
if container_info["ExecIDs"] is None:
execs = []
else:
execs = container_info["ExecIDs"]
print("Container info:\n%s" % container_info)
assert len(execs) <= int(num) | Modern docker versions remove ExecIDs after they finished, but older
docker versions leave ExecIDs behind. This test is for asserting that
the ExecIDs are cleaned up one way or another | check_container_exec_instances | python | Yelp/paasta | general_itests/steps/paasta_execute_docker_command.py | https://github.com/Yelp/paasta/blob/master/general_itests/steps/paasta_execute_docker_command.py | Apache-2.0 |
def tail_paasta_logs_let_threads_be_threads(context):
"""This test lets tail_paasta_logs() fire off processes to do work. We
verify that the work was done, basically irrespective of how it was done.
"""
service = "fake_service"
context.levels = ["fake_level1", "fake_level2"]
context.components = ["deploy", "monitoring"]
context.clusters = ["fake_cluster1"]
context.instances = ["fake_instance"]
context.pods = ["fake_pod"]
with mock.patch(
"paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
autospec=True,
) as context.determine_scribereader_envs_patch, mock.patch(
"paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail", autospec=True
) as scribe_tail_patch, mock.patch(
"paasta_tools.cli.cmds.logs.log", autospec=True
), mock.patch(
"paasta_tools.cli.cmds.logs.print_log", autospec=True
) as context.print_log_patch, mock.patch(
"paasta_tools.cli.cmds.logs.scribereader", autospec=True
):
context.determine_scribereader_envs_patch.return_value = ["env1", "env2"]
def scribe_tail_side_effect(
self,
scribe_env,
stream_name,
service,
levels,
components,
clusters,
instances,
pods,
queue,
filter_fn,
parse_fn=None,
):
# The print here is just for debugging
print("fake log line added for %s" % scribe_env)
queue.put("fake log line added for %s" % scribe_env)
# This sleep() was the straw that broke the camel's back
# and forced me to move this test into the integration
# suite. The test is flaky without the sleep, and the
# sleep make it a lousy unit test.
time.sleep(0.05)
scribe_tail_patch.side_effect = scribe_tail_side_effect
context.scribe_log_reader = logs.ScribeLogReader(
cluster_map={"env1": "env1", "env2": "env2"}
)
context.scribe_log_reader.tail_logs(
service,
context.levels,
context.components,
context.clusters,
context.instances,
context.pods,
) | This test lets tail_paasta_logs() fire off processes to do work. We
verify that the work was done, basically irrespective of how it was done.
| tail_paasta_logs_let_threads_be_threads | python | Yelp/paasta | general_itests/steps/tail_paasta_logs.py | https://github.com/Yelp/paasta/blob/master/general_itests/steps/tail_paasta_logs.py | Apache-2.0 |
def register_bounce_method(name: str) -> Callable[[BounceMethod], BounceMethod]:
"""Returns a decorator that registers that bounce function at a given name
so get_bounce_method_func can find it."""
def outer(bounce_func: BounceMethod):
_bounce_method_funcs[name] = bounce_func
return bounce_func
return outer | Returns a decorator that registers that bounce function at a given name
so get_bounce_method_func can find it. | register_bounce_method | python | Yelp/paasta | paasta_tools/bounce_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py | Apache-2.0 |
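
Sketch of how the registry decorator is meant to be used (the method name and behaviour below are invented): the decorated function keeps the same signature as the built-in bounce methods that follow.

from paasta_tools.bounce_lib import register_bounce_method

@register_bounce_method("kill_none")  # hypothetical bounce method name
def kill_none_bounce(new_config, new_app_running, happy_new_tasks,
                     old_non_draining_tasks, margin_factor=1.0):
    """Toy bounce method: start the new app if needed, never drain anything."""
    return {"create_app": not new_app_running, "tasks_to_drain": set()}

# get_bounce_method_func("kill_none") would now resolve to kill_none_bounce.
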
def brutal_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Pays no regard to safety. Starts the new app if necessary, and kills any
old ones. Mostly meant as an example of the simplest working bounce method,
but might be tolerable for some services.
:param new_config: The configuration dictionary representing the desired new app.
:param new_app_running: Whether there is an app in Marathon with the same ID as the new config.
:param happy_new_tasks: Set of MarathonTasks belonging to the new application that are considered healthy and up.
:param old_non_draining_tasks: A sequence of tasks not belonging to the new version. Tasks should be ordered from
most desirable to least desirable.
:param margin_factor: the multiplication factor used to calculate the number of instances to be drained
when the crossover method is used.
:return: A dictionary representing the desired bounce actions and containing the following keys:
- create_app: True if we should start the new Marathon app, False otherwise.
- tasks_to_drain: a set of task objects which should be drained and killed. May be empty.
"""
return {
"create_app": not new_app_running,
"tasks_to_drain": set(old_non_draining_tasks),
} | Pays no regard to safety. Starts the new app if necessary, and kills any
old ones. Mostly meant as an example of the simplest working bounce method,
but might be tolerable for some services.
:param new_config: The configuration dictionary representing the desired new app.
:param new_app_running: Whether there is an app in Marathon with the same ID as the new config.
:param happy_new_tasks: Set of MarathonTasks belonging to the new application that are considered healthy and up.
:param old_non_draining_tasks: A sequence of tasks not belonging to the new version. Tasks should be ordered from
most desirable to least desirable.
:param margin_factor: the multiplication factor used to calculate the number of instances to be drained
when the crossover method is used.
:return: A dictionary representing the desired bounce actions and containing the following keys:
- create_app: True if we should start the new Marathon app, False otherwise.
- tasks_to_drain: a set of task objects which should be drained and killed. May be empty.
| brutal_bounce | python | Yelp/paasta | paasta_tools/bounce_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py | Apache-2.0 |
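
Illustration of the return value with string stand-ins for Marathon task objects (not real tasks): when the new app is not yet running, it is created and every old task is drained immediately.

from paasta_tools.bounce_lib import brutal_bounce

old_tasks = ["old_task_1", "old_task_2"]  # stand-ins for Marathon task objects

actions = brutal_bounce(
    new_config={"instances": 3},
    new_app_running=False,
    happy_new_tasks=[],
    old_non_draining_tasks=old_tasks,
)
assert actions == {"create_app": True, "tasks_to_drain": {"old_task_1", "old_task_2"}}
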
def upthendown_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Starts a new app if necessary; only kills old apps once all the requested tasks for the new version are running.
See the docstring for brutal_bounce() for parameters and return value.
"""
if new_app_running and len(happy_new_tasks) == new_config["instances"]:
return {"create_app": False, "tasks_to_drain": set(old_non_draining_tasks)}
else:
return {"create_app": not new_app_running, "tasks_to_drain": set()} | Starts a new app if necessary; only kills old apps once all the requested tasks for the new version are running.
See the docstring for brutal_bounce() for parameters and return value.
| upthendown_bounce | python | Yelp/paasta | paasta_tools/bounce_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py | Apache-2.0 |
def crossover_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Starts a new app if necessary; slowly kills old apps as instances of the new app become happy.
See the docstring for brutal_bounce() for parameters and return value.
"""
assert margin_factor > 0
assert margin_factor <= 1
needed_count = max(
int(math.ceil(new_config["instances"] * margin_factor)) - len(happy_new_tasks),
0,
)
return {
"create_app": not new_app_running,
"tasks_to_drain": set(old_non_draining_tasks[needed_count:]),
} | Starts a new app if necessary; slowly kills old apps as instances of the new app become happy.
See the docstring for brutal_bounce() for parameters and return value.
| crossover_bounce | python | Yelp/paasta | paasta_tools/bounce_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py | Apache-2.0 |
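
Worked example of the margin arithmetic with invented numbers: 10 desired instances, margin_factor 0.95 and 6 happy new tasks give needed_count = ceil(10 * 0.95) - 6 = 4, so the 4 most desirable old tasks are kept and the rest are drained.

from paasta_tools.bounce_lib import crossover_bounce

old_tasks = ["old_%d" % i for i in range(10)]  # ordered most- to least-desirable

actions = crossover_bounce(
    new_config={"instances": 10},
    new_app_running=True,
    happy_new_tasks=["new_%d" % i for i in range(6)],
    old_non_draining_tasks=old_tasks,
    margin_factor=0.95,
)
assert actions["create_app"] is False
assert actions["tasks_to_drain"] == set(old_tasks[4:])  # the 6 least-desirable old tasks
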
def downthenup_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""Stops any old apps and waits for them to die before starting a new one.
See the docstring for brutal_bounce() for parameters and return value.
"""
return {
"create_app": not old_non_draining_tasks and not new_app_running,
"tasks_to_drain": set(old_non_draining_tasks),
} | Stops any old apps and waits for them to die before starting a new one.
See the docstring for brutal_bounce() for parameters and return value.
| downthenup_bounce | python | Yelp/paasta | paasta_tools/bounce_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py | Apache-2.0 |
def down_bounce(
new_config: BounceMethodConfigDict,
new_app_running: bool,
happy_new_tasks: Collection,
old_non_draining_tasks: Sequence,
margin_factor=1.0,
) -> BounceMethodResult:
"""
Stops old apps, doesn't start any new apps.
Used for the graceful_app_drain script.
"""
return {"create_app": False, "tasks_to_drain": set(old_non_draining_tasks)} |
Stops old apps, doesn't start any new apps.
Used for the graceful_app_drain script.
| down_bounce | python | Yelp/paasta | paasta_tools/bounce_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/bounce_lib.py | Apache-2.0 |
def broadcast_log_all_services_running_here(line: str, soa_dir=DEFAULT_SOA_DIR) -> None:
"""Log a line of text to paasta logs of all services running on this host.
:param line: text to log
"""
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
services = get_all_services_running_here(cluster, soa_dir)
for service, instance, _ in services:
_log(
line=line,
service=service,
instance=instance,
component="monitoring",
cluster=cluster,
) | Log a line of text to paasta logs of all services running on this host.
:param line: text to log
| broadcast_log_all_services_running_here | python | Yelp/paasta | paasta_tools/broadcast_log_to_services.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/broadcast_log_to_services.py | Apache-2.0 |
def get_registrations(self) -> List[str]:
"""
To support apollo we always register in
cassandra_<cluster>.main
"""
registrations = self.config_dict.get("registrations", [])
for registration in registrations:
try:
decompose_job_id(registration)
except InvalidJobNameError:
log.error(
"Provided registration {} for service "
"{} is invalid".format(registration, self.service)
)
return registrations or [
compose_job_id(self.get_service_name_smartstack(), "main")
] |
To support apollo we always register in
cassandra_<cluster>.main
| get_registrations | python | Yelp/paasta | paasta_tools/cassandracluster_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cassandracluster_tools.py | Apache-2.0 |
def load_cassandracluster_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> CassandraClusterDeploymentConfig:
"""Read a service instance's configuration for CassandraCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "cassandracluster", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = CassandraClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return CassandraClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
) | Read a service instance's configuration for CassandraCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_cassandracluster_instance_config | python | Yelp/paasta | paasta_tools/cassandracluster_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cassandracluster_tools.py | Apache-2.0 |
def container_lifetime(
pod: V1Pod,
) -> datetime.timedelta:
"""Return a time duration for how long the pod is alive"""
st = pod.status.start_time
return datetime.datetime.now(st.tzinfo) - st | Return a time duration for how long the pod is alive | container_lifetime | python | Yelp/paasta | paasta_tools/check_flink_services_health.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py | Apache-2.0 |
def healthy_flink_containers_cnt(si_pods: Sequence[V1Pod], container_type: str) -> int:
"""Return count of healthy Flink containers with given type"""
return len(
[
pod
for pod in si_pods
if pod.metadata.labels["flink.yelp.com/container-type"] == container_type
and is_pod_ready(pod)
and container_lifetime(pod).total_seconds() > 60
]
) | Return count of healthy Flink containers with given type | healthy_flink_containers_cnt | python | Yelp/paasta | paasta_tools/check_flink_services_health.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py | Apache-2.0 |
def check_under_registered_taskmanagers(
instance_config: FlinkDeploymentConfig,
expected_count: int,
cr_name: str,
is_eks: bool,
) -> Tuple[bool, str, str]:
"""Check if not enough taskmanagers have been registered to the jobmanager and
returns both the result of the check in the form of a boolean and a human-readable
text to be used in logging or monitoring events.
"""
unhealthy = True
if cr_name != "":
try:
overview = flink_tools.get_flink_jobmanager_overview(
cr_name, instance_config.cluster, is_eks
)
num_reported = overview.get("taskmanagers", 0)
crit_threshold = instance_config.get_replication_crit_percentage()
output = (
f"{instance_config.job_id} has {num_reported}/{expected_count} "
f"taskmanagers reported by dashboard (threshold: {crit_threshold}%)"
)
unhealthy, _ = is_under_replicated(
num_reported, expected_count, crit_threshold
)
except ValueError as e:
output = (
f"Dashboard of service {instance_config.job_id} is not available ({e})"
)
else:
output = f"Dashboard of service {instance_config.job_id} is not available"
if unhealthy:
description = f"""
This alert means that the Flink dashboard is not reporting the expected
number of taskmanagers.
Reasons this might be happening:
The service may simply be unhealthy. There also may not be enough resources
in the cluster to support the requested instance count.
Things you can do:
* Fix the cause of the unhealthy service. Try running:
paasta status -s {instance_config.service} -i {instance_config.instance} -c {instance_config.cluster} -vv
"""
else:
description = f"{instance_config.job_id} taskmanager is available"
return unhealthy, output, description | Check if not enough taskmanagers have been registered to the jobmanager and
returns both the result of the check in the form of a boolean and a human-readable
text to be used in logging or monitoring events.
| check_under_registered_taskmanagers | python | Yelp/paasta | paasta_tools/check_flink_services_health.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py | Apache-2.0 |
def get_cr_name(si_pods: Sequence[V1Pod]) -> str:
"""Returns the flink custom resource name based on the pod name. We are randomly choosing jobmanager pod here.
This change is related to FLINK-3129
"""
jobmanager_pod = [
pod
for pod in si_pods
if pod.metadata.labels["flink.yelp.com/container-type"] == "jobmanager"
and is_pod_ready(pod)
and container_lifetime(pod).total_seconds() > 60
]
if len(jobmanager_pod) == 1:
return jobmanager_pod[0].metadata.name.split("-jobmanager-")[0]
else:
return "" | Returns the flink custom resource name based on the pod name. We are randomly choosing jobmanager pod here.
This change is related to FLINK-3129
| get_cr_name | python | Yelp/paasta | paasta_tools/check_flink_services_health.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_flink_services_health.py | Apache-2.0 |
def check_kubernetes_pod_replication(
instance_config: Union[KubernetesDeploymentConfig, EksDeploymentConfig],
pods_by_service_instance: Dict[str, Dict[str, List[V1Pod]]],
replication_checker: KubeSmartstackEnvoyReplicationChecker,
dry_run: bool = False,
) -> Optional[bool]:
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or k8s)
:param instance_config: an instance of KubernetesDeploymentConfig or EksDeploymentConfig
:param replication_checker: an instance of KubeSmartstackEnvoyReplicationChecker
"""
default_alert_after = DEFAULT_ALERT_AFTER
expected_count = instance_config.get_instances()
log.info(
"Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)
)
proxy_port = get_proxy_port_for_instance(instance_config)
registrations = instance_config.get_registrations()
# If this instance does not autoscale and only has 1 instance, set alert after to 20m.
# Otherwise, set it to 10 min.
if (
not instance_config.is_autoscaling_enabled()
and instance_config.get_instances() == 1
):
default_alert_after = "20m"
if "monitoring" not in instance_config.config_dict:
instance_config.config_dict["monitoring"] = {}
instance_config.config_dict["monitoring"][
"alert_after"
] = instance_config.config_dict["monitoring"].get(
"alert_after", default_alert_after
)
# if the primary registration does not match the service_instance name then
# the best we can do is check k8s for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
is_well_replicated = monitoring_tools.check_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
replication_checker=replication_checker,
dry_run=dry_run,
)
return is_well_replicated
else:
check_healthy_kubernetes_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
pods_by_service_instance=pods_by_service_instance,
dry_run=dry_run,
)
return None | Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or k8s)
:param instance_config: an instance of KubernetesDeploymentConfig or EksDeploymentConfig
:param replication_checker: an instance of KubeSmartstackEnvoyReplicationChecker
| check_kubernetes_pod_replication | python | Yelp/paasta | paasta_tools/check_kubernetes_services_replication.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_kubernetes_services_replication.py | Apache-2.0 |
def read_oom_events_from_scribe(cluster, superregion, num_lines=1000):
"""Read the latest 'num_lines' lines from OOM_EVENTS_STREAM and iterate over them."""
# paasta configs incls a map for cluster -> env that is expected by scribe
log_reader_config = load_system_paasta_config().get_log_reader()
cluster_map = log_reader_config["options"]["cluster_map"]
scribe_env = cluster_map[cluster]
# `scribe_env_to_locations` slightly mutates the scribe env based on whether
# or not it is in dev or prod
host, port = scribereader.get_tail_host_and_port(
**scribe_env_to_locations(scribe_env),
)
stream = scribereader.get_stream_tailer(
stream_name=OOM_EVENTS_STREAM,
tailing_host=host,
tailing_port=port,
lines=num_lines,
superregion=superregion,
)
try:
for line in stream:
try:
j = json.loads(line)
if j.get("cluster", "") == cluster:
yield j
except json.decoder.JSONDecodeError:
pass
except StreamTailerSetupError as e:
if "No data in stream" in str(e):
pass
else:
raise e | Read the latest 'num_lines' lines from OOM_EVENTS_STREAM and iterate over them. | read_oom_events_from_scribe | python | Yelp/paasta | paasta_tools/check_oom_events.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py | Apache-2.0 |
def latest_oom_events(cluster, superregion, interval=60):
"""
    :returns: {(service, instance): {container_id, ...}}
    for (service, instance) pairs with at least one OOM event in the interval
"""
start_timestamp = int(time.time()) - interval
res = {}
for e in read_oom_events_from_scribe(cluster, superregion):
if e["timestamp"] > start_timestamp:
key = (e["service"], e["instance"])
res.setdefault(key, set()).add(e.get("container_id", ""))
return res |
    :returns: {(service, instance): {container_id, ...}}
    for (service, instance) pairs with at least one OOM event in the interval
| latest_oom_events | python | Yelp/paasta | paasta_tools/check_oom_events.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py | Apache-2.0 |
def compose_sensu_status(
instance, oom_events, is_check_enabled, alert_threshold, check_interval
):
"""
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
:param is_check_enabled: boolean to indicate whether the check enabled for the instance
"""
interval_string = f"{check_interval} minute(s)"
instance_name = f"{instance.service}.{instance.instance}"
if not is_check_enabled:
return (Status.OK, f"This check is disabled for {instance_name}.")
if not oom_events:
return (
Status.OK,
f"No oom events for {instance_name} in the last {interval_string}.",
)
elif len(oom_events) >= alert_threshold:
return (
Status.CRITICAL,
f"The Out Of Memory killer killed processes for {instance_name} "
f"in the last {interval_string}.",
)
else:
# If the number of OOM kills isn't above the alert threshold,
# don't send anything. This will keep an alert open if it's already open,
# but won't start a new alert if there wasn't one yet
return None |
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
:param is_check_enabled: boolean to indicate whether the check enabled for the instance
| compose_sensu_status | python | Yelp/paasta | paasta_tools/check_oom_events.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py | Apache-2.0 |
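
Sketch of the three possible outcomes (OK, CRITICAL, or None) using a tiny stand-in for InstanceConfig, since the function only reads .service and .instance; the service name and thresholds are invented.

from types import SimpleNamespace
from paasta_tools.check_oom_events import compose_sensu_status

fake_instance = SimpleNamespace(service="myservice", instance="main")  # stand-in InstanceConfig

# No OOM kills in the window -> OK status
print(compose_sensu_status(fake_instance, oom_events=set(),
                           is_check_enabled=True, alert_threshold=1, check_interval=1))
# At or above the threshold -> CRITICAL status
print(compose_sensu_status(fake_instance, oom_events={"container_a", "container_b"},
                           is_check_enabled=True, alert_threshold=1, check_interval=1))
# Below the threshold -> None, so an already-open alert stays open but no new one starts
print(compose_sensu_status(fake_instance, oom_events={"container_a"},
                           is_check_enabled=True, alert_threshold=2, check_interval=1))
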
def send_sensu_event(instance, oom_events, args):
"""
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
"""
check_name = compose_check_name_for_service_instance(
"oom-killer", instance.service, instance.instance
)
monitoring_overrides = instance.get_monitoring()
status = compose_sensu_status(
instance=instance,
oom_events=oom_events,
is_check_enabled=monitoring_overrides.get("check_oom_events", True),
alert_threshold=args.alert_threshold,
check_interval=args.check_interval,
)
if not status:
return
memory_limit = instance.get_mem()
try:
memory_limit_str = f"{int(memory_limit)}MB"
except ValueError:
memory_limit_str = memory_limit
monitoring_overrides.update(
{
"page": False,
"alert_after": "0m",
"realert_every": args.realert_every,
"runbook": "y/check-oom-events",
"tip": (
"Follow the runbook to investigate and rightsize memory usage "
f"(curr: {memory_limit_str})"
),
}
)
return monitoring_tools.send_event(
service=instance.service,
check_name=check_name,
overrides=monitoring_overrides,
status=status[0],
output=status[1],
soa_dir=instance.soa_dir,
dry_run=args.dry_run,
) |
:param instance: InstanceConfig
:param oom_events: a list of OOMEvents
| send_sensu_event | python | Yelp/paasta | paasta_tools/check_oom_events.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_oom_events.py | Apache-2.0 |
def set_local_vars_configuration_to_none(obj: Any, visited: Set[int] = None) -> None:
"""
Recursive function to ensure that k8s clientlib objects are pickleable.
Without this, k8s clientlib objects can't be used by multiprocessing functions
as those pickle data to shuttle between processes.
"""
if visited is None:
visited = set()
# Avoid infinite recursion for objects that have already been visited
obj_id = id(obj)
if obj_id in visited:
return
visited.add(obj_id)
# if the object has the attribute, set it to None to essentially delete it
if hasattr(obj, "local_vars_configuration"):
setattr(obj, "local_vars_configuration", None)
# recursively check attributes of the object
if hasattr(obj, "__dict__"):
for attr_name, attr_value in obj.__dict__.items():
set_local_vars_configuration_to_none(attr_value, visited)
# if the object is iterable/a collection, iterate over its elements
elif isinstance(obj, (list, tuple, set)):
for item in obj:
set_local_vars_configuration_to_none(item, visited)
elif isinstance(obj, dict):
for value in obj.values():
set_local_vars_configuration_to_none(value, visited) |
Recursive function to ensure that k8s clientlib objects are pickleable.
Without this, k8s clientlib objects can't be used by multiprocessing functions
as those pickle data to shuttle between processes.
| set_local_vars_configuration_to_none | python | Yelp/paasta | paasta_tools/check_services_replication_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/check_services_replication_tools.py | Apache-2.0 |
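
Sketch of the pickling problem being worked around, using a toy object in place of a kubernetes clientlib model; the class names are invented.

import pickle
from paasta_tools.check_services_replication_tools import set_local_vars_configuration_to_none

class FakeChild:
    def __init__(self):
        self.local_vars_configuration = lambda: None  # unpicklable attribute

class FakeK8sModel:
    def __init__(self):
        self.local_vars_configuration = lambda: None  # unpicklable attribute
        self.children = [FakeChild()]                 # nested objects are handled too

obj = FakeK8sModel()
set_local_vars_configuration_to_none(obj)
pickle.dumps(obj)  # now succeeds: the offending attributes were recursively set to None
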
def instance_is_not_bouncing(
instance_config: Union[KubernetesDeploymentConfig, EksDeploymentConfig],
applications: List[Application],
) -> bool:
"""
:param instance_config: a KubernetesDeploymentConfig or an EksDeploymentConfig with the configuration of the instance
:param applications: a list of all deployments or stateful sets on the cluster that match the service
and instance of provided instance_config
"""
for application in applications:
if isinstance(application, DeploymentWrapper):
existing_app = application.item
if (
(
existing_app.metadata.namespace != instance_config.get_namespace()
and (instance_config.get_bounce_method() == "downthenup")
)
or (
existing_app.metadata.namespace == instance_config.get_namespace()
and (
instance_config.get_instances()
<= (existing_app.status.ready_replicas or 0)
)
)
) or instance_config.get_desired_state() == "stop":
return True
elif (
isinstance(application, StatefulSetWrapper)
and application.item.metadata.namespace != instance_config.get_namespace()
):
log.critical(
"Paasta detected a StatefulSet that was migrated to a new namespace"
"StatefulSet bouncing across namespaces is not supported"
)
raise StatefulSetsAreNotSupportedError
return False |
:param instance_config: a KubernetesDeploymentConfig or an EksDeploymentConfig with the configuration of the instance
:param applications: a list of all deployments or stateful sets on the cluster that match the service
and instance of provided instance_config
| instance_is_not_bouncing | python | Yelp/paasta | paasta_tools/cleanup_kubernetes_jobs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cleanup_kubernetes_jobs.py | Apache-2.0 |
def get_applications_to_kill(
applications_dict: Dict[Tuple[str, str], List[Application]],
cluster: str,
valid_services: Set[Tuple[str, str]],
soa_dir: str,
eks: bool = False,
) -> List[Application]:
"""
:param applications_dict: A dictionary with (service, instance) as keys and a list of applications for each tuple
:param cluster: paasta cluster
:param valid_services: a set with the valid (service, instance) tuples for this cluster
:param soa_dir: The SOA config directory to read from
:return: list of applications to kill
"""
log.info("Determining apps to be killed")
applications_to_kill: List[Application] = []
for (service, instance), applications in applications_dict.items():
if len(applications) >= 1:
if (service, instance) not in valid_services:
applications_to_kill.extend(applications)
else:
instance_config: Union[KubernetesDeploymentConfig, EksDeploymentConfig]
if eks:
instance_config = load_eks_service_config(
cluster=cluster,
service=service,
instance=instance,
soa_dir=soa_dir,
)
else:
instance_config = load_kubernetes_service_config(
cluster=cluster,
service=service,
instance=instance,
soa_dir=soa_dir,
)
try:
not_bouncing = instance_is_not_bouncing(
instance_config, applications
)
except StatefulSetsAreNotSupportedError:
overrides = {
"page": True,
"alert_after": 0,
"tip": f"Revert {service}.{instance} in soa-configs to not include the namespace key.",
"runbook": "y/rb-paasta-namespace",
"ticket": True,
}
send_event(
service=service,
check_name=f"statefulset_bounce_{service}.{instance}",
overrides=overrides,
status=Status.CRITICAL, # type: ignore
output=f"Unsupported bounce: {service}.{instance}. PaaSTA managed StatefulSets do not support custom namespace",
soa_dir=soa_dir,
)
else:
for application in applications:
if (
application.kube_deployment.namespace
!= instance_config.get_namespace()
and not_bouncing
):
applications_to_kill.append(application)
return applications_to_kill |
:param applications_dict: A dictionary with (service, instance) as keys and a list of applications for each tuple
:param cluster: paasta cluster
:param valid_services: a set with the valid (service, instance) tuples for this cluster
:param soa_dir: The SOA config directory to read from
:return: list of applications to kill
| get_applications_to_kill | python | Yelp/paasta | paasta_tools/cleanup_kubernetes_jobs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cleanup_kubernetes_jobs.py | Apache-2.0 |
def cleanup_unused_apps(
soa_dir: str,
cluster: str,
kill_threshold: float = 0.5,
force: bool = False,
eks: bool = False,
) -> None:
"""Clean up old or invalid jobs/apps from kubernetes. Retrieves
both a list of apps currently in kubernetes and a list of valid
app ids in order to determine what to kill.
:param soa_dir: The SOA config directory to read from
:param cluster: paasta cluster to clean
:param kill_threshold: The decimal fraction of apps we think is
sane to kill when this job runs.
:param force: Force the cleanup if we are above the kill_threshold"""
log.info("Creating KubeClient")
kube_client = KubeClient()
log.info("Loading running Kubernetes apps")
applications_dict = list_all_applications(kube_client, APPLICATION_TYPES)
log.info("Retrieving valid apps from yelpsoa_configs")
valid_services = set(
get_services_for_cluster(
instance_type="eks" if eks else "kubernetes", soa_dir=soa_dir
)
)
applications_to_kill: List[Application] = get_applications_to_kill(
applications_dict, cluster, valid_services, soa_dir, eks
)
log.debug("Running apps: %s" % list(applications_dict))
log.debug("Valid apps: %s" % valid_services)
log.debug("Terminating: %s" % applications_to_kill)
if applications_to_kill:
above_kill_threshold = float(len(applications_to_kill)) / float(
len(applications_dict)
) > float(kill_threshold)
if above_kill_threshold and not force:
log.critical(
"Paasta was about to kill more than %s of the running services, this "
"is probably a BAD mistake!, run again with --force if you "
"really need to destroy everything" % kill_threshold
)
raise DontKillEverythingError
        for application in applications_to_kill:
            with alert_state_change(application, cluster):
                application.deep_delete(kube_client)
both a list of apps currently in kubernetes and a list of valid
app ids in order to determine what to kill.
:param soa_dir: The SOA config directory to read from
:param cluster: paasta cluster to clean
:param kill_threshold: The decimal fraction of apps we think is
sane to kill when this job runs.
:param force: Force the cleanup if we are above the kill_threshold | cleanup_unused_apps | python | Yelp/paasta | paasta_tools/cleanup_kubernetes_jobs.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/cleanup_kubernetes_jobs.py | Apache-2.0 |
def write_auto_config_data(
service: str,
extra_info: str,
data: Dict[str, Any],
soa_dir: str = DEFAULT_SOA_DIR,
sub_dir: Optional[str] = None,
comment: Optional[str] = None,
) -> Optional[str]:
"""
Replaces the contents of an automated config file for a service, or creates the file if it does not exist.
Returns the filename of the modified file, or None if no file was written.
"""
yaml.YAML().representer.add_representer(type(None), my_represent_none)
service_dir = f"{soa_dir}/{service}"
if not os.path.exists(service_dir):
log.warning(
f"Service {service} does not exist in configs, skipping auto config update"
)
return None
subdir = f"{service_dir}/{sub_dir}" if sub_dir else service_dir
if not os.path.exists(subdir):
os.mkdir(subdir)
filename = f"{subdir}/{extra_info}.yaml"
with open(filename, "w") as f:
# TODO: this can be collapsed into one codeblock. It is separated as two
# because doing content.update(data) results in losing comments from `data`
# we should be able to handle adding a header comment and yaml with comments in it
# without this if/else block
if comment:
content = (
yaml.round_trip_load(
comment.format(
# this is a bit of a hack, but we've decided to not rename files back to kubernetes-*
# files. while we still need to update things to reference the eks files directly, there's
# still a couple of places where we still need kubernetes-* files (e.g., unmigrated operators)
# so for now let's just assume that autotuned things will always actually have their human-managed
# config in eks-* files
regular_filename=f"{service}/{extra_info.replace('kubernetes-', 'eks-')}.yaml",
)
)
if comment
else {}
)
content.update(data)
else:
# avoids content.update to preserve comments in `data`
content = data
f.write(yaml.round_trip_dump(content))
return filename |
Replaces the contents of an automated config file for a service, or creates the file if it does not exist.
Returns the filename of the modified file, or None if no file was written.
| write_auto_config_data | python | Yelp/paasta | paasta_tools/config_utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/config_utils.py | Apache-2.0 |
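A minimal usage sketch for write_auto_config_data; the service name, extra_info, and sub_dir below are hypothetical, and the optional comment header (which uses the {regular_filename} template shown above) is omitted so the data mapping is written as-is.
# Hypothetical call: writes <soa_dir>/example_service/autotuned_defaults/kubernetes-norcal-devc.yaml
# and returns that filename, or None if the service directory does not exist.
filename = write_auto_config_data(
    service="example_service",
    extra_info="kubernetes-norcal-devc",
    data={"main": {"cpus": 0.5, "mem": 512}},
    sub_dir="autotuned_defaults",
)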
def get_currently_deployed_sha(service, deploy_group, soa_dir=DEFAULT_SOA_DIR):
"""Tries to determine the currently deployed sha for a service and deploy_group,
returns None if there isn't one ready yet"""
try:
deployments = load_v2_deployments_json(service=service, soa_dir=soa_dir)
return deployments.get_git_sha_for_deploy_group(deploy_group=deploy_group)
except NoDeploymentsAvailable:
return None | Tries to determine the currently deployed sha for a service and deploy_group,
returns None if there isn't one ready yet | get_currently_deployed_sha | python | Yelp/paasta | paasta_tools/deployment_utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/deployment_utils.py | Apache-2.0 |
def get_currently_deployed_version(
service, deploy_group, soa_dir=DEFAULT_SOA_DIR
) -> Optional[DeploymentVersion]:
"""Tries to determine the currently deployed version for a service and deploy_group,
returns None if there isn't one ready yet"""
try:
deployments = load_v2_deployments_json(service=service, soa_dir=soa_dir)
return deployments.get_deployment_version_for_deploy_group(
deploy_group=deploy_group
)
except NoDeploymentsAvailable:
return None | Tries to determine the currently deployed version for a service and deploy_group,
returns None if there isn't one ready yet | get_currently_deployed_version | python | Yelp/paasta | paasta_tools/deployment_utils.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/deployment_utils.py | Apache-2.0 |
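A small usage sketch for the two deployment lookups; the service and deploy group names are made up.
# Hypothetical names; both helpers return None until a deployment exists
# for the given deploy group.
sha = get_currently_deployed_sha("example_service", deploy_group="prod.everything")
version = get_currently_deployed_version("example_service", deploy_group="prod.everything")
if version is None:
    print("nothing deployed yet for this deploy group")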
def register_drain_method(
name: str,
) -> Callable[[_RegisterDrainMethod_T], _RegisterDrainMethod_T]:
"""Returns a decorator that registers a DrainMethod subclass at a given name
so get_drain_method/list_drain_methods can find it."""
def outer(drain_method: _RegisterDrainMethod_T) -> _RegisterDrainMethod_T:
_drain_methods[name] = drain_method
return drain_method
return outer | Returns a decorator that registers a DrainMethod subclass at a given name
so get_drain_method/list_drain_methods can find it. | register_drain_method | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
async def drain(self, task: DrainTask) -> None:
"""Make a task stop receiving new traffic."""
raise NotImplementedError() | Make a task stop receiving new traffic. | drain | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
async def stop_draining(self, task: DrainTask) -> None:
"""Make a task that has previously been downed start receiving traffic again."""
raise NotImplementedError() | Make a task that has previously been downed start receiving traffic again. | stop_draining | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
async def is_draining(self, task: DrainTask) -> bool:
"""Return whether a task is being drained."""
raise NotImplementedError() | Return whether a task is being drained. | is_draining | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
async def is_safe_to_kill(self, task: DrainTask) -> bool:
"""Return True if a task is drained and ready to be killed, or False if we should wait."""
raise NotImplementedError() | Return True if a task is drained and ready to be killed, or False if we should wait. | is_safe_to_kill | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
def parse_success_codes(self, success_codes_str: str) -> Set[int]:
"""Expand a string like 200-399,407-409,500 to a set containing all the integers in between."""
acceptable_response_codes: Set[int] = set()
for series_str in str(success_codes_str).split(","):
if "-" in series_str:
start, end = series_str.split("-")
acceptable_response_codes.update(range(int(start), int(end) + 1))
else:
acceptable_response_codes.add(int(series_str))
return acceptable_response_codes | Expand a string like 200-399,407-409,500 to a set containing all the integers in between. | parse_success_codes | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
async def issue_request(self, url_spec: UrlSpec, task: DrainTask) -> None:
"""Issue a request to the URL specified by url_spec regarding the task given."""
format_params = self.get_format_params(task)
urls = [
self.format_url(url_spec["url_format"], param) for param in format_params
]
method = url_spec.get("method", "GET").upper()
async with aiohttp.ClientSession() as session:
reqs = [
session.request(
method=method,
url=url,
headers={"User-Agent": get_user_agent()},
timeout=15,
)
for url in urls
]
res = await asyncio.gather(*reqs)
for response in res:
if not self.check_response_code(
response.status, url_spec["success_codes"]
):
raise StatusCodeNotAcceptableError(
f"Unacceptable status code {response.status} not in {url_spec['success_codes']} when hitting {response.url}"
) | Issue a request to the URL specified by url_spec regarding the task given. | issue_request | python | Yelp/paasta | paasta_tools/drain_lib.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/drain_lib.py | Apache-2.0 |
def load_eks_service_config_no_cache(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> "EksDeploymentConfig":
"""Read a service instance's configuration for EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
    :param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "eks", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = EksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return EksDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
) | Read a service instance's configuration for EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_eks_service_config_no_cache | python | Yelp/paasta | paasta_tools/eks_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/eks_tools.py | Apache-2.0 |
def load_eks_service_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> "EksDeploymentConfig":
"""Read a service instance's configuration for EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
    :param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance"""
return load_eks_service_config_no_cache(
service=service,
instance=instance,
cluster=cluster,
load_deployments=load_deployments,
soa_dir=soa_dir,
) | Read a service instance's configuration for EKS.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param name: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance | load_eks_service_config | python | Yelp/paasta | paasta_tools/eks_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/eks_tools.py | Apache-2.0 |
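A usage sketch for the EKS config loader; the service, instance, and cluster names are placeholders.
# Hypothetical identifiers; returns an EksDeploymentConfig built from the
# instance's eks-<cluster>.yaml entry merged over the service's general config.
config = load_eks_service_config(
    service="example_service",
    instance="main",
    cluster="example-cluster",
    load_deployments=False,
)
print(config.get_namespace())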
def are_services_up_in_pod(
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
registrations: Collection[str],
pod_ip: str,
pod_port: int,
) -> bool:
"""Returns whether a service in a k8s pod is reachable via envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param registrations: The service_name.instance_name of the services
:param pod_ip: IP of the pod itself
:param pod_port: The port to reach the service in the pod
"""
for registration in registrations:
backends_per_registration = get_backends(
registration,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
healthy_backends = [
backend
for backend in backends_per_registration.get(registration, [])
if backend[0]["address"] == pod_ip
and backend[0]["port_value"] == pod_port
and backend[0]["eds_health_status"] == "HEALTHY"
]
if not healthy_backends:
return False
return True | Returns whether a service in a k8s pod is reachable via envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param registrations: The service_name.instance_name of the services
:param pod_ip: IP of the pod itself
:param pod_port: The port to reach the service in the pod
| are_services_up_in_pod | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
def are_namespaces_up_in_eds(
envoy_eds_path: str,
namespaces: Collection[str],
pod_ip: str,
pod_port: int,
) -> bool:
"""Returns whether a Pod is registered on Envoy through the EDS
:param envoy_eds_path: path where EDS yaml files are stored
:param namespaces: list of namespaces to check
:param pod_ip: IP of the pod
:param pod_port: The port to reach the service in the pod
"""
for namespace in namespaces:
backends_from_eds = get_backends_from_eds(namespace, envoy_eds_path)
if (pod_ip, pod_port) not in backends_from_eds:
return False
return True | Returns whether a Pod is registered on Envoy through the EDS
:param envoy_eds_path: path where EDS yaml files are stored
:param namespaces: list of namespaces to check
:param pod_ip: IP of the pod
:param pod_port: The port to reach the service in the pod
| are_namespaces_up_in_eds | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
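A usage sketch for the EDS readiness check; the EDS path, namespace, and pod address are invented.
# Hypothetical values; returns False as soon as any requested namespace lacks
# an EDS entry for this pod's (ip, port) pair.
up = are_namespaces_up_in_eds(
    envoy_eds_path="/etc/envoy/eds",  # assumed location of the EDS yaml tree
    namespaces=["example_service.main"],
    pod_ip="10.93.1.2",
    pod_port=8888,
)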
def get_casper_endpoints(
clusters_info: Mapping[str, Any]
) -> FrozenSet[Tuple[str, int]]:
"""Filters out and returns casper endpoints from Envoy clusters."""
casper_endpoints: Set[Tuple[str, int]] = set()
for cluster_status in clusters_info["cluster_statuses"]:
if "host_statuses" in cluster_status:
if cluster_status["name"].startswith("spectre.") and cluster_status[
"name"
].endswith(".egress_cluster"):
for host_status in cluster_status["host_statuses"]:
casper_endpoints.add(
(
host_status["address"]["socket_address"]["address"],
host_status["address"]["socket_address"]["port_value"],
)
)
return frozenset(casper_endpoints) | Filters out and returns casper endpoints from Envoy clusters. | get_casper_endpoints | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
def get_backends_from_eds(namespace: str, envoy_eds_path: str) -> List[Tuple[str, int]]:
"""Returns a list of backends for a given namespace. Casper backends are also returned (if present).
:param namespace: return backends for this namespace
:param envoy_eds_path: path where EDS yaml files are stored
    :returns backends: a list of tuples representing the backends for
the requested service
"""
backends = []
eds_file_for_namespace = f"{envoy_eds_path}/{namespace}/{namespace}.yaml"
if os.access(eds_file_for_namespace, os.R_OK):
with open(eds_file_for_namespace) as f:
eds_yaml = yaml.safe_load(f)
for resource in eds_yaml.get("resources", []):
endpoints = resource.get("endpoints")
# endpoints could be None if there are no backends listed
if endpoints:
for endpoint in endpoints:
for lb_endpoint in endpoint.get("lb_endpoints", []):
address = lb_endpoint["endpoint"]["address"][
"socket_address"
]["address"]
port_value = lb_endpoint["endpoint"]["address"][
"socket_address"
]["port_value"]
backends.append((address, port_value))
return backends | Returns a list of backends for a given namespace. Casper backends are also returned (if present).
:param namespace: return backends for this namespace
:param envoy_eds_path: path where EDS yaml files are stored
:returns backends: a list of tuples representing the backends for
the requested service
| get_backends_from_eds | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
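The nested document shape that get_backends_from_eds walks, reconstructed from its key lookups; the address and port are invented values.
eds_document = {
    "resources": [
        {
            "endpoints": [
                {
                    "lb_endpoints": [
                        {
                            "endpoint": {
                                "address": {
                                    "socket_address": {
                                        "address": "10.93.1.2",
                                        "port_value": 31337,
                                    }
                                }
                            }
                        }
                    ]
                }
            ]
        }
    ]
}
# A file with this content yields [("10.93.1.2", 31337)].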
def get_backends(
service: str,
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
) -> Dict[str, List[Tuple[EnvoyBackend, bool]]]:
"""Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
"""
if service:
services = [service]
else:
services = None
return get_multiple_backends(
services,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
) | Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
| get_backends | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
def get_multiple_backends(
services: Optional[Sequence[str]],
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
resolve_hostnames: bool = True,
) -> Dict[str, List[Tuple[EnvoyBackend, bool]]]:
"""Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
"""
clusters_info = retrieve_envoy_clusters(
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
casper_endpoints = get_casper_endpoints(clusters_info)
backends: DefaultDict[
str, List[Tuple[EnvoyBackend, bool]]
] = collections.defaultdict(list)
for cluster_status in clusters_info["cluster_statuses"]:
if "host_statuses" in cluster_status:
if cluster_status["name"].endswith(".egress_cluster"):
service_name = cluster_status["name"][: -len(".egress_cluster")]
if services is None or service_name in services:
cluster_backends = []
casper_endpoint_found = False
for host_status in cluster_status["host_statuses"]:
address = host_status["address"]["socket_address"]["address"]
port_value = host_status["address"]["socket_address"][
"port_value"
]
# Check if this endpoint is actually a casper backend
# If so, omit from the service's list of backends
if not service_name.startswith("spectre."):
if (address, port_value) in casper_endpoints:
casper_endpoint_found = True
continue
hostname = address
if resolve_hostnames:
try:
hostname = socket.gethostbyaddr(address)[0].split(".")[
0
]
except socket.herror:
# Default to the raw IP address if we can't lookup the hostname
pass
cluster_backends.append(
(
EnvoyBackend(
address=address,
port_value=port_value,
hostname=hostname,
eds_health_status=host_status["health_status"][
"eds_health_status"
],
weight=host_status["weight"],
),
casper_endpoint_found,
)
)
backends[service_name] += cluster_backends
return backends | Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns backends: A list of dicts representing the backends of all
services or the requested service
| get_multiple_backends | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
def match_backends_and_pods(
backends: Iterable[EnvoyBackend],
pods: Iterable[V1Pod],
) -> List[Tuple[Optional[EnvoyBackend], Optional[V1Pod]]]:
"""Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of Envoy backend dictionaries, e.g. the list returned by
envoy_tools.get_multiple_backends.
:param pods: A list of pods
"""
# { ip : [backend1, backend2], ... }
backends_by_ip: DefaultDict[str, List[EnvoyBackend]] = collections.defaultdict(list)
backend_pod_pairs = []
for backend in backends:
ip = backend["address"]
backends_by_ip[ip].append(backend)
for pod in pods:
ip = pod.status.pod_ip
for backend in backends_by_ip.pop(ip, [None]):
backend_pod_pairs.append((backend, pod))
# we've been popping in the above loop, so anything left didn't match a k8s pod.
for backends in backends_by_ip.values():
for backend in backends:
backend_pod_pairs.append((backend, None))
return backend_pod_pairs | Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of Envoy backend dictionaries, e.g. the list returned by
envoy_tools.get_multiple_backends.
:param pods: A list of pods
| match_backends_and_pods | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
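A sketch of the pairing semantics using invented IPs; EnvoyBackend is constructed with the same keys populated in get_multiple_backends above, and V1Pod/V1PodStatus come from the kubernetes client library.
from kubernetes.client import V1Pod, V1PodStatus

backend = EnvoyBackend(
    address="10.0.0.2",
    port_value=8888,
    hostname="host-10-0-0-2",
    eds_health_status="HEALTHY",
    weight=1,
)
pods = [
    V1Pod(status=V1PodStatus(pod_ip="10.0.0.2")),
    V1Pod(status=V1PodStatus(pod_ip="10.0.0.3")),
]
pairs = match_backends_and_pods([backend], pods)
# -> [(backend, pods[0]), (None, pods[1])]: matched by IP, with None filling
#    whichever side has no counterpart.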
def get_replication_for_all_services(
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
) -> Dict[str, int]:
"""Returns the replication level for all services known to this Envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port number that this check should contact for replication information.
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
"""
backends = get_multiple_backends(
services=None,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
resolve_hostnames=False, # we're not really going to use the hostnames, so skip fetching them to save time
)
return collections.Counter(
[
service_name
for service_name, service_backends in backends.items()
for b in service_backends
if backend_is_up(b[0])
]
) | Returns the replication level for all services known to this Envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port number that this check should contact for replication information.
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
| get_replication_for_all_services | python | Yelp/paasta | paasta_tools/envoy_tools.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/envoy_tools.py | Apache-2.0 |
def _yocalhost_rule(port, comment, protocol="tcp"):
"""Return an iptables rule allowing access to a yocalhost port."""
return iptables.Rule(
protocol=protocol,
src="0.0.0.0/0.0.0.0",
dst="169.254.255.254/255.255.255.255",
target="ACCEPT",
matches=(
("comment", (("comment", (comment,)),)),
(protocol, (("dport", (str(port),)),)),
),
target_parameters=(),
) | Return an iptables rule allowing access to a yocalhost port. | _yocalhost_rule | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |
def services_running_here():
"""Generator helper that yields (service, instance, mac address) of both
mesos tasks.
"""
for container in get_running_mesos_docker_containers():
if container["HostConfig"]["NetworkMode"] != "bridge":
continue
service = container["Labels"].get("paasta_service")
instance = container["Labels"].get("paasta_instance")
if service is None or instance is None:
continue
network_info = container["NetworkSettings"]["Networks"]["bridge"]
mac = network_info["MacAddress"]
ip = network_info["IPAddress"]
        yield service, instance, mac, ip | Generator helper that yields (service, instance, mac address, ip) for
    each Mesos task running in a Docker bridge network on this host.
| services_running_here | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |
def _ensure_common_chain():
"""The common chain allows access for all services to certain resources."""
iptables.ensure_chain(
"PAASTA-COMMON",
(
# Allow return traffic for incoming connections
iptables.Rule(
protocol="ip",
src="0.0.0.0/0.0.0.0",
dst="0.0.0.0/0.0.0.0",
target="ACCEPT",
matches=(("conntrack", (("ctstate", ("ESTABLISHED",)),)),),
target_parameters=(),
),
_yocalhost_rule(1463, "scribed"),
_yocalhost_rule(8125, "metrics-relay", protocol="udp"),
_yocalhost_rule(3030, "sensu"),
iptables.Rule(
protocol="ip",
src="0.0.0.0/0.0.0.0",
dst="0.0.0.0/0.0.0.0",
target="PAASTA-DNS",
matches=(),
target_parameters=(),
),
),
) | The common chain allows access for all services to certain resources. | _ensure_common_chain | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |
def ensure_service_chains(service_groups, soa_dir, synapse_service_dir):
"""Ensure service chains exist and have the right rules.
service_groups is a dict {ServiceGroup: set([mac_address..])}
Returns dictionary {[service chain] => [list of mac addresses]}.
"""
chains = {}
for service, macs in service_groups.items():
service.update_rules(soa_dir, synapse_service_dir)
chains[service.chain_name] = macs
return chains | Ensure service chains exist and have the right rules.
service_groups is a dict {ServiceGroup: set([mac_address..])}
Returns dictionary {[service chain] => [list of mac addresses]}.
| ensure_service_chains | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |
def general_update(soa_dir, synapse_service_dir):
"""Update iptables to match the current PaaSTA state."""
ensure_shared_chains()
service_chains = ensure_service_chains(
active_service_groups(), soa_dir, synapse_service_dir
)
ensure_dispatch_chains(service_chains)
garbage_collect_old_service_chains(service_chains) | Update iptables to match the current PaaSTA state. | general_update | python | Yelp/paasta | paasta_tools/firewall.py | https://github.com/Yelp/paasta/blob/master/paasta_tools/firewall.py | Apache-2.0 |