file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 class values)
---|---|---|---|---|
nmap.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import datetime
from jackal import HostSearch, RangeSearch, Service, ServiceSearch, Logger
from jackal.config import Config
from jackal.utils import print_error, print_notification, print_success
from libnmap.parser import NmapParser, NmapParserException
def all_hosts(*args, **kwargs):
"""
Returns true for all nmap hosts
"""
return True
def import_file():
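    """
    Imports the nmap XML files given as command line arguments and logs the import statistics.
    """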
for arg in sys.argv[1:]:
print_notification("Importing nmap file: {}".format(arg))
try:
with open(arg, 'r') as f:
stats = import_nmap(f.read(), 'nmap_import', check_function=all_hosts, import_services=True)
stats['file'] = arg
Logger().log('import_nmap', 'Imported nmap file', stats=stats)
except NmapParserException:
print_error("File could not be parsed: {}".format(arg))
except FileNotFoundError:
pass
def import_nmap(result, tag, check_function=all_hosts, import_services=False):
"""
Imports the given nmap result.
"""
host_search = HostSearch(arguments=False)
service_search = ServiceSearch() | parser = NmapParser()
report = parser.parse_fromstring(result)
imported_hosts = 0
imported_services = 0
for nmap_host in report.hosts:
if check_function(nmap_host):
imported_hosts += 1
host = host_search.id_to_object(nmap_host.address)
host.status = nmap_host.status
host.add_tag(tag)
if nmap_host.os_fingerprinted:
host.os = nmap_host.os_fingerprint
if nmap_host.hostnames:
host.hostname.extend(nmap_host.hostnames)
if import_services:
for service in nmap_host.services:
imported_services += 1
serv = Service(**service.get_dict())
serv.address = nmap_host.address
service_id = service_search.object_to_id(serv)
if service_id:
# Existing object, save the banner and script results.
serv_old = Service.get(service_id)
if service.banner:
serv_old.banner = service.banner
# TODO implement
# if service.script_results:
# serv_old.script_results.extend(service.script_results)
serv_old.save()
else:
# New object
serv.address = nmap_host.address
serv.save()
if service.state == 'open':
host.open_ports.append(service.port)
if service.state == 'closed':
host.closed_ports.append(service.port)
if service.state == 'filtered':
host.filtered_ports.append(service.port)
host.save()
if imported_hosts:
print_success("Imported {} hosts, with tag {}".format(imported_hosts, tag))
else:
print_error("No hosts found")
return {'hosts': imported_hosts, 'services': imported_services}
def include_hostnames(nmap_host):
"""
    Includes only hosts that have hostnames
"""
if nmap_host.hostnames:
return True
return False
def include_up_hosts(nmap_host):
"""
Includes only hosts that have the status 'up'
"""
if nmap_host.status == 'up':
return True
return False
def nmap(nmap_args, ips):
"""
Start an nmap process with the given args on the given ips.
"""
config = Config()
arguments = ['nmap', '-Pn']
arguments.extend(ips)
arguments.extend(nmap_args)
output_file = ''
now = datetime.datetime.now()
if not '-oA' in nmap_args:
output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
path_name = os.path.join(config.get('nmap', 'directory'), output_name)
print_notification("Writing output of nmap to {}".format(path_name))
if not os.path.exists(config.get('nmap', 'directory')):
os.makedirs(config.get('nmap', 'directory'))
output_file = path_name + '.xml'
arguments.extend(['-oA', path_name])
else:
output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'
print_notification("Starting nmap")
subprocess.call(arguments)
with open(output_file, 'r') as f:
return f.read()
def nmap_discover():
"""
This function retrieves ranges from jackal
Uses two functions of nmap to find hosts:
ping: icmp / arp pinging of targets
lookup: reverse dns lookup
"""
rs = RangeSearch()
rs_parser = rs.argparser
arg = argparse.ArgumentParser(parents=[rs_parser], conflict_handler='resolve')
arg.add_argument('type', metavar='type', \
help='The type of nmap scan to do, choose from ping or lookup', \
type=str, choices=['ping', 'lookup'])
arguments, nmap_args = arg.parse_known_args()
tag = None
if arguments.type == 'ping':
tag = 'nmap_ping'
nmap_args.append('-sn')
nmap_args.append('-n')
check_function = include_up_hosts
elif arguments.type == 'lookup':
tag = 'nmap_lookup'
nmap_args.append('-sL')
check_function = include_hostnames
ranges = rs.get_ranges(tags=['!{}'.format(tag)])
ranges = [r for r in ranges]
ips = []
for r in ranges:
ips.append(r.range)
print_notification("Running nmap with args: {} on {} range(s)".format(nmap_args, len(ips)))
result = nmap(nmap_args, ips)
stats = import_nmap(result, tag, check_function)
stats['scanned_ranges'] = len(ips)
Logger().log('nmap_discover', "Nmap discover with args: {} on {} range(s)".format(nmap_args, len(ips)), stats)
for r in ranges:
r.add_tag(tag)
r.save()
def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
# Static options to be able to figure out what options to use depending on the input the user gives.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found")
def nmap_smb_vulnscan():
"""
Scans available smb services in the database for smb signing and ms17-010.
"""
service_search = ServiceSearch()
services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
services = [service for service in services]
service_dict = {}
for service in services:
service.add_tag('smb_vulnscan')
service_dict[str(service.address)] = service
nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
if services:
result = nmap(nmap_args, [str(s.address) for s in services])
parser = NmapParser()
report = parser.parse_fromstring(result)
smb_signing = 0
ms17 = 0
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
service = service_dict[str(nmap_host.address)]
if script_result.get('message_signing', '') == 'disabled':
print_success("({}) SMB Signing disabled".format(nmap_host.address))
service.add_tag('smb_signing_disabled')
smb_signing += 1
if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
service.add_tag('MS17-010')
ms17 += 1
service.update(tags=service.tags)
print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
else:
print_notification("No services found to scan.")
def os_discovery():
"""
Performs os (and domain) discovery of smb hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
    # TODO fix filter for empty fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host
arguments = "--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping".split(' ')
if len(hosts):
count = 0
print_notification("Checking OS of {} systems".format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn'])
if 'os' in script_result:
count += 1
host.os = script_result['os']
host_dict[str(nmap_host.address)] = host
for host in hosts:
host.add_tag('nmap_os')
host.save()
print_notification("Done, found the os of {} systems".format(count))
else:
print_notification("No systems found to be checked.")
if __name__ == '__main__':
os_discovery() | random_line_split |
|
nmap.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import datetime
from jackal import HostSearch, RangeSearch, Service, ServiceSearch, Logger
from jackal.config import Config
from jackal.utils import print_error, print_notification, print_success
from libnmap.parser import NmapParser, NmapParserException
def all_hosts(*args, **kwargs):
"""
Returns true for all nmap hosts
"""
return True
def import_file():
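    """
    Imports the nmap XML files given as command line arguments and logs the import statistics.
    """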
for arg in sys.argv[1:]:
print_notification("Importing nmap file: {}".format(arg))
try:
with open(arg, 'r') as f:
stats = import_nmap(f.read(), 'nmap_import', check_function=all_hosts, import_services=True)
stats['file'] = arg
Logger().log('import_nmap', 'Imported nmap file', stats=stats)
except NmapParserException:
print_error("File could not be parsed: {}".format(arg))
except FileNotFoundError:
pass
def import_nmap(result, tag, check_function=all_hosts, import_services=False):
"""
Imports the given nmap result.
"""
host_search = HostSearch(arguments=False)
service_search = ServiceSearch()
parser = NmapParser()
report = parser.parse_fromstring(result)
imported_hosts = 0
imported_services = 0
for nmap_host in report.hosts:
if check_function(nmap_host):
imported_hosts += 1
host = host_search.id_to_object(nmap_host.address)
host.status = nmap_host.status
host.add_tag(tag)
if nmap_host.os_fingerprinted:
host.os = nmap_host.os_fingerprint
if nmap_host.hostnames:
host.hostname.extend(nmap_host.hostnames)
if import_services:
for service in nmap_host.services:
imported_services += 1
serv = Service(**service.get_dict())
serv.address = nmap_host.address
service_id = service_search.object_to_id(serv)
if service_id:
# Existing object, save the banner and script results.
serv_old = Service.get(service_id)
if service.banner:
serv_old.banner = service.banner
# TODO implement
# if service.script_results:
# serv_old.script_results.extend(service.script_results)
serv_old.save()
else:
# New object
serv.address = nmap_host.address
serv.save()
if service.state == 'open':
host.open_ports.append(service.port)
if service.state == 'closed':
host.closed_ports.append(service.port)
if service.state == 'filtered':
host.filtered_ports.append(service.port)
host.save()
if imported_hosts:
print_success("Imported {} hosts, with tag {}".format(imported_hosts, tag))
else:
print_error("No hosts found")
return {'hosts': imported_hosts, 'services': imported_services}
def include_hostnames(nmap_host):
"""
    Includes only hosts that have hostnames
"""
if nmap_host.hostnames:
return True
return False
def include_up_hosts(nmap_host):
"""
Includes only hosts that have the status 'up'
"""
if nmap_host.status == 'up':
return True
return False
def nmap(nmap_args, ips):
"""
Start an nmap process with the given args on the given ips.
"""
config = Config()
arguments = ['nmap', '-Pn']
arguments.extend(ips)
arguments.extend(nmap_args)
output_file = ''
now = datetime.datetime.now()
if not '-oA' in nmap_args:
output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
path_name = os.path.join(config.get('nmap', 'directory'), output_name)
print_notification("Writing output of nmap to {}".format(path_name))
if not os.path.exists(config.get('nmap', 'directory')):
os.makedirs(config.get('nmap', 'directory'))
output_file = path_name + '.xml'
arguments.extend(['-oA', path_name])
else:
output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'
print_notification("Starting nmap")
subprocess.call(arguments)
with open(output_file, 'r') as f:
return f.read()
def nmap_discover():
"""
This function retrieves ranges from jackal
Uses two functions of nmap to find hosts:
ping: icmp / arp pinging of targets
lookup: reverse dns lookup
"""
rs = RangeSearch()
rs_parser = rs.argparser
arg = argparse.ArgumentParser(parents=[rs_parser], conflict_handler='resolve')
arg.add_argument('type', metavar='type', \
help='The type of nmap scan to do, choose from ping or lookup', \
type=str, choices=['ping', 'lookup'])
arguments, nmap_args = arg.parse_known_args()
tag = None
if arguments.type == 'ping':
tag = 'nmap_ping'
nmap_args.append('-sn')
nmap_args.append('-n')
check_function = include_up_hosts
elif arguments.type == 'lookup':
tag = 'nmap_lookup'
nmap_args.append('-sL')
check_function = include_hostnames
ranges = rs.get_ranges(tags=['!{}'.format(tag)])
ranges = [r for r in ranges]
ips = []
for r in ranges:
ips.append(r.range)
print_notification("Running nmap with args: {} on {} range(s)".format(nmap_args, len(ips)))
result = nmap(nmap_args, ips)
stats = import_nmap(result, tag, check_function)
stats['scanned_ranges'] = len(ips)
Logger().log('nmap_discover', "Nmap discover with args: {} on {} range(s)".format(nmap_args, len(ips)), stats)
for r in ranges:
r.add_tag(tag)
r.save()
def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
# Static options to be able to figure out what options to use depending on the input the user gives.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found")
def | ():
"""
Scans available smb services in the database for smb signing and ms17-010.
"""
service_search = ServiceSearch()
services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
services = [service for service in services]
service_dict = {}
for service in services:
service.add_tag('smb_vulnscan')
service_dict[str(service.address)] = service
nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
if services:
result = nmap(nmap_args, [str(s.address) for s in services])
parser = NmapParser()
report = parser.parse_fromstring(result)
smb_signing = 0
ms17 = 0
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
service = service_dict[str(nmap_host.address)]
if script_result.get('message_signing', '') == 'disabled':
print_success("({}) SMB Signing disabled".format(nmap_host.address))
service.add_tag('smb_signing_disabled')
smb_signing += 1
if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
service.add_tag('MS17-010')
ms17 += 1
service.update(tags=service.tags)
print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
else:
print_notification("No services found to scan.")
def os_discovery():
"""
Performs os (and domain) discovery of smb hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
    # TODO fix filter for empty fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host
arguments = "--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping".split(' ')
if len(hosts):
count = 0
print_notification("Checking OS of {} systems".format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn'])
if 'os' in script_result:
count += 1
host.os = script_result['os']
host_dict[str(nmap_host.address)] = host
for host in hosts:
host.add_tag('nmap_os')
host.save()
print_notification("Done, found the os of {} systems".format(count))
else:
print_notification("No systems found to be checked.")
if __name__ == '__main__':
os_discovery()
| nmap_smb_vulnscan | identifier_name |
nmap.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import datetime
from jackal import HostSearch, RangeSearch, Service, ServiceSearch, Logger
from jackal.config import Config
from jackal.utils import print_error, print_notification, print_success
from libnmap.parser import NmapParser, NmapParserException
def all_hosts(*args, **kwargs):
"""
Returns true for all nmap hosts
"""
return True
def import_file():
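    """
    Imports the nmap XML files given as command line arguments and logs the import statistics.
    """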
for arg in sys.argv[1:]:
print_notification("Importing nmap file: {}".format(arg))
try:
with open(arg, 'r') as f:
stats = import_nmap(f.read(), 'nmap_import', check_function=all_hosts, import_services=True)
stats['file'] = arg
Logger().log('import_nmap', 'Imported nmap file', stats=stats)
except NmapParserException:
print_error("File could not be parsed: {}".format(arg))
except FileNotFoundError:
pass
def import_nmap(result, tag, check_function=all_hosts, import_services=False):
"""
Imports the given nmap result.
"""
host_search = HostSearch(arguments=False)
service_search = ServiceSearch()
parser = NmapParser()
report = parser.parse_fromstring(result)
imported_hosts = 0
imported_services = 0
for nmap_host in report.hosts:
if check_function(nmap_host):
imported_hosts += 1
host = host_search.id_to_object(nmap_host.address)
host.status = nmap_host.status
host.add_tag(tag)
if nmap_host.os_fingerprinted:
host.os = nmap_host.os_fingerprint
if nmap_host.hostnames:
host.hostname.extend(nmap_host.hostnames)
if import_services:
for service in nmap_host.services:
imported_services += 1
serv = Service(**service.get_dict())
serv.address = nmap_host.address
service_id = service_search.object_to_id(serv)
if service_id:
# Existing object, save the banner and script results.
serv_old = Service.get(service_id)
if service.banner:
serv_old.banner = service.banner
# TODO implement
# if service.script_results:
# serv_old.script_results.extend(service.script_results)
serv_old.save()
else:
# New object
serv.address = nmap_host.address
serv.save()
if service.state == 'open':
host.open_ports.append(service.port)
if service.state == 'closed':
host.closed_ports.append(service.port)
if service.state == 'filtered':
host.filtered_ports.append(service.port)
host.save()
if imported_hosts:
print_success("Imported {} hosts, with tag {}".format(imported_hosts, tag))
else:
print_error("No hosts found")
return {'hosts': imported_hosts, 'services': imported_services}
def include_hostnames(nmap_host):
"""
    Includes only hosts that have hostnames
"""
if nmap_host.hostnames:
return True
return False
def include_up_hosts(nmap_host):
"""
Includes only hosts that have the status 'up'
"""
if nmap_host.status == 'up':
return True
return False
def nmap(nmap_args, ips):
"""
Start an nmap process with the given args on the given ips.
"""
config = Config()
arguments = ['nmap', '-Pn']
arguments.extend(ips)
arguments.extend(nmap_args)
output_file = ''
now = datetime.datetime.now()
if not '-oA' in nmap_args:
output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
path_name = os.path.join(config.get('nmap', 'directory'), output_name)
print_notification("Writing output of nmap to {}".format(path_name))
if not os.path.exists(config.get('nmap', 'directory')):
os.makedirs(config.get('nmap', 'directory'))
output_file = path_name + '.xml'
arguments.extend(['-oA', path_name])
else:
output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'
print_notification("Starting nmap")
subprocess.call(arguments)
with open(output_file, 'r') as f:
return f.read()
def nmap_discover():
"""
This function retrieves ranges from jackal
Uses two functions of nmap to find hosts:
ping: icmp / arp pinging of targets
lookup: reverse dns lookup
"""
rs = RangeSearch()
rs_parser = rs.argparser
arg = argparse.ArgumentParser(parents=[rs_parser], conflict_handler='resolve')
arg.add_argument('type', metavar='type', \
help='The type of nmap scan to do, choose from ping or lookup', \
type=str, choices=['ping', 'lookup'])
arguments, nmap_args = arg.parse_known_args()
tag = None
if arguments.type == 'ping':
tag = 'nmap_ping'
nmap_args.append('-sn')
nmap_args.append('-n')
check_function = include_up_hosts
elif arguments.type == 'lookup':
tag = 'nmap_lookup'
nmap_args.append('-sL')
check_function = include_hostnames
ranges = rs.get_ranges(tags=['!{}'.format(tag)])
ranges = [r for r in ranges]
ips = []
for r in ranges:
ips.append(r.range)
print_notification("Running nmap with args: {} on {} range(s)".format(nmap_args, len(ips)))
result = nmap(nmap_args, ips)
stats = import_nmap(result, tag, check_function)
stats['scanned_ranges'] = len(ips)
Logger().log('nmap_discover', "Nmap discover with args: {} on {} range(s)".format(nmap_args, len(ips)), stats)
for r in ranges:
r.add_tag(tag)
r.save()
def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
# Static options to be able to figure out what options to use depending on the input the user gives.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found")
def nmap_smb_vulnscan():
"""
Scans available smb services in the database for smb signing and ms17-010.
"""
service_search = ServiceSearch()
services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
services = [service for service in services]
service_dict = {}
for service in services:
service.add_tag('smb_vulnscan')
service_dict[str(service.address)] = service
nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
if services:
result = nmap(nmap_args, [str(s.address) for s in services])
parser = NmapParser()
report = parser.parse_fromstring(result)
smb_signing = 0
ms17 = 0
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
service = service_dict[str(nmap_host.address)]
if script_result.get('message_signing', '') == 'disabled':
print_success("({}) SMB Signing disabled".format(nmap_host.address))
service.add_tag('smb_signing_disabled')
smb_signing += 1
if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
service.add_tag('MS17-010')
ms17 += 1
service.update(tags=service.tags)
print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
else:
print_notification("No services found to scan.")
def os_discovery():
"""
Performs os (and domain) discovery of smb hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
    # TODO fix filter for empty fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host
arguments = "--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping".split(' ')
if len(hosts):
count = 0
print_notification("Checking OS of {} systems".format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
|
for host in hosts:
host.add_tag('nmap_os')
host.save()
print_notification("Done, found the os of {} systems".format(count))
else:
print_notification("No systems found to be checked.")
if __name__ == '__main__':
os_discovery()
| for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn'])
if 'os' in script_result:
count += 1
host.os = script_result['os']
host_dict[str(nmap_host.address)] = host | conditional_block |
nmap.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import datetime
from jackal import HostSearch, RangeSearch, Service, ServiceSearch, Logger
from jackal.config import Config
from jackal.utils import print_error, print_notification, print_success
from libnmap.parser import NmapParser, NmapParserException
def all_hosts(*args, **kwargs):
"""
Returns true for all nmap hosts
"""
return True
def import_file():
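    """
    Imports the nmap XML files given as command line arguments and logs the import statistics.
    """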
for arg in sys.argv[1:]:
print_notification("Importing nmap file: {}".format(arg))
try:
with open(arg, 'r') as f:
stats = import_nmap(f.read(), 'nmap_import', check_function=all_hosts, import_services=True)
stats['file'] = arg
Logger().log('import_nmap', 'Imported nmap file', stats=stats)
except NmapParserException:
print_error("File could not be parsed: {}".format(arg))
except FileNotFoundError:
pass
def import_nmap(result, tag, check_function=all_hosts, import_services=False):
"""
Imports the given nmap result.
"""
host_search = HostSearch(arguments=False)
service_search = ServiceSearch()
parser = NmapParser()
report = parser.parse_fromstring(result)
imported_hosts = 0
imported_services = 0
for nmap_host in report.hosts:
if check_function(nmap_host):
imported_hosts += 1
host = host_search.id_to_object(nmap_host.address)
host.status = nmap_host.status
host.add_tag(tag)
if nmap_host.os_fingerprinted:
host.os = nmap_host.os_fingerprint
if nmap_host.hostnames:
host.hostname.extend(nmap_host.hostnames)
if import_services:
for service in nmap_host.services:
imported_services += 1
serv = Service(**service.get_dict())
serv.address = nmap_host.address
service_id = service_search.object_to_id(serv)
if service_id:
# Existing object, save the banner and script results.
serv_old = Service.get(service_id)
if service.banner:
serv_old.banner = service.banner
# TODO implement
# if service.script_results:
# serv_old.script_results.extend(service.script_results)
serv_old.save()
else:
# New object
serv.address = nmap_host.address
serv.save()
if service.state == 'open':
host.open_ports.append(service.port)
if service.state == 'closed':
host.closed_ports.append(service.port)
if service.state == 'filtered':
host.filtered_ports.append(service.port)
host.save()
if imported_hosts:
print_success("Imported {} hosts, with tag {}".format(imported_hosts, tag))
else:
print_error("No hosts found")
return {'hosts': imported_hosts, 'services': imported_services}
def include_hostnames(nmap_host):
|
def include_up_hosts(nmap_host):
"""
Includes only hosts that have the status 'up'
"""
if nmap_host.status == 'up':
return True
return False
def nmap(nmap_args, ips):
"""
Start an nmap process with the given args on the given ips.
"""
config = Config()
arguments = ['nmap', '-Pn']
arguments.extend(ips)
arguments.extend(nmap_args)
output_file = ''
now = datetime.datetime.now()
if not '-oA' in nmap_args:
output_name = 'nmap_jackal_{}'.format(now.strftime("%Y-%m-%d %H:%M"))
path_name = os.path.join(config.get('nmap', 'directory'), output_name)
print_notification("Writing output of nmap to {}".format(path_name))
if not os.path.exists(config.get('nmap', 'directory')):
os.makedirs(config.get('nmap', 'directory'))
output_file = path_name + '.xml'
arguments.extend(['-oA', path_name])
else:
output_file = nmap_args[nmap_args.index('-oA') + 1] + '.xml'
print_notification("Starting nmap")
subprocess.call(arguments)
with open(output_file, 'r') as f:
return f.read()
def nmap_discover():
"""
This function retrieves ranges from jackal
Uses two functions of nmap to find hosts:
ping: icmp / arp pinging of targets
lookup: reverse dns lookup
"""
rs = RangeSearch()
rs_parser = rs.argparser
arg = argparse.ArgumentParser(parents=[rs_parser], conflict_handler='resolve')
arg.add_argument('type', metavar='type', \
help='The type of nmap scan to do, choose from ping or lookup', \
type=str, choices=['ping', 'lookup'])
arguments, nmap_args = arg.parse_known_args()
tag = None
if arguments.type == 'ping':
tag = 'nmap_ping'
nmap_args.append('-sn')
nmap_args.append('-n')
check_function = include_up_hosts
elif arguments.type == 'lookup':
tag = 'nmap_lookup'
nmap_args.append('-sL')
check_function = include_hostnames
ranges = rs.get_ranges(tags=['!{}'.format(tag)])
ranges = [r for r in ranges]
ips = []
for r in ranges:
ips.append(r.range)
print_notification("Running nmap with args: {} on {} range(s)".format(nmap_args, len(ips)))
result = nmap(nmap_args, ips)
stats = import_nmap(result, tag, check_function)
stats['scanned_ranges'] = len(ips)
Logger().log('nmap_discover', "Nmap discover with args: {} on {} range(s)".format(nmap_args, len(ips)), stats)
for r in ranges:
r.add_tag(tag)
r.save()
def nmap_scan():
"""
Scans the given hosts with nmap.
"""
# Create the search and config objects
hs = HostSearch()
config = Config()
# Static options to be able to figure out what options to use depending on the input the user gives.
nmap_types = ['top10', 'top100', 'custom', 'top1000', 'all']
options = {'top10':'--top-ports 10', 'top100':'--top-ports 100', 'custom': config.get('nmap', 'options'), 'top1000': '--top-ports 1000', 'all': '-p-'}
# Create an argument parser
hs_parser = hs.argparser
argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='resolve', \
description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap")
argparser.add_argument('type', metavar='type', \
help='The number of ports to scan: top10, top100, custom, top1000 (default) or all', \
type=str, choices=nmap_types, default='top1000', const='top1000', nargs='?')
arguments, extra_nmap_args = argparser.parse_known_args()
# Fix the tags for the search
tags = nmap_types[nmap_types.index(arguments.type):]
tags = ["!nmap_" + tag for tag in tags]
hosts = hs.get_hosts(tags=tags)
hosts = [host for host in hosts]
# Create the nmap arguments
nmap_args = []
nmap_args.extend(extra_nmap_args)
nmap_args.extend(options[arguments.type].split(' '))
# Run nmap
print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts)))
if len(hosts):
result = nmap(nmap_args, [str(h.address) for h in hosts])
# Import the nmap result
for host in hosts:
host.add_tag("nmap_{}".format(arguments.type))
host.save()
print_notification("Nmap done, importing results")
stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True)
stats['scanned_hosts'] = len(hosts)
stats['type'] = arguments.type
Logger().log('nmap_scan', "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats)
else:
print_notification("No hosts found")
def nmap_smb_vulnscan():
"""
Scans available smb services in the database for smb signing and ms17-010.
"""
service_search = ServiceSearch()
services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
services = [service for service in services]
service_dict = {}
for service in services:
service.add_tag('smb_vulnscan')
service_dict[str(service.address)] = service
nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
if services:
result = nmap(nmap_args, [str(s.address) for s in services])
parser = NmapParser()
report = parser.parse_fromstring(result)
smb_signing = 0
ms17 = 0
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
service = service_dict[str(nmap_host.address)]
if script_result.get('message_signing', '') == 'disabled':
print_success("({}) SMB Signing disabled".format(nmap_host.address))
service.add_tag('smb_signing_disabled')
smb_signing += 1
if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
service.add_tag('MS17-010')
ms17 += 1
service.update(tags=service.tags)
print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
else:
print_notification("No services found to scan.")
def os_discovery():
"""
Performs os (and domain) discovery of smb hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
    # TODO fix filter for empty fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host
arguments = "--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping".split(' ')
if len(hosts):
count = 0
print_notification("Checking OS of {} systems".format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn'])
if 'os' in script_result:
count += 1
host.os = script_result['os']
host_dict[str(nmap_host.address)] = host
for host in hosts:
host.add_tag('nmap_os')
host.save()
print_notification("Done, found the os of {} systems".format(count))
else:
print_notification("No systems found to be checked.")
if __name__ == '__main__':
os_discovery()
| """
	Includes only hosts that have hostnames
"""
if nmap_host.hostnames:
return True
return False | identifier_body |
tool.py | # USEFUL FUNC.(TOOL) IN IMSNG MODULE
# 2019.03.03 CREATED BY Gregory S.H. Paek
# 2019.08.29 UPDATED BY Gregory S.H. Paek
#============================================================
def timename():
'''
CONVERT 'TIME' TO YYMMDD, HHMMSS FORM.
INPUT : NONE
	OUTPUT	:	STRING FORM OF 'YYMMDD', 'HHMMSS'
'''
import numpy as np
import time
now = time.gmtime(time.time())
y, m, d = now.tm_year, now.tm_mon, now.tm_mday
ho, mi, se = now.tm_hour, now.tm_min, now.tm_sec
yy = str(y)[2:]
if len(str(m)) < 2:
mm = '0'+str(m)
else:
mm = str(m)
if len(str(d)) < 2:
dd = '0'+str(d)
else:
dd = str(d)
if len(str(ho)) < 2:
hour = '0'+str(ho)
else:
hour = str(ho)
if len(str(mi)) < 2:
mini = '0'+str(mi)
else:
mini = str(mi)
if len(str(se)) < 2:
|
else:
sec = str(se)
yymmdd = yy+mm+dd
hhmmss = hour+mini+sec
return yymmdd, hhmmss
#------------------------------------------------------------
def detection(name, ra, dec, time, location):
import numpy as np
import os, glob, sys
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.coordinates import get_sun, get_moon
from astropy.io import ascii
from astropy.table import Table, Column
target = SkyCoord(ra, dec, unit='deg') # defaults to ICRS frame
site = location
del_midnight= np.linspace(-12, +12, 720) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
sunaltaz_night = get_sun(time_night).transform_to(frame_night)
# indx_set = np.where( sunaltaz_night.alt > -18 * u.deg )
indx_rise = np.where( sunaltaz_night.alt < -18 * u.deg )
sunset = del_midnight[np.min(indx_rise)]
sunrise = del_midnight[np.max(indx_rise)]
del_midnight= np.linspace(sunset.value, sunrise.value, 100) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
return targetaltaz_night
#------------------------------------------------------------
def sendmail(filename, subject, sendID, sendPW, reciver):
'''
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import smtplib
from email.mime.text import MIMEText
import codecs
email_text = codecs.open(filename, 'rb', 'utf-8')
msg = MIMEText(email_text.read())
email_text.close()
msg['Subject'] = subject
msg['From'] = sendID
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(sendID, sendPW)
smtp_gmail.sendmail(sendID, reciver, msg.as_string())
smtp_gmail.quit()
comment = 'Send '+filename+'\n'+'From '+sendID+' To '+reciver; print(comment)
#------------------------------------------------------------
def send_gmail(subject, contents, fromID, fromPW, toIDs, ccIDs=None, path=None):
'''
SEND GMAIL
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import os
import smtplib
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
#msg = MIMEBase('mixed')
#msg = MIMEText(contents, 'plain', 'utf-8')
msg = MIMEMultipart()
msg['Subject'] = Header(s=subject, charset="utf-8")
msg['From'] = fromID
msg['To'] = toIDs
if ccIDs != None:
msg['Cc'] = ccIDs
msg.attach(MIMEText(contents, 'plain', 'utf-8'))
# ATTACH TEXT FILE ON MAIL
if path != None:
if type(path) != list:
filelist = []
filelist.append(path)
else:
filelist = path
for file in filelist:
part = MIMEBase("application", "octet-stream")
part.set_payload(open(file, 'rb').read())
part.add_header( 'Content-Disposition',
'attachment; filename="%s"'% os.path.basename(file))
msg.attach(part)
# ACCESS TO GMAIL & SEND MAIL
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(fromID, fromPW)
smtp_gmail.sendmail(msg["From"], msg["To"].split(",") + msg["Cc"].split(","), msg.as_string())
smtp_gmail.quit()
comment = 'Send '+str(path)+'\nFrom\t'+fromID+'\nTo'; print(comment); print(toIDs)
#------------------------------------------------------------
def abs2app(mag, magerr, gwdist, gwdiststd):
import numpy as np
app = 5*np.log10(gwdist)-5+mag
apperr = 5*gwdiststd/(gwdist*np.log(10))
return app, apperr
#------------------------------------------------------------
def GW170817_like(gwdist, gwdiststd):
import numpy as np
m0 = 17.476 # [AB] in i-band (t0+10h)
m0err = 0.018
dist0 = 38.4 # [MPC] Im et al. 2017
dist0err= 8.9
m = m0+5.*np.log10(gwdist/dist0)
merr = np.sqrt( (m0err)**2 + ((5.*gwdiststd)/(gwdist*np.log(10)))**2 + ((5.*dist0err)/(dist0*np.log(10)))**2 )
return m, merr
#------------------------------------------------------------
def func_linear(a, x, scaling=[0, 0]):
xpt, ypt= scaling[0], scaling[1]
ydel = ypt - (-1*a*xpt)
return -1*a*x + ydel
#------------------------------------------------------------
def calc_app(mag, magerr, gwdist0, gwdiststd0, gwdist1, gwdiststd1):
import numpy as np
app = mag+5*np.log10(gwdist1/gwdist0)
	apperr	= np.sqrt( (magerr)**2 + ((5*gwdiststd1)/(np.log(10)*gwdist1))**2 + ((5*gwdiststd0)/(np.log(10)*gwdist0))**2 )
return app, apperr
#------------------------------------------------------------
def ds9regmaker(filename, name, ra, dec):
import os,sys
import string
from astropy.io import ascii
import numpy as np
import math
'''
racol = 'ALPHA_J2000'
deccol = 'DELTA_J2000'
name = 'NUMBER'
intbl = ascii.read(filename)
'''
radius = """ 5" """
color = "green"
f = open(filename, 'w')
head1 = "# Region file format: DS9 version 4.1\n"
head2 = """global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n"""
head3 = "fk5\n"
f.write(head1)
f.write(head2)
f.write(head3)
for n in range(len(ra)):
body="circle("+str(ra[n])+","+str(dec[n])+","+radius+") # color="+color+" text={"+str(name[n])+"}\n"
f.write(body)
f.close()
#------------------------------------------------------------
def rtsmaker(observatory, headname, save_path, obspath, catpath, start, end, altlimit=30., moonseperation=40., sunlimit='-18', numlimit=100):
import pytz
import jdcal
import ephem
#from numpy import *
import numpy as np
import os, sys
import string
import datetime
import astropy.units as u
from astropy.io import ascii
import mskpy.observing as obs
import astropy.coordinates as coord
from astropy import units as u
from astropy.coordinates import SkyCoord
#------------------------------------------------------------#
# INPUT SAMPLE
#------------------------------------------------------------#
'''
observatory = 'SAO'
save_path = './'
obspath = "/home/gw/Research/observatory.txt"
catpath = 'MS181101ab_Preliminary-all_candidates.txt'
start = '2019/04/17'
end = '2019/04/19'
	#altitude limit and moon separation, moon separation is a little bit close (2~3 deg)
numlimit = 100
altlimit = 30.
moonseperation = 40.
sunlimit = '-18'
'''
#------------------------------------------------------------#
# OBSERVATORY INFO.
#------------------------------------------------------------#
obsinfo = ascii.read(obspath)
obsname = np.copy(obsinfo['name'])
obsindex = np.where(obsname == observatory)[0]
obslat = (np.copy(obsinfo['latitude(N+)'])[obsindex])[0]
obslon = (np.copy(obsinfo['longitude(E+)'])[obsindex])[0]
obsalt = (np.copy(obsinfo['altitude'])[obsindex])[0]
obstz = (np.copy(obsinfo['timezone'])[obsindex])[0]
tz = pytz.timezone(obstz)
#------------------------------------------------------------#
observ = ephem.Observer()
observ.lat = str(obslat)
observ.lon = str(obslon)
observ.elevation= obsalt
observ.horizon = sunlimit
#------------------------------------------------------------#
#objects from catalog file
tdata = ascii.read(catpath)
objname = tdata['name']
ra = tdata['ra']
dec = tdata['dec']
prior = tdata['sort']
rank = tdata['rank']
dist = tdata['dist']
RA = coord.Angle(ra, unit = u.deg)
Dec = coord.Angle(dec, unit = u.deg)
radd = RA.value
rad = RA.hour
decd = Dec.value
decdd = Dec.degree
#angular distance calculation
def angsep(ra1deg, dec1deg, ra2deg, dec2deg) :
ra1rad = ra1deg*np.pi/180
dec1rad = dec1deg*np.pi/180
ra2rad = ra2deg*np.pi/180
dec2rad = dec2deg*np.pi/180
cos_a = np.sin(dec1rad)*np.sin(dec2rad)+(np.cos(dec1rad)*np.cos(dec2rad)*np.cos(ra1rad-ra2rad))
anglesep = np.arccos(cos_a)*180/np.pi
return anglesep
#dates to calculate
fmt = '%Y/%m/%d'
startdt = datetime.datetime.strptime(start, fmt)
enddt = datetime.datetime.strptime(end, fmt)
startmjd = (jdcal.gcal2jd(startdt.year, startdt.month, startdt.day))[1]
endmjd = (jdcal.gcal2jd(enddt.year, enddt.month, enddt.day))[1]
for i in range(int(endmjd-startmjd+1)):
onedaymjd = startmjd+i+1
oneday = jdcal.jd2gcal(2400000.5, onedaymjd)
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2])
dst = tz.dst(onedaydt, is_dst=True)
dst = dst.seconds/3600
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2], tzinfo=tz)
onedayutc = onedaydt.astimezone(pytz.utc)
observ.date = onedayutc
# Moon distance and information
mcoord = ephem.Moon()
mcoord.compute(observ)
minfo = 'Moon ra, dec : '+str(mcoord.ra)+' '+str(mcoord.dec)+'\n'
mphase = ephem.Moon(observ.date)
mphasestr = 'Moon phase : '+ "%.2f" % mphase.moon_phase +'\n'
msep = angsep(radd, decdd, np.degrees(mcoord.ra), np.degrees(mcoord.dec))
# SUNSET CALC.
sunset = observ.previous_setting(ephem.Sun())
sunsettu = ephem.Date.tuple(sunset)
sunsetdt = datetime.datetime(sunsettu[0],sunsettu[1],sunsettu[2],sunsettu[3],int(sunsettu[4]),tzinfo=pytz.utc)
sunsetlocal = sunsetdt.astimezone(tz)
sunsetstr = sunlimit+' deg sunset : '+str(sunsetlocal.hour)+':'+str(sunsetlocal.minute)+'\n'
sunsethour = sunsetlocal.hour+sunsetlocal.minute/60.+sunsetlocal.second/3600.
# SUNRISE CALC.
sunrise = observ.next_rising(ephem.Sun())
sunrisetu = ephem.Date.tuple(sunrise)
sunrisedt = datetime.datetime(sunrisetu[0],sunrisetu[1],sunrisetu[2],sunrisetu[3],int(sunrisetu[4]),tzinfo=pytz.utc)
sunriselocal = sunrisedt.astimezone(tz)
sunrisestr = sunlimit+' deg sunrise : '+str(sunriselocal.hour)+':'+str(sunriselocal.minute)+'\n'
sunrisehour = sunriselocal.hour+sunriselocal.minute/60.+sunriselocal.second/3600.
#print (observatory)
#print ('Local mid night in UTC : '+str(observ.date))
#print (minfo,mphasestr,sunsetstr,sunrisestr)
# MAKE RESULT FILE
stryear = str(oneday[0])
strmonth = str(oneday[1])
strday = str(oneday[2]-1)
if int(strmonth) < 10 : strmonth = '0'+strmonth
if int(strday) < 10 : strday = '0'+strday
f = open(save_path+'/'+headname+'-'+stryear+strmonth+strday+"-rts_vis-"+observatory+".txt",'w')
f.write('#\t'+str(observ.date)+' UTC & Day Time Saving +'+str(dst)+'\n')
f.write('#\tObservatory\t= '+observatory+'\n')
f.write('#\t'+sunsetstr)
f.write('#\t'+sunrisestr)
f.write('#\t'+minfo)
f.write('#\t'+mphasestr)
f.write('#\tMoon seperation = '+str(moonseperation)+'\n')
f.write('#\tAltitude limit = '+str(altlimit)+'\n')
f.write('#\tRank : the lower rank, the higher priority\n')
f.write('#------------------------------------------------------- \n')
f.write('name ra dec rise(LT) transit(LT) set(LT) moon_dist(deg) distance(Mpc) rank\n')
numcount = 0
for n in range(len(rad)):
			#calculate rise transit set time with altitude limit
param_rts = dict( ra=radd[n],
dec=decdd[n],
date=onedaydt,
lon=obslon,
lat=obslat,
tz=obstz,
limit=altlimit,
precision=1440)
rtscal = obs.rts(**param_rts)
rt = rtscal[0]
tt = rtscal[1]
st = rtscal[2]
if rtscal[0]== None:
#print (objname[n],ra[n],dec[n], rtscal[0], rtscal[1], rtscal[2],"%.2f" % msep[n])
pass
elif sunrisehour < rtscal[0] < sunsethour and sunrisehour < rtscal[2] < sunsethour and sunrisehour < rtscal[1] < sunsethour:
#print (objname[n]+' It can be seen in daytime!')
pass
elif msep[n] < moonseperation or msep[n] > 360-moonseperation:
#print (objname[n]+' too close to Moon < '+str(moonseperation)+' deg')
pass
else:
if numcount < numlimit:
c= SkyCoord(ra=ra[n]*u.degree, dec=dec[n]*u.degree, frame='icrs')
c_ra= c.ra.hms
c_dec= c.dec.dms
nra='%02d:%02d:%.3f' %(c_ra[0], abs(c_ra[1]), abs(c_ra[2]))
ndec='%02d:%02d:%.3f' %(c_dec[0], abs(c_dec[1]), abs(c_dec[2]))
rtp ="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp ="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp ="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis ='{:8s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:4s} {:4s}'.format(objname[n],str(nra),str(ndec),rtp,ttp,stp,str(int(msep[n])),str(int(dist[n])),str(rank[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
if numcount < numlimit:
rtp="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis='{:24s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:2s}'.format(objname[n],str(ra[n]),str(dec[n]),rtp,ttp,stp,str(int(msep[n])),str(prior[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
f.close()
| sec = '0'+str(se) | conditional_block |
tool.py | # USEFUL FUNC.(TOOL) IN IMSNG MODULE
# 2019.03.03 CREATED BY Gregory S.H. Paek
# 2019.08.29 UPDATED BY Gregory S.H. Paek
#============================================================
def timename():
'''
CONVERT 'TIME' TO YYMMDD, HHMMSS FORM.
INPUT : NONE
	OUTPUT	:	STRING FORM OF 'YYMMDD', 'HHMMSS'
'''
import numpy as np
import time
now = time.gmtime(time.time())
y, m, d = now.tm_year, now.tm_mon, now.tm_mday
ho, mi, se = now.tm_hour, now.tm_min, now.tm_sec
yy = str(y)[2:]
if len(str(m)) < 2:
mm = '0'+str(m)
else:
mm = str(m)
if len(str(d)) < 2:
dd = '0'+str(d)
else:
dd = str(d)
if len(str(ho)) < 2:
hour = '0'+str(ho)
else:
hour = str(ho)
if len(str(mi)) < 2:
mini = '0'+str(mi)
else:
mini = str(mi)
if len(str(se)) < 2:
sec = '0'+str(se)
else:
sec = str(se)
yymmdd = yy+mm+dd
hhmmss = hour+mini+sec
return yymmdd, hhmmss
#------------------------------------------------------------
def detection(name, ra, dec, time, location):
import numpy as np
import os, glob, sys
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.coordinates import get_sun, get_moon
from astropy.io import ascii
from astropy.table import Table, Column
target = SkyCoord(ra, dec, unit='deg') # defaults to ICRS frame
site = location
del_midnight= np.linspace(-12, +12, 720) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
sunaltaz_night = get_sun(time_night).transform_to(frame_night)
# indx_set = np.where( sunaltaz_night.alt > -18 * u.deg )
indx_rise = np.where( sunaltaz_night.alt < -18 * u.deg )
sunset = del_midnight[np.min(indx_rise)]
sunrise = del_midnight[np.max(indx_rise)]
del_midnight= np.linspace(sunset.value, sunrise.value, 100) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
	return targetaltaz_night |
def sendmail(filename, subject, sendID, sendPW, reciver):
'''
Security reference | random_line_split |
tool.py | # USEFUL FUNC.(TOOL) IN IMSNG MODULE
# 2019.03.03 CREATED BY Gregory S.H. Paek
# 2019.08.29 UPDATED BY Gregory S.H. Paek
#============================================================
def timename():
'''
CONVERT 'TIME' TO YYMMDD, HHMMSS FORM.
INPUT : NONE
	OUTPUT	:	STRING FORM OF 'YYMMDD', 'HHMMSS'
'''
import numpy as np
import time
now = time.gmtime(time.time())
y, m, d = now.tm_year, now.tm_mon, now.tm_mday
ho, mi, se = now.tm_hour, now.tm_min, now.tm_sec
yy = str(y)[2:]
if len(str(m)) < 2:
mm = '0'+str(m)
else:
mm = str(m)
if len(str(d)) < 2:
dd = '0'+str(d)
else:
dd = str(d)
if len(str(ho)) < 2:
hour = '0'+str(ho)
else:
hour = str(ho)
if len(str(mi)) < 2:
mini = '0'+str(mi)
else:
mini = str(mi)
if len(str(se)) < 2:
sec = '0'+str(se)
else:
sec = str(se)
yymmdd = yy+mm+dd
hhmmss = hour+mini+sec
return yymmdd, hhmmss
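#	Example (illustrative): calling timename() at 2019-08-29 01:02:03 UT returns ('190829', '010203').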
#------------------------------------------------------------
def detection(name, ra, dec, time, location):
import numpy as np
import os, glob, sys
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.coordinates import get_sun, get_moon
from astropy.io import ascii
from astropy.table import Table, Column
target = SkyCoord(ra, dec, unit='deg') # defaults to ICRS frame
site = location
del_midnight= np.linspace(-12, +12, 720) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
sunaltaz_night = get_sun(time_night).transform_to(frame_night)
# indx_set = np.where( sunaltaz_night.alt > -18 * u.deg )
indx_rise = np.where( sunaltaz_night.alt < -18 * u.deg )
sunset = del_midnight[np.min(indx_rise)]
sunrise = del_midnight[np.max(indx_rise)]
del_midnight= np.linspace(sunset.value, sunrise.value, 100) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
return targetaltaz_night
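#	Usage sketch for detection() (the site, epoch and coordinates below are hypothetical
#	placeholders, not values taken from this module):
#	>>> import astropy.units as u
#	>>> from astropy.time import Time
#	>>> from astropy.coordinates import EarthLocation
#	>>> site  = EarthLocation(lat=34.5*u.deg, lon=127.5*u.deg, height=80*u.m)
#	>>> altaz = detection('candidate', 197.45, -23.38, Time('2019-04-25 00:00:00'), site)
#	>>> altaz.alt.max()		# peak altitude of the target during that night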
#------------------------------------------------------------
def sendmail(filename, subject, sendID, sendPW, reciver):
'''
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import smtplib
from email.mime.text import MIMEText
import codecs
email_text = codecs.open(filename, 'rb', 'utf-8')
msg = MIMEText(email_text.read())
email_text.close()
msg['Subject'] = subject
msg['From'] = sendID
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(sendID, sendPW)
smtp_gmail.sendmail(sendID, reciver, msg.as_string())
smtp_gmail.quit()
comment = 'Send '+filename+'\n'+'From '+sendID+' To '+reciver; print(comment)
#------------------------------------------------------------
def send_gmail(subject, contents, fromID, fromPW, toIDs, ccIDs=None, path=None):
'''
SEND GMAIL
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import os
import smtplib
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
#msg = MIMEBase('mixed')
#msg = MIMEText(contents, 'plain', 'utf-8')
msg = MIMEMultipart()
msg['Subject'] = Header(s=subject, charset="utf-8")
msg['From'] = fromID
msg['To'] = toIDs
if ccIDs != None:
msg['Cc'] = ccIDs
msg.attach(MIMEText(contents, 'plain', 'utf-8'))
# ATTACH TEXT FILE ON MAIL
if path != None:
if type(path) != list:
filelist = []
filelist.append(path)
else:
filelist = path
for file in filelist:
part = MIMEBase("application", "octet-stream")
part.set_payload(open(file, 'rb').read())
part.add_header( 'Content-Disposition',
'attachment; filename="%s"'% os.path.basename(file))
msg.attach(part)
# ACCESS TO GMAIL & SEND MAIL
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(fromID, fromPW)
	#	msg["Cc"] is None when ccIDs is not given; guard before splitting
	cclist	= msg["Cc"].split(",") if msg["Cc"] != None else []
	smtp_gmail.sendmail(msg["From"], msg["To"].split(",") + cclist, msg.as_string())
smtp_gmail.quit()
comment = 'Send '+str(path)+'\nFrom\t'+fromID+'\nTo'; print(comment); print(toIDs)
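#	Usage sketch (every address, password and file name below is a placeholder):
#	>>> send_gmail(subject='[IMSNG] observing plan',
#	...            contents='See the attached RTS file.',
#	...            fromID='[email protected]', fromPW='app-password',
#	...            toIDs='[email protected],[email protected]',
#	...            ccIDs='[email protected]',
#	...            path=['SAO-rts_vis.txt'])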
#------------------------------------------------------------
def abs2app(mag, magerr, gwdist, gwdiststd):
import numpy as np
app = 5*np.log10(gwdist)-5+mag
apperr = 5*gwdiststd/(gwdist*np.log(10))
return app, apperr
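#	abs2app() is the distance modulus m = M + 5*log10(d) - 5 (d presumably in parsec), with only
#	the distance error propagated (magerr is accepted but not used). Hypothetical numbers:
#	>>> abs2app(-16.0, 0.1, 4.0e7, 1.0e6)	# M = -16 at 40 Mpc = 4e7 pc
#	(17.01..., 0.054...)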
#------------------------------------------------------------
def GW170817_like(gwdist, gwdiststd):
import numpy as np
m0 = 17.476 # [AB] in i-band (t0+10h)
m0err = 0.018
dist0 = 38.4 # [MPC] Im et al. 2017
dist0err= 8.9
m = m0+5.*np.log10(gwdist/dist0)
merr = np.sqrt( (m0err)**2 + ((5.*gwdiststd)/(gwdist*np.log(10)))**2 + ((5.*dist0err)/(dist0*np.log(10)))**2 )
return m, merr
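#	Example: at twice the GW170817 distance (76.8 Mpc) the expected magnitude is
#	m = 17.476 + 5*log10(2) ~ 18.98, with the distance uncertainties added in quadrature.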
#------------------------------------------------------------
def func_linear(a, x, scaling=[0, 0]):
xpt, ypt= scaling[0], scaling[1]
ydel = ypt - (-1*a*xpt)
return -1*a*x + ydel
#------------------------------------------------------------
def calc_app(mag, magerr, gwdist0, gwdiststd0, gwdist1, gwdiststd1):
import numpy as np
app = mag+5*np.log10(gwdist1/gwdist0)
	apperr	= np.sqrt( (magerr)**2 + ((5*gwdiststd1)/(np.log(10)*gwdist1))**2 + ((5*gwdiststd0)/(np.log(10)*gwdist0))**2 )	#	ln(10): error propagation of 5*log10(d)
return app, apperr
#------------------------------------------------------------
def ds9regmaker(filename, name, ra, dec):
import os,sys
import string
from astropy.io import ascii
import numpy as np
import math
'''
racol = 'ALPHA_J2000'
deccol = 'DELTA_J2000'
name = 'NUMBER'
intbl = ascii.read(filename)
'''
radius = """ 5" """
color = "green"
f = open(filename, 'w')
head1 = "# Region file format: DS9 version 4.1\n"
head2 = """global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n"""
head3 = "fk5\n"
f.write(head1)
f.write(head2)
f.write(head3)
for n in range(len(ra)):
body="circle("+str(ra[n])+","+str(dec[n])+","+radius+") # color="+color+" text={"+str(name[n])+"}\n"
f.write(body)
f.close()
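#	Usage sketch (hypothetical catalogue columns, following the commented example above);
#	note that the first argument is the output .reg file that gets written:
#	>>> intbl	= ascii.read('catalog.cat')
#	>>> ds9regmaker('targets.reg', intbl['NUMBER'], intbl['ALPHA_J2000'], intbl['DELTA_J2000'])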
#------------------------------------------------------------
def rtsmaker(observatory, headname, save_path, obspath, catpath, start, end, altlimit=30., moonseperation=40., sunlimit='-18', numlimit=100):
import pytz
import jdcal
import ephem
#from numpy import *
import numpy as np
import os, sys
import string
import datetime
import astropy.units as u
from astropy.io import ascii
import mskpy.observing as obs
import astropy.coordinates as coord
from astropy import units as u
from astropy.coordinates import SkyCoord
#------------------------------------------------------------#
# INPUT SAMPLE
#------------------------------------------------------------#
'''
observatory = 'SAO'
save_path = './'
obspath = "/home/gw/Research/observatory.txt"
catpath = 'MS181101ab_Preliminary-all_candidates.txt'
start = '2019/04/17'
end = '2019/04/19'
	#altitude limit and moon separation; the moon separation is a little close (2~3 deg)
numlimit = 100
altlimit = 30.
moonseperation = 40.
sunlimit = '-18'
'''
#------------------------------------------------------------#
# OBSERVATORY INFO.
#------------------------------------------------------------#
obsinfo = ascii.read(obspath)
obsname = np.copy(obsinfo['name'])
obsindex = np.where(obsname == observatory)[0]
obslat = (np.copy(obsinfo['latitude(N+)'])[obsindex])[0]
obslon = (np.copy(obsinfo['longitude(E+)'])[obsindex])[0]
obsalt = (np.copy(obsinfo['altitude'])[obsindex])[0]
obstz = (np.copy(obsinfo['timezone'])[obsindex])[0]
tz = pytz.timezone(obstz)
#------------------------------------------------------------#
observ = ephem.Observer()
observ.lat = str(obslat)
observ.lon = str(obslon)
observ.elevation= obsalt
observ.horizon = sunlimit
#------------------------------------------------------------#
#objects from catalog file
tdata = ascii.read(catpath)
objname = tdata['name']
ra = tdata['ra']
dec = tdata['dec']
prior = tdata['sort']
rank = tdata['rank']
dist = tdata['dist']
RA = coord.Angle(ra, unit = u.deg)
Dec = coord.Angle(dec, unit = u.deg)
radd = RA.value
rad = RA.hour
decd = Dec.value
decdd = Dec.degree
#angular distance calculation
def angsep(ra1deg, dec1deg, ra2deg, dec2deg) :
ra1rad = ra1deg*np.pi/180
dec1rad = dec1deg*np.pi/180
ra2rad = ra2deg*np.pi/180
dec2rad = dec2deg*np.pi/180
cos_a = np.sin(dec1rad)*np.sin(dec2rad)+(np.cos(dec1rad)*np.cos(dec2rad)*np.cos(ra1rad-ra2rad))
anglesep = np.arccos(cos_a)*180/np.pi
return anglesep
#dates to calculate
fmt = '%Y/%m/%d'
startdt = datetime.datetime.strptime(start, fmt)
enddt = datetime.datetime.strptime(end, fmt)
startmjd = (jdcal.gcal2jd(startdt.year, startdt.month, startdt.day))[1]
endmjd = (jdcal.gcal2jd(enddt.year, enddt.month, enddt.day))[1]
for i in range(int(endmjd-startmjd+1)):
onedaymjd = startmjd+i+1
oneday = jdcal.jd2gcal(2400000.5, onedaymjd)
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2])
dst = tz.dst(onedaydt, is_dst=True)
dst = dst.seconds/3600
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2], tzinfo=tz)
onedayutc = onedaydt.astimezone(pytz.utc)
observ.date = onedayutc
# Moon distance and information
mcoord = ephem.Moon()
mcoord.compute(observ)
minfo = 'Moon ra, dec : '+str(mcoord.ra)+' '+str(mcoord.dec)+'\n'
mphase = ephem.Moon(observ.date)
mphasestr = 'Moon phase : '+ "%.2f" % mphase.moon_phase +'\n'
msep = angsep(radd, decdd, np.degrees(mcoord.ra), np.degrees(mcoord.dec))
# SUNSET CALC.
sunset = observ.previous_setting(ephem.Sun())
sunsettu = ephem.Date.tuple(sunset)
sunsetdt = datetime.datetime(sunsettu[0],sunsettu[1],sunsettu[2],sunsettu[3],int(sunsettu[4]),tzinfo=pytz.utc)
sunsetlocal = sunsetdt.astimezone(tz)
sunsetstr = sunlimit+' deg sunset : '+str(sunsetlocal.hour)+':'+str(sunsetlocal.minute)+'\n'
sunsethour = sunsetlocal.hour+sunsetlocal.minute/60.+sunsetlocal.second/3600.
# SUNRISE CALC.
sunrise = observ.next_rising(ephem.Sun())
sunrisetu = ephem.Date.tuple(sunrise)
sunrisedt = datetime.datetime(sunrisetu[0],sunrisetu[1],sunrisetu[2],sunrisetu[3],int(sunrisetu[4]),tzinfo=pytz.utc)
sunriselocal = sunrisedt.astimezone(tz)
sunrisestr = sunlimit+' deg sunrise : '+str(sunriselocal.hour)+':'+str(sunriselocal.minute)+'\n'
sunrisehour = sunriselocal.hour+sunriselocal.minute/60.+sunriselocal.second/3600.
#print (observatory)
#print ('Local mid night in UTC : '+str(observ.date))
#print (minfo,mphasestr,sunsetstr,sunrisestr)
# MAKE RESULT FILE
stryear = str(oneday[0])
strmonth = str(oneday[1])
strday = str(oneday[2]-1)
if int(strmonth) < 10 : strmonth = '0'+strmonth
if int(strday) < 10 : strday = '0'+strday
f = open(save_path+'/'+headname+'-'+stryear+strmonth+strday+"-rts_vis-"+observatory+".txt",'w')
f.write('#\t'+str(observ.date)+' UTC & Day Time Saving +'+str(dst)+'\n')
f.write('#\tObservatory\t= '+observatory+'\n')
f.write('#\t'+sunsetstr)
f.write('#\t'+sunrisestr)
f.write('#\t'+minfo)
f.write('#\t'+mphasestr)
f.write('#\tMoon seperation = '+str(moonseperation)+'\n')
f.write('#\tAltitude limit = '+str(altlimit)+'\n')
f.write('#\tRank : the lower rank, the higher priority\n')
f.write('#------------------------------------------------------- \n')
f.write('name ra dec rise(LT) transit(LT) set(LT) moon_dist(deg) distance(Mpc) rank\n')
numcount = 0
for n in range(len(rad)):
			#calculate rise/transit/set times with the altitude limit
param_rts = dict( ra=radd[n],
dec=decdd[n],
date=onedaydt,
lon=obslon,
lat=obslat,
tz=obstz,
limit=altlimit,
precision=1440)
rtscal = obs.rts(**param_rts)
rt = rtscal[0]
tt = rtscal[1]
st = rtscal[2]
if rtscal[0]== None:
#print (objname[n],ra[n],dec[n], rtscal[0], rtscal[1], rtscal[2],"%.2f" % msep[n])
pass
elif sunrisehour < rtscal[0] < sunsethour and sunrisehour < rtscal[2] < sunsethour and sunrisehour < rtscal[1] < sunsethour:
#print (objname[n]+' It can be seen in daytime!')
pass
elif msep[n] < moonseperation or msep[n] > 360-moonseperation:
#print (objname[n]+' too close to Moon < '+str(moonseperation)+' deg')
pass
else:
if numcount < numlimit:
c= SkyCoord(ra=ra[n]*u.degree, dec=dec[n]*u.degree, frame='icrs')
c_ra= c.ra.hms
c_dec= c.dec.dms
nra='%02d:%02d:%.3f' %(c_ra[0], abs(c_ra[1]), abs(c_ra[2]))
ndec='%02d:%02d:%.3f' %(c_dec[0], abs(c_dec[1]), abs(c_dec[2]))
rtp ="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp ="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp ="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis ='{:8s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:4s} {:4s}'.format(objname[n],str(nra),str(ndec),rtp,ttp,stp,str(int(msep[n])),str(int(dist[n])),str(rank[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
if numcount < numlimit:
rtp="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis='{:24s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:2s}'.format(objname[n],str(ra[n]),str(dec[n]),rtp,ttp,stp,str(int(msep[n])),str(prior[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
f.close()
| timename | identifier_name |
tool.py | # USEFUL FUNC.(TOOL) IN IMSNG MODULE
# 2019.03.03 CREATED BY Gregory S.H. Paek
# 2019.08.29 UPDATED BY Gregory S.H. Paek
#============================================================
def timename():
'''
CONVERT 'TIME' TO YYMMDD, HHMMSS FORM.
INPUT : NONE
	OUTPUT	:	STRING FORM OF 'YYMMDD', 'HHMMSS'
'''
import numpy as np
import time
now = time.gmtime(time.time())
y, m, d = now.tm_year, now.tm_mon, now.tm_mday
ho, mi, se = now.tm_hour, now.tm_min, now.tm_sec
yy = str(y)[2:]
if len(str(m)) < 2:
mm = '0'+str(m)
else:
mm = str(m)
if len(str(d)) < 2:
dd = '0'+str(d)
else:
dd = str(d)
if len(str(ho)) < 2:
hour = '0'+str(ho)
else:
hour = str(ho)
if len(str(mi)) < 2:
mini = '0'+str(mi)
else:
mini = str(mi)
if len(str(se)) < 2:
sec = '0'+str(se)
else:
sec = str(se)
yymmdd = yy+mm+dd
hhmmss = hour+mini+sec
return yymmdd, hhmmss
#------------------------------------------------------------
def detection(name, ra, dec, time, location):
import numpy as np
import os, glob, sys
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.coordinates import get_sun, get_moon
from astropy.io import ascii
from astropy.table import Table, Column
target = SkyCoord(ra, dec, unit='deg') # defaults to ICRS frame
site = location
del_midnight= np.linspace(-12, +12, 720) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
sunaltaz_night = get_sun(time_night).transform_to(frame_night)
# indx_set = np.where( sunaltaz_night.alt > -18 * u.deg )
indx_rise = np.where( sunaltaz_night.alt < -18 * u.deg )
sunset = del_midnight[np.min(indx_rise)]
sunrise = del_midnight[np.max(indx_rise)]
del_midnight= np.linspace(sunset.value, sunrise.value, 100) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
return targetaltaz_night
#------------------------------------------------------------
def sendmail(filename, subject, sendID, sendPW, reciver):
'''
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import smtplib
from email.mime.text import MIMEText
import codecs
email_text = codecs.open(filename, 'rb', 'utf-8')
msg = MIMEText(email_text.read())
email_text.close()
msg['Subject'] = subject
msg['From'] = sendID
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(sendID, sendPW)
smtp_gmail.sendmail(sendID, reciver, msg.as_string())
smtp_gmail.quit()
comment = 'Send '+filename+'\n'+'From '+sendID+' To '+reciver; print(comment)
#------------------------------------------------------------
def send_gmail(subject, contents, fromID, fromPW, toIDs, ccIDs=None, path=None):
'''
SEND GMAIL
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import os
import smtplib
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
#msg = MIMEBase('mixed')
#msg = MIMEText(contents, 'plain', 'utf-8')
msg = MIMEMultipart()
msg['Subject'] = Header(s=subject, charset="utf-8")
msg['From'] = fromID
msg['To'] = toIDs
if ccIDs != None:
msg['Cc'] = ccIDs
msg.attach(MIMEText(contents, 'plain', 'utf-8'))
# ATTACH TEXT FILE ON MAIL
if path != None:
if type(path) != list:
filelist = []
filelist.append(path)
else:
filelist = path
for file in filelist:
part = MIMEBase("application", "octet-stream")
part.set_payload(open(file, 'rb').read())
part.add_header( 'Content-Disposition',
'attachment; filename="%s"'% os.path.basename(file))
msg.attach(part)
# ACCESS TO GMAIL & SEND MAIL
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(fromID, fromPW)
	#	msg["Cc"] is None when ccIDs is not given; guard before splitting
	cclist	= msg["Cc"].split(",") if msg["Cc"] != None else []
	smtp_gmail.sendmail(msg["From"], msg["To"].split(",") + cclist, msg.as_string())
smtp_gmail.quit()
comment = 'Send '+str(path)+'\nFrom\t'+fromID+'\nTo'; print(comment); print(toIDs)
#------------------------------------------------------------
def abs2app(mag, magerr, gwdist, gwdiststd):
import numpy as np
app = 5*np.log10(gwdist)-5+mag
apperr = 5*gwdiststd/(gwdist*np.log(10))
return app, apperr
#------------------------------------------------------------
def GW170817_like(gwdist, gwdiststd):
import numpy as np
m0 = 17.476 # [AB] in i-band (t0+10h)
m0err = 0.018
dist0 = 38.4 # [MPC] Im et al. 2017
dist0err= 8.9
m = m0+5.*np.log10(gwdist/dist0)
merr = np.sqrt( (m0err)**2 + ((5.*gwdiststd)/(gwdist*np.log(10)))**2 + ((5.*dist0err)/(dist0*np.log(10)))**2 )
return m, merr
#------------------------------------------------------------
def func_linear(a, x, scaling=[0, 0]):
xpt, ypt= scaling[0], scaling[1]
ydel = ypt - (-1*a*xpt)
return -1*a*x + ydel
#------------------------------------------------------------
def calc_app(mag, magerr, gwdist0, gwdiststd0, gwdist1, gwdiststd1):
import numpy as np
app = mag+5*np.log10(gwdist1/gwdist0)
	apperr	= np.sqrt( (magerr)**2 + ((5*gwdiststd1)/(np.log(10)*gwdist1))**2 + ((5*gwdiststd0)/(np.log(10)*gwdist0))**2 )	#	ln(10): error propagation of 5*log10(d)
return app, apperr
#------------------------------------------------------------
def ds9regmaker(filename, name, ra, dec):
import os,sys
import string
from astropy.io import ascii
import numpy as np
import math
'''
racol = 'ALPHA_J2000'
deccol = 'DELTA_J2000'
name = 'NUMBER'
intbl = ascii.read(filename)
'''
radius = """ 5" """
color = "green"
f = open(filename, 'w')
head1 = "# Region file format: DS9 version 4.1\n"
head2 = """global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n"""
head3 = "fk5\n"
f.write(head1)
f.write(head2)
f.write(head3)
for n in range(len(ra)):
body="circle("+str(ra[n])+","+str(dec[n])+","+radius+") # color="+color+" text={"+str(name[n])+"}\n"
f.write(body)
f.close()
#------------------------------------------------------------
def rtsmaker(observatory, headname, save_path, obspath, catpath, start, end, altlimit=30., moonseperation=40., sunlimit='-18', numlimit=100):
| import pytz
import jdcal
import ephem
#from numpy import *
import numpy as np
import os, sys
import string
import datetime
import astropy.units as u
from astropy.io import ascii
import mskpy.observing as obs
import astropy.coordinates as coord
from astropy import units as u
from astropy.coordinates import SkyCoord
#------------------------------------------------------------#
# INPUT SAMPLE
#------------------------------------------------------------#
'''
observatory = 'SAO'
save_path = './'
obspath = "/home/gw/Research/observatory.txt"
catpath = 'MS181101ab_Preliminary-all_candidates.txt'
start = '2019/04/17'
end = '2019/04/19'
	#altitude limit and moon separation; the moon separation is a little close (2~3 deg)
numlimit = 100
altlimit = 30.
moonseperation = 40.
sunlimit = '-18'
'''
#------------------------------------------------------------#
# OBSERVATORY INFO.
#------------------------------------------------------------#
obsinfo = ascii.read(obspath)
obsname = np.copy(obsinfo['name'])
obsindex = np.where(obsname == observatory)[0]
obslat = (np.copy(obsinfo['latitude(N+)'])[obsindex])[0]
obslon = (np.copy(obsinfo['longitude(E+)'])[obsindex])[0]
obsalt = (np.copy(obsinfo['altitude'])[obsindex])[0]
obstz = (np.copy(obsinfo['timezone'])[obsindex])[0]
tz = pytz.timezone(obstz)
#------------------------------------------------------------#
observ = ephem.Observer()
observ.lat = str(obslat)
observ.lon = str(obslon)
observ.elevation= obsalt
observ.horizon = sunlimit
#------------------------------------------------------------#
#objects from catalog file
tdata = ascii.read(catpath)
objname = tdata['name']
ra = tdata['ra']
dec = tdata['dec']
prior = tdata['sort']
rank = tdata['rank']
dist = tdata['dist']
RA = coord.Angle(ra, unit = u.deg)
Dec = coord.Angle(dec, unit = u.deg)
radd = RA.value
rad = RA.hour
decd = Dec.value
decdd = Dec.degree
#angular distance calculation
def angsep(ra1deg, dec1deg, ra2deg, dec2deg) :
ra1rad = ra1deg*np.pi/180
dec1rad = dec1deg*np.pi/180
ra2rad = ra2deg*np.pi/180
dec2rad = dec2deg*np.pi/180
cos_a = np.sin(dec1rad)*np.sin(dec2rad)+(np.cos(dec1rad)*np.cos(dec2rad)*np.cos(ra1rad-ra2rad))
anglesep = np.arccos(cos_a)*180/np.pi
return anglesep
#dates to calculate
fmt = '%Y/%m/%d'
startdt = datetime.datetime.strptime(start, fmt)
enddt = datetime.datetime.strptime(end, fmt)
startmjd = (jdcal.gcal2jd(startdt.year, startdt.month, startdt.day))[1]
endmjd = (jdcal.gcal2jd(enddt.year, enddt.month, enddt.day))[1]
for i in range(int(endmjd-startmjd+1)):
onedaymjd = startmjd+i+1
oneday = jdcal.jd2gcal(2400000.5, onedaymjd)
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2])
dst = tz.dst(onedaydt, is_dst=True)
dst = dst.seconds/3600
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2], tzinfo=tz)
onedayutc = onedaydt.astimezone(pytz.utc)
observ.date = onedayutc
# Moon distance and information
mcoord = ephem.Moon()
mcoord.compute(observ)
minfo = 'Moon ra, dec : '+str(mcoord.ra)+' '+str(mcoord.dec)+'\n'
mphase = ephem.Moon(observ.date)
mphasestr = 'Moon phase : '+ "%.2f" % mphase.moon_phase +'\n'
msep = angsep(radd, decdd, np.degrees(mcoord.ra), np.degrees(mcoord.dec))
# SUNSET CALC.
sunset = observ.previous_setting(ephem.Sun())
sunsettu = ephem.Date.tuple(sunset)
sunsetdt = datetime.datetime(sunsettu[0],sunsettu[1],sunsettu[2],sunsettu[3],int(sunsettu[4]),tzinfo=pytz.utc)
sunsetlocal = sunsetdt.astimezone(tz)
sunsetstr = sunlimit+' deg sunset : '+str(sunsetlocal.hour)+':'+str(sunsetlocal.minute)+'\n'
sunsethour = sunsetlocal.hour+sunsetlocal.minute/60.+sunsetlocal.second/3600.
# SUNRISE CALC.
sunrise = observ.next_rising(ephem.Sun())
sunrisetu = ephem.Date.tuple(sunrise)
sunrisedt = datetime.datetime(sunrisetu[0],sunrisetu[1],sunrisetu[2],sunrisetu[3],int(sunrisetu[4]),tzinfo=pytz.utc)
sunriselocal = sunrisedt.astimezone(tz)
sunrisestr = sunlimit+' deg sunrise : '+str(sunriselocal.hour)+':'+str(sunriselocal.minute)+'\n'
sunrisehour = sunriselocal.hour+sunriselocal.minute/60.+sunriselocal.second/3600.
#print (observatory)
#print ('Local mid night in UTC : '+str(observ.date))
#print (minfo,mphasestr,sunsetstr,sunrisestr)
# MAKE RESULT FILE
stryear = str(oneday[0])
strmonth = str(oneday[1])
strday = str(oneday[2]-1)
if int(strmonth) < 10 : strmonth = '0'+strmonth
if int(strday) < 10 : strday = '0'+strday
f = open(save_path+'/'+headname+'-'+stryear+strmonth+strday+"-rts_vis-"+observatory+".txt",'w')
f.write('#\t'+str(observ.date)+' UTC & Day Time Saving +'+str(dst)+'\n')
f.write('#\tObservatory\t= '+observatory+'\n')
f.write('#\t'+sunsetstr)
f.write('#\t'+sunrisestr)
f.write('#\t'+minfo)
f.write('#\t'+mphasestr)
f.write('#\tMoon seperation = '+str(moonseperation)+'\n')
f.write('#\tAltitude limit = '+str(altlimit)+'\n')
f.write('#\tRank : the lower rank, the higher priority\n')
f.write('#------------------------------------------------------- \n')
f.write('name ra dec rise(LT) transit(LT) set(LT) moon_dist(deg) distance(Mpc) rank\n')
numcount = 0
for n in range(len(rad)):
			#calculate rise/transit/set times with the altitude limit
param_rts = dict( ra=radd[n],
dec=decdd[n],
date=onedaydt,
lon=obslon,
lat=obslat,
tz=obstz,
limit=altlimit,
precision=1440)
rtscal = obs.rts(**param_rts)
rt = rtscal[0]
tt = rtscal[1]
st = rtscal[2]
if rtscal[0]== None:
#print (objname[n],ra[n],dec[n], rtscal[0], rtscal[1], rtscal[2],"%.2f" % msep[n])
pass
elif sunrisehour < rtscal[0] < sunsethour and sunrisehour < rtscal[2] < sunsethour and sunrisehour < rtscal[1] < sunsethour:
#print (objname[n]+' It can be seen in daytime!')
pass
elif msep[n] < moonseperation or msep[n] > 360-moonseperation:
#print (objname[n]+' too close to Moon < '+str(moonseperation)+' deg')
pass
else:
if numcount < numlimit:
c= SkyCoord(ra=ra[n]*u.degree, dec=dec[n]*u.degree, frame='icrs')
c_ra= c.ra.hms
c_dec= c.dec.dms
nra='%02d:%02d:%.3f' %(c_ra[0], abs(c_ra[1]), abs(c_ra[2]))
ndec='%02d:%02d:%.3f' %(c_dec[0], abs(c_dec[1]), abs(c_dec[2]))
rtp ="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp ="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp ="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis ='{:8s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:4s} {:4s}'.format(objname[n],str(nra),str(ndec),rtp,ttp,stp,str(int(msep[n])),str(int(dist[n])),str(rank[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
if numcount < numlimit:
rtp="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis='{:24s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:2s}'.format(objname[n],str(ra[n]),str(dec[n]),rtp,ttp,stp,str(int(msep[n])),str(prior[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
f.close() | identifier_body |
|
retina_loss.py | import torch.nn as nn
import torch
from model.retina_config import DefaultConfig
import numpy as np
def coords_fmap2orig(image_shape,stride):
'''
    transform one feature map's cell coords to coords on the original image
    Args
    image_shape (h, w) of the feature map
    stride int
    Returns
    coords [n,4] in (x, y, x, y) order, offset to the centre of each cell
'''
h,w= image_shape
shifts_x = torch.arange(0, w * stride, stride, dtype=torch.float32)
shifts_y = torch.arange(0, h * stride, stride, dtype=torch.float32)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = torch.reshape(shift_x, [-1])
shift_y = torch.reshape(shift_y, [-1])
coords = torch.stack([shift_x, shift_y, shift_x, shift_y], -1) + stride // 2
return coords
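# Illustrative check (hypothetical numbers): a 2x2 feature map with stride 8 maps every
# cell to the centre of its stride-sized window on the input image.
# >>> coords_fmap2orig((2, 2), 8)
# tensor([[ 4.,  4.,  4.,  4.],
#         [12.,  4., 12.,  4.],
#         [ 4., 12.,  4., 12.],
#         [12., 12., 12., 12.]])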
class GenAnchors(nn.Module):
def __init__(self, config = None):
super().__init__()
if config is None:
self.config = DefaultConfig
else:
self.config = config
self.pyramid_levels = self.config.pyramid_levels
self.ratios = np.array(self.config.ratios)
self.scales = np.array(self.config.scales)
self.size = self.config.sizes
self.strides = self.config.strides
def forward(self, image):
H, W = image.size(2), image.size(3) #(ori_H, ori_W)
feature_size = [(H / stride, W / stride) for stride in self.strides]
all_anchors = []
for i in range(len(feature_size)):
anchors = self.generate_anchors(self.size[i], self.ratios, self.scales)
shift_anchors = self.shift(anchors, feature_size[i], self.strides[i]) #(H*W, A, 4)
all_anchors.append(shift_anchors)
all_anchors = torch.cat(all_anchors, dim = 0)
return all_anchors
def generate_anchors(self, base_size=16, ratios=None, scales=None):
if ratios is None:
ratios = np.array([0.5, 1, 2])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(ratios) * len(scales) # 9
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
# compute areas of anchors
areas = anchors[:, 2] * anchors[:, 3] # (9,)
# fix the ratios of w, h
anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales))) # (9,)
anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales)) # (9,)
        # transform from (0, 0, w, h) to (x1, y1, x2, y2)
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
anchors = torch.from_numpy(anchors).float().cuda() if torch.cuda.is_available() else torch.from_numpy(anchors).float()
return anchors
def shift(self, anchors, image_shape, stride):
"""
anchors : Tensor(num, 4)
image_shape : (H, W)
return shift_anchor: (H*W*num,4)
"""
ori_coords = coords_fmap2orig(image_shape, stride) # (H*W, 4) 4:(x,y,x,y)
ori_coords = ori_coords.to(device=anchors.device)
shift_anchor = ori_coords[:, None, :] + anchors[None, :, :]
return shift_anchor.reshape(-1, 4)
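# With the usual RetinaNet settings (strides 8/16/32/64/128 and len(ratios)*len(scales) = 9
# anchors per location -- an assumption about DefaultConfig, which is not shown here), a
# 512x384 input gives (64*48 + 32*24 + 16*12 + 8*6 + 4*3) * 9 = 36828 anchors, which is
# where the 36828 in the __main__ example below comes from.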
def calc_iou(box1, box2):
"""
box1:(M,4)
box2:(N,4)
"""
lt = torch.max(box1[:,None,:2], box2[:, :2]) #(M,N,2)
rb = torch.min(box1[:,None,2:], box2[:, 2:]) #(M,N,2)
wh = torch.clamp(rb - lt , min=0.0) #(M, N, 2)
inter_area = wh[..., 0] * wh[..., 1] #(M, N)
area_box1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) #(M,)
area_box2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) #(N,)
iou = inter_area / (area_box1[:,None] + area_box2 - inter_area + 1e-16) #(M,N)
return iou
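# Quick sanity check (hypothetical boxes in (x1, y1, x2, y2)):
# >>> b1 = torch.tensor([[0., 0., 10., 10.]])
# >>> b2 = torch.tensor([[5., 5., 15., 15.]])
# >>> calc_iou(b1, b2)   # overlap 5*5 = 25, union 100 + 100 - 25 = 175
# tensor([[0.1429]])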
def focal_loss(preds, targets, alpha=0.25, gamma = 2.0):
preds = preds.sigmoid()
preds = torch.clamp(preds, min=1e-4,max = 1. - 1e-4)
if torch.cuda.is_available():
alpha_factor = torch.ones(targets.shape).cuda() * alpha
else:
alpha_factor = torch.ones(targets.shape) * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, (1. - alpha_factor))
focal_weights = torch.where(torch.eq(targets, 1.), 1 - preds, preds)
focal_weights = alpha_factor * torch.pow(focal_weights, gamma)
bce = - (targets * torch.log(preds) + (1. - targets) * torch.log(1. - preds))
cls_loss = focal_weights * bce
if torch.cuda.is_available():
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss).cuda())
else:
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss))
return cls_loss.sum()
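# Worked example (hypothetical single anchor): a logit of 0.0 gives p = sigmoid(0) = 0.5, so a
# positive target contributes alpha * (1 - p)**gamma * -log(p) = 0.25 * 0.25 * 0.693 ~ 0.043;
# anchors whose target is -1 are ignored entirely.
# >>> focal_loss(torch.zeros(1, 1), torch.ones(1, 1))
# tensor(0.0433)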
def smooth_l1(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_widths = boxes[pos_inds][:, 2] - boxes[pos_inds][:, 0]
gt_heights = boxes[pos_inds][:, 3] - boxes[pos_inds][:, 1]
gt_ctr_x = boxes[pos_inds][:, 0] + gt_widths * 0.5
gt_ctr_y = boxes[pos_inds][:, 1] + gt_heights * 0.5
pos_anchor_widths = anchor_widths[pos_inds]
pos_anchor_heights = anchor_heights[pos_inds]
pos_anchor_ctr_x = anchor_ctr_x[pos_inds]
pos_anchor_ctr_y = anchor_ctr_y[pos_inds]
gt_widths = torch.clamp(gt_widths, min=1.0)
gt_heights = torch.clamp(gt_heights, min=1.0)
target_dx = (gt_ctr_x - pos_anchor_ctr_x) / pos_anchor_widths
target_dy = (gt_ctr_y - pos_anchor_ctr_y) / pos_anchor_heights
target_dw = torch.log(gt_widths / pos_anchor_widths)
target_dh = torch.log(gt_heights / pos_anchor_heights)
targets = torch.stack([target_dx,target_dy,target_dw,target_dh], dim=0).t() #(num_pos,4)
if torch.cuda.is_available():
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2]).cuda()
else:
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2])
reg_diff = torch.abs(targets - pos_reg_pred) #(num_pos,4)
reg_loss = torch.where(
torch.le(reg_diff, 1.0/9.0),
0.5 * 9.0 * torch.pow(reg_diff, 2),
reg_diff - 0.5 /9.0
)
return reg_loss.mean()
else:
if torch.cuda.is_available():
reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
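# The targets built above follow the standard R-CNN box encoding
# (tx, ty, tw, th) = ((gx - ax) / aw, (gy - ay) / ah, log(gw / aw), log(gh / ah)),
# scaled by the variances (0.1, 0.1, 0.2, 0.2); the smooth-L1 branch switches from the
# quadratic to the linear form at |diff| = 1/9.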
def giou(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_boxes = boxes[pos_inds,:] #(num_pos, 4)
pos_anchor_widths = anchor_widths[pos_inds] #(num_pos,)
pos_anchor_heights = anchor_heights[pos_inds] #(num_pos,)
pos_anchor_ctr_x = anchor_ctr_x[pos_inds] #(num_pos,)
pos_anchor_ctr_y = anchor_ctr_y[pos_inds] #(num_pos,)
dx = pos_reg_pred[:, 0] * 0.1 #(num_pos,)
dy = pos_reg_pred[:, 1] * 0.1 #(num_pos,)
dw = pos_reg_pred[:, 2] * 0.2 #(num_pos,)
dh = pos_reg_pred[:, 3] * 0.2 #(num_pos,)
pred_ctr_x = dx * pos_anchor_widths + pos_anchor_ctr_x #(num_pos,)
pred_ctr_y = dy * pos_anchor_heights + pos_anchor_ctr_y #(num_pos,)
pred_w = torch.exp(dw) * pos_anchor_widths #(num_pos,)
pred_h = torch.exp(dh) * pos_anchor_heights #(num_pos,)
pred_x1 = pred_ctr_x - pred_w * 0.5 #(num_pos,)
pred_y1 = pred_ctr_y - pred_h * 0.5 #(num_pos,)
pred_x2 = pred_ctr_x + pred_w * 0.5 #(num_pos,)
pred_y2 = pred_ctr_y + pred_h * 0.5 #(num_pos,)
preds_boxes = torch.stack([pred_x1,pred_y1,pred_x2,pred_y2], dim=0).t() #(num_pos,4)
reg_loss = compute_giou_loss(gt_boxes, preds_boxes)
else:
if torch.cuda.is_available():
reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
def compute_giou_loss(boxes1, boxes2):
"""
boxes1 :(N,4) (x1,y1,x2,y2)
boxes2: (N,4) (x1,y1,x2,y2)
"""
x1y1 = torch.max(boxes1[:, :2], boxes2[:, :2])
x2y2 = torch.min(boxes1[:, 2:], boxes2[:, 2:])
wh = torch.clamp(x2y2 - x1y1, min=0.)
area_inter = wh[:, 0] * wh[:, 1]
area_b1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area_b2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
union = area_b1 + area_b2 - area_inter
iou = area_inter / (union + 1e-16)
x1y1_max = torch.min(boxes1[:, :2], boxes2[:, :2])
x2y2_max = torch.max(boxes1[:, 2:], boxes2[:, 2:])
g_wh = torch.clamp(x2y2_max - x1y1_max, min=0.)
g_area = g_wh[:, 0] * g_wh[:, 1]
giou = iou - (g_area - union) / g_area.clamp(1e-10)
loss = 1. - giou
return loss.mean()
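# Worked example (same hypothetical boxes as the calc_iou note above): IoU = 25/175 ~ 0.143,
# the smallest enclosing box [0, 0, 15, 15] has area 225, so
# GIoU = 0.143 - (225 - 175)/225 ~ -0.079 and the loss is 1 - GIoU ~ 1.079.
# >>> compute_giou_loss(torch.tensor([[0., 0., 10., 10.]]), torch.tensor([[5., 5., 15., 15.]]))
# tensor(1.0794)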
class LOSS(nn.Module):
def __init__(self,reg_mode = 'giou'):
super(LOSS, self).__init__()
self.reg_mode = reg_mode
def forward(self, inputs):
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
cls_logits, reg_preds, anchors, boxes, classes = inputs
anchor_widths = anchors[:, 2] - anchors[:, 0]
anchor_heights = anchors[:, 3] - anchors[:, 1]
anchor_ctr_x = anchors[:, 0] + anchor_widths * 0.5
anchor_ctr_y = anchors[:, 1] + anchor_heights * 0.5
bacth_size = cls_logits.shape[0]
class_loss = []
reg_loss = []
for i in range(bacth_size):
per_cls_logit = cls_logits[i,:,:] #(sum(H*W)*A, class_num)
per_reg_pred = reg_preds[i,:,:]
per_boxes = boxes[i,:,:]
per_classes = classes[i,:]
mask = per_boxes[:, 0] != -1
per_boxes = per_boxes[mask] #(?, 4)
per_classes = per_classes[mask] #(?,)
if per_classes.shape[0] == 0:
alpha_factor = torch.ones(per_cls_logit.shape).cuda() * 0.25 if torch.cuda.is_available() else torch.ones(per_cls_logit.shape) * 0.25
alpha_factor = 1. - alpha_factor
focal_weights = per_cls_logit
focal_weights = alpha_factor * torch.pow(focal_weights, 2.0)
bce = -(torch.log(1.0 - per_cls_logit))
cls_loss = focal_weights * bce
class_loss.append(cls_loss.sum())
reg_loss.append(torch.tensor(0).float())
continue
IoU = calc_iou(anchors, per_boxes) #(sum(H*W)*A, ?)
iou_max, max_ind = torch.max(IoU, dim=1) #(sum(H*W)*A,)
targets = torch.ones_like(per_cls_logit) * -1 #(sum(H*W)*A, class_num)
targets[iou_max < 0.4, :] = 0 #bg
pos_anchors_ind = iou_max >= 0.5 #(?,)
num_pos = torch.clamp(pos_anchors_ind.sum().float(), min=1.0)
assigned_classes = per_classes[max_ind] #(sum(H*W)*A, )
assigned_boxes = per_boxes[max_ind,:] #(sum(H*W)*A, 4)
targets[pos_anchors_ind,:] = 0
targets[pos_anchors_ind, (assigned_classes[pos_anchors_ind]).long() - 1] = 1
class_loss.append(focal_loss(per_cls_logit, targets).view(1) / num_pos)
if self.reg_mode == 'smoothl1':
reg_loss.append(smooth_l1(pos_anchors_ind, [anchor_widths,anchor_heights,anchor_ctr_x,anchor_ctr_y],
assigned_boxes,per_reg_pred))
elif self.reg_mode =='giou':
reg_loss.append(giou(pos_anchors_ind, [anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y],
assigned_boxes, per_reg_pred))
cls_loss = torch.stack(class_loss).mean()
reg_loss = torch.stack(reg_loss).mean()
total_loss = cls_loss + reg_loss
return cls_loss, reg_loss, total_loss
if __name__ =="__main__":
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
image = torch.rand((1,3,512,384))
anchor_model = GenAnchors()
anchors = anchor_model(image)
boxes = [[69,172,270,330],[150,141,229,284],[258,198,297,329]]
classes = [12,1,1]
boxes = torch.FloatTensor(boxes) #(3,4)
boxes = torch.nn.functional.pad(boxes,[0, 0, 0, 47],value=-1).unsqueeze(dim=0)
classes = torch.FloatTensor(classes) #(3,)
classes = torch.nn.functional.pad(classes,[0,47],value=-1).unsqueeze(dim=0)
annotation = torch.cat([boxes,classes.unsqueeze(dim=2)], dim=2)
#print(annotation)
# print(anchors.dtype)
# print(boxes.dtype)
cls_logits = torch.ones((1,36828,20)) * 0.5
reg_preds = torch.ones((1,36828,4))
loss = LOSS()
print(loss([cls_logits,reg_preds,anchors,boxes,classes]))
| coords_fmap2orig | identifier_name |
retina_loss.py | import torch.nn as nn
import torch
from model.retina_config import DefaultConfig
import numpy as np
def coords_fmap2orig(image_shape,stride):
'''
    transform one feature map's cell coords to coords on the original image
    Args
    image_shape (h, w) of the feature map
    stride int
    Returns
    coords [n,4] in (x, y, x, y) order, offset to the centre of each cell
'''
h,w= image_shape
shifts_x = torch.arange(0, w * stride, stride, dtype=torch.float32)
shifts_y = torch.arange(0, h * stride, stride, dtype=torch.float32)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = torch.reshape(shift_x, [-1])
shift_y = torch.reshape(shift_y, [-1])
coords = torch.stack([shift_x, shift_y, shift_x, shift_y], -1) + stride // 2
return coords
class GenAnchors(nn.Module):
def __init__(self, config = None):
super().__init__()
if config is None:
self.config = DefaultConfig
else:
self.config = config
self.pyramid_levels = self.config.pyramid_levels
self.ratios = np.array(self.config.ratios)
self.scales = np.array(self.config.scales)
self.size = self.config.sizes
self.strides = self.config.strides
def forward(self, image):
H, W = image.size(2), image.size(3) #(ori_H, ori_W)
feature_size = [(H / stride, W / stride) for stride in self.strides]
all_anchors = []
for i in range(len(feature_size)):
anchors = self.generate_anchors(self.size[i], self.ratios, self.scales)
shift_anchors = self.shift(anchors, feature_size[i], self.strides[i]) #(H*W, A, 4)
all_anchors.append(shift_anchors)
all_anchors = torch.cat(all_anchors, dim = 0)
return all_anchors
def generate_anchors(self, base_size=16, ratios=None, scales=None):
if ratios is None:
ratios = np.array([0.5, 1, 2])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(ratios) * len(scales) # 9
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
# compute areas of anchors
areas = anchors[:, 2] * anchors[:, 3] # (9,)
# fix the ratios of w, h
anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales))) # (9,)
anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales)) # (9,)
        # transform from (0, 0, w, h) to (x1, y1, x2, y2)
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
anchors = torch.from_numpy(anchors).float().cuda() if torch.cuda.is_available() else torch.from_numpy(anchors).float()
return anchors
def shift(self, anchors, image_shape, stride):
"""
anchors : Tensor(num, 4)
image_shape : (H, W)
return shift_anchor: (H*W*num,4)
"""
ori_coords = coords_fmap2orig(image_shape, stride) # (H*W, 4) 4:(x,y,x,y)
ori_coords = ori_coords.to(device=anchors.device)
shift_anchor = ori_coords[:, None, :] + anchors[None, :, :]
return shift_anchor.reshape(-1, 4)
def calc_iou(box1, box2):
"""
box1:(M,4)
box2:(N,4)
"""
lt = torch.max(box1[:,None,:2], box2[:, :2]) #(M,N,2)
rb = torch.min(box1[:,None,2:], box2[:, 2:]) #(M,N,2)
wh = torch.clamp(rb - lt , min=0.0) #(M, N, 2)
inter_area = wh[..., 0] * wh[..., 1] #(M, N)
area_box1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) #(M,)
area_box2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) #(N,)
iou = inter_area / (area_box1[:,None] + area_box2 - inter_area + 1e-16) #(M,N)
return iou
def focal_loss(preds, targets, alpha=0.25, gamma = 2.0):
preds = preds.sigmoid()
preds = torch.clamp(preds, min=1e-4,max = 1. - 1e-4)
if torch.cuda.is_available():
alpha_factor = torch.ones(targets.shape).cuda() * alpha
else:
alpha_factor = torch.ones(targets.shape) * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, (1. - alpha_factor))
focal_weights = torch.where(torch.eq(targets, 1.), 1 - preds, preds)
focal_weights = alpha_factor * torch.pow(focal_weights, gamma)
bce = - (targets * torch.log(preds) + (1. - targets) * torch.log(1. - preds))
cls_loss = focal_weights * bce
if torch.cuda.is_available():
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss).cuda())
else:
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss))
return cls_loss.sum()
def smooth_l1(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_widths = boxes[pos_inds][:, 2] - boxes[pos_inds][:, 0]
gt_heights = boxes[pos_inds][:, 3] - boxes[pos_inds][:, 1]
gt_ctr_x = boxes[pos_inds][:, 0] + gt_widths * 0.5
gt_ctr_y = boxes[pos_inds][:, 1] + gt_heights * 0.5
pos_anchor_widths = anchor_widths[pos_inds]
pos_anchor_heights = anchor_heights[pos_inds]
pos_anchor_ctr_x = anchor_ctr_x[pos_inds]
pos_anchor_ctr_y = anchor_ctr_y[pos_inds]
gt_widths = torch.clamp(gt_widths, min=1.0)
gt_heights = torch.clamp(gt_heights, min=1.0)
target_dx = (gt_ctr_x - pos_anchor_ctr_x) / pos_anchor_widths
target_dy = (gt_ctr_y - pos_anchor_ctr_y) / pos_anchor_heights
target_dw = torch.log(gt_widths / pos_anchor_widths)
target_dh = torch.log(gt_heights / pos_anchor_heights)
targets = torch.stack([target_dx,target_dy,target_dw,target_dh], dim=0).t() #(num_pos,4)
if torch.cuda.is_available():
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2]).cuda()
else:
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2])
reg_diff = torch.abs(targets - pos_reg_pred) #(num_pos,4)
reg_loss = torch.where(
torch.le(reg_diff, 1.0/9.0),
0.5 * 9.0 * torch.pow(reg_diff, 2),
reg_diff - 0.5 /9.0
)
return reg_loss.mean()
else:
if torch.cuda.is_available():
reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
def giou(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_boxes = boxes[pos_inds,:] #(num_pos, 4)
pos_anchor_widths = anchor_widths[pos_inds] #(num_pos,)
pos_anchor_heights = anchor_heights[pos_inds] #(num_pos,)
pos_anchor_ctr_x = anchor_ctr_x[pos_inds] #(num_pos,)
pos_anchor_ctr_y = anchor_ctr_y[pos_inds] #(num_pos,)
dx = pos_reg_pred[:, 0] * 0.1 #(num_pos,)
dy = pos_reg_pred[:, 1] * 0.1 #(num_pos,)
dw = pos_reg_pred[:, 2] * 0.2 #(num_pos,)
dh = pos_reg_pred[:, 3] * 0.2 #(num_pos,)
pred_ctr_x = dx * pos_anchor_widths + pos_anchor_ctr_x #(num_pos,)
pred_ctr_y = dy * pos_anchor_heights + pos_anchor_ctr_y #(num_pos,)
pred_w = torch.exp(dw) * pos_anchor_widths #(num_pos,)
pred_h = torch.exp(dh) * pos_anchor_heights #(num_pos,)
pred_x1 = pred_ctr_x - pred_w * 0.5 #(num_pos,)
pred_y1 = pred_ctr_y - pred_h * 0.5 #(num_pos,)
pred_x2 = pred_ctr_x + pred_w * 0.5 #(num_pos,)
pred_y2 = pred_ctr_y + pred_h * 0.5 #(num_pos,)
preds_boxes = torch.stack([pred_x1,pred_y1,pred_x2,pred_y2], dim=0).t() #(num_pos,4)
reg_loss = compute_giou_loss(gt_boxes, preds_boxes)
else:
if torch.cuda.is_available():
reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
def compute_giou_loss(boxes1, boxes2):
"""
boxes1 :(N,4) (x1,y1,x2,y2)
boxes2: (N,4) (x1,y1,x2,y2)
"""
x1y1 = torch.max(boxes1[:, :2], boxes2[:, :2])
x2y2 = torch.min(boxes1[:, 2:], boxes2[:, 2:])
wh = torch.clamp(x2y2 - x1y1, min=0.)
area_inter = wh[:, 0] * wh[:, 1]
area_b1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area_b2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
union = area_b1 + area_b2 - area_inter
iou = area_inter / (union + 1e-16)
x1y1_max = torch.min(boxes1[:, :2], boxes2[:, :2])
x2y2_max = torch.max(boxes1[:, 2:], boxes2[:, 2:])
g_wh = torch.clamp(x2y2_max - x1y1_max, min=0.)
g_area = g_wh[:, 0] * g_wh[:, 1]
giou = iou - (g_area - union) / g_area.clamp(1e-10)
loss = 1. - giou
return loss.mean()
class LOSS(nn.Module):
def __init__(self,reg_mode = 'giou'):
        super(LOSS, self).__init__()
        self.reg_mode = reg_mode
def forward(self, inputs):
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
cls_logits, reg_preds, anchors, boxes, classes = inputs
anchor_widths = anchors[:, 2] - anchors[:, 0]
anchor_heights = anchors[:, 3] - anchors[:, 1]
anchor_ctr_x = anchors[:, 0] + anchor_widths * 0.5
anchor_ctr_y = anchors[:, 1] + anchor_heights * 0.5
bacth_size = cls_logits.shape[0]
class_loss = []
reg_loss = []
for i in range(bacth_size):
per_cls_logit = cls_logits[i,:,:] #(sum(H*W)*A, class_num)
per_reg_pred = reg_preds[i,:,:]
per_boxes = boxes[i,:,:]
per_classes = classes[i,:]
mask = per_boxes[:, 0] != -1
per_boxes = per_boxes[mask] #(?, 4)
per_classes = per_classes[mask] #(?,)
if per_classes.shape[0] == 0:
alpha_factor = torch.ones(per_cls_logit.shape).cuda() * 0.25 if torch.cuda.is_available() else torch.ones(per_cls_logit.shape) * 0.25
alpha_factor = 1. - alpha_factor
focal_weights = per_cls_logit
focal_weights = alpha_factor * torch.pow(focal_weights, 2.0)
bce = -(torch.log(1.0 - per_cls_logit))
cls_loss = focal_weights * bce
class_loss.append(cls_loss.sum())
reg_loss.append(torch.tensor(0).float())
continue
IoU = calc_iou(anchors, per_boxes) #(sum(H*W)*A, ?)
iou_max, max_ind = torch.max(IoU, dim=1) #(sum(H*W)*A,)
targets = torch.ones_like(per_cls_logit) * -1 #(sum(H*W)*A, class_num)
targets[iou_max < 0.4, :] = 0 #bg
pos_anchors_ind = iou_max >= 0.5 #(?,)
num_pos = torch.clamp(pos_anchors_ind.sum().float(), min=1.0)
assigned_classes = per_classes[max_ind] #(sum(H*W)*A, )
assigned_boxes = per_boxes[max_ind,:] #(sum(H*W)*A, 4)
targets[pos_anchors_ind,:] = 0
targets[pos_anchors_ind, (assigned_classes[pos_anchors_ind]).long() - 1] = 1
class_loss.append(focal_loss(per_cls_logit, targets).view(1) / num_pos)
if self.reg_mode == 'smoothl1':
reg_loss.append(smooth_l1(pos_anchors_ind, [anchor_widths,anchor_heights,anchor_ctr_x,anchor_ctr_y],
assigned_boxes,per_reg_pred))
elif self.reg_mode =='giou':
reg_loss.append(giou(pos_anchors_ind, [anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y],
assigned_boxes, per_reg_pred))
cls_loss = torch.stack(class_loss).mean()
reg_loss = torch.stack(reg_loss).mean()
total_loss = cls_loss + reg_loss
return cls_loss, reg_loss, total_loss
if __name__ =="__main__":
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
image = torch.rand((1,3,512,384))
anchor_model = GenAnchors()
anchors = anchor_model(image)
boxes = [[69,172,270,330],[150,141,229,284],[258,198,297,329]]
classes = [12,1,1]
boxes = torch.FloatTensor(boxes) #(3,4)
boxes = torch.nn.functional.pad(boxes,[0, 0, 0, 47],value=-1).unsqueeze(dim=0)
classes = torch.FloatTensor(classes) #(3,)
classes = torch.nn.functional.pad(classes,[0,47],value=-1).unsqueeze(dim=0)
annotation = torch.cat([boxes,classes.unsqueeze(dim=2)], dim=2)
#print(annotation)
# print(anchors.dtype)
# print(boxes.dtype)
cls_logits = torch.ones((1,36828,20)) * 0.5
reg_preds = torch.ones((1,36828,4))
loss = LOSS()
print(loss([cls_logits,reg_preds,anchors,boxes,classes]))
| super(LOSS, self).__init__()
self.reg_mode = reg_mode | identifier_body |
retina_loss.py | import torch.nn as nn
import torch
from model.retina_config import DefaultConfig
import numpy as np
def coords_fmap2orig(image_shape,stride):
'''
    transform one feature map's cell coords to coords on the original image
    Args
    image_shape (h, w) of the feature map
    stride int
    Returns
    coords [n,4] in (x, y, x, y) order, offset to the centre of each cell
'''
h,w= image_shape
shifts_x = torch.arange(0, w * stride, stride, dtype=torch.float32)
shifts_y = torch.arange(0, h * stride, stride, dtype=torch.float32)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = torch.reshape(shift_x, [-1])
shift_y = torch.reshape(shift_y, [-1])
coords = torch.stack([shift_x, shift_y, shift_x, shift_y], -1) + stride // 2
return coords
class GenAnchors(nn.Module):
def __init__(self, config = None):
super().__init__()
if config is None:
self.config = DefaultConfig
else:
self.config = config
self.pyramid_levels = self.config.pyramid_levels
self.ratios = np.array(self.config.ratios)
self.scales = np.array(self.config.scales)
self.size = self.config.sizes
self.strides = self.config.strides
def forward(self, image):
H, W = image.size(2), image.size(3) #(ori_H, ori_W)
feature_size = [(H / stride, W / stride) for stride in self.strides]
all_anchors = []
for i in range(len(feature_size)):
anchors = self.generate_anchors(self.size[i], self.ratios, self.scales)
shift_anchors = self.shift(anchors, feature_size[i], self.strides[i]) #(H*W, A, 4)
all_anchors.append(shift_anchors)
all_anchors = torch.cat(all_anchors, dim = 0)
return all_anchors
def generate_anchors(self, base_size=16, ratios=None, scales=None):
if ratios is None:
ratios = np.array([0.5, 1, 2])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(ratios) * len(scales) # 9
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
# compute areas of anchors
areas = anchors[:, 2] * anchors[:, 3] # (9,)
# fix the ratios of w, h
anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales))) # (9,)
anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales)) # (9,)
        # transform from (0, 0, w, h) to (x1, y1, x2, y2)
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
anchors = torch.from_numpy(anchors).float().cuda() if torch.cuda.is_available() else torch.from_numpy(anchors).float()
return anchors
def shift(self, anchors, image_shape, stride):
"""
anchors : Tensor(num, 4)
image_shape : (H, W)
return shift_anchor: (H*W*num,4)
"""
ori_coords = coords_fmap2orig(image_shape, stride) # (H*W, 4) 4:(x,y,x,y)
ori_coords = ori_coords.to(device=anchors.device)
shift_anchor = ori_coords[:, None, :] + anchors[None, :, :]
return shift_anchor.reshape(-1, 4)
def calc_iou(box1, box2):
"""
box1:(M,4)
box2:(N,4)
"""
lt = torch.max(box1[:,None,:2], box2[:, :2]) #(M,N,2)
rb = torch.min(box1[:,None,2:], box2[:, 2:]) #(M,N,2)
wh = torch.clamp(rb - lt , min=0.0) #(M, N, 2)
inter_area = wh[..., 0] * wh[..., 1] #(M, N)
area_box1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) #(M,)
area_box2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) #(N,)
iou = inter_area / (area_box1[:,None] + area_box2 - inter_area + 1e-16) #(M,N)
return iou
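# Worked example (illustrative only): box1 = [[0, 0, 2, 2]], box2 = [[1, 1, 3, 3]]
#   intersection = 1*1 = 1, areas = 4 and 4, union = 4 + 4 - 1 = 7
#   IoU = 1 / 7 ~= 0.143, so calc_iou returns a (1, 1) tensor of ~0.143.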
def focal_loss(preds, targets, alpha=0.25, gamma = 2.0):
preds = preds.sigmoid()
preds = torch.clamp(preds, min=1e-4,max = 1. - 1e-4)
if torch.cuda.is_available():
alpha_factor = torch.ones(targets.shape).cuda() * alpha
else:
alpha_factor = torch.ones(targets.shape) * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, (1. - alpha_factor))
focal_weights = torch.where(torch.eq(targets, 1.), 1 - preds, preds)
focal_weights = alpha_factor * torch.pow(focal_weights, gamma)
bce = - (targets * torch.log(preds) + (1. - targets) * torch.log(1. - preds))
cls_loss = focal_weights * bce
if torch.cuda.is_available():
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss).cuda())
else:
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss))
return cls_loss.sum()
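# Numeric intuition for the focal weighting above (illustrative): for a positive target,
# an easy prediction p = 0.9 gets weight alpha*(1-p)^2 = 0.25*0.01 = 0.0025, while a hard
# prediction p = 0.1 gets 0.25*0.81 ~= 0.2025 -- roughly 80x more weight -- so the loss
# concentrates on poorly classified anchors. Entries whose target is -1 are ignored.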
def smooth_l1(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_widths = boxes[pos_inds][:, 2] - boxes[pos_inds][:, 0]
gt_heights = boxes[pos_inds][:, 3] - boxes[pos_inds][:, 1]
gt_ctr_x = boxes[pos_inds][:, 0] + gt_widths * 0.5
gt_ctr_y = boxes[pos_inds][:, 1] + gt_heights * 0.5
pos_anchor_widths = anchor_widths[pos_inds]
pos_anchor_heights = anchor_heights[pos_inds]
pos_anchor_ctr_x = anchor_ctr_x[pos_inds]
pos_anchor_ctr_y = anchor_ctr_y[pos_inds]
gt_widths = torch.clamp(gt_widths, min=1.0)
gt_heights = torch.clamp(gt_heights, min=1.0)
target_dx = (gt_ctr_x - pos_anchor_ctr_x) / pos_anchor_widths
target_dy = (gt_ctr_y - pos_anchor_ctr_y) / pos_anchor_heights
target_dw = torch.log(gt_widths / pos_anchor_widths)
target_dh = torch.log(gt_heights / pos_anchor_heights)
targets = torch.stack([target_dx,target_dy,target_dw,target_dh], dim=0).t() #(num_pos,4)
if torch.cuda.is_available():
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2]).cuda()
else:
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2])
reg_diff = torch.abs(targets - pos_reg_pred) #(num_pos,4)
reg_loss = torch.where(
torch.le(reg_diff, 1.0/9.0),
0.5 * 9.0 * torch.pow(reg_diff, 2),
reg_diff - 0.5 /9.0
)
return reg_loss.mean()
else:
if torch.cuda.is_available():
reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
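# Example of the (dx, dy, dw, dh) encoding used above (illustrative numbers): for an anchor
# with w = h = 64 centred at (32, 32) and a ground-truth box (0, 0, 80, 80):
#   dx = (40 - 32) / 64 = 0.125, dy = 0.125, dw = log(80/64) ~= 0.223, dh ~= 0.223
# and after dividing by the variances [0.1, 0.1, 0.2, 0.2] the regression targets become
# approximately [1.25, 1.25, 1.12, 1.12].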
def giou(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_boxes = boxes[pos_inds,:] #(num_pos, 4)
pos_anchor_widths = anchor_widths[pos_inds] #(num_pos,)
pos_anchor_heights = anchor_heights[pos_inds] #(num_pos,)
pos_anchor_ctr_x = anchor_ctr_x[pos_inds] #(num_pos,)
pos_anchor_ctr_y = anchor_ctr_y[pos_inds] #(num_pos,)
dx = pos_reg_pred[:, 0] * 0.1 #(num_pos,)
dy = pos_reg_pred[:, 1] * 0.1 #(num_pos,)
dw = pos_reg_pred[:, 2] * 0.2 #(num_pos,)
dh = pos_reg_pred[:, 3] * 0.2 #(num_pos,)
pred_ctr_x = dx * pos_anchor_widths + pos_anchor_ctr_x #(num_pos,)
pred_ctr_y = dy * pos_anchor_heights + pos_anchor_ctr_y #(num_pos,)
pred_w = torch.exp(dw) * pos_anchor_widths #(num_pos,)
pred_h = torch.exp(dh) * pos_anchor_heights #(num_pos,)
pred_x1 = pred_ctr_x - pred_w * 0.5 #(num_pos,)
pred_y1 = pred_ctr_y - pred_h * 0.5 #(num_pos,)
pred_x2 = pred_ctr_x + pred_w * 0.5 #(num_pos,)
pred_y2 = pred_ctr_y + pred_h * 0.5 #(num_pos,) | reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
def compute_giou_loss(boxes1, boxes2):
"""
boxes1 :(N,4) (x1,y1,x2,y2)
boxes2: (N,4) (x1,y1,x2,y2)
"""
x1y1 = torch.max(boxes1[:, :2], boxes2[:, :2])
x2y2 = torch.min(boxes1[:, 2:], boxes2[:, 2:])
wh = torch.clamp(x2y2 - x1y1, min=0.)
area_inter = wh[:, 0] * wh[:, 1]
area_b1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area_b2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
union = area_b1 + area_b2 - area_inter
iou = area_inter / (union + 1e-16)
x1y1_max = torch.min(boxes1[:, :2], boxes2[:, :2])
x2y2_max = torch.max(boxes1[:, 2:], boxes2[:, 2:])
g_wh = torch.clamp(x2y2_max - x1y1_max, min=0.)
g_area = g_wh[:, 0] * g_wh[:, 1]
giou = iou - (g_area - union) / g_area.clamp(1e-10)
loss = 1. - giou
return loss.mean()
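# Quick illustrative checks for compute_giou_loss: for two identical boxes IoU = GIoU = 1 and
# the per-pair loss is 0; for two distant, non-overlapping boxes the IoU term is 0 and the
# enclosing-box penalty drives GIoU towards -1, so the per-pair loss approaches 2.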
class LOSS(nn.Module):
def __init__(self,reg_mode = 'giou'):
super(LOSS, self).__init__()
self.reg_mode = reg_mode
def forward(self, inputs):
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
cls_logits, reg_preds, anchors, boxes, classes = inputs
anchor_widths = anchors[:, 2] - anchors[:, 0]
anchor_heights = anchors[:, 3] - anchors[:, 1]
anchor_ctr_x = anchors[:, 0] + anchor_widths * 0.5
anchor_ctr_y = anchors[:, 1] + anchor_heights * 0.5
        batch_size = cls_logits.shape[0]
        class_loss = []
        reg_loss = []
        for i in range(batch_size):
per_cls_logit = cls_logits[i,:,:] #(sum(H*W)*A, class_num)
per_reg_pred = reg_preds[i,:,:]
per_boxes = boxes[i,:,:]
per_classes = classes[i,:]
mask = per_boxes[:, 0] != -1
per_boxes = per_boxes[mask] #(?, 4)
per_classes = per_classes[mask] #(?,)
            if per_classes.shape[0] == 0:
                # no ground-truth boxes in this image: treat every anchor as background and
                # apply only the negative focal term to the sigmoided class scores
                per_cls_prob = torch.clamp(per_cls_logit.sigmoid(), 1e-4, 1. - 1e-4)
                alpha_factor = torch.ones(per_cls_prob.shape).cuda() * 0.25 if torch.cuda.is_available() else torch.ones(per_cls_prob.shape) * 0.25
                alpha_factor = 1. - alpha_factor
                focal_weights = per_cls_prob
                focal_weights = alpha_factor * torch.pow(focal_weights, 2.0)
                bce = -(torch.log(1.0 - per_cls_prob))
                cls_loss = focal_weights * bce
                class_loss.append(cls_loss.sum().view(1))  # keep shape (1,) so torch.stack works
                reg_loss.append(torch.tensor(0).float())
continue
IoU = calc_iou(anchors, per_boxes) #(sum(H*W)*A, ?)
iou_max, max_ind = torch.max(IoU, dim=1) #(sum(H*W)*A,)
targets = torch.ones_like(per_cls_logit) * -1 #(sum(H*W)*A, class_num)
targets[iou_max < 0.4, :] = 0 #bg
pos_anchors_ind = iou_max >= 0.5 #(?,)
num_pos = torch.clamp(pos_anchors_ind.sum().float(), min=1.0)
assigned_classes = per_classes[max_ind] #(sum(H*W)*A, )
assigned_boxes = per_boxes[max_ind,:] #(sum(H*W)*A, 4)
targets[pos_anchors_ind,:] = 0
targets[pos_anchors_ind, (assigned_classes[pos_anchors_ind]).long() - 1] = 1
class_loss.append(focal_loss(per_cls_logit, targets).view(1) / num_pos)
if self.reg_mode == 'smoothl1':
reg_loss.append(smooth_l1(pos_anchors_ind, [anchor_widths,anchor_heights,anchor_ctr_x,anchor_ctr_y],
assigned_boxes,per_reg_pred))
elif self.reg_mode =='giou':
reg_loss.append(giou(pos_anchors_ind, [anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y],
assigned_boxes, per_reg_pred))
cls_loss = torch.stack(class_loss).mean()
reg_loss = torch.stack(reg_loss).mean()
total_loss = cls_loss + reg_loss
return cls_loss, reg_loss, total_loss
if __name__ =="__main__":
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
image = torch.rand((1,3,512,384))
anchor_model = GenAnchors()
anchors = anchor_model(image)
boxes = [[69,172,270,330],[150,141,229,284],[258,198,297,329]]
classes = [12,1,1]
boxes = torch.FloatTensor(boxes) #(3,4)
boxes = torch.nn.functional.pad(boxes,[0, 0, 0, 47],value=-1).unsqueeze(dim=0)
classes = torch.FloatTensor(classes) #(3,)
classes = torch.nn.functional.pad(classes,[0,47],value=-1).unsqueeze(dim=0)
annotation = torch.cat([boxes,classes.unsqueeze(dim=2)], dim=2)
#print(annotation)
# print(anchors.dtype)
# print(boxes.dtype)
cls_logits = torch.ones((1,36828,20)) * 0.5
reg_preds = torch.ones((1,36828,4))
loss = LOSS()
print(loss([cls_logits,reg_preds,anchors,boxes,classes])) |
preds_boxes = torch.stack([pred_x1,pred_y1,pred_x2,pred_y2], dim=0).t() #(num_pos,4)
reg_loss = compute_giou_loss(gt_boxes, preds_boxes)
else:
if torch.cuda.is_available(): | random_line_split |
retina_loss.py | import torch.nn as nn
import torch
from model.retina_config import DefaultConfig
import numpy as np
def coords_fmap2orig(image_shape,stride):
'''
    transform one fmap's cell coordinates to original-image coordinates
    Args
    image_shape (h, w) of the feature map
    stride int
    Returns
    coords [n,4] in (x, y, x, y) order
'''
h,w= image_shape
shifts_x = torch.arange(0, w * stride, stride, dtype=torch.float32)
shifts_y = torch.arange(0, h * stride, stride, dtype=torch.float32)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = torch.reshape(shift_x, [-1])
shift_y = torch.reshape(shift_y, [-1])
coords = torch.stack([shift_x, shift_y, shift_x, shift_y], -1) + stride // 2
return coords
class GenAnchors(nn.Module):
def __init__(self, config = None):
super().__init__()
if config is None:
self.config = DefaultConfig
else:
self.config = config
self.pyramid_levels = self.config.pyramid_levels
self.ratios = np.array(self.config.ratios)
self.scales = np.array(self.config.scales)
self.size = self.config.sizes
self.strides = self.config.strides
def forward(self, image):
H, W = image.size(2), image.size(3) #(ori_H, ori_W)
feature_size = [(H / stride, W / stride) for stride in self.strides]
all_anchors = []
for i in range(len(feature_size)):
anchors = self.generate_anchors(self.size[i], self.ratios, self.scales)
shift_anchors = self.shift(anchors, feature_size[i], self.strides[i]) #(H*W, A, 4)
all_anchors.append(shift_anchors)
all_anchors = torch.cat(all_anchors, dim = 0)
return all_anchors
def generate_anchors(self, base_size=16, ratios=None, scales=None):
if ratios is None:
ratios = np.array([0.5, 1, 2])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(ratios) * len(scales) # 9
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
# compute areas of anchors
areas = anchors[:, 2] * anchors[:, 3] # (9,)
# fix the ratios of w, h
anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales))) # (9,)
anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales)) # (9,)
        # transform from (0, 0, w, h) to (x1, y1, x2, y2)
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
anchors = torch.from_numpy(anchors).float().cuda() if torch.cuda.is_available() else torch.from_numpy(anchors).float()
return anchors
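    # Illustrative example (not executed): with base_size = 32, the default ratios
    # (0.5, 1, 2) and scales (1, 2^(1/3), 2^(2/3)), the first row has area 32^2 = 1024 and
    # ratio 0.5, giving w = sqrt(1024/0.5) ~= 45.3 and h = 0.5*w ~= 22.6; its centred
    # (x1, y1, x2, y2) form is then roughly (-22.6, -11.3, 22.6, 11.3).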
def shift(self, anchors, image_shape, stride):
"""
anchors : Tensor(num, 4)
image_shape : (H, W)
return shift_anchor: (H*W*num,4)
"""
ori_coords = coords_fmap2orig(image_shape, stride) # (H*W, 4) 4:(x,y,x,y)
ori_coords = ori_coords.to(device=anchors.device)
shift_anchor = ori_coords[:, None, :] + anchors[None, :, :]
return shift_anchor.reshape(-1, 4)
def calc_iou(box1, box2):
"""
box1:(M,4)
box2:(N,4)
"""
lt = torch.max(box1[:,None,:2], box2[:, :2]) #(M,N,2)
rb = torch.min(box1[:,None,2:], box2[:, 2:]) #(M,N,2)
wh = torch.clamp(rb - lt , min=0.0) #(M, N, 2)
inter_area = wh[..., 0] * wh[..., 1] #(M, N)
area_box1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) #(M,)
area_box2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) #(N,)
iou = inter_area / (area_box1[:,None] + area_box2 - inter_area + 1e-16) #(M,N)
return iou
def focal_loss(preds, targets, alpha=0.25, gamma = 2.0):
preds = preds.sigmoid()
preds = torch.clamp(preds, min=1e-4,max = 1. - 1e-4)
if torch.cuda.is_available():
alpha_factor = torch.ones(targets.shape).cuda() * alpha
else:
alpha_factor = torch.ones(targets.shape) * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, (1. - alpha_factor))
focal_weights = torch.where(torch.eq(targets, 1.), 1 - preds, preds)
focal_weights = alpha_factor * torch.pow(focal_weights, gamma)
bce = - (targets * torch.log(preds) + (1. - targets) * torch.log(1. - preds))
cls_loss = focal_weights * bce
if torch.cuda.is_available():
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss).cuda())
else:
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss))
return cls_loss.sum()
def smooth_l1(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_widths = boxes[pos_inds][:, 2] - boxes[pos_inds][:, 0]
gt_heights = boxes[pos_inds][:, 3] - boxes[pos_inds][:, 1]
gt_ctr_x = boxes[pos_inds][:, 0] + gt_widths * 0.5
gt_ctr_y = boxes[pos_inds][:, 1] + gt_heights * 0.5
pos_anchor_widths = anchor_widths[pos_inds]
pos_anchor_heights = anchor_heights[pos_inds]
pos_anchor_ctr_x = anchor_ctr_x[pos_inds]
pos_anchor_ctr_y = anchor_ctr_y[pos_inds]
gt_widths = torch.clamp(gt_widths, min=1.0)
gt_heights = torch.clamp(gt_heights, min=1.0)
target_dx = (gt_ctr_x - pos_anchor_ctr_x) / pos_anchor_widths
target_dy = (gt_ctr_y - pos_anchor_ctr_y) / pos_anchor_heights
target_dw = torch.log(gt_widths / pos_anchor_widths)
target_dh = torch.log(gt_heights / pos_anchor_heights)
targets = torch.stack([target_dx,target_dy,target_dw,target_dh], dim=0).t() #(num_pos,4)
if torch.cuda.is_available():
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2]).cuda()
else:
targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2])
reg_diff = torch.abs(targets - pos_reg_pred) #(num_pos,4)
reg_loss = torch.where(
torch.le(reg_diff, 1.0/9.0),
0.5 * 9.0 * torch.pow(reg_diff, 2),
reg_diff - 0.5 /9.0
)
return reg_loss.mean()
else:
if torch.cuda.is_available():
reg_loss = torch.tensor(0).float().cuda()
else:
reg_loss = torch.tensor(0).float()
return reg_loss
def giou(pos_inds,anchor_infos, boxes,reg_pred):
"""
pos_inds : (num_pos,)
boxes:(sum(H*W)*A, 4)
reg_pred: (sum(H*W)*A, 4)
"""
anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)
if pos_inds.sum() > 0:
pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)
gt_boxes = boxes[pos_inds,:] #(num_pos, 4)
pos_anchor_widths = anchor_widths[pos_inds] #(num_pos,)
pos_anchor_heights = anchor_heights[pos_inds] #(num_pos,)
pos_anchor_ctr_x = anchor_ctr_x[pos_inds] #(num_pos,)
pos_anchor_ctr_y = anchor_ctr_y[pos_inds] #(num_pos,)
dx = pos_reg_pred[:, 0] * 0.1 #(num_pos,)
dy = pos_reg_pred[:, 1] * 0.1 #(num_pos,)
dw = pos_reg_pred[:, 2] * 0.2 #(num_pos,)
dh = pos_reg_pred[:, 3] * 0.2 #(num_pos,)
pred_ctr_x = dx * pos_anchor_widths + pos_anchor_ctr_x #(num_pos,)
pred_ctr_y = dy * pos_anchor_heights + pos_anchor_ctr_y #(num_pos,)
pred_w = torch.exp(dw) * pos_anchor_widths #(num_pos,)
pred_h = torch.exp(dh) * pos_anchor_heights #(num_pos,)
pred_x1 = pred_ctr_x - pred_w * 0.5 #(num_pos,)
pred_y1 = pred_ctr_y - pred_h * 0.5 #(num_pos,)
pred_x2 = pred_ctr_x + pred_w * 0.5 #(num_pos,)
pred_y2 = pred_ctr_y + pred_h * 0.5 #(num_pos,)
preds_boxes = torch.stack([pred_x1,pred_y1,pred_x2,pred_y2], dim=0).t() #(num_pos,4)
reg_loss = compute_giou_loss(gt_boxes, preds_boxes)
else:
if torch.cuda.is_available():
|
else:
reg_loss = torch.tensor(0).float()
return reg_loss
def compute_giou_loss(boxes1, boxes2):
"""
boxes1 :(N,4) (x1,y1,x2,y2)
boxes2: (N,4) (x1,y1,x2,y2)
"""
x1y1 = torch.max(boxes1[:, :2], boxes2[:, :2])
x2y2 = torch.min(boxes1[:, 2:], boxes2[:, 2:])
wh = torch.clamp(x2y2 - x1y1, min=0.)
area_inter = wh[:, 0] * wh[:, 1]
area_b1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area_b2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
union = area_b1 + area_b2 - area_inter
iou = area_inter / (union + 1e-16)
x1y1_max = torch.min(boxes1[:, :2], boxes2[:, :2])
x2y2_max = torch.max(boxes1[:, 2:], boxes2[:, 2:])
g_wh = torch.clamp(x2y2_max - x1y1_max, min=0.)
g_area = g_wh[:, 0] * g_wh[:, 1]
giou = iou - (g_area - union) / g_area.clamp(1e-10)
loss = 1. - giou
return loss.mean()
class LOSS(nn.Module):
def __init__(self,reg_mode = 'giou'):
super(LOSS, self).__init__()
self.reg_mode = reg_mode
def forward(self, inputs):
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
cls_logits, reg_preds, anchors, boxes, classes = inputs
anchor_widths = anchors[:, 2] - anchors[:, 0]
anchor_heights = anchors[:, 3] - anchors[:, 1]
anchor_ctr_x = anchors[:, 0] + anchor_widths * 0.5
anchor_ctr_y = anchors[:, 1] + anchor_heights * 0.5
        batch_size = cls_logits.shape[0]
        class_loss = []
        reg_loss = []
        for i in range(batch_size):
per_cls_logit = cls_logits[i,:,:] #(sum(H*W)*A, class_num)
per_reg_pred = reg_preds[i,:,:]
per_boxes = boxes[i,:,:]
per_classes = classes[i,:]
mask = per_boxes[:, 0] != -1
per_boxes = per_boxes[mask] #(?, 4)
per_classes = per_classes[mask] #(?,)
            if per_classes.shape[0] == 0:
                # no ground-truth boxes in this image: treat every anchor as background and
                # apply only the negative focal term to the sigmoided class scores
                per_cls_prob = torch.clamp(per_cls_logit.sigmoid(), 1e-4, 1. - 1e-4)
                alpha_factor = torch.ones(per_cls_prob.shape).cuda() * 0.25 if torch.cuda.is_available() else torch.ones(per_cls_prob.shape) * 0.25
                alpha_factor = 1. - alpha_factor
                focal_weights = per_cls_prob
                focal_weights = alpha_factor * torch.pow(focal_weights, 2.0)
                bce = -(torch.log(1.0 - per_cls_prob))
                cls_loss = focal_weights * bce
                class_loss.append(cls_loss.sum().view(1))  # keep shape (1,) so torch.stack works
                reg_loss.append(torch.tensor(0).float())
continue
IoU = calc_iou(anchors, per_boxes) #(sum(H*W)*A, ?)
iou_max, max_ind = torch.max(IoU, dim=1) #(sum(H*W)*A,)
targets = torch.ones_like(per_cls_logit) * -1 #(sum(H*W)*A, class_num)
targets[iou_max < 0.4, :] = 0 #bg
pos_anchors_ind = iou_max >= 0.5 #(?,)
num_pos = torch.clamp(pos_anchors_ind.sum().float(), min=1.0)
assigned_classes = per_classes[max_ind] #(sum(H*W)*A, )
assigned_boxes = per_boxes[max_ind,:] #(sum(H*W)*A, 4)
targets[pos_anchors_ind,:] = 0
targets[pos_anchors_ind, (assigned_classes[pos_anchors_ind]).long() - 1] = 1
class_loss.append(focal_loss(per_cls_logit, targets).view(1) / num_pos)
if self.reg_mode == 'smoothl1':
reg_loss.append(smooth_l1(pos_anchors_ind, [anchor_widths,anchor_heights,anchor_ctr_x,anchor_ctr_y],
assigned_boxes,per_reg_pred))
elif self.reg_mode =='giou':
reg_loss.append(giou(pos_anchors_ind, [anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y],
assigned_boxes, per_reg_pred))
cls_loss = torch.stack(class_loss).mean()
reg_loss = torch.stack(reg_loss).mean()
total_loss = cls_loss + reg_loss
return cls_loss, reg_loss, total_loss
if __name__ =="__main__":
"""
cls_logits :(n, sum(H*W)*A, class_num+1)
reg_preds:(n, sum(H*W)*A, 4)
anchors:(sum(H*W)*A, 4)
boxes:(n, max_num, 4)
classes:(n, max_num)
"""
image = torch.rand((1,3,512,384))
anchor_model = GenAnchors()
anchors = anchor_model(image)
boxes = [[69,172,270,330],[150,141,229,284],[258,198,297,329]]
classes = [12,1,1]
boxes = torch.FloatTensor(boxes) #(3,4)
boxes = torch.nn.functional.pad(boxes,[0, 0, 0, 47],value=-1).unsqueeze(dim=0)
classes = torch.FloatTensor(classes) #(3,)
classes = torch.nn.functional.pad(classes,[0,47],value=-1).unsqueeze(dim=0)
annotation = torch.cat([boxes,classes.unsqueeze(dim=2)], dim=2)
#print(annotation)
# print(anchors.dtype)
# print(boxes.dtype)
cls_logits = torch.ones((1,36828,20)) * 0.5
reg_preds = torch.ones((1,36828,4))
loss = LOSS()
print(loss([cls_logits,reg_preds,anchors,boxes,classes]))
| reg_loss = torch.tensor(0).float().cuda() | conditional_block |
preprocess_lidc1.py | import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import pylidc as pl
import warnings
import pickle
from tqdm import tqdm
import glob
import random
import threading
warnings.filterwarnings("ignore")
from joblib import Parallel, delayed
NEW_SPACING= (1.4,1.4,2.)
PROCESSED_DIR = '/home/ronens1/lidc/processed/'
PIXEL_RANGE = 65535
CROP_SHAPE = (32,32,32)
random.seed(1)
NODULE_THRESHOLD = 0.5
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def next(self):
with self.lock:
return self.it.next()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
def plot(vol,x,y,z):
corner1 = np.array([x,y,z])-np.array(CROP_SHAPE)/2
corner2 = corner1+np.array(CROP_SHAPE)
plt.subplot(311)
plt.imshow(vol[x,corner1[1]:corner2[1],corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(vol[corner1[0]:corner2[0],y,corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(vol[corner1[0]:corner2[0],corner1[1]:corner2[1],z],cmap=plt.cm.gray)
plt.show()
def process_scan(scan):
uid = scan.series_instance_uid
volume,spacing,orientation,z0 = scan.to_volume()
volume = volume.transpose([1,0,2])
if orientation[0]<0:
volume=volume[::-1,::-1,:]
resize_factor = np.array(spacing)/np.array(NEW_SPACING)
resampled = scipy.ndimage.interpolation.zoom(volume,resize_factor)
resampled = normalize(resampled)
shape = resampled.shape
clusters = scan.annotations_with_matching_overlap()
clusters_data=[] | cluster_group=[]
for ann in cluster:
diameter = ann.estimate_diameter()
features = ann.feature_vals()
c = ann.centroid()
c[:2]=c[:2]*np.array(spacing[:2])
c[2] = c[2]-z0
c = c/np.array(NEW_SPACING)
b = ann.bbox()
b[:2,:] = b[:2,:]*np.expand_dims(np.array(spacing[:2]),axis=1)
b[2,:] = b[2,:]-z0
b = b / np.expand_dims(np.array(NEW_SPACING),axis=1)
if orientation[0]<0:
c[:2] = np.array(resampled.shape)[:2] - c[:2]
b[:2,:] = np.expand_dims(np.array(resampled.shape)[:2],axis=1)-b[:2,:]
#plot(resampled,int(c[0]),int(c[1]),int(c[2]))
annotation= {'diameter': diameter,'features':features, 'centroid':c,'bbox':b}
cluster_group.append(annotation)
if c[2]<0 or b[2,0]<0 or b[2,1]<0:
print "Error",uid,orientation,c,b,ann.centroid(),ann.bbox()
clusters_data.append(cluster_group)
np.save(PROCESSED_DIR+uid+'.npy',resampled)
with open(PROCESSED_DIR+uid+'annotation.txt', 'w') as outfile:
pickle.dump(clusters_data, outfile)
def normalize(image):
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = PIXEL_RANGE*(image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>PIXEL_RANGE] = PIXEL_RANGE
image[image<0] = 0.
image = np.round(image).astype(np.uint16)
return image
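# Worked example (illustrative): with MIN_BOUND = -1000 HU and MAX_BOUND = 400 HU, a voxel of
# 0 HU maps to 65535 * (1000/1400) ~= 46811, -1000 HU maps to 0, and anything >= 400 HU
# saturates at 65535 before the cast to uint16.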
def load_lidc():
filenames = glob.glob(PROCESSED_DIR+'*.npy')
data = []
annotations = []
for name in tqdm(filenames):
data.append(np.load(name))
annotation_file_name = '.'.join(name.split('.')[:-1])+'annotation.txt'
with open(annotation_file_name,'r') as pickle_file:
annotations.append(pickle.load(pickle_file))
perm = range(len(annotations))
random.shuffle(perm)
data = [data[i] for i in perm]
annotations= [annotations[i] for i in perm]
data=np.asarray(data)
return data,annotations
def soft_focus(pos,centroid):
Focus_radius = 30 # mm
dist = np.linalg.norm( (np.array(pos)-np.array(centroid))*np.array(NEW_SPACING))/Focus_radius
return max(1-dist,0)
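# Illustrative numbers: with NEW_SPACING = (1.4, 1.4, 2.0) mm, a voxel offset of (5, 5, 5)
# corresponds to (7, 7, 10) mm, i.e. sqrt(49 + 49 + 100) ~= 14.1 mm from the centroid, so
# soft_focus returns about 1 - 14.1/30 ~= 0.53; anything 30 mm or further away returns 0.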
def shift_radius(shift):
r = 100
while r > shift:
v = np.random.uniform(-shift,high=shift,size=(3,))
r = np.linalg.norm(v)
vox_shift = (v/np.array(NEW_SPACING)).astype(int)
return vox_shift
def find_nodule(annotation,min_agreement):
good_clusters = [cluster for cluster in annotation if len(cluster)>=min_agreement]
marks = [mark for cluster in good_clusters for mark in cluster]
mark = marks[random.randint(0,len(marks)-1)]
centroid = np.array(mark['centroid']).astype(int)
    shift = 12.0 # mm, should be within the soft nodule detection threshold
pos = centroid + shift_radius(shift)
#print "diameter",mark['diameter']
"""
feature_names = \
('subtlety',
'internalStructure',
'calcification',
'sphericity',
'margin',
'lobulation',
'spiculation',
'texture',
'malignancy')
"""
soft = soft_focus(pos,centroid)
if (soft < NODULE_THRESHOLD) :
print 'Error: nodule shifted too much'
malig = mark['features'][8]
diameter = mark['diameter']
return pos,np.array([soft,malig/5.0,diameter])
def plot_patch(image):
c = np.array(image.shape)/2
plt.subplot(311)
plt.imshow(np.squeeze(image[c[0],:,:]),cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(np.squeeze(image[:,c[1],:]),cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(np.squeeze(image[:,:,c[2]]),cmap=plt.cm.gray)
plt.show()
def crop(image,position):
corner1 = np.array(position)-np.array(CROP_SHAPE)/2
corner1 = np.maximum(corner1, np.array([0,0,0]))
corner2 = corner1+np.array(CROP_SHAPE)
corner2 = np.minimum(corner2,np.array(image.shape))
corner1 = corner2-np.array(CROP_SHAPE)
patch = image[corner1[0]:corner2[0],corner1[1]:corner2[1],corner1[2]:corner2[2]]
return patch
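# Example of the corner clamping above (illustrative): for position (5, 200, 10) in a
# (300, 300, 150) volume, corner1 = (-11, 184, -6) is clamped to (0, 184, 0) and corner2
# becomes (32, 216, 32), so the returned patch always has shape CROP_SHAPE even at the borders.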
def bbox_in_patch(bbox,pos):
corner1 = np.array(pos) - np.array(CROP_SHAPE)/2
corner2 = corner1 + np.array(CROP_SHAPE)/2
if np.all(bbox[:,0] > corner1) and np.all(bbox[:,1] < corner2):
nodule = True
else:
nodule = False
return nodule
def check_centroid(centroid,pos):
check = False
diff = np.abs(np.array(centroid)-np.array(pos))
if np.all(diff < np.array(CROP_SHAPE)/4):
#print "check_centroid",diff, CROP_SHAPE
check = True
return check
def find_label(pos,annotation,min_agreement):
nodule = 0
malig = 0
biggest_diameter = 0
c = 0
for cluster in annotation:
if len(cluster) >= min_agreement:
# choose randomly one mark from each cluster
mark = cluster[random.randint(0,len(cluster)-1)]
#bbox = mark['bbox']
#if bbox_in_patch(bbox,pos):
centroid = mark['centroid']
soft = soft_focus(centroid,pos)
if soft > NODULE_THRESHOLD:
diameter = mark['diameter']
if diameter > biggest_diameter:
biggest_diameter = diameter
malig = mark['features'][8]
nodule = soft
c = np.array(centroid).astype(int)
#if nodule:
#print "find_label",biggest_diameter,pos,c
return np.array([nodule,malig/5.0,biggest_diameter]),c
def augment(patch):
if random.random() < 0.5:
patch = patch[::-1,:,:]
if random.random() < 0.5:
patch = patch[:,::-1,:]
if random.random() < 0.5:
patch = patch[:,:,::-1]
perm = [0,1]
random.shuffle(perm)
patch = np.transpose(patch,perm+[2])
return patch
def check_agreement(annotation,minimum):
n = 0
if len(annotation)>0:
n = [ len(x) for x in annotation]
ok = np.max(n) >= minimum
else:
ok = False
#print "check agreement",minimum,np.max(n),ok
return ok
@threadsafe_generator
def generate_lidc(data,annotations):
neg_fraction = 0.5
total = 1.
neg = 0.
min_agreement = 3
PLOT = False
skip = 0
while True:
for i in range(len(annotations)):
random_sample = False
annotation = annotations[i]
if neg/total > neg_fraction:
if check_agreement(annotation,min_agreement):
# get positive sample
pos,label = find_nodule(annotation,min_agreement)
image = data[i]
patch = crop(image,pos)
else:
skip += 1
#print total,skip
continue # continue to find another image with some nodule
else:
# get random sample
random_sample = True
margin = 30
image = data[i]
x = random.randint(margin,image.shape[0] - margin)
y = random.randint(margin,image.shape[1] - margin)
z = random.randint(margin,image.shape[2] - margin)
pos = (x,y,z)
label,centroid = find_label(pos,annotation,min_agreement)
#if label[0]==1:
# pos = centroid
patch = crop(image,pos)
if label[0] == 0:
neg += 1
total += 1
# random augmentation
patch = patch.astype(np.float32)/PIXEL_RANGE
patch = augment(patch)
if PLOT:
print i,label, patch.shape,np.max(patch)
plot_patch(patch)
            patch = np.expand_dims(patch, axis=-1)  # add channel axis: (32, 32, 32) -> (32, 32, 32, 1)
yield patch,label
@threadsafe_generator
def generate_lidc_batch(data,annotations,batch_size=1):
seq=iter(generate_lidc(data,annotations))
while True:
inputs=[]
targets1=[]
targets2=[]
targets3=[]
for _ in range(batch_size):
x, y = seq.next()
inputs.append(x)
targets1.append(y[0])
targets2.append(y[1])
targets3.append(y[2])
inputs = np.asarray(inputs)
targets1 = np.asarray(targets1)
targets2 = np.asarray(targets2)
targets3 = np.asarray(targets3)/10. # mm -> cm
result = ({'inputs':inputs},{'nodule':targets1,'malig':targets2,'diameter': targets3})
yield result
if __name__ == "__main__":
qu = pl.query(pl.Scan)
print qu.count(),'scans'
for scan in tqdm(qu):
process_scan(scan) | for cluster in clusters: | random_line_split |
preprocess_lidc1.py | import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import pylidc as pl
import warnings
import pickle
from tqdm import tqdm
import glob
import random
import threading
warnings.filterwarnings("ignore")
from joblib import Parallel, delayed
NEW_SPACING= (1.4,1.4,2.)
PROCESSED_DIR = '/home/ronens1/lidc/processed/'
PIXEL_RANGE = 65535
CROP_SHAPE = (32,32,32)
random.seed(1)
NODULE_THRESHOLD = 0.5
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def next(self):
with self.lock:
return self.it.next()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
def plot(vol,x,y,z):
corner1 = np.array([x,y,z])-np.array(CROP_SHAPE)/2
corner2 = corner1+np.array(CROP_SHAPE)
plt.subplot(311)
plt.imshow(vol[x,corner1[1]:corner2[1],corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(vol[corner1[0]:corner2[0],y,corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(vol[corner1[0]:corner2[0],corner1[1]:corner2[1],z],cmap=plt.cm.gray)
plt.show()
def process_scan(scan):
uid = scan.series_instance_uid
volume,spacing,orientation,z0 = scan.to_volume()
volume = volume.transpose([1,0,2])
if orientation[0]<0:
volume=volume[::-1,::-1,:]
resize_factor = np.array(spacing)/np.array(NEW_SPACING)
resampled = scipy.ndimage.interpolation.zoom(volume,resize_factor)
resampled = normalize(resampled)
shape = resampled.shape
clusters = scan.annotations_with_matching_overlap()
clusters_data=[]
for cluster in clusters:
cluster_group=[]
for ann in cluster:
diameter = ann.estimate_diameter()
features = ann.feature_vals()
c = ann.centroid()
c[:2]=c[:2]*np.array(spacing[:2])
c[2] = c[2]-z0
c = c/np.array(NEW_SPACING)
b = ann.bbox()
b[:2,:] = b[:2,:]*np.expand_dims(np.array(spacing[:2]),axis=1)
b[2,:] = b[2,:]-z0
b = b / np.expand_dims(np.array(NEW_SPACING),axis=1)
if orientation[0]<0:
c[:2] = np.array(resampled.shape)[:2] - c[:2]
b[:2,:] = np.expand_dims(np.array(resampled.shape)[:2],axis=1)-b[:2,:]
#plot(resampled,int(c[0]),int(c[1]),int(c[2]))
annotation= {'diameter': diameter,'features':features, 'centroid':c,'bbox':b}
cluster_group.append(annotation)
if c[2]<0 or b[2,0]<0 or b[2,1]<0:
print "Error",uid,orientation,c,b,ann.centroid(),ann.bbox()
clusters_data.append(cluster_group)
np.save(PROCESSED_DIR+uid+'.npy',resampled)
with open(PROCESSED_DIR+uid+'annotation.txt', 'w') as outfile:
pickle.dump(clusters_data, outfile)
def normalize(image):
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = PIXEL_RANGE*(image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>PIXEL_RANGE] = PIXEL_RANGE
image[image<0] = 0.
image = np.round(image).astype(np.uint16)
return image
def load_lidc():
filenames = glob.glob(PROCESSED_DIR+'*.npy')
data = []
annotations = []
for name in tqdm(filenames):
data.append(np.load(name))
annotation_file_name = '.'.join(name.split('.')[:-1])+'annotation.txt'
with open(annotation_file_name,'r') as pickle_file:
annotations.append(pickle.load(pickle_file))
perm = range(len(annotations))
random.shuffle(perm)
data = [data[i] for i in perm]
annotations= [annotations[i] for i in perm]
data=np.asarray(data)
return data,annotations
def soft_focus(pos,centroid):
|
def shift_radius(shift):
r = 100
while r > shift:
v = np.random.uniform(-shift,high=shift,size=(3,))
r = np.linalg.norm(v)
vox_shift = (v/np.array(NEW_SPACING)).astype(int)
return vox_shift
def find_nodule(annotation,min_agreement):
good_clusters = [cluster for cluster in annotation if len(cluster)>=min_agreement]
marks = [mark for cluster in good_clusters for mark in cluster]
mark = marks[random.randint(0,len(marks)-1)]
centroid = np.array(mark['centroid']).astype(int)
    shift = 12.0 # mm, should be within the soft nodule detection threshold
pos = centroid + shift_radius(shift)
#print "diameter",mark['diameter']
"""
feature_names = \
('subtlety',
'internalStructure',
'calcification',
'sphericity',
'margin',
'lobulation',
'spiculation',
'texture',
'malignancy')
"""
soft = soft_focus(pos,centroid)
if (soft < NODULE_THRESHOLD) :
print 'Error: nodule shifted too much'
malig = mark['features'][8]
diameter = mark['diameter']
return pos,np.array([soft,malig/5.0,diameter])
def plot_patch(image):
c = np.array(image.shape)/2
plt.subplot(311)
plt.imshow(np.squeeze(image[c[0],:,:]),cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(np.squeeze(image[:,c[1],:]),cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(np.squeeze(image[:,:,c[2]]),cmap=plt.cm.gray)
plt.show()
def crop(image,position):
corner1 = np.array(position)-np.array(CROP_SHAPE)/2
corner1 = np.maximum(corner1, np.array([0,0,0]))
corner2 = corner1+np.array(CROP_SHAPE)
corner2 = np.minimum(corner2,np.array(image.shape))
corner1 = corner2-np.array(CROP_SHAPE)
patch = image[corner1[0]:corner2[0],corner1[1]:corner2[1],corner1[2]:corner2[2]]
return patch
def bbox_in_patch(bbox,pos):
corner1 = np.array(pos) - np.array(CROP_SHAPE)/2
corner2 = corner1 + np.array(CROP_SHAPE)/2
if np.all(bbox[:,0] > corner1) and np.all(bbox[:,1] < corner2):
nodule = True
else:
nodule = False
return nodule
def check_centroid(centroid,pos):
check = False
diff = np.abs(np.array(centroid)-np.array(pos))
if np.all(diff < np.array(CROP_SHAPE)/4):
#print "check_centroid",diff, CROP_SHAPE
check = True
return check
def find_label(pos,annotation,min_agreement):
nodule = 0
malig = 0
biggest_diameter = 0
c = 0
for cluster in annotation:
if len(cluster) >= min_agreement:
# choose randomly one mark from each cluster
mark = cluster[random.randint(0,len(cluster)-1)]
#bbox = mark['bbox']
#if bbox_in_patch(bbox,pos):
centroid = mark['centroid']
soft = soft_focus(centroid,pos)
if soft > NODULE_THRESHOLD:
diameter = mark['diameter']
if diameter > biggest_diameter:
biggest_diameter = diameter
malig = mark['features'][8]
nodule = soft
c = np.array(centroid).astype(int)
#if nodule:
#print "find_label",biggest_diameter,pos,c
return np.array([nodule,malig/5.0,biggest_diameter]),c
def augment(patch):
if random.random() < 0.5:
patch = patch[::-1,:,:]
if random.random() < 0.5:
patch = patch[:,::-1,:]
if random.random() < 0.5:
patch = patch[:,:,::-1]
perm = [0,1]
random.shuffle(perm)
patch = np.transpose(patch,perm+[2])
return patch
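# The random flips along each axis plus the optional in-plane transpose above give up to
# 2*2*2*2 = 16 distinct orientations of the same patch (illustrative count, ignoring
# symmetric patches), which is a cheap form of 3D data augmentation.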
def check_agreement(annotation,minimum):
n = 0
if len(annotation)>0:
n = [ len(x) for x in annotation]
ok = np.max(n) >= minimum
else:
ok = False
#print "check agreement",minimum,np.max(n),ok
return ok
@threadsafe_generator
def generate_lidc(data,annotations):
neg_fraction = 0.5
total = 1.
neg = 0.
min_agreement = 3
PLOT = False
skip = 0
while True:
for i in range(len(annotations)):
random_sample = False
annotation = annotations[i]
if neg/total > neg_fraction:
if check_agreement(annotation,min_agreement):
# get positive sample
pos,label = find_nodule(annotation,min_agreement)
image = data[i]
patch = crop(image,pos)
else:
skip += 1
#print total,skip
continue # continue to find another image with some nodule
else:
# get random sample
random_sample = True
margin = 30
image = data[i]
x = random.randint(margin,image.shape[0] - margin)
y = random.randint(margin,image.shape[1] - margin)
z = random.randint(margin,image.shape[2] - margin)
pos = (x,y,z)
label,centroid = find_label(pos,annotation,min_agreement)
#if label[0]==1:
# pos = centroid
patch = crop(image,pos)
if label[0] == 0:
neg += 1
total += 1
# random augmentation
patch = patch.astype(np.float32)/PIXEL_RANGE
patch = augment(patch)
if PLOT:
print i,label, patch.shape,np.max(patch)
plot_patch(patch)
            patch = np.expand_dims(patch, axis=-1)  # add channel axis: (32, 32, 32) -> (32, 32, 32, 1)
yield patch,label
@threadsafe_generator
def generate_lidc_batch(data,annotations,batch_size=1):
seq=iter(generate_lidc(data,annotations))
while True:
inputs=[]
targets1=[]
targets2=[]
targets3=[]
for _ in range(batch_size):
x, y = seq.next()
inputs.append(x)
targets1.append(y[0])
targets2.append(y[1])
targets3.append(y[2])
inputs = np.asarray(inputs)
targets1 = np.asarray(targets1)
targets2 = np.asarray(targets2)
targets3 = np.asarray(targets3)/10. # mm -> cm
result = ({'inputs':inputs},{'nodule':targets1,'malig':targets2,'diameter': targets3})
yield result
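# Each yielded batch is a pair of dicts, e.g. ({'inputs': array of shape (batch, 32, 32, 32, 1)},
# {'nodule': (batch,), 'malig': (batch,), 'diameter': (batch,)}), with diameters rescaled from
# mm to cm. This layout is assumed to feed a multi-output model whose input layer is named
# 'inputs' and whose output heads are named 'nodule', 'malig' and 'diameter'.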
if __name__ == "__main__":
qu = pl.query(pl.Scan)
print qu.count(),'scans'
for scan in tqdm(qu):
process_scan(scan)
| Focus_radius = 30 # mm
dist = np.linalg.norm( (np.array(pos)-np.array(centroid))*np.array(NEW_SPACING))/Focus_radius
return max(1-dist,0) | identifier_body |
preprocess_lidc1.py | import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import pylidc as pl
import warnings
import pickle
from tqdm import tqdm
import glob
import random
import threading
warnings.filterwarnings("ignore")
from joblib import Parallel, delayed
NEW_SPACING= (1.4,1.4,2.)
PROCESSED_DIR = '/home/ronens1/lidc/processed/'
PIXEL_RANGE = 65535
CROP_SHAPE = (32,32,32)
random.seed(1)
NODULE_THRESHOLD = 0.5
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def next(self):
with self.lock:
return self.it.next()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
def plot(vol,x,y,z):
corner1 = np.array([x,y,z])-np.array(CROP_SHAPE)/2
corner2 = corner1+np.array(CROP_SHAPE)
plt.subplot(311)
plt.imshow(vol[x,corner1[1]:corner2[1],corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(vol[corner1[0]:corner2[0],y,corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(vol[corner1[0]:corner2[0],corner1[1]:corner2[1],z],cmap=plt.cm.gray)
plt.show()
def process_scan(scan):
uid = scan.series_instance_uid
volume,spacing,orientation,z0 = scan.to_volume()
volume = volume.transpose([1,0,2])
if orientation[0]<0:
volume=volume[::-1,::-1,:]
resize_factor = np.array(spacing)/np.array(NEW_SPACING)
resampled = scipy.ndimage.interpolation.zoom(volume,resize_factor)
resampled = normalize(resampled)
shape = resampled.shape
clusters = scan.annotations_with_matching_overlap()
clusters_data=[]
for cluster in clusters:
cluster_group=[]
for ann in cluster:
diameter = ann.estimate_diameter()
features = ann.feature_vals()
c = ann.centroid()
c[:2]=c[:2]*np.array(spacing[:2])
c[2] = c[2]-z0
c = c/np.array(NEW_SPACING)
b = ann.bbox()
b[:2,:] = b[:2,:]*np.expand_dims(np.array(spacing[:2]),axis=1)
b[2,:] = b[2,:]-z0
b = b / np.expand_dims(np.array(NEW_SPACING),axis=1)
if orientation[0]<0:
c[:2] = np.array(resampled.shape)[:2] - c[:2]
b[:2,:] = np.expand_dims(np.array(resampled.shape)[:2],axis=1)-b[:2,:]
#plot(resampled,int(c[0]),int(c[1]),int(c[2]))
annotation= {'diameter': diameter,'features':features, 'centroid':c,'bbox':b}
cluster_group.append(annotation)
if c[2]<0 or b[2,0]<0 or b[2,1]<0:
print "Error",uid,orientation,c,b,ann.centroid(),ann.bbox()
clusters_data.append(cluster_group)
np.save(PROCESSED_DIR+uid+'.npy',resampled)
with open(PROCESSED_DIR+uid+'annotation.txt', 'w') as outfile:
pickle.dump(clusters_data, outfile)
def normalize(image):
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = PIXEL_RANGE*(image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>PIXEL_RANGE] = PIXEL_RANGE
image[image<0] = 0.
image = np.round(image).astype(np.uint16)
return image
def load_lidc():
filenames = glob.glob(PROCESSED_DIR+'*.npy')
data = []
annotations = []
for name in tqdm(filenames):
|
perm = range(len(annotations))
random.shuffle(perm)
data = [data[i] for i in perm]
annotations= [annotations[i] for i in perm]
data=np.asarray(data)
return data,annotations
def soft_focus(pos,centroid):
Focus_radius = 30 # mm
dist = np.linalg.norm( (np.array(pos)-np.array(centroid))*np.array(NEW_SPACING))/Focus_radius
return max(1-dist,0)
def shift_radius(shift):
r = 100
while r > shift:
v = np.random.uniform(-shift,high=shift,size=(3,))
r = np.linalg.norm(v)
vox_shift = (v/np.array(NEW_SPACING)).astype(int)
return vox_shift
def find_nodule(annotation,min_agreement):
good_clusters = [cluster for cluster in annotation if len(cluster)>=min_agreement]
marks = [mark for cluster in good_clusters for mark in cluster]
mark = marks[random.randint(0,len(marks)-1)]
centroid = np.array(mark['centroid']).astype(int)
    shift = 12.0 # mm, should be within the soft nodule detection threshold
pos = centroid + shift_radius(shift)
#print "diameter",mark['diameter']
"""
feature_names = \
('subtlety',
'internalStructure',
'calcification',
'sphericity',
'margin',
'lobulation',
'spiculation',
'texture',
'malignancy')
"""
soft = soft_focus(pos,centroid)
if (soft < NODULE_THRESHOLD) :
print 'Error: nodule shifted too much'
malig = mark['features'][8]
diameter = mark['diameter']
return pos,np.array([soft,malig/5.0,diameter])
def plot_patch(image):
c = np.array(image.shape)/2
plt.subplot(311)
plt.imshow(np.squeeze(image[c[0],:,:]),cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(np.squeeze(image[:,c[1],:]),cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(np.squeeze(image[:,:,c[2]]),cmap=plt.cm.gray)
plt.show()
def crop(image,position):
corner1 = np.array(position)-np.array(CROP_SHAPE)/2
corner1 = np.maximum(corner1, np.array([0,0,0]))
corner2 = corner1+np.array(CROP_SHAPE)
corner2 = np.minimum(corner2,np.array(image.shape))
corner1 = corner2-np.array(CROP_SHAPE)
patch = image[corner1[0]:corner2[0],corner1[1]:corner2[1],corner1[2]:corner2[2]]
return patch
def bbox_in_patch(bbox,pos):
corner1 = np.array(pos) - np.array(CROP_SHAPE)/2
corner2 = corner1 + np.array(CROP_SHAPE)/2
if np.all(bbox[:,0] > corner1) and np.all(bbox[:,1] < corner2):
nodule = True
else:
nodule = False
return nodule
def check_centroid(centroid,pos):
check = False
diff = np.abs(np.array(centroid)-np.array(pos))
if np.all(diff < np.array(CROP_SHAPE)/4):
#print "check_centroid",diff, CROP_SHAPE
check = True
return check
def find_label(pos,annotation,min_agreement):
nodule = 0
malig = 0
biggest_diameter = 0
c = 0
for cluster in annotation:
if len(cluster) >= min_agreement:
# choose randomly one mark from each cluster
mark = cluster[random.randint(0,len(cluster)-1)]
#bbox = mark['bbox']
#if bbox_in_patch(bbox,pos):
centroid = mark['centroid']
soft = soft_focus(centroid,pos)
if soft > NODULE_THRESHOLD:
diameter = mark['diameter']
if diameter > biggest_diameter:
biggest_diameter = diameter
malig = mark['features'][8]
nodule = soft
c = np.array(centroid).astype(int)
#if nodule:
#print "find_label",biggest_diameter,pos,c
return np.array([nodule,malig/5.0,biggest_diameter]),c
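# Illustrative return value: a sampled position landing near a 6.5 mm nodule marked with
# malignancy 4 could yield roughly (array([0.53, 0.8, 6.5]), centroid), i.e.
# (soft focus score, malignancy/5, diameter in mm) plus the integer centroid voxel; if no
# agreed nodule is close enough, the label is all zeros and c stays 0.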
def augment(patch):
if random.random() < 0.5:
patch = patch[::-1,:,:]
if random.random() < 0.5:
patch = patch[:,::-1,:]
if random.random() < 0.5:
patch = patch[:,:,::-1]
perm = [0,1]
random.shuffle(perm)
patch = np.transpose(patch,perm+[2])
return patch
def check_agreement(annotation,minimum):
n = 0
if len(annotation)>0:
n = [ len(x) for x in annotation]
ok = np.max(n) >= minimum
else:
ok = False
#print "check agreement",minimum,np.max(n),ok
return ok
@threadsafe_generator
def generate_lidc(data,annotations):
neg_fraction = 0.5
total = 1.
neg = 0.
min_agreement = 3
PLOT = False
skip = 0
while True:
for i in range(len(annotations)):
random_sample = False
annotation = annotations[i]
if neg/total > neg_fraction:
if check_agreement(annotation,min_agreement):
# get positive sample
pos,label = find_nodule(annotation,min_agreement)
image = data[i]
patch = crop(image,pos)
else:
skip += 1
#print total,skip
continue # continue to find another image with some nodule
else:
# get random sample
random_sample = True
margin = 30
image = data[i]
x = random.randint(margin,image.shape[0] - margin)
y = random.randint(margin,image.shape[1] - margin)
z = random.randint(margin,image.shape[2] - margin)
pos = (x,y,z)
label,centroid = find_label(pos,annotation,min_agreement)
#if label[0]==1:
# pos = centroid
patch = crop(image,pos)
if label[0] == 0:
neg += 1
total += 1
# random augmentation
patch = patch.astype(np.float32)/PIXEL_RANGE
patch = augment(patch)
if PLOT:
print i,label, patch.shape,np.max(patch)
plot_patch(patch)
            patch = np.expand_dims(patch, axis=-1)  # add channel axis: (32, 32, 32) -> (32, 32, 32, 1)
yield patch,label
@threadsafe_generator
def generate_lidc_batch(data,annotations,batch_size=1):
seq=iter(generate_lidc(data,annotations))
while True:
inputs=[]
targets1=[]
targets2=[]
targets3=[]
for _ in range(batch_size):
x, y = seq.next()
inputs.append(x)
targets1.append(y[0])
targets2.append(y[1])
targets3.append(y[2])
inputs = np.asarray(inputs)
targets1 = np.asarray(targets1)
targets2 = np.asarray(targets2)
targets3 = np.asarray(targets3)/10. # mm -> cm
result = ({'inputs':inputs},{'nodule':targets1,'malig':targets2,'diameter': targets3})
yield result
if __name__ == "__main__":
qu = pl.query(pl.Scan)
print qu.count(),'scans'
for scan in tqdm(qu):
process_scan(scan)
| data.append(np.load(name))
annotation_file_name = '.'.join(name.split('.')[:-1])+'annotation.txt'
with open(annotation_file_name,'r') as pickle_file:
annotations.append(pickle.load(pickle_file)) | conditional_block |
preprocess_lidc1.py | import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import pylidc as pl
import warnings
import pickle
from tqdm import tqdm
import glob
import random
import threading
warnings.filterwarnings("ignore")
from joblib import Parallel, delayed
NEW_SPACING= (1.4,1.4,2.)
PROCESSED_DIR = '/home/ronens1/lidc/processed/'
PIXEL_RANGE = 65535
CROP_SHAPE = (32,32,32)
random.seed(1)
NODULE_THRESHOLD = 0.5
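# With NEW_SPACING = (1.4, 1.4, 2.0) mm and CROP_SHAPE = (32, 32, 32), each training patch
# covers roughly 44.8 x 44.8 x 64 mm of tissue, and PIXEL_RANGE = 65535 means intensities are
# stored as uint16 after normalize() below (illustrative note, not executed code).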
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def next(self):
with self.lock:
return self.it.next()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
def plot(vol,x,y,z):
corner1 = np.array([x,y,z])-np.array(CROP_SHAPE)/2
corner2 = corner1+np.array(CROP_SHAPE)
plt.subplot(311)
plt.imshow(vol[x,corner1[1]:corner2[1],corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(vol[corner1[0]:corner2[0],y,corner1[2]:corner2[2]],cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(vol[corner1[0]:corner2[0],corner1[1]:corner2[1],z],cmap=plt.cm.gray)
plt.show()
def process_scan(scan):
uid = scan.series_instance_uid
volume,spacing,orientation,z0 = scan.to_volume()
volume = volume.transpose([1,0,2])
if orientation[0]<0:
volume=volume[::-1,::-1,:]
resize_factor = np.array(spacing)/np.array(NEW_SPACING)
resampled = scipy.ndimage.interpolation.zoom(volume,resize_factor)
resampled = normalize(resampled)
shape = resampled.shape
clusters = scan.annotations_with_matching_overlap()
clusters_data=[]
for cluster in clusters:
cluster_group=[]
for ann in cluster:
diameter = ann.estimate_diameter()
features = ann.feature_vals()
c = ann.centroid()
c[:2]=c[:2]*np.array(spacing[:2])
c[2] = c[2]-z0
c = c/np.array(NEW_SPACING)
b = ann.bbox()
b[:2,:] = b[:2,:]*np.expand_dims(np.array(spacing[:2]),axis=1)
b[2,:] = b[2,:]-z0
b = b / np.expand_dims(np.array(NEW_SPACING),axis=1)
if orientation[0]<0:
c[:2] = np.array(resampled.shape)[:2] - c[:2]
b[:2,:] = np.expand_dims(np.array(resampled.shape)[:2],axis=1)-b[:2,:]
#plot(resampled,int(c[0]),int(c[1]),int(c[2]))
annotation= {'diameter': diameter,'features':features, 'centroid':c,'bbox':b}
cluster_group.append(annotation)
if c[2]<0 or b[2,0]<0 or b[2,1]<0:
print "Error",uid,orientation,c,b,ann.centroid(),ann.bbox()
clusters_data.append(cluster_group)
np.save(PROCESSED_DIR+uid+'.npy',resampled)
with open(PROCESSED_DIR+uid+'annotation.txt', 'w') as outfile:
pickle.dump(clusters_data, outfile)
def normalize(image):
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = PIXEL_RANGE*(image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>PIXEL_RANGE] = PIXEL_RANGE
image[image<0] = 0.
image = np.round(image).astype(np.uint16)
return image
def load_lidc():
filenames = glob.glob(PROCESSED_DIR+'*.npy')
data = []
annotations = []
for name in tqdm(filenames):
data.append(np.load(name))
annotation_file_name = '.'.join(name.split('.')[:-1])+'annotation.txt'
with open(annotation_file_name,'r') as pickle_file:
annotations.append(pickle.load(pickle_file))
perm = range(len(annotations))
random.shuffle(perm)
data = [data[i] for i in perm]
annotations= [annotations[i] for i in perm]
data=np.asarray(data)
return data,annotations
def soft_focus(pos,centroid):
Focus_radius = 30 # mm
dist = np.linalg.norm( (np.array(pos)-np.array(centroid))*np.array(NEW_SPACING))/Focus_radius
return max(1-dist,0)
def | (shift):
r = 100
while r > shift:
v = np.random.uniform(-shift,high=shift,size=(3,))
r = np.linalg.norm(v)
vox_shift = (v/np.array(NEW_SPACING)).astype(int)
return vox_shift
def find_nodule(annotation,min_agreement):
good_clusters = [cluster for cluster in annotation if len(cluster)>=min_agreement]
marks = [mark for cluster in good_clusters for mark in cluster]
mark = marks[random.randint(0,len(marks)-1)]
centroid = np.array(mark['centroid']).astype(int)
    shift = 12.0 # mm, should be within the soft nodule detection threshold
pos = centroid + shift_radius(shift)
#print "diameter",mark['diameter']
"""
feature_names = \
('subtlety',
'internalStructure',
'calcification',
'sphericity',
'margin',
'lobulation',
'spiculation',
'texture',
'malignancy')
"""
soft = soft_focus(pos,centroid)
if (soft < NODULE_THRESHOLD) :
print 'Error: nodule shifted too much'
malig = mark['features'][8]
diameter = mark['diameter']
return pos,np.array([soft,malig/5.0,diameter])
def plot_patch(image):
c = np.array(image.shape)/2
plt.subplot(311)
plt.imshow(np.squeeze(image[c[0],:,:]),cmap=plt.cm.gray)
plt.subplot(312)
plt.imshow(np.squeeze(image[:,c[1],:]),cmap=plt.cm.gray)
plt.subplot(313)
plt.imshow(np.squeeze(image[:,:,c[2]]),cmap=plt.cm.gray)
plt.show()
def crop(image,position):
corner1 = np.array(position)-np.array(CROP_SHAPE)/2
corner1 = np.maximum(corner1, np.array([0,0,0]))
corner2 = corner1+np.array(CROP_SHAPE)
corner2 = np.minimum(corner2,np.array(image.shape))
corner1 = corner2-np.array(CROP_SHAPE)
patch = image[corner1[0]:corner2[0],corner1[1]:corner2[1],corner1[2]:corner2[2]]
return patch
def bbox_in_patch(bbox,pos):
corner1 = np.array(pos) - np.array(CROP_SHAPE)/2
corner2 = corner1 + np.array(CROP_SHAPE)/2
if np.all(bbox[:,0] > corner1) and np.all(bbox[:,1] < corner2):
nodule = True
else:
nodule = False
return nodule
def check_centroid(centroid,pos):
check = False
diff = np.abs(np.array(centroid)-np.array(pos))
if np.all(diff < np.array(CROP_SHAPE)/4):
#print "check_centroid",diff, CROP_SHAPE
check = True
return check
def find_label(pos,annotation,min_agreement):
nodule = 0
malig = 0
biggest_diameter = 0
c = 0
for cluster in annotation:
if len(cluster) >= min_agreement:
# choose randomly one mark from each cluster
mark = cluster[random.randint(0,len(cluster)-1)]
#bbox = mark['bbox']
#if bbox_in_patch(bbox,pos):
centroid = mark['centroid']
soft = soft_focus(centroid,pos)
if soft > NODULE_THRESHOLD:
diameter = mark['diameter']
if diameter > biggest_diameter:
biggest_diameter = diameter
malig = mark['features'][8]
nodule = soft
c = np.array(centroid).astype(int)
#if nodule:
#print "find_label",biggest_diameter,pos,c
return np.array([nodule,malig/5.0,biggest_diameter]),c
def augment(patch):
if random.random() < 0.5:
patch = patch[::-1,:,:]
if random.random() < 0.5:
patch = patch[:,::-1,:]
if random.random() < 0.5:
patch = patch[:,:,::-1]
perm = [0,1]
random.shuffle(perm)
patch = np.transpose(patch,perm+[2])
return patch
def check_agreement(annotation,minimum):
n = 0
if len(annotation)>0:
n = [ len(x) for x in annotation]
ok = np.max(n) >= minimum
else:
ok = False
#print "check agreement",minimum,np.max(n),ok
return ok
@threadsafe_generator
def generate_lidc(data,annotations):
neg_fraction = 0.5
total = 1.
neg = 0.
min_agreement = 3
PLOT = False
skip = 0
while True:
for i in range(len(annotations)):
random_sample = False
annotation = annotations[i]
if neg/total > neg_fraction:
if check_agreement(annotation,min_agreement):
# get positive sample
pos,label = find_nodule(annotation,min_agreement)
image = data[i]
patch = crop(image,pos)
else:
skip += 1
#print total,skip
continue # continue to find another image with some nodule
else:
# get random sample
random_sample = True
margin = 30
image = data[i]
x = random.randint(margin,image.shape[0] - margin)
y = random.randint(margin,image.shape[1] - margin)
z = random.randint(margin,image.shape[2] - margin)
pos = (x,y,z)
label,centroid = find_label(pos,annotation,min_agreement)
#if label[0]==1:
# pos = centroid
patch = crop(image,pos)
if label[0] == 0:
neg += 1
total += 1
# random augmentation
patch = patch.astype(np.float32)/PIXEL_RANGE
patch = augment(patch)
if PLOT:
print i,label, patch.shape,np.max(patch)
plot_patch(patch)
            patch = np.expand_dims(patch, axis=-1)  # add channel axis: (32, 32, 32) -> (32, 32, 32, 1)
yield patch,label
@threadsafe_generator
def generate_lidc_batch(data,annotations,batch_size=1):
seq=iter(generate_lidc(data,annotations))
while True:
inputs=[]
targets1=[]
targets2=[]
targets3=[]
for _ in range(batch_size):
x, y = seq.next()
inputs.append(x)
targets1.append(y[0])
targets2.append(y[1])
targets3.append(y[2])
inputs = np.asarray(inputs)
targets1 = np.asarray(targets1)
targets2 = np.asarray(targets2)
targets3 = np.asarray(targets3)/10. # mm -> cm
result = ({'inputs':inputs},{'nodule':targets1,'malig':targets2,'diameter': targets3})
yield result
if __name__ == "__main__":
qu = pl.query(pl.Scan)
print qu.count(),'scans'
for scan in tqdm(qu):
process_scan(scan)
| shift_radius | identifier_name |
kinematics.py | # -*- coding: utf-8 -*-
"""
Kinematics analysis: multiple trials - single subject
Simon Marchant 2017
"""
import csv
import numpy as np
import pickle
def csvread(file):
"""
Read a CSV file and output a numpy array object of that file.
|
def tableread(filelist, start, frames, onlyinclude=''):
"""
Read a list of lists, from a Nexus CSV export, to extract vector data.
Output is a dictionary of lists.
Inputs:-
filelist = the data
start = the row number where data to be read starts
frames = dict of Start and End frames for strides
    onlyinclude = optional argument; only columns whose title contains this string are included
"""
filewidth = len(filelist[start-2])-1
output = {}
startframe = start - int(filelist[start][0]) + 1
for col in range(2,filewidth):
# Name the datatype in each column
if filelist[start-3][col]:
tmp = filelist[start-3][col]+filelist[start-2][col]
elif filelist[start-3][col-1]:
tmp = filelist[start-3][col-1]+filelist[start-2][col]
elif filelist[start-3][col-2]:
tmp = filelist[start-3][col-2]+filelist[start-2][col]
name = tmp[tmp.rfind(":")+1:]
if onlyinclude in name or not onlyinclude:
output[name] = []
side = ('Right') if name[0]=='R' else ('Left')
# Make a list of values within the marked stride frames
for row in range(startframe+frames[side+'Start'], startframe+frames[side+'End']):
if filelist[row][col] == '':
output[name].append('NA')
else:
output[name].append(float(filelist[row][col]))
#import pdb; pdb.set_trace()
return output
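# Example of the column-naming logic above (illustrative, assuming the usual Nexus CSV layout):
# a group header cell such as "Subject:LAnkleAngles" combined with the per-column sub-header
# "X" produces the output key "LAnkleAnglesX", and the leading "L"/"R" of that key decides
# whether the Left or Right stride frames are used when slicing the rows.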
def readtrajectories(filelist, frames):
"""
Read a numpy array object in Vicon export format with marker trajectories.
Requires frame dictionary with initial contact, foot off, and end of swing.
Values that cannot be calculated from inputs will output 'NA'.
Output is a dictionary with the following parts:-
LeftToeZ/RightToeZ = vector of toe marker Z coord throughout trial
LeftStepTime/RightStepTime = time taken for marked step, in seconds
LeftFoffFraction/RightFoffFraction = footoff as fraction of total step time
LeftStepLen/RightStepLen = length of marked step length in metres
LeftSpeedCalc/RightSpeedCalc = walking speed calculated from these values
"""
filelen = len(filelist) - 2
trajstart = filelen
output = {}
LtoeZ = []
RtoeZ = []
LeftToeCol = 29
RightToeCol = 47
for row in range(filelen):
try:
# Assign gait parameters to dictionary
if filelist[row][0] == 'Trajectories':
trajstart = row + 5
except IndexError:
continue
output.update(tableread(filelist,trajstart,frames))
sides = ['Left', 'Right']
for side in sides:
output[side+'StepTime'] = (frames[side+'End']-frames[side+'Start'])/100
output[side+'FoffFraction'] = (frames[side+'Foff']-frames[side+'Start']) / output[side+'StepTime']
try:
output[side+'StepLen'] = abs(float(filelist[frames[side+'End']+trajstart][locals()[side+'ToeCol']+1]) - float(filelist[frames[side+'Start']+trajstart][locals()[side+'ToeCol']+1]))/1000
except ValueError:
output[side+'StepLen'] = 'NA'
        output[side+'SpeedCalc'] = 'NA' if output[side+'StepLen'] == 'NA' else output[side+'StepLen'] / output[side+'StepTime']  # keep 'NA' rather than dividing it
#import pdb; pdb.set_trace()
return output
def readangles(filelist):
"""
Read a numpy array object in Vicon export format with model outputs.
Output is a dictionary with the following parts:-
LeftStartFrame/RightStartFrame = frame of initial footstrikes in marked stride
LeftEndFrame/RightEndFrame = frame of final footstrikes in marked stride
LeftAnkleAngle/RightAnkleAngle = list of absolute ankle angles throughout trial
LeftSpeed/RightSpeed = walking speed
LeftFoffFrame/RightFoffFrame = frame of foot off in marked stride
StrideLen = stride length in marked stride
"""
filelen = len(filelist) - 2
output = {'RightAnkleAngle': [], 'LeftAnkleAngle': [], 'Frames': {}}
anglestart = filelen
LeftStrike = []
RightStrike = []
events = 0
for n in range(filelen):
try:
if filelist[n][0] == 'Model Outputs':
anglestart = n + 5
elif filelist[n][0] == 'Events':
events = 1
elif filelist[n][2] == 'Walking Speed':
output[filelist[n][1]+'Speed'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Off' and events == 1:
# Footoff frame in events
output['Frames'].update({filelist[n][1]+'Foff' : int(float(filelist[n][3]) * 100)})
elif filelist[n][2] == 'Stride Length':
output[filelist[n][1]+'StrideLen'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Strike':
# Convert seconds to frames at 100Hz.
if filelist[n][1] == 'Left':
LeftStrike.append(int(float(filelist[n][3]) * 100))
elif filelist[n][1] == 'Right':
RightStrike.append(int(float(filelist[n][3]) * 100))
elif n >= anglestart:
# List ankle abs angles, convert to float if possible
try:
output['LeftAnkleAngle'].append(float(filelist[n][2]))
except ValueError:
output['LeftAnkleAngle'].append(filelist[n][2])
try:
output['RightAnkleAngle'].append(float(filelist[n][101]))
except ValueError:
output['RightAnkleAngle'].append(filelist[n][101])
except IndexError:
continue
sides = ['Left', 'Right']
for side in sides:
output['Frames'].update({side+'Start' : min(locals()[side+'Strike'])})
output['Frames'].update({side+'End' : max(locals()[side+'Strike'])})
output.update(tableread(filelist,anglestart,output['Frames'],'Angle'))
if anglestart == filelen:
raise NameError('No angles in angle file!')
for side in sides:
mintoe = min(output[side[0]+'AnkleAnglesX'])
midswingframe = int(output['Frames'][side+'Foff']/2 + output['Frames'][side+'End']/2 - output['Frames'][side+'Start'])
output[side+'Clearance'] = output[side[0]+'AnkleAnglesX'][midswingframe] - mintoe
#import pdb; pdb.set_trace()
return output
def onetrial(trialnum):
"""
Read in files for a single trial, extract useful information.
Data must be in CSV files in subdirectories Angles and Trajectories.
Gives an empty dictionary if no stride data present in angles file.
"""
eventsexist = False
anglelist = csvread("Angles/%s.csv" % (trialnum, ))
for n in range(len(anglelist)):
for m in range(len(anglelist[n])):
if anglelist[n][m] == 'Events':
eventsexist = True
if eventsexist == False:
print("WARNING: no events in angles file, aborting with empty output.")
return {}
angles = readangles(anglelist)
trajlist = csvread("Trajectories/%s.csv" % (trialnum, ))
trajectories = readtrajectories(trajlist, angles['Frames'])
output = {**trajectories, **angles}
output['TrialUsed'] = trialnum
#import pdb; pdb.set_trace()
return output
def minclearance(ToeZ, StartFrame, FootOff, EndFrame, MidSwingStart, MidSwingEnd):
"""
Returns the minimum foot clearance in middle of swing in marked stride.
Inputs: Toe marker Z (list), Start frame, foot off frame, end frame, start fraction of mid swing, end fraction of mid swing.
    Output: minimum mid-swing clearance above the lowest toe height in the marked stride.
"""
swing = ToeZ[FootOff:EndFrame]
ground = min(ToeZ[StartFrame:EndFrame])
middleframes = [(FootOff+int(MidSwingStart*len(swing))),(EndFrame-int(MidSwingEnd*len(swing)))]
MinZ = min(ToeZ[middleframes[0]:middleframes[1]])
clearance = MinZ - ground
return clearance
def arraycleaner(array):
"""
Make numpy array rows the same length by shortening long ones.
"""
lengths = [len(x) for x in array]
#shortindex = lengths.index(min(lengths))
shortest = min(lengths)
for n in range(len(array)):
line = array[n]
if len(array[n]) != shortest:
this = len(line)
cut = np.ceil(1/((this/shortest) - 1))
#import pdb; pdb.set_trace()
for m in range(len(array[n])):
if m % cut == 0 and m != 0:
line[m] = 'del'
for m in range(len(array[n])):
try:
if line[m] == 'del':
del line[m]
except IndexError:
continue
array[n] = line[0:shortest]
return array
if __name__ == '__main__':
trials = ['OWN','FACTORY','R0','R50','R100','R150','R300','X0','X50','X100','X150','X300','D50','D100','D150']
#TEST CASE (comment out the above line, uncomment line below this)
#trials = ['OWN']
subject = {}
print("Please enter the participant ID, e.g. 1")
participant = input()
print("Please enter the affected side, e.g. Left. If both, enter Bilateral.")
affectedside = input().capitalize()
if affectedside not in ('Left', 'Right', 'Bilateral'):
raise ValueError("GOD DAMNIT ENTER LEFT OR RIGHT OR BILATERAL. WHAT THE HELL.")
for trial in trials:
print("""Please enter the trial number for %s. If none, leave blank.""" % (trial, ))
trialnum = input()
if trialnum == '':
subject[trial] = {}
else:
subject[trial] = onetrial(trialnum)
subject[trial]['AffectedSide'] = affectedside
afile = open(r'Subject%s.pkl' % (participant, ), 'wb')
pickle.dump(subject, afile)
afile.close() | """
thisfile = open(file)
thisreader = csv.reader(thisfile)
filelist = np.array(list(thisreader))
return filelist
| random_line_split |
kinematics.py | # -*- coding: utf-8 -*-
"""
Kinematics analysis: multiple trials - single subject
Simon Marchant 2017
"""
import csv
import numpy as np
import pickle
def csvread(file):
"""
Read a CSV file and output a numpy array object of that file.
"""
thisfile = open(file)
thisreader = csv.reader(thisfile)
filelist = np.array(list(thisreader))
return filelist
def tableread(filelist, start, frames, onlyinclude=''):
"""
Read a list of lists, from a Nexus CSV export, to extract vector data.
Output is a dictionary of lists.
Inputs:-
filelist = the data
start = the row number where data to be read starts
frames = dict of Start and End frames for strides
onlyinclude = optional argument, only columns where title includes this will be included
"""
filewidth = len(filelist[start-2])-1
output = {}
startframe = start - int(filelist[start][0]) + 1
for col in range(2,filewidth):
# Name the datatype in each column
if filelist[start-3][col]:
tmp = filelist[start-3][col]+filelist[start-2][col]
elif filelist[start-3][col-1]:
tmp = filelist[start-3][col-1]+filelist[start-2][col]
elif filelist[start-3][col-2]:
tmp = filelist[start-3][col-2]+filelist[start-2][col]
name = tmp[tmp.rfind(":")+1:]
if onlyinclude in name or not onlyinclude:
output[name] = []
side = ('Right') if name[0]=='R' else ('Left')
# Make a list of values within the marked stride frames
for row in range(startframe+frames[side+'Start'], startframe+frames[side+'End']):
if filelist[row][col] == '':
output[name].append('NA')
else:
output[name].append(float(filelist[row][col]))
#import pdb; pdb.set_trace()
return output
def readtrajectories(filelist, frames):
"""
Read a numpy array object in Vicon export format with marker trajectories.
Requires frame dictionary with initial contact, foot off, and end of swing.
Values that cannot be calculated from inputs will output 'NA'.
Output is a dictionary with the following parts:-
LeftToeZ/RightToeZ = vector of toe marker Z coord throughout trial
LeftStepTime/RightStepTime = time taken for marked step, in seconds
LeftFoffFraction/RightFoffFraction = footoff as fraction of total step time
LeftStepLen/RightStepLen = length of marked step length in metres
LeftSpeedCalc/RightSpeedCalc = walking speed calculated from these values
"""
filelen = len(filelist) - 2
trajstart = filelen
output = {}
LtoeZ = []
RtoeZ = []
LeftToeCol = 29
RightToeCol = 47
for row in range(filelen):
try:
# Assign gait parameters to dictionary
if filelist[row][0] == 'Trajectories':
trajstart = row + 5
except IndexError:
continue
output.update(tableread(filelist,trajstart,frames))
sides = ['Left', 'Right']
for side in sides:
output[side+'StepTime'] = (frames[side+'End']-frames[side+'Start'])/100
output[side+'FoffFraction'] = (frames[side+'Foff']-frames[side+'Start']) / output[side+'StepTime']
try:
output[side+'StepLen'] = abs(float(filelist[frames[side+'End']+trajstart][locals()[side+'ToeCol']+1]) - float(filelist[frames[side+'Start']+trajstart][locals()[side+'ToeCol']+1]))/1000
except ValueError:
output[side+'StepLen'] = 'NA'
        output[side+'SpeedCalc'] = 'NA' if output[side+'StepLen'] == 'NA' else output[side+'StepLen'] / output[side+'StepTime']  # keep 'NA' rather than dividing it
#import pdb; pdb.set_trace()
return output
def readangles(filelist):
"""
Read a numpy array object in Vicon export format with model outputs.
Output is a dictionary with the following parts:-
LeftStartFrame/RightStartFrame = frame of initial footstrikes in marked stride
LeftEndFrame/RightEndFrame = frame of final footstrikes in marked stride
LeftAnkleAngle/RightAnkleAngle = list of absolute ankle angles throughout trial
LeftSpeed/RightSpeed = walking speed
LeftFoffFrame/RightFoffFrame = frame of foot off in marked stride
StrideLen = stride length in marked stride
"""
filelen = len(filelist) - 2
output = {'RightAnkleAngle': [], 'LeftAnkleAngle': [], 'Frames': {}}
anglestart = filelen
LeftStrike = []
RightStrike = []
events = 0
for n in range(filelen):
try:
if filelist[n][0] == 'Model Outputs':
anglestart = n + 5
elif filelist[n][0] == 'Events':
events = 1
elif filelist[n][2] == 'Walking Speed':
output[filelist[n][1]+'Speed'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Off' and events == 1:
# Footoff frame in events
output['Frames'].update({filelist[n][1]+'Foff' : int(float(filelist[n][3]) * 100)})
elif filelist[n][2] == 'Stride Length':
output[filelist[n][1]+'StrideLen'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Strike':
# Convert seconds to frames at 100Hz.
if filelist[n][1] == 'Left':
LeftStrike.append(int(float(filelist[n][3]) * 100))
elif filelist[n][1] == 'Right':
RightStrike.append(int(float(filelist[n][3]) * 100))
elif n >= anglestart:
# List ankle abs angles, convert to float if possible
try:
output['LeftAnkleAngle'].append(float(filelist[n][2]))
except ValueError:
output['LeftAnkleAngle'].append(filelist[n][2])
try:
output['RightAnkleAngle'].append(float(filelist[n][101]))
except ValueError:
output['RightAnkleAngle'].append(filelist[n][101])
except IndexError:
continue
sides = ['Left', 'Right']
for side in sides:
output['Frames'].update({side+'Start' : min(locals()[side+'Strike'])})
output['Frames'].update({side+'End' : max(locals()[side+'Strike'])})
output.update(tableread(filelist,anglestart,output['Frames'],'Angle'))
if anglestart == filelen:
raise NameError('No angles in angle file!')
for side in sides:
mintoe = min(output[side[0]+'AnkleAnglesX'])
midswingframe = int(output['Frames'][side+'Foff']/2 + output['Frames'][side+'End']/2 - output['Frames'][side+'Start'])
output[side+'Clearance'] = output[side[0]+'AnkleAnglesX'][midswingframe] - mintoe
#import pdb; pdb.set_trace()
return output
def onetrial(trialnum):
"""
Read in files for a single trial, extract useful information.
Data must be in CSV files in subdirectories Angles and Trajectories.
Gives an empty dictionary if no stride data present in angles file.
"""
eventsexist = False
anglelist = csvread("Angles/%s.csv" % (trialnum, ))
for n in range(len(anglelist)):
for m in range(len(anglelist[n])):
if anglelist[n][m] == 'Events':
eventsexist = True
if eventsexist == False:
print("WARNING: no events in angles file, aborting with empty output.")
return {}
angles = readangles(anglelist)
trajlist = csvread("Trajectories/%s.csv" % (trialnum, ))
trajectories = readtrajectories(trajlist, angles['Frames'])
output = {**trajectories, **angles}
output['TrialUsed'] = trialnum
#import pdb; pdb.set_trace()
return output
def minclearance(ToeZ, StartFrame, FootOff, EndFrame, MidSwingStart, MidSwingEnd):
"""
Returns the minimum foot clearance in middle of swing in marked stride.
Inputs: Toe marker Z (list), Start frame, foot off frame, end frame, start fraction of mid swing, end fraction of mid swing.
    Output: minimum mid-swing clearance above the lowest toe height in the marked stride.
"""
swing = ToeZ[FootOff:EndFrame]
ground = min(ToeZ[StartFrame:EndFrame])
middleframes = [(FootOff+int(MidSwingStart*len(swing))),(EndFrame-int(MidSwingEnd*len(swing)))]
MinZ = min(ToeZ[middleframes[0]:middleframes[1]])
clearance = MinZ - ground
return clearance
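# Worked example (sketch, synthetic numbers): toe-marker heights over ten frames.
#   ToeZ = [50, 40, 30, 30, 35, 60, 90, 70, 55, 45]
#   minclearance(ToeZ, StartFrame=0, FootOff=4, EndFrame=10,
#                MidSwingStart=0.25, MidSwingEnd=0.25)
# swing covers frames 4-9 (6 values), ground = 30, the mid-swing window is
# frames 5-8, MinZ = 55, so the function returns 55 - 30 = 25.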
def arraycleaner(array):
"""
Make numpy array rows the same length by shortening long ones.
"""
lengths = [len(x) for x in array]
#shortindex = lengths.index(min(lengths))
shortest = min(lengths)
for n in range(len(array)):
line = array[n]
if len(array[n]) != shortest:
this = len(line)
cut = np.ceil(1/((this/shortest) - 1))
#import pdb; pdb.set_trace()
for m in range(len(array[n])):
|
for m in range(len(array[n])):
try:
if line[m] == 'del':
del line[m]
except IndexError:
continue
array[n] = line[0:shortest]
return array
if __name__ == '__main__':
trials = ['OWN','FACTORY','R0','R50','R100','R150','R300','X0','X50','X100','X150','X300','D50','D100','D150']
#TEST CASE (comment out the above line, uncomment line below this)
#trials = ['OWN']
subject = {}
print("Please enter the participant ID, e.g. 1")
participant = input()
print("Please enter the affected side, e.g. Left. If both, enter Bilateral.")
affectedside = input().capitalize()
if affectedside not in ('Left', 'Right', 'Bilateral'):
raise ValueError("GOD DAMNIT ENTER LEFT OR RIGHT OR BILATERAL. WHAT THE HELL.")
for trial in trials:
print("""Please enter the trial number for %s. If none, leave blank.""" % (trial, ))
trialnum = input()
if trialnum == '':
subject[trial] = {}
else:
subject[trial] = onetrial(trialnum)
subject[trial]['AffectedSide'] = affectedside
afile = open(r'Subject%s.pkl' % (participant, ), 'wb')
pickle.dump(subject, afile)
afile.close() | if m % cut == 0 and m != 0:
line[m] = 'del' | conditional_block |
kinematics.py | # -*- coding: utf-8 -*-
"""
Kinematics analysis: multiple trials - single subject
Simon Marchant 2017
"""
import csv
import numpy as np
import pickle
def csvread(file):
"""
Read a CSV file and output a numpy array object of that file.
"""
thisfile = open(file)
thisreader = csv.reader(thisfile)
filelist = np.array(list(thisreader))
return filelist
def tableread(filelist, start, frames, onlyinclude=''):
"""
Read a list of lists, from a Nexus CSV export, to extract vector data.
Output is a dictionary of lists.
Inputs:-
filelist = the data
start = the row number where data to be read starts
frames = dict of Start and End frames for strides
onlyinclude = optional argument, only columns where title includes this will be included
"""
filewidth = len(filelist[start-2])-1
output = {}
startframe = start - int(filelist[start][0]) + 1
for col in range(2,filewidth):
# Name the datatype in each column
if filelist[start-3][col]:
tmp = filelist[start-3][col]+filelist[start-2][col]
elif filelist[start-3][col-1]:
tmp = filelist[start-3][col-1]+filelist[start-2][col]
elif filelist[start-3][col-2]:
tmp = filelist[start-3][col-2]+filelist[start-2][col]
name = tmp[tmp.rfind(":")+1:]
if onlyinclude in name or not onlyinclude:
output[name] = []
side = ('Right') if name[0]=='R' else ('Left')
# Make a list of values within the marked stride frames
for row in range(startframe+frames[side+'Start'], startframe+frames[side+'End']):
if filelist[row][col] == '':
output[name].append('NA')
else:
output[name].append(float(filelist[row][col]))
#import pdb; pdb.set_trace()
return output
def readtrajectories(filelist, frames):
"""
Read a numpy array object in Vicon export format with marker trajectories.
Requires frame dictionary with initial contact, foot off, and end of swing.
Values that cannot be calculated from inputs will output 'NA'.
Output is a dictionary with the following parts:-
LeftToeZ/RightToeZ = vector of toe marker Z coord throughout trial
LeftStepTime/RightStepTime = time taken for marked step, in seconds
LeftFoffFraction/RightFoffFraction = footoff as fraction of total step time
LeftStepLen/RightStepLen = length of marked step length in metres
LeftSpeedCalc/RightSpeedCalc = walking speed calculated from these values
"""
filelen = len(filelist) - 2
trajstart = filelen
output = {}
LtoeZ = []
RtoeZ = []
LeftToeCol = 29
RightToeCol = 47
for row in range(filelen):
try:
# Assign gait parameters to dictionary
if filelist[row][0] == 'Trajectories':
trajstart = row + 5
except IndexError:
continue
output.update(tableread(filelist,trajstart,frames))
sides = ['Left', 'Right']
for side in sides:
output[side+'StepTime'] = (frames[side+'End']-frames[side+'Start'])/100
output[side+'FoffFraction'] = (frames[side+'Foff']-frames[side+'Start']) / output[side+'StepTime']
try:
output[side+'StepLen'] = abs(float(filelist[frames[side+'End']+trajstart][locals()[side+'ToeCol']+1]) - float(filelist[frames[side+'Start']+trajstart][locals()[side+'ToeCol']+1]))/1000
except ValueError:
output[side+'StepLen'] = 'NA'
        output[side+'SpeedCalc'] = 'NA' if output[side+'StepLen'] == 'NA' else output[side+'StepLen'] / output[side+'StepTime']  # keep 'NA' rather than dividing it
#import pdb; pdb.set_trace()
return output
def readangles(filelist):
"""
Read a numpy array object in Vicon export format with model outputs.
Output is a dictionary with the following parts:-
LeftStartFrame/RightStartFrame = frame of initial footstrikes in marked stride
LeftEndFrame/RightEndFrame = frame of final footstrikes in marked stride
LeftAnkleAngle/RightAnkleAngle = list of absolute ankle angles throughout trial
LeftSpeed/RightSpeed = walking speed
LeftFoffFrame/RightFoffFrame = frame of foot off in marked stride
StrideLen = stride length in marked stride
"""
filelen = len(filelist) - 2
output = {'RightAnkleAngle': [], 'LeftAnkleAngle': [], 'Frames': {}}
anglestart = filelen
LeftStrike = []
RightStrike = []
events = 0
for n in range(filelen):
try:
if filelist[n][0] == 'Model Outputs':
anglestart = n + 5
elif filelist[n][0] == 'Events':
events = 1
elif filelist[n][2] == 'Walking Speed':
output[filelist[n][1]+'Speed'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Off' and events == 1:
# Footoff frame in events
output['Frames'].update({filelist[n][1]+'Foff' : int(float(filelist[n][3]) * 100)})
elif filelist[n][2] == 'Stride Length':
output[filelist[n][1]+'StrideLen'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Strike':
# Convert seconds to frames at 100Hz.
if filelist[n][1] == 'Left':
LeftStrike.append(int(float(filelist[n][3]) * 100))
elif filelist[n][1] == 'Right':
RightStrike.append(int(float(filelist[n][3]) * 100))
elif n >= anglestart:
# List ankle abs angles, convert to float if possible
try:
output['LeftAnkleAngle'].append(float(filelist[n][2]))
except ValueError:
output['LeftAnkleAngle'].append(filelist[n][2])
try:
output['RightAnkleAngle'].append(float(filelist[n][101]))
except ValueError:
output['RightAnkleAngle'].append(filelist[n][101])
except IndexError:
continue
sides = ['Left', 'Right']
for side in sides:
output['Frames'].update({side+'Start' : min(locals()[side+'Strike'])})
output['Frames'].update({side+'End' : max(locals()[side+'Strike'])})
output.update(tableread(filelist,anglestart,output['Frames'],'Angle'))
if anglestart == filelen:
raise NameError('No angles in angle file!')
for side in sides:
mintoe = min(output[side[0]+'AnkleAnglesX'])
midswingframe = int(output['Frames'][side+'Foff']/2 + output['Frames'][side+'End']/2 - output['Frames'][side+'Start'])
output[side+'Clearance'] = output[side[0]+'AnkleAnglesX'][midswingframe] - mintoe
#import pdb; pdb.set_trace()
return output
def onetrial(trialnum):
"""
Read in files for a single trial, extract useful information.
Data must be in CSV files in subdirectories Angles and Trajectories.
Gives an empty dictionary if no stride data present in angles file.
"""
eventsexist = False
anglelist = csvread("Angles/%s.csv" % (trialnum, ))
for n in range(len(anglelist)):
for m in range(len(anglelist[n])):
if anglelist[n][m] == 'Events':
eventsexist = True
if eventsexist == False:
print("WARNING: no events in angles file, aborting with empty output.")
return {}
angles = readangles(anglelist)
trajlist = csvread("Trajectories/%s.csv" % (trialnum, ))
trajectories = readtrajectories(trajlist, angles['Frames'])
output = {**trajectories, **angles}
output['TrialUsed'] = trialnum
#import pdb; pdb.set_trace()
return output
def minclearance(ToeZ, StartFrame, FootOff, EndFrame, MidSwingStart, MidSwingEnd):
"""
Returns the minimum foot clearance in middle of swing in marked stride.
Inputs: Toe marker Z (list), Start frame, foot off frame, end frame, start fraction of mid swing, end fraction of mid swing.
    Output: minimum mid-swing clearance above the lowest toe height in the marked stride.
"""
swing = ToeZ[FootOff:EndFrame]
ground = min(ToeZ[StartFrame:EndFrame])
middleframes = [(FootOff+int(MidSwingStart*len(swing))),(EndFrame-int(MidSwingEnd*len(swing)))]
MinZ = min(ToeZ[middleframes[0]:middleframes[1]])
clearance = MinZ - ground
return clearance
def | (array):
"""
Make numpy array rows the same length by shortening long ones.
"""
lengths = [len(x) for x in array]
#shortindex = lengths.index(min(lengths))
shortest = min(lengths)
for n in range(len(array)):
line = array[n]
if len(array[n]) != shortest:
this = len(line)
cut = np.ceil(1/((this/shortest) - 1))
#import pdb; pdb.set_trace()
for m in range(len(array[n])):
if m % cut == 0 and m != 0:
line[m] = 'del'
for m in range(len(array[n])):
try:
if line[m] == 'del':
del line[m]
except IndexError:
continue
array[n] = line[0:shortest]
return array
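# Example (sketch): equalising ragged rows before stacking them into an array.
#   rows = [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4]]
#   arraycleaner(rows)
# For the longer row cut = 2, so every second element (indices 2 and 4) is
# marked and removed, and both rows are then sliced to the shortest length,
# giving [[1, 2, 4, 6], [1, 2, 3, 4]].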
if __name__ == '__main__':
trials = ['OWN','FACTORY','R0','R50','R100','R150','R300','X0','X50','X100','X150','X300','D50','D100','D150']
#TEST CASE (comment out the above line, uncomment line below this)
#trials = ['OWN']
subject = {}
print("Please enter the participant ID, e.g. 1")
participant = input()
print("Please enter the affected side, e.g. Left. If both, enter Bilateral.")
affectedside = input().capitalize()
if affectedside not in ('Left', 'Right', 'Bilateral'):
raise ValueError("GOD DAMNIT ENTER LEFT OR RIGHT OR BILATERAL. WHAT THE HELL.")
for trial in trials:
print("""Please enter the trial number for %s. If none, leave blank.""" % (trial, ))
trialnum = input()
if trialnum == '':
subject[trial] = {}
else:
subject[trial] = onetrial(trialnum)
subject[trial]['AffectedSide'] = affectedside
afile = open(r'Subject%s.pkl' % (participant, ), 'wb')
pickle.dump(subject, afile)
afile.close() | arraycleaner | identifier_name |
kinematics.py | # -*- coding: utf-8 -*-
"""
Kinematics analysis: multiple trials - single subject
Simon Marchant 2017
"""
import csv
import numpy as np
import pickle
def csvread(file):
"""
Read a CSV file and output a numpy array object of that file.
"""
thisfile = open(file)
thisreader = csv.reader(thisfile)
filelist = np.array(list(thisreader))
return filelist
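# Example (sketch): loading one exported trial; 'Angles/12.csv' is a hypothetical
# path following the Angles/ and Trajectories/ layout expected by onetrial() below.
#   anglelist = csvread('Angles/12.csv')
#   print(len(anglelist))   # number of rows in the raw Nexus export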
def tableread(filelist, start, frames, onlyinclude=''):
"""
Read a list of lists, from a Nexus CSV export, to extract vector data.
Output is a dictionary of lists.
Inputs:-
filelist = the data
start = the row number where data to be read starts
frames = dict of Start and End frames for strides
onlyinclude = optional argument, only columns where title includes this will be included
"""
filewidth = len(filelist[start-2])-1
output = {}
startframe = start - int(filelist[start][0]) + 1
for col in range(2,filewidth):
# Name the datatype in each column
if filelist[start-3][col]:
tmp = filelist[start-3][col]+filelist[start-2][col]
elif filelist[start-3][col-1]:
tmp = filelist[start-3][col-1]+filelist[start-2][col]
elif filelist[start-3][col-2]:
tmp = filelist[start-3][col-2]+filelist[start-2][col]
name = tmp[tmp.rfind(":")+1:]
if onlyinclude in name or not onlyinclude:
output[name] = []
side = ('Right') if name[0]=='R' else ('Left')
# Make a list of values within the marked stride frames
for row in range(startframe+frames[side+'Start'], startframe+frames[side+'End']):
if filelist[row][col] == '':
output[name].append('NA')
else:
output[name].append(float(filelist[row][col]))
#import pdb; pdb.set_trace()
return output
def readtrajectories(filelist, frames):
"""
Read a numpy array object in Vicon export format with marker trajectories.
Requires frame dictionary with initial contact, foot off, and end of swing.
Values that cannot be calculated from inputs will output 'NA'.
Output is a dictionary with the following parts:-
LeftToeZ/RightToeZ = vector of toe marker Z coord throughout trial
LeftStepTime/RightStepTime = time taken for marked step, in seconds
LeftFoffFraction/RightFoffFraction = footoff as fraction of total step time
LeftStepLen/RightStepLen = length of marked step length in metres
LeftSpeedCalc/RightSpeedCalc = walking speed calculated from these values
"""
filelen = len(filelist) - 2
trajstart = filelen
output = {}
LtoeZ = []
RtoeZ = []
LeftToeCol = 29
RightToeCol = 47
for row in range(filelen):
try:
# Assign gait parameters to dictionary
if filelist[row][0] == 'Trajectories':
trajstart = row + 5
except IndexError:
continue
output.update(tableread(filelist,trajstart,frames))
sides = ['Left', 'Right']
for side in sides:
output[side+'StepTime'] = (frames[side+'End']-frames[side+'Start'])/100
output[side+'FoffFraction'] = (frames[side+'Foff']-frames[side+'Start']) / output[side+'StepTime']
try:
output[side+'StepLen'] = abs(float(filelist[frames[side+'End']+trajstart][locals()[side+'ToeCol']+1]) - float(filelist[frames[side+'Start']+trajstart][locals()[side+'ToeCol']+1]))/1000
except ValueError:
output[side+'StepLen'] = 'NA'
        output[side+'SpeedCalc'] = 'NA' if output[side+'StepLen'] == 'NA' else output[side+'StepLen'] / output[side+'StepTime']  # keep 'NA' rather than dividing it
#import pdb; pdb.set_trace()
return output
def readangles(filelist):
"""
Read a numpy array object in Vicon export format with model outputs.
Output is a dictionary with the following parts:-
LeftStartFrame/RightStartFrame = frame of initial footstrikes in marked stride
LeftEndFrame/RightEndFrame = frame of final footstrikes in marked stride
LeftAnkleAngle/RightAnkleAngle = list of absolute ankle angles throughout trial
LeftSpeed/RightSpeed = walking speed
LeftFoffFrame/RightFoffFrame = frame of foot off in marked stride
StrideLen = stride length in marked stride
"""
filelen = len(filelist) - 2
output = {'RightAnkleAngle': [], 'LeftAnkleAngle': [], 'Frames': {}}
anglestart = filelen
LeftStrike = []
RightStrike = []
events = 0
for n in range(filelen):
try:
if filelist[n][0] == 'Model Outputs':
anglestart = n + 5
elif filelist[n][0] == 'Events':
events = 1
elif filelist[n][2] == 'Walking Speed':
output[filelist[n][1]+'Speed'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Off' and events == 1:
# Footoff frame in events
output['Frames'].update({filelist[n][1]+'Foff' : int(float(filelist[n][3]) * 100)})
elif filelist[n][2] == 'Stride Length':
output[filelist[n][1]+'StrideLen'] = float(filelist[n][3])
elif filelist[n][2] == 'Foot Strike':
# Convert seconds to frames at 100Hz.
if filelist[n][1] == 'Left':
LeftStrike.append(int(float(filelist[n][3]) * 100))
elif filelist[n][1] == 'Right':
RightStrike.append(int(float(filelist[n][3]) * 100))
elif n >= anglestart:
# List ankle abs angles, convert to float if possible
try:
output['LeftAnkleAngle'].append(float(filelist[n][2]))
except ValueError:
output['LeftAnkleAngle'].append(filelist[n][2])
try:
output['RightAnkleAngle'].append(float(filelist[n][101]))
except ValueError:
output['RightAnkleAngle'].append(filelist[n][101])
except IndexError:
continue
sides = ['Left', 'Right']
for side in sides:
output['Frames'].update({side+'Start' : min(locals()[side+'Strike'])})
output['Frames'].update({side+'End' : max(locals()[side+'Strike'])})
output.update(tableread(filelist,anglestart,output['Frames'],'Angle'))
if anglestart == filelen:
raise NameError('No angles in angle file!')
for side in sides:
mintoe = min(output[side[0]+'AnkleAnglesX'])
midswingframe = int(output['Frames'][side+'Foff']/2 + output['Frames'][side+'End']/2 - output['Frames'][side+'Start'])
output[side+'Clearance'] = output[side[0]+'AnkleAnglesX'][midswingframe] - mintoe
#import pdb; pdb.set_trace()
return output
def onetrial(trialnum):
"""
Read in files for a single trial, extract useful information.
Data must be in CSV files in subdirectories Angles and Trajectories.
Gives an empty dictionary if no stride data present in angles file.
"""
eventsexist = False
anglelist = csvread("Angles/%s.csv" % (trialnum, ))
for n in range(len(anglelist)):
for m in range(len(anglelist[n])):
if anglelist[n][m] == 'Events':
eventsexist = True
if eventsexist == False:
print("WARNING: no events in angles file, aborting with empty output.")
return {}
angles = readangles(anglelist)
trajlist = csvread("Trajectories/%s.csv" % (trialnum, ))
trajectories = readtrajectories(trajlist, angles['Frames'])
output = {**trajectories, **angles}
output['TrialUsed'] = trialnum
#import pdb; pdb.set_trace()
return output
def minclearance(ToeZ, StartFrame, FootOff, EndFrame, MidSwingStart, MidSwingEnd):
|
def arraycleaner(array):
"""
Make numpy array rows the same length by shortening long ones.
"""
lengths = [len(x) for x in array]
#shortindex = lengths.index(min(lengths))
shortest = min(lengths)
for n in range(len(array)):
line = array[n]
if len(array[n]) != shortest:
this = len(line)
cut = np.ceil(1/((this/shortest) - 1))
#import pdb; pdb.set_trace()
for m in range(len(array[n])):
if m % cut == 0 and m != 0:
line[m] = 'del'
for m in range(len(array[n])):
try:
if line[m] == 'del':
del line[m]
except IndexError:
continue
array[n] = line[0:shortest]
return array
if __name__ == '__main__':
trials = ['OWN','FACTORY','R0','R50','R100','R150','R300','X0','X50','X100','X150','X300','D50','D100','D150']
#TEST CASE (comment out the above line, uncomment line below this)
#trials = ['OWN']
subject = {}
print("Please enter the participant ID, e.g. 1")
participant = input()
print("Please enter the affected side, e.g. Left. If both, enter Bilateral.")
affectedside = input().capitalize()
if affectedside not in ('Left', 'Right', 'Bilateral'):
raise ValueError("GOD DAMNIT ENTER LEFT OR RIGHT OR BILATERAL. WHAT THE HELL.")
for trial in trials:
print("""Please enter the trial number for %s. If none, leave blank.""" % (trial, ))
trialnum = input()
if trialnum == '':
subject[trial] = {}
else:
subject[trial] = onetrial(trialnum)
subject[trial]['AffectedSide'] = affectedside
afile = open(r'Subject%s.pkl' % (participant, ), 'wb')
pickle.dump(subject, afile)
afile.close() | """
Returns the minimum foot clearance in middle of swing in marked stride.
Inputs: Toe marker Z (list), Start frame, foot off frame, end frame, start fraction of mid swing, end fraction of mid swing.
    Output: minimum mid-swing clearance above the lowest toe height in the marked stride.
"""
swing = ToeZ[FootOff:EndFrame]
ground = min(ToeZ[StartFrame:EndFrame])
middleframes = [(FootOff+int(MidSwingStart*len(swing))),(EndFrame-int(MidSwingEnd*len(swing)))]
MinZ = min(ToeZ[middleframes[0]:middleframes[1]])
clearance = MinZ - ground
return clearance | identifier_body |
client.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
import urllib2
import urllib
import time
import re
import httplib
import sys
import os
import socket
import cStringIO
#import io
from PIL import Image #dont use import Image ,maybe different
socket.setdefaulttimeout(15) #global
reload(sys)
sys.setdefaultencoding('utf-8')
# Without setting the default encoding, unicode handling gets messy; on Windows just use u'xxx' literals.
from gui import MsgDialog
import wx.richtext as rt
class MainFrame(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='网文快存', pos=wx.DefaultPosition,
size=(776, 480), style=wx.DEFAULT_FRAME_STYLE):#size=wx.DEFAULT
wx.Frame.__init__(self, parent, id, title, pos, size, style)
self.SetIcon(wx.Icon('eye.png', wx.BITMAP_TYPE_PNG)) #loadIcon.ico
menubar=wx.MenuBar()
file_menu=wx.Menu()
help_menu=wx.Menu()
set_menu=wx.Menu()
file_menu.Append(101,'&Open','Open a new document')
file_menu.Append(102,'&Save','Save the document')
file_menu.AppendSeparator()
quit=wx.MenuItem(file_menu,105,'&Quit\tCtrl+Q','Quit the Application')
#quit.SetBitmap(wx.Image('stock_exit-16.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap())
file_menu.AppendItem(quit)
help_menu.Append(103,'&Help','Seek help')
set_menu.Append(104,'&设置','set profiles')
menubar.Append(file_menu,'&File')
menubar.Append(help_menu,'&Help')
menubar.Append(set_menu,'&Setting')
self.SetMenuBar( menubar )
wx.EVT_MENU(self, 105, self.OnQuit)
wx.EVT_MENU(self, 101, self.OnOpenFile)
wx.EVT_MENU(self, 102, self.OnSaveAs)
wx.EVT_MENU(self, 103, self.OnHelp)
wx.EVT_MENU(self, 104, self.OnSet)
panel = wx.Panel(self, wx.ID_ANY)
button1 = wx.Button(panel, wx.ID_ANY, '快取')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnGet, button1)
button2 = wx.Button(panel, wx.ID_ANY, '退出') #
self.Bind(wx.EVT_BUTTON, self.OnQuit, button2)
button3 = wx.Button(panel, wx.ID_ANY, '保存修改') #
self.Bind(wx.EVT_BUTTON, self.OnSave2server, button3)
button2.SetBackgroundColour("gray")
button2.SetForegroundColour("Red")
button3.SetBackgroundColour("gray")
button3.SetForegroundColour("Navy")
self.urlButton = wx.Button(panel, wx.ID_ANY, 'URL:',size=(50,36))
self.urlButton.SetForegroundColour('blue')
self.Bind(wx.EVT_BUTTON, self.OnClear, self.urlButton)
        self.Bind(wx.EVT_CLOSE, self.OnQuit)  # should also run when the window is closed with the X button
        self.Bind(wx.EVT_ICONIZE, self.OnIconfiy)  # action when the window is minimised to the taskbar or restored, e.g. show a hint or play a sound
#urlLabel = wx.StaticText(panel, -1, "URL:")
default_url='http://192.168.1.6/test.html'
#"http://www.weixinxi.wang/blog/aitrcle.html?id=9";
#"http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html"
self.urlText = wx.TextCtrl(panel, -1, default_url,size=(250, 38), style=wx.TE_MULTILINE) #创建一个文本控件
titleLabel = wx.StaticText(panel, -1, "标题:")
titleLabel.SetForegroundColour('blue')
self.titleText = wx.TextCtrl(panel, -1, "",size=(200, 30))
self.titleText.SetInsertionPoint(0)
self.urlText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
richTextLabel = wx.StaticText(panel, -1, "正文(可编辑):")
#self.richText = wx.TextCtrl(panel, -1, "\n\n\n\n\t\t\t\t\t\t\t\t\t^_^",style=wx.TE_MULTILINE|wx.TE_RICH2) #创建丰富文本控件
self.richText = rt.RichTextCtrl(panel, -1)
#self.richText.SetInsertionPoint(0)
self.richText.SetForegroundColour('blue')
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
#self.richText.Newline()
        #self.richText.SetDefaultStyle(wx.TextAttr("blue"))  # set the text style (characters 1 to 4): foreground and background colour
        #points = self.richText.GetFont().GetPointSize()
        #f = wx.Font(points + 3, wx.ROMAN, wx.ITALIC, wx.BOLD, True)  # create a font
        #self.richText.SetStyle(68, 82, wx.TextAttr("blue", wx.NullColour, f))  # apply the style with the new font
#sizer = wx.FlexGridSizer(cols=3, hgap=6, vgap=6)
#sizer = wx.BoxSizer(wx.HORIZONTAL) #wx.VERTICAL
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.urlButton,flag=wx.LEFT)
hbox1.Add(self.urlText,proportion=1)
hbox1.Add(button1,flag=wx.LEFT,border=8)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(titleLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.titleText,proportion=2)
hbox2.Add(catalogLabel,flag=wx.ALIGN_LEFT,border=8)
hbox2.Add(self.catalogText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10)) #25px space
vbox.Add(richTextLabel,flag=wx.LEFT|wx.ALIGN_LEFT,border=18)
vbox.Add((-1, 10))
hbox3.Add(self.richText,proportion=1, flag=wx.EXPAND)
vbox.Add(hbox3, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=10)
vbox.Add((-1, 10))
hbox4.Add(button3,flag=wx.ALIGN_RIGHT|wx.RIGHT,border=10) #save
hbox4.Add(button2,wx.RIGHT,border=10) #exit
vbox.Add(hbox4,flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM,border=10)
        #sizer.Add(button1, 0)  # 0 is the proportion
#sizer.Add(button2, 3)
#sizer.Add(button3, 5,wx.BOTTOM|wx.LEFT,wx.ALIGN_BOTTOM)
#sizer.Add(button4, 5,wx.RIGHT|wx.BOTTOM,wx.ALIGN_BOTTOM)
#panel.SetSizer(sizer)
#sizer.AddMany([urlLabel, self.urlText,button1,titleLabel,self.titleText,-1 ,richTextLabel,self.richText,-1])
panel.SetSizer(vbox)
        self.clip = wx.TheClipboard  # the system clipboard; oddly, wx.Clipboard() does not work correctly here
#http://www.wxpython.org/docs/api/wx.Clipboard-class.html
        # Check the system clipboard for a newly copied URL; this could also be done
        # on a left click anywhere in the window, or on repaint via wx.EVT_PAINT.
        #panel.Bind(wx.EVT_LEFT_DOWN, self.OnClickCheck)  # works on the panel but, oddly, not on the frame, so changed to:
self.Bind(wx.EVT_ENTER_WINDOW,self.OnEnterWin)
self.host=''
self.filename=''
self.user=''
self.pw=''
self.readConfigure()
def OnHide(self, event):
self.Hide()
def OnGet(self, event):
url=self.urlText.GetValue().strip()
catalog=self.catalogText.GetValue().strip()
#the dir and name indicate where to save in the server
if(url==''):
wx.MessageBox('您还没输入网址','^0^')
return
try:
src=urllib.urlopen('http://'+self.host+'/doslim?url='+url+'&dir='+catalog+'&name=default'+'&uid='+self.user)
#so strange that the urllib2.urlopen not work properly at times,is it beacause the server i write has problem of sending packet headers?
text=src.read()
src.close()
#print 'text:',text[0:40]
# flist=re.findall(r'^filename:(.*?)\n',text)
nm=re.search(r'(?<=filename:).+?(?=$)',text,re.M)
if nm!=None:
self.filename=nm.group(0)
# print 'filename(0):%s<<<'%self.filename[0]
self.filename=self.filename.strip()
print 'read size:',len(text)
print 'get filename:',self.filename #逗号变成空格
# text=re.sub('^filename:%s'%self.filename,'',text)
text=text.replace('filename:%s'%self.filename,'')
self.titleText.SetValue(self.filename)
self.showContent(text.strip()) #content text has some format such as URL
except Exception,e:
print e
wx.MessageBox('请检查您的网络', '网络连接出错')
    def showContent(self,content):  # parse special parts of the content, e.g. image references, and render them in the rich text box
        # Image markers look like [[<img src="/image/imgFilename">]]; the full server path should be /uid/category/image/filename
#self.richText.WriteText(content)
#self.richText.WriteText('-------------------\n')
self.richText.SetValue('')
lines=content.split('\n')
for ln in lines:
if ln.find('##<img src=') >=0:
print ln
pat=re.compile(r'##<img src="(.*?)"/>##')
try:
img_src=pat.findall(ln)[0]
print 'find img_src:',img_src
catalog=self.catalogText.GetValue().strip()
url='http://'+self.host+'/dl?'+self.user+'/'+catalog+img_src
img_str=urllib2.urlopen(url).read() #type str
print 'size:',len(img_str)
image_i = cStringIO.StringIO(img_str)
# print 'type of image_file:',type(image_file)
pil_image=Image.open(image_i)
wx_img=self.PILToWX(pil_image)
self.richText.WriteImage(wx_img)
# self.richText.AddImage(image)
except Exception,e:
print e
else :
self.richText.WriteText(ln)#AppendText(ln)
self.richText.Newline()
#self.richText.SetValue(content)
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
def PILToWX(self, pil_image):
#"convert a PIL imageto a wxImage"
if pil_image.mode != 'RGB': # SetData() requires an RGB image
pil_image = pil_image.convert('RGB')
imageData = pil_image.tostring('raw', 'RGB')
imageWx = wx.EmptyImage(pil_image.size[0], pil_image.size[1])
imageWx.SetData(imageData)
return imageWx
#bitmap = wx.BitmapFromImage(image)
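    # Usage sketch (hypothetical file name): convert a locally loaded Pillow image
    # and write it into the rich text control, as showContent() does for images
    # fetched from the server. Note that tostring() used above was renamed
    # tobytes() in newer Pillow releases.
    #   pil_img = Image.open('example.jpg')
    #   wx_img = self.PILToWX(pil_img)
    #   self.richText.WriteImage(wx_img)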
def OnIconfiy(self, event):
wx.MessageBox('好好学习,天天向上!', '*送你一句良言*')
event.Skip()
def OnClear(self,event):
self.urlText.Clear()
def OnHelp(self,event):
wx.MessageBox('1.复制粘帖网址到输入框,点击获取即可,内容会保存到云端\n2.您可以对获取到的内容进行编辑并重新保存至服务端\n3.您还可以导入导出文本文件', '*使用帮助*')
def OnQuit(self, event):
self.Destroy() #or close()
def OnSave2server(self, event):
text=self.richText.GetValue()
catalog=self.catalogText.GetValue().strip()
if text==None or catalog==None:
wx.MessageBox('不能为空', '上传失败')
return
boundary='---------%s'%hex(int(time.time()*1000))
data=[] #a list
# data.append('\r\n')
data.append('--%s'%boundary)
data.append('uid=%s'%self.user)#username uid
data.append('dir=%s'%catalog)#= not : in my server
# print 'append data name:',self.filename
data.append('filename=%s'%self.filename)
        data.append('\n')  # the body layout is ad hoc: the server is home-grown, so this just follows what it expects
        data.append('%s'%(time.asctime()))  # joining the list later puts a line break after every item
#ignore the first line:filename
# body=''.join(data)
# body=body.join('%s'%content)
# body=body.join('\n--%s--\n'%boundary)
data.append(text.encode('utf-8'))
data.append('--%s--\n'%boundary)
body='\r\n'.join(data) #text in textCtrl is unicode
try:
conn=httplib.HTTPConnection(self.host)
conn.request(method="POST",url="/modify",body=body);
response=conn.getresponse();
if response.status==200: #302 etc
#self.richText.SetValue(response)
print '发布成功!^_^!';
wx.MessageBox('修改已保存至云端!', '恭喜')
else:
wx.MessageBox('请检查您的网络', '上传失败')
print "发布失败\^0^/"
conn.close()
except Exception,e:
wx.MessageBox('请检查您的网络', '网络连接出错')
print 'http error:',e
#self.Hide()
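    # Rough layout of the body built above (values illustrative only; the format
    # is whatever the home-grown server expects, not standard multipart/form-data):
    #   -----------0x15c...      <- '--' + boundary
    #   uid=<user id>
    #   dir=<catalog>
    #   filename=<title>
    #   <blank line>
    #   <time.asctime() timestamp>
    #   <edited article text, UTF-8>
    #   -----------0x15c...--    <- closing boundary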
def OnCancel(self,event):
pass
def readConfigure(self):
try:
fh=open('server.conf')
size=len(fh.read())
fh.seek(0)
while(fh.tell()!=size):
data=fh.readline()
if(data[:4] == 'addr'):
self.host=data[5:].strip()#ip or domain,include port
elif(data[:7]=='catalog'):
self.catalogText.SetValue(data[8:].strip())
elif(data[:2]=='id'):
self.user=data[3:].strip()
elif(data[:2]=='pw'):
self.pw=data[3:].strip()
fh.close()
except:
self.host='configuration not found!'
def ReadFile(self,filepath):
if filepath:
try:
fh = open(filepath, 'r')
data = fh.read()
fh.close()
self.richText.SetValue(data)
except :
wx.MessageBox("%s is not a expected file."
% filepath, "error tip",
style = wx.OK | wx.ICON_EXCLAMATION)
def OnOpenFile(self,event):
file_wildcard="All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Open file...", style = wx.OPEN,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.ReadFile(filename)
dlg.Destroy()
def SaveFile(self,filepath):
text=self.richText.GetValue()
fh=open(filepath,'w')
fh.write(text)
fh.close()
def OnSaveAs(self, event):
        # pop up the file-save dialog
file_wildcard="txt files(*.txt)|*.txt|All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Save file as ...", style = wx.SAVE | wx.OVERWRITE_PROMPT,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath().encode('utf-8')
            #if not os.path.splitext(filename)[1]:  # if the filename has no extension
# filename = filename + '.txt'
self.SaveFile(filename)
#self.SetTitle(self.title + '--' + self.savefilename)
dlg.Destroy()
def OnSet(self,event):
set_win = Setting(size=(476, 280)) #640,480 #1.618:1
set_win.Centre()
set_win.Show()
def OnEnterWin(self, evt):
#print 'on enter win'
text_obj = wx.TextDataObject()
if self.clip.IsOpened() or self.clip.Open():
if self.clip.GetData(text_obj):
text_str=text_obj.GetText()
#print 'get text from clipboard',text_str
#check if the text is formal URL
if text_str !='' and re.match(r'^https?:/{2}\w.+$', text_str): #OK
#compare with the URL in input
old_url=self.urlText.GetValue().strip()
if text_str !=old_url :
self.urlText.SetValue(text_str)
# dlg = MsgDialog('URL已粘贴到输入框', '提示', ttl=2)
# dlg.ShowModal()
self.clip.Close()
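        # Example (sketch) of what the regex above accepts:
        #   re.match(r'^https?:/{2}\w.+$', 'http://example.com/a')  -> match
        #   re.match(r'^https?:/{2}\w.+$', 'ftp://example.com')     -> None
        # so only http/https clipboard text replaces the current URL input.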
def showUp(self):
#app = wx.PySimpleApp()
self.Centre()
        self.Show()  # could be made configurable whether the window shows at program startup
#app.MainLoop()
class Setting(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='设置', pos=wx.DefaultPosition,
size=wx.DEFAULT, style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
panel = wx.Panel(self, wx.ID_ANY)
ipLabel = wx.StaticText(panel, -1, "服务器:")
ipLabel.SetForegroundColour('blue')
self.ipText = wx.TextCtrl(panel, -1, "192.168.1.5",size=(250, 38)) #文本控件
portLabel = wx.StaticText(panel, -1, "端口 号:")
portLabel.SetForegroundColour('blue')
self.portText = wx.TextCtrl(panel, -1, "1366",size=(200, 30))
self.portText.SetInsertionPoint(0)
self.ipText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
button1 = wx.Button(panel, wx.ID_ANY, '保存')
button2 = wx.Button(panel, wx.ID_ANY, '取消')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self. | OnSaveConf, button1)
self.Bind(wx.EVT_BUTTON, self.OnCancel, button2)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(ipLabel,flag=wx.LEFT,border=8)
hbox1.Add(self.ipText,proportion=1)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(portLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.portText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10))
hbox3.Add(catalogLabel,flag=wx.LEFT,border=8)
hbox3.Add(self.catalogText,proportion=1)
vbox.Add(hbox3,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 50))
hbox4.Add(button1,flag=wx.LEFT,border=18)
hbox4.Add(button2,flag=wx.ALIGN_LEFT|wx.LEFT,border=8)
vbox.Add(hbox4,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.Bottom,border=10)
panel.SetSizer(vbox)
        self.loadConf()  # load the configuration file and show its values in the UI
def loadConf(self):
fh=open('server.conf')
size=len(fh.read())
fh.seek(0)
while(fh.tell()!=size):
data=fh.readline()
if(data[:4] == 'addr'):
host=data[5:].strip()#ip or domain,include port
elif(data[:7]=='catalog'):
self.catalogText.SetValue(data[8:].strip())
fh.close()
splits=host.split(':')
host=splits[0]
port=splits[1]
self.ipText.SetValue(host)
self.portText.SetValue(port)
def OnSaveConf(self,event):
host=self.ipText.GetValue()
port=self.portText.GetValue()
catalog=self.catalogText.GetValue()
fh=open('server.conf','w')
fh.write('#pageSlimer server configuration\n#addr=(ip/domain):(port)\n')
fh.write('addr=%s:%s'%(host,port))#will not write \n to the end
fh.write('\ncatalog=%s'%catalog)
fh.write('\nid=%s'%self.user)
fh.write('\npw=%s'%self.pw)
fh.close()
self.Destroy()
def OnCancel(self,event):
self.Destroy()
#self.Hide()
#pass
def TestFrame():
app = wx.App() #wx.PySimpleApp()
frame = MainFrame(size=(776, 480)) #640,480 #1.618:1
frame.Centre()
    frame.Show()  # could be made configurable whether the window shows at program startup
    #frame.OnHide(wx.Frame)  # hide it at startup instead
app.MainLoop()
if __name__ == '__main__':
#src=urllib2.urlopen('http://127.0.0.1:1366/doslim?url=http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html&dir=default&name=default')
# text=src.read()
# src.close()
# print 'text:',text[0:40]
TestFrame()
| identifier_body |
|
client.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
import urllib2
import urllib
import time
import re
import httplib
import sys
import os
import socket
import cStringIO
#import io
from PIL import Image #dont use import Image ,maybe different
socket.setdefaulttimeout(15) #global
reload(sys)
sys.setdefaultencoding('utf-8')
# Without setting the default encoding, unicode handling gets messy; on Windows just use u'xxx' literals.
from gui import MsgDialog
import wx.richtext as rt
class MainFrame(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='网文快存', pos=wx.DefaultPosition,
size=(776, 480), style=wx.DEFAULT_FRAME_STYLE):#size=wx.DEFAULT
wx.Frame.__init__(self, parent, id, title, pos, size, style)
self.SetIcon(wx.Icon('eye.png', wx.BITMAP_TYPE_PNG)) #loadIcon.ico
menubar=wx.MenuBar()
file_menu=wx.Menu()
help_menu=wx.Menu()
set_menu=wx.Menu()
file_menu.Append(101,'&Open','Open a new document')
file_menu.Append(102,'&Save','Save the document')
file_menu.AppendSeparator()
quit=wx.MenuItem(file_menu,105,'&Quit\tCtrl+Q','Quit the Application')
#quit.SetBitmap(wx.Image('stock_exit-16.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap())
file_menu.AppendItem(quit)
help_menu.Append(103,'&Help','Seek help')
set_menu.Append(104,'&设置','set profiles')
menubar.Append(file_menu,'&File')
menubar.Append(help_menu,'&Help')
menubar.Append(set_menu,'&Setting')
self.SetMenuBar( menubar )
wx.EVT_MENU(self, 105, self.OnQuit)
wx.EVT_MENU(self, 101, self.OnOpenFile)
wx.EVT_MENU(self, 102, self.OnSaveAs)
wx.EVT_MENU(self, 103, self.OnHelp)
wx.EVT_MENU(self, 104, self.OnSet)
panel = wx.Panel(self, wx.ID_ANY)
button1 = wx.Button(panel, wx.ID_ANY, '快取')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnGet, button1)
button2 = wx.Button(panel, wx.ID_ANY, '退出') #
self.Bind(wx.EVT_BUTTON, self.OnQuit, button2)
button3 = wx.Button(panel, wx.ID_ANY, '保存修改') #
self.Bind(wx.EVT_BUTTON, self.OnSave2server, button3)
button2.SetBackgroundColour("gray")
button2.SetForegroundColour("Red")
button3.SetBackgroundColour("gray")
button3.SetForegroundColour("Navy")
self.urlButton = wx.Button(panel, wx.ID_ANY, 'URL:',size=(50,36))
self.urlButton.SetForegroundColour('blue')
self.Bind(wx.EVT_BUTTON, self.OnClear, self.urlButton)
        self.Bind(wx.EVT_CLOSE, self.OnQuit)  # should also run when the window is closed with the X button
        self.Bind(wx.EVT_ICONIZE, self.OnIconfiy)  # action when the window is minimised to the taskbar or restored, e.g. show a hint or play a sound
#urlLabel = wx.StaticText(panel, -1, "URL:")
default_url='http://192.168.1.6/test.html'
#"http://www.weixinxi.wang/blog/aitrcle.html?id=9";
#"http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html"
self.urlText = wx.TextCtrl(panel, -1, default_url,size=(250, 38), style=wx.TE_MULTILINE) #创建一个文本控件
titleLabel = wx.StaticText(panel, -1, "标题:")
titleLabel.SetForegroundColour('blue')
self.titleText = wx.TextCtrl(panel, -1, "",size=(200, 30))
self.titleText.SetInsertionPoint(0)
self.urlText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
richTextLabel = wx.StaticText(panel, -1, "正文(可编辑):")
#self.richText = wx.TextCtrl(panel, -1, "\n\n\n\n\t\t\t\t\t\t\t\t\t^_^",style=wx.TE_MULTILINE|wx.TE_RICH2) #创建丰富文本控件
self.richText = rt.RichTextCtrl(panel, -1)
#self.richText.SetInsertionPoint(0)
self.richText.SetForegroundColour('blue')
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
#self.richText.Newline()
        #self.richText.SetDefaultStyle(wx.TextAttr("blue"))  # set the text style (characters 1 to 4): foreground and background colour
        #points = self.richText.GetFont().GetPointSize()
        #f = wx.Font(points + 3, wx.ROMAN, wx.ITALIC, wx.BOLD, True)  # create a font
        #self.richText.SetStyle(68, 82, wx.TextAttr("blue", wx.NullColour, f))  # apply the style with the new font
#sizer = wx.FlexGridSizer(cols=3, hgap=6, vgap=6)
#sizer = wx.BoxSizer(wx.HORIZONTAL) #wx.VERTICAL
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.urlButton,flag=wx.LEFT)
hbox1.Add(self.urlText,proportion=1)
hbox1.Add(button1,flag=wx.LEFT,border=8)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(titleLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.titleText,proportion=2)
hbox2.Add(catalogLabel,flag=wx.ALIGN_LEFT,border=8)
hbox2.Add(self.catalogText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10)) #25px space
vbox.Add(richTextLabel,flag=wx.LEFT|wx.ALIGN_LEFT,border=18)
vbox.Add((-1, 10))
hbox3.Add(self.richText,proportion=1, flag=wx.EXPAND)
vbox.Add(hbox3, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=10)
vbox.Add((-1, 10))
hbox4.Add(button3,flag=wx.ALIGN_RIGHT|wx.RIGHT,border=10) #save
hbox4.Add(button2,wx.RIGHT,border=10) #exit
vbox.Add(hbox4,flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM,border=10)
        #sizer.Add(button1, 0)  # 0 is the proportion
#sizer.Add(button2, 3)
#sizer.Add(button3, 5,wx.BOTTOM|wx.LEFT,wx.ALIGN_BOTTOM)
#sizer.Add(button4, 5,wx.RIGHT|wx.BOTTOM,wx.ALIGN_BOTTOM)
#panel.SetSizer(sizer)
#sizer.AddMany([urlLabel, self.urlText,button1,titleLabel,self.titleText,-1 ,richTextLabel,self.richText,-1])
panel.SetSizer(vbox)
        self.clip = wx.TheClipboard  # the system clipboard; oddly, wx.Clipboard() does not work correctly here
#http://www.wxpython.org/docs/api/wx.Clipboard-class.html
        # Check the system clipboard for a newly copied URL; this could also be done
        # on a left click anywhere in the window, or on repaint via wx.EVT_PAINT.
        #panel.Bind(wx.EVT_LEFT_DOWN, self.OnClickCheck)  # works on the panel but, oddly, not on the frame, so changed to:
self.Bind(wx.EVT_ENTER_WINDOW,self.OnEnterWin)
self.host=''
self.filename=''
self.user=''
self.pw=''
self.readConfigure()
def OnHide(self, event):
self.Hide()
def OnGet(self, event):
url=self.urlText.GetValue().strip()
catalog=self.catalogText.GetValue().strip()
#the dir and name indicate where to save in the server
if(url==''):
wx.MessageBox('您还没输入网址','^0^')
return
try:
src=urllib.urlopen('http://'+self.host+'/doslim?url='+url+'&dir='+catalog+'&name=default'+'&uid='+self.user)
#so strange that the urllib2.urlopen not work properly at times,is it beacause the server i write has problem of sending packet headers?
text=src.read()
src.close()
#print 'text:',text[0:40]
# flist=re.findall(r'^filename:(.*?)\n',text)
nm=re.search(r'(?<=filename:).+?(?=$)',text,re.M)
if nm!=None:
self.filename=nm.group(0)
# print 'filename(0):%s<<<'%self.filename[0]
self.filename=self.filename.strip()
print 'read size:',len(text)
print 'get filename:',self.filename #逗号变成空格
# text=re.sub('^filename:%s'%self.filename,'',text)
text=text.replace('filename:%s'%self.filename,'')
self.titleText.SetValue(self.filename)
self.showContent(text.strip()) #content text has some format such as URL
except Exception,e:
print e
wx.MessageBox('请检查您的网络', '网络连接出错')
    def showContent(self,content):  # parse special parts of the content, e.g. image references, and render them in the rich text box
        # Image markers look like [[<img src="/image/imgFilename">]]; the full server path should be /uid/category/image/filename
#self.richText.WriteText(content)
#self.richText.WriteText('-------------------\n')
self.richText.SetValue('')
lines=content.split('\n')
for ln in lines:
if ln.find('##<img src=') >=0:
print ln
pat=re.compile(r'##<img src="(.*?)"/>##')
try:
img_src=pat.findall(ln)[0]
print 'find img_src:',img_src
catalog=self.catalogText.GetValue().strip()
url='http://'+self.host+'/dl?'+self.user+'/'+catalog+img_src
img_str=urllib2.urlopen(url).read() #type str
print 'size:',len(img_str)
image_i = cStringIO.StringIO(img_str)
# print 'type of image_file:',type(image_file)
pil_image=Image.open(image_i)
wx_img=self.PILToWX(pil_image)
self.richText.WriteImage(wx_img)
# self.richText.AddImage(image)
except Exception,e:
print e
else :
self.richText.WriteText(ln)#AppendText(ln)
self.richText.Newline()
#self.richText.SetValue(content)
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
def PILToWX(self, pil_image):
#"convert a PIL imageto a wxImage"
if pil_image.mode != 'RGB': # SetData() requires an RGB image
pil_image = pil_image.convert('RGB')
imageData = pil_image.tostring('raw', 'RGB')
imageWx = wx.EmptyImage(pil_image.size[0], pil_image.size[1])
imageWx.SetData(imageData)
return imageWx
#bitmap = wx.BitmapFromImage(image)
def OnIconfiy(self, event):
wx.MessageBox('好好学习,天天向上!', '*送你一句良言*')
event.Skip()
def OnClear(self,event):
self.urlText.Clear()
def OnHelp(self,event):
wx.MessageBox('1.复制粘帖网址到输入框,点击获取即可,内容会保存到云端\n2.您可以对获取到的内容进行编辑并重新保存至服务端\n3.您还可以导入导出文本文件', '*使用帮助*')
def OnQuit(self, event):
self.Destroy() #or close()
def OnSave2server(self, event):
text=self.richText.GetValue()
catalog=self.catalogText.GetValue().strip()
if text==None or catalog==None:
wx.MessageBox('不能为空', '上传失败')
return
boundary='---------%s'%hex(int(time.time()*1000))
data=[] #a list
# data.append('\r\n')
data.append('--%s'%boundary)
data.append('uid=%s'%self.user)#username uid
data.append('dir=%s'%catalog)#= not : in my server
# print 'append data name:',self.filename
data.append('filename=%s'%self.filename)
        data.append('\n')  # the body layout is ad hoc: the server is home-grown, so this just follows what it expects
        data.append('%s'%(time.asctime()))  # joining the list later puts a line break after every item
#ignore the first line:filename
# body=''.join(data)
# body=body.join('%s'%content)
# body=body.join('\n--%s--\n'%boundary)
data.append(text.encode('utf-8'))
data.append('--%s--\n'%boundary)
body='\r\n'.join(data) #text in textCtrl is unicode
try:
conn=httplib.HTTPConnection(self.host)
conn.request(method="POST",url="/modify",body=body);
response=conn.getresponse();
if response.status==200: #302 etc
#self.richText.SetValue(response)
print '发布成功!^_^!';
wx.MessageBox('修改已保存至云端!', '恭喜')
else:
wx.MessageBox('请检查您的网络', '上传失败')
print "发布失败\^0^/"
conn.close()
except Exception,e:
wx.MessageBox('请检查您的网络', '网络连接出错')
print 'http error:',e
#self.Hide()
def OnCancel(self,event):
pass
def readConfigure(self):
try:
fh=open('server.conf')
size=len(fh.read())
fh.seek(0)
while(fh.tell()!=size):
data=fh.readline()
if(data[:4] == 'addr'):
self.host=data[5:].strip()#ip or domain,include port
elif(data[:7]=='catalog'):
self.catalogText.SetValue(data[8:].strip())
elif(data[:2]=='id'):
self.user=data[3:].strip()
elif(data[:2]=='pw'):
self.pw=data[3:].strip()
fh.close()
except:
self.host='configuration not found!'
def ReadFile(self,filepath):
if filepath:
try:
fh = open(filepath, 'r')
data = fh.read()
fh.close()
self.richText.SetValue(data)
except :
wx.MessageBox("%s is not a expected file."
% filepath, "error tip",
style = wx.OK | wx.ICON_EXCLAMATION)
def OnOpenFile(self,event):
file_wildcard="All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Open file...", style = wx.OPEN,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.ReadFile(filename)
dlg.Destroy()
def SaveFile(self,filepath):
text=self.richText.GetValue()
fh=open(filepath,'w')
fh.write( | SaveAs(self, event):
# 弹出文件保存对话框
file_wildcard="txt files(*.txt)|*.txt|All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Save file as ...", style = wx.SAVE | wx.OVERWRITE_PROMPT,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath().encode('utf-8')
#if not os.path.splitext(filename)[1]: #如果没有文件名后缀
# filename = filename + '.txt'
self.SaveFile(filename)
#self.SetTitle(self.title + '--' + self.savefilename)
dlg.Destroy()
def OnSet(self,event):
set_win = Setting(size=(476, 280)) #640,480 #1.618:1
set_win.Centre()
set_win.Show()
def OnEnterWin(self, evt):
#print 'on enter win'
text_obj = wx.TextDataObject()
if self.clip.IsOpened() or self.clip.Open():
if self.clip.GetData(text_obj):
text_str=text_obj.GetText()
#print 'get text from clipboard',text_str
#check if the text is formal URL
if text_str !='' and re.match(r'^https?:/{2}\w.+$', text_str): #OK
#compare with the URL in input
old_url=self.urlText.GetValue().strip()
if text_str !=old_url :
self.urlText.SetValue(text_str)
# dlg = MsgDialog('URL已粘贴到输入框', '提示', ttl=2)
# dlg.ShowModal()
self.clip.Close()
def showUp(self):
#app = wx.PySimpleApp()
self.Centre()
self.Show() #可以让它设置是否在程序启动时一起显示出来
#app.MainLoop()
class Setting(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='设置', pos=wx.DefaultPosition,
size=wx.DEFAULT, style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
panel = wx.Panel(self, wx.ID_ANY)
ipLabel = wx.StaticText(panel, -1, "服务器:")
ipLabel.SetForegroundColour('blue')
self.ipText = wx.TextCtrl(panel, -1, "192.168.1.5",size=(250, 38)) #文本控件
portLabel = wx.StaticText(panel, -1, "端口 号:")
portLabel.SetForegroundColour('blue')
self.portText = wx.TextCtrl(panel, -1, "1366",size=(200, 30))
self.portText.SetInsertionPoint(0)
self.ipText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
button1 = wx.Button(panel, wx.ID_ANY, '保存')
button2 = wx.Button(panel, wx.ID_ANY, '取消')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnSaveConf, button1)
self.Bind(wx.EVT_BUTTON, self.OnCancel, button2)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(ipLabel,flag=wx.LEFT,border=8)
hbox1.Add(self.ipText,proportion=1)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(portLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.portText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10))
hbox3.Add(catalogLabel,flag=wx.LEFT,border=8)
hbox3.Add(self.catalogText,proportion=1)
vbox.Add(hbox3,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 50))
hbox4.Add(button1,flag=wx.LEFT,border=18)
hbox4.Add(button2,flag=wx.ALIGN_LEFT|wx.LEFT,border=8)
        vbox.Add(hbox4,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,border=10)# wx.BOTTOM (wx.Bottom is not a valid flag)
panel.SetSizer(vbox)
self.loadConf() #加载配置文件,显示在界面
    def loadConf(self):
        host=''
        port=''
        self.user=''
        self.pw=''
        fh=open('server.conf')
        size=len(fh.read())
        fh.seek(0)
        while(fh.tell()!=size):
            data=fh.readline()
            if(data[:4] == 'addr'):
                host=data[5:].strip()#ip or domain, including port
            elif(data[:7]=='catalog'):
                self.catalogText.SetValue(data[8:].strip())
            elif(data[:2]=='id'):
                self.user=data[3:].strip()# keep id/pw so OnSaveConf can write them back
            elif(data[:2]=='pw'):
                self.pw=data[3:].strip()
        fh.close()
        if ':' in host:
            host,port=host.split(':',1)
        self.ipText.SetValue(host)
        self.portText.SetValue(port)
def OnSaveConf(self,event):
host=self.ipText.GetValue()
port=self.portText.GetValue()
catalog=self.catalogText.GetValue()
fh=open('server.conf','w')
fh.write('#pageSlimer server configuration\n#addr=(ip/domain):(port)\n')
fh.write('addr=%s:%s'%(host,port))#will not write \n to the end
fh.write('\ncatalog=%s'%catalog)
fh.write('\nid=%s'%self.user)
fh.write('\npw=%s'%self.pw)
fh.close()
self.Destroy()
def OnCancel(self,event):
self.Destroy()
#self.Hide()
#pass
def TestFrame():
app = wx.App() #wx.PySimpleApp()
frame = MainFrame(size=(776, 480)) #640,480 #1.618:1
frame.Centre()
frame.Show() #可以让它设置是否在程序启动时一起显示出来
#frame.OnHide(wx.Frame) #让它启动时隐藏
app.MainLoop()
if __name__ == '__main__':
#src=urllib2.urlopen('http://127.0.0.1:1366/doslim?url=http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html&dir=default&name=default')
# text=src.read()
# src.close()
# print 'text:',text[0:40]
TestFrame()
| text)
fh.close()
def On | conditional_block |
client.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
import urllib2
import urllib
import time
import re
import httplib
import sys
import os
import socket
import cStringIO
#import io
from PIL import Image # don't use a bare "import Image"; it may resolve to a different module
socket.setdefaulttimeout(15) #global
reload(sys)
sys.setdefaultencoding('utf-8')
# without setting the default encoding, unicode handling gets messy; on Windows, just use u'xxx'
from gui import MsgDialog
import wx.richtext as rt
class MainFrame(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='网文快存', pos=wx.DefaultPosition,
size=(776, 480), style=wx.DEFAULT_FRAME_STYLE):#size=wx.DEFAULT
wx.Frame.__init__(self, parent, id, title, pos, size, style)
self.SetIcon(wx.Icon('eye.png', wx.BITMAP_TYPE_PNG)) #loadIcon.ico
menubar=wx.MenuBar()
file_menu=wx.Menu()
help_menu=wx.Menu()
set_menu=wx.Menu()
file_menu.Append(101,'&Open','Open a new document')
file_menu.Append(102,'&Save','Save the document')
file_menu.AppendSeparator()
quit=wx.MenuItem(file_menu,105,'&Quit\tCtrl+Q','Quit the Application')
#quit.SetBitmap(wx.Image('stock_exit-16.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap())
file_menu.AppendItem(quit)
help_menu.Append(103,'&Help','Seek help')
set_menu.Append(104,'&设置','set profiles')
menubar.Append(file_menu,'&File')
menubar.Append(help_menu,'&Help')
menubar.Append(set_menu,'&Setting')
self.SetMenuBar( menubar )
wx.EVT_MENU(self, 105, self.OnQuit)
wx.EVT_MENU(self, 101, self.OnOpenFile)
wx.EVT_MENU(self, 102, self.OnSaveAs)
wx.EVT_MENU(self, 103, self.OnHelp)
wx.EVT_MENU(self, 104, self.OnSet)
panel = wx.Panel(self, wx.ID_ANY)
button1 = wx.Button(panel, wx.ID_ANY, '快取')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnGet, button1)
button2 = wx.Button(panel, wx.ID_ANY, '退出') #
self.Bind(wx.EVT_BUTTON, self.OnQuit, button2)
button3 = wx.Button(panel, wx.ID_ANY, '保存修改') #
self.Bind(wx.EVT_BUTTON, self.OnSave2server, button3)
button2.SetBackgroundColour("gray")
button2.SetForegroundColour("Red")
button3.SetBackgroundColour("gray")
button3.SetForegroundColour("Navy")
self.urlButton = wx.Button(panel, wx.ID_ANY, 'URL:',size=(50,36))
self.urlButton.SetForegroundColour('blue')
self.Bind(wx.EVT_BUTTON, self.OnClear, self.urlButton)
self.Bind(wx.EVT_CLOSE, self.OnQuit)#因该是点击x按钮关闭时调用
self.Bind(wx.EVT_ICONIZE, self.OnIconfiy) # What is the meaning?设置缩小到底部任务栏和恢复时做的动作,比如发出提示,声音等
#urlLabel = wx.StaticText(panel, -1, "URL:")
default_url='http://192.168.1.6/test.html'
#"http://www.weixinxi.wang/blog/aitrcle.html?id=9";
#"http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html"
self.urlText = wx.TextCtrl(panel, -1, default_url,size=(250, 38), style=wx.TE_MULTILINE) #创建一个文本控件
titleLabel = wx.StaticText(panel, -1, "标题:")
titleLabel.SetForegroundColour('blue')
self.titleText = wx.TextCtrl(panel, -1, "",size=(200, 30))
self.titleText.SetInsertionPoint(0)
self.urlText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
richTextLabel = wx.StaticText(panel, -1, "正文(可编辑):")
#self.richText = wx.TextCtrl(panel, -1, "\n\n\n\n\t\t\t\t\t\t\t\t\t^_^",style=wx.TE_MULTILINE|wx.TE_RICH2) #创建丰富文本控件
self.richText = rt.RichTextCtrl(panel, -1)
#self.richText.SetInsertionPoint(0)
self.richText.SetForegroundColour('blue')
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
#self.richText.Newline()
#self.richText.SetDefaultStyle(wx.TextAttr("blue")) #设置文本样式,从1到4的字符,前景色,背景色
#points = self.richText.GetFont().GetPointSize()
#f = wx.Font(points + 3, wx.ROMAN, wx.ITALIC, wx.BOLD, True) #创建一个字体
#self.richText.SetStyle(68, 82, wx.TextAttr("blue", wx.NullColour, f)) #用新字体设置样式
#sizer = wx.FlexGridSizer(cols=3, hgap=6, vgap=6)
#sizer = wx.BoxSizer(wx.HORIZONTAL) #wx.VERTICAL
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.urlButton,flag=wx.LEFT)
hbox1.Add(self.urlText,proportion=1)
hbox1.Add(button1,flag=wx.LEFT,border=8)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(titleLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.titleText,proportion=2)
hbox2.Add(catalogLabel,flag=wx.ALIGN_LEFT,border=8)
hbox2.Add(self.catalogText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10)) #25px space
vbox.Add(richTextLabel,flag=wx.LEFT|wx.ALIGN_LEFT,border=18)
vbox.Add((-1, 10))
hbox3.Add(self.richText,proportion=1, flag=wx.EXPAND)
vbox.Add(hbox3, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=10)
vbox.Add((-1, 10))
hbox4.Add(button3,flag=wx.ALIGN_RIGHT|wx.RIGHT,border=10) #save
hbox4.Add(button2,wx.RIGHT,border=10) #exit
vbox.Add(hbox4,flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM,border=10)
#sizer.Add(button1, 0) #0表示比例
#sizer.Add(button2, 3)
#sizer.Add(button3, 5,wx.BOTTOM|wx.LEFT,wx.ALIGN_BOTTOM)
#sizer.Add(button4, 5,wx.RIGHT|wx.BOTTOM,wx.ALIGN_BOTTOM)
#panel.SetSizer(sizer)
#sizer.AddMany([urlLabel, self.urlText,button1,titleLabel,self.titleText,-1 ,richTextLabel,self.richText,-1])
panel.SetSizer(vbox)
        self.clip = wx.TheClipboard # system clipboard; oddly, using wx.Clipboard() here does not work correctly
        #http://www.wxpython.org/docs/api/wx.Clipboard-class.html
        # check the clipboard for a new URL when the window is clicked anywhere, or on repaint via wx.EVT_PAINT
        #panel.Bind(wx.EVT_LEFT_DOWN, self.OnClickCheck)# works for the panel but, for some reason, not for the frame, so instead:
self.Bind(wx.EVT_ENTER_WINDOW,self.OnEnterWin)
self.host=''
self.filename=''
self.user=''
self.pw=''
self.readConfigure()
def OnHide(self, event):
self.Hide()
def OnGet(self, event):
url=self.urlText.GetValue().strip()
catalog=self.catalogText.GetValue().strip()
#the dir and name indicate where to save in the server
if(url==''):
wx.MessageBox('您还没输入网址','^0^')
return
try:
src=urllib.urlopen('http://'+self.host+'/doslim?url='+url+'&dir='+catalog+'&name=default'+'&uid='+self.user)
            #strangely, urllib2.urlopen does not work properly at times; is it because the server I wrote has a problem sending packet headers?
text=src.read()
src.close()
#print 'text:',text[0:40]
# flist=re.findall(r'^filename:(.*?)\n',text)
nm=re.search(r'(?<=filename:).+?(?=$)',text,re.M)
if nm!=None:
self.filename=nm.group(0)
# print 'filename(0):%s<<<'%self.filename[0]
self.filename=self.filename.strip()
print 'read size:',len(text)
print 'get filename:',self.filename #逗号变成空格
# text=re.sub('^filename:%s'%self.filename,'',text)
text=text.replace('filename:%s'%self.filename,'')
self.titleText.SetValue(self.filename)
self.showContent(text.strip()) #content text has some format such as URL
except Exception,e:
print e
wx.MessageBox('请检查您的网络', '网络连接出错')
    def showContent(self,content):# parse special markup in the content (e.g. image references) and render it into the rich text box
        #[[<img src="/image/imgFilename">]]; the server-side path should be /uid/category/image/filename
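        # A content line carrying an image is assumed to look like (hypothetical filename):
        #   ##<img src="/image/photo1.jpg"/>##
        # which is then fetched from http://<host>/dl?<uid>/<catalog>/image/photo1.jpg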
#self.richText.WriteText(content)
#self.richText.WriteText('-------------------\n')
self.richText.SetValue('')
lines=content.split('\n')
for ln in lines:
if ln.find('##<img src=') >=0:
print ln
pat=re.compile(r'##<img src="(.*?)"/>##')
try:
img_src=pat.findall(ln)[0]
print 'find img_src:',img_src
catalog=self.catalogText.GetValue().strip()
url='http://'+self.host+'/dl?'+self.user+'/'+catalog+img_src
img_str=urllib2.urlopen(url).read() #type str
print 'size:',len(img_str)
image_i = cStringIO.StringIO(img_str)
# print 'type of image_file:',type(image_file)
pil_image=Image.open(image_i)
wx_img=self.PILToWX(pil_image)
self.richText.WriteImage(wx_img)
# self.richText.AddImage(image)
except Exception,e:
print e
else :
self.richText.WriteText(ln)#AppendText(ln)
self.richText.Newline()
#self.richText.SetValue(content)
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
def PILToWX(self, pil_image):
#"convert a PIL imageto a wxImage"
if pil_image.mode != 'RGB': # SetData() requires an RGB image
pil_image = pil_image.convert('RGB')
imageData = pil_image.tostring('raw', 'RGB')
imageWx = wx.EmptyImage(pil_image.size[0], pil_image.size[1])
imageWx.SetData(imageData)
return imageWx
#bitmap = wx.BitmapFromImage(image)
def OnIconfiy(self, event):
wx.MessageBox('好好学习,天天向上!', '*送你一句良言*')
event.Skip()
def OnClear(self,event):
self.urlText.Clear()
def OnHelp(self,event):
wx.MessageBox('1.复制粘帖网址到输入框,点击获取即可,内容会保存到云端\n2.您可以对获取到的内容进行编辑并重新保存至服务端\n3.您还可以导入导出文本文件', '*使用帮助*')
def OnQuit(self, event):
self.Destroy() #or close()
def OnSave2server(self, event):
text=self.richText.GetValue()
catalog=self.catalogText.GetValue().strip()
        if not text or not catalog:# GetValue() returns '' when empty, never None
wx.MessageBox('不能为空', '上传失败')
return
boundary='---------%s'%hex(int(time.time()*1000))
data=[] #a list
# data.append('\r\n')
data.append('--%s'%boundary)
data.append('uid=%s'%self.user)#username uid
data.append('dir=%s'%catalog)#= not : in my server
# print 'append data name:',self.filename
data.append('filename=%s'%self.filename)
data.append('\n')#因为是自己写的服务端,所以构造的这些数据比较随意了,按服务端的要求来写
data.append('%s'%(time.asctime()))#列表在转换为字符串后会在每一项后面加换行
#ignore the first line:filename
# body=''.join(data)
# body=body.join('%s'%content)
# body=body.join('\n--%s--\n'%boundary)
data.append(text.encode('utf-8'))
data.append('--%s--\n'%boundary)
body='\r\n'.join(data) #text in textCtrl is unicode
try:
conn=httplib.HTTPConnection(self.host)
conn.request(method="POST",url="/modify",body=body);
response=conn.getresponse();
if response.status==200: #302 etc
#self.richText.SetValue(response)
print '发布成功!^_^!';
wx.MessageBox('修改已保存至云端!', '恭喜')
else:
wx.MessageBox('请检查您的网络', '上传失败')
print "发布失败\^0^/"
conn.close()
except Exception,e:
wx.MessageBox('请检查您的网络', '网络连接出错')
print 'http error:',e
#self.Hide()
def OnCancel(self,event):
pass
def readConfigure(self):
try:
fh=open('server.conf')
size=len(fh.read())
fh.seek(0)
while(fh.tell()!=size):
data=fh.readline()
if(data[:4] == 'addr'):
self.host=data[5:].strip()#ip or domain,include port
elif(data[:7]=='catalog'):
self.catalogText.SetValue(data[8:].strip())
elif(data[:2]=='id'):
self.user=data[3:].strip()
elif(data[:2]=='pw'):
self.pw=data[3:].strip()
fh.close()
except:
self.host='configuration not found!'
def ReadFile(self,filepath):
if filepath:
try:
fh = open(filepath, 'r')
data = fh.read()
fh.close()
self.richText.SetValue(data)
except :
wx.MessageBox("%s is not a expected file."
% filepath, "error tip",
style = wx.OK | wx.ICON_EXCLAMATION)
def OnOpenFile(self,event):
file_wildcard="All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Open file...", style = wx.OPEN,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.ReadFile(filename)
dlg.Destroy()
def SaveFile(self,filepath):
text=self.richText.GetValue()
fh=open(filepath,'w')
fh.write(text)
fh.close()
def OnSaveAs(self, event):
# 弹出文件保存对话框
file_wildcard="txt files(*.txt)|*.txt|All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Save file as ...", style = wx.SAVE | wx.OVERWRITE_PROMPT,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath().encode('utf-8')
#if not os.path.splitext(filename)[1]: #如果没有文件名后缀
# filename = filename + '.txt'
self.SaveFile(filename)
#self.SetTitle(self.title + '--' + self.savefilename)
dlg.Destroy()
def OnSet(self,event):
set_win = Setting(size=(476, 280)) #640,480 #1.618:1
set_win.Centre()
set_win.Show()
def OnEnterWin(self, evt):
#print 'on enter win'
text_obj = wx.TextDataObject()
if self.clip.IsOpened() or self.clip.Open():
if self.clip.GetData(text_obj):
text_str=text_obj.GetText()
#print 'get text from clipboard',text_str
#check if the text is formal URL
if text_str !='' and re.match(r'^https?:/{2}\w.+$', text_str): #OK
#compare with the URL in input
old_url=self.urlText.GetValue().strip()
if text_str !=old_url :
self.urlText.SetValue(text_str)
# dlg = MsgDialog('URL已粘贴到输入框', '提示', ttl=2)
# dlg.ShowModal()
self.clip.Close()
def showUp(self):
#app = wx.PySimpleApp()
self.Centre()
self.Show() #可以让它设置是否在程序启动时一起显示出来
#app.MainLoop()
class Setting(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='设置', pos=wx.DefaultPosition,
size=wx.DEFAULT, style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
panel = wx.Panel(self, wx.ID_ANY)
ipLabel = wx.StaticText(panel, -1, "服务器:")
ipLabel.SetForegroundColour('blue')
self.ipText = wx.TextCtrl(panel, -1, "192.168.1.5",size=(250, 38)) #文本控件
portLabel = wx.StaticText(panel, -1, "端口 号:")
portLabel.SetForegroundColour('blue')
self.portText = wx.TextCtrl(panel, -1, "1366",size=(200, 30))
self.portText.SetInsertionPoint(0)
self.ipText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
button1 = wx.Button(panel, wx.ID_ANY, '保存')
button2 = wx.Button(panel, wx.ID_ANY, '取消')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnSaveConf, button1)
self.Bind(wx.EVT_BUTTON, self.OnCancel, button2)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(ipLabel,flag=wx.LEFT,border=8)
hbox1.Add(self.ipText,proportion=1)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(portLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.portText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10))
hbox3.Add(catalogLabel,flag=wx.LEFT,border=8)
hbox3.Add(self.catalogText,proportion=1)
vbox.Add(hbox3,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10) | hbox4.Add(button2,flag=wx.ALIGN_LEFT|wx.LEFT,border=8)
        vbox.Add(hbox4,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,border=10)# wx.BOTTOM (wx.Bottom is not a valid flag)
panel.SetSizer(vbox)
self.loadConf() #加载配置文件,显示在界面
    def loadConf(self):
        host=''
        port=''
        self.user=''
        self.pw=''
        fh=open('server.conf')
        size=len(fh.read())
        fh.seek(0)
        while(fh.tell()!=size):
            data=fh.readline()
            if(data[:4] == 'addr'):
                host=data[5:].strip()#ip or domain, including port
            elif(data[:7]=='catalog'):
                self.catalogText.SetValue(data[8:].strip())
            elif(data[:2]=='id'):
                self.user=data[3:].strip()# keep id/pw so OnSaveConf can write them back
            elif(data[:2]=='pw'):
                self.pw=data[3:].strip()
        fh.close()
        if ':' in host:
            host,port=host.split(':',1)
        self.ipText.SetValue(host)
        self.portText.SetValue(port)
def OnSaveConf(self,event):
host=self.ipText.GetValue()
port=self.portText.GetValue()
catalog=self.catalogText.GetValue()
fh=open('server.conf','w')
fh.write('#pageSlimer server configuration\n#addr=(ip/domain):(port)\n')
fh.write('addr=%s:%s'%(host,port))#will not write \n to the end
fh.write('\ncatalog=%s'%catalog)
fh.write('\nid=%s'%self.user)
fh.write('\npw=%s'%self.pw)
fh.close()
self.Destroy()
def OnCancel(self,event):
self.Destroy()
#self.Hide()
#pass
def TestFrame():
app = wx.App() #wx.PySimpleApp()
frame = MainFrame(size=(776, 480)) #640,480 #1.618:1
frame.Centre()
frame.Show() #可以让它设置是否在程序启动时一起显示出来
#frame.OnHide(wx.Frame) #让它启动时隐藏
app.MainLoop()
if __name__ == '__main__':
#src=urllib2.urlopen('http://127.0.0.1:1366/doslim?url=http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html&dir=default&name=default')
# text=src.read()
# src.close()
# print 'text:',text[0:40]
TestFrame() | vbox.Add((-1, 50))
hbox4.Add(button1,flag=wx.LEFT,border=18) | random_line_split |
client.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
import urllib2
import urllib
import time
import re
import httplib
import sys
import os
import socket
import cStringIO
#import io
from PIL import Image # don't use a bare "import Image"; it may resolve to a different module
socket.setdefaulttimeout(15) #global
reload(sys)
sys.setdefaultencoding('utf-8')
# without setting the default encoding, unicode handling gets messy; on Windows, just use u'xxx'
from gui import MsgDialog
import wx.richtext as rt
class MainFrame(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='网文快存', pos=wx.DefaultPosition,
size=(776, 480), style=wx.DEFAULT_FRAME_STYLE):#size=wx.DEFAULT
wx.Frame.__init__(self, parent, id, title, pos, size, style)
self.SetIcon(wx.Icon('eye.png', wx.BITMAP_TYPE_PNG)) #loadIcon.ico
menubar=wx.MenuBar()
file_menu=wx.Menu()
help_menu=wx.Menu()
set_menu=wx.Menu()
file_menu.Append(101,'&Open','Open a new document')
file_menu.Append(102,'&Save','Save the document')
file_menu.AppendSeparator()
quit=wx.MenuItem(file_menu,105,'&Quit\tCtrl+Q','Quit the Application')
#quit.SetBitmap(wx.Image('stock_exit-16.png', wx.BITMAP_TYPE_PNG).ConvertToBitmap())
file_menu.AppendItem(quit)
help_menu.Append(103,'&Help','Seek help')
set_menu.Append(104,'&设置','set profiles')
menubar.Append(file_menu,'&File')
menubar.Append(help_menu,'&Help')
menubar.Append(set_menu,'&Setting')
self.SetMenuBar( menubar )
wx.EVT_MENU(self, 105, self.OnQuit)
wx.EVT_MENU(self, 101, self.OnOpenFile)
wx.EVT_MENU(self, 102, self.OnSaveAs)
wx.EVT_MENU(self, 103, self.OnHelp)
wx.EVT_MENU(self, 104, self.OnSet)
panel = wx.Panel(self, wx.ID_ANY)
button1 = wx.Button(panel, wx.ID_ANY, '快取')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnGet, button1)
button2 = wx.Button(panel, wx.ID_ANY, '退出') #
self.Bind(wx.EVT_BUTTON, self.OnQuit, button2)
button3 = wx.Button(panel, wx.ID_ANY, '保存修改') #
self.Bind(wx.EVT_BUTTON, self.OnSave2server, button3)
button2.SetBackgroundColour("gray")
button2.SetForegroundColour("Red")
button3.SetBackgroundColour("gray")
button3.SetForegroundColour("Navy")
self.urlButton = wx.Button(panel, wx.ID_ANY, 'URL:',size=(50,36))
self.urlButton.SetForegroundColour('blue')
self.Bind(wx.EVT_BUTTON, self.OnClear, self.urlButton)
self.Bind(wx.EVT_CLOSE, self.OnQuit)#因该是点击x按钮关闭时调用
self.Bind(wx.EVT_ICONIZE, self.OnIconfiy) # What is the meaning?设置缩小到底部任务栏和恢复时做的动作,比如发出提示,声音等
#urlLabel = wx.StaticText(panel, -1, "URL:")
default_url='http://192.168.1.6/test.html'
#"http://www.weixinxi.wang/blog/aitrcle.html?id=9";
#"http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html"
self.urlText = wx.TextCtrl(panel, -1, default_url,size=(250, 38), style=wx.TE_MULTILINE) #创建一个文本控件
titleLabel = wx.StaticText(panel, -1, "标题:")
titleLabel.SetForegroundColour('blue')
self.titleText = wx.TextCtrl(panel, -1, "",size=(200, 30))
self.titleText.SetInsertionPoint(0)
self.urlText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
richTextLabel = wx.StaticText(panel, -1, "正文(可编辑):")
#self.richText = wx.TextCtrl(panel, -1, "\n\n\n\n\t\t\t\t\t\t\t\t\t^_^",style=wx.TE_MULTILINE|wx.TE_RICH2) #创建丰富文本控件
self.richText = rt.RichTextCtrl(panel, -1)
#self.richText.SetInsertionPoint(0)
self.richText.SetForegroundColour('blue')
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
#self.richText.Newline()
#self.richText.SetDefaultStyle(wx.TextAttr("blue")) #设置文本样式,从1到4的字符,前景色,背景色
#points = self.richText.GetFont().GetPointSize()
#f = wx.Font(points + 3, wx.ROMAN, wx.ITALIC, wx.BOLD, True) #创建一个字体
#self.richText.SetStyle(68, 82, wx.TextAttr("blue", wx.NullColour, f)) #用新字体设置样式
#sizer = wx.FlexGridSizer(cols=3, hgap=6, vgap=6)
#sizer = wx.BoxSizer(wx.HORIZONTAL) #wx.VERTICAL
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.urlButton,flag=wx.LEFT)
hbox1.Add(self.urlText,proportion=1)
hbox1.Add(button1,flag=wx.LEFT,border=8)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(titleLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.titleText,proportion=2)
hbox2.Add(catalogLabel,flag=wx.ALIGN_LEFT,border=8)
hbox2.Add(self.catalogText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10)) #25px space
vbox.Add(richTextLabel,flag=wx.LEFT|wx.ALIGN_LEFT,border=18)
vbox.Add((-1, 10))
hbox3.Add(self.richText,proportion=1, flag=wx.EXPAND)
vbox.Add(hbox3, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=10)
vbox.Add((-1, 10))
hbox4.Add(button3,flag=wx.ALIGN_RIGHT|wx.RIGHT,border=10) #save
hbox4.Add(button2,wx.RIGHT,border=10) #exit
vbox.Add(hbox4,flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM,border=10)
#sizer.Add(button1, 0) #0表示比例
#sizer.Add(button2, 3)
#sizer.Add(button3, 5,wx.BOTTOM|wx.LEFT,wx.ALIGN_BOTTOM)
#sizer.Add(button4, 5,wx.RIGHT|wx.BOTTOM,wx.ALIGN_BOTTOM)
#panel.SetSizer(sizer)
#sizer.AddMany([urlLabel, self.urlText,button1,titleLabel,self.titleText,-1 ,richTextLabel,self.richText,-1])
panel.SetSizer(vbox)
        self.clip = wx.TheClipboard # system clipboard; oddly, using wx.Clipboard() here does not work correctly
        #http://www.wxpython.org/docs/api/wx.Clipboard-class.html
        # check the clipboard for a new URL when the window is clicked anywhere, or on repaint via wx.EVT_PAINT
        #panel.Bind(wx.EVT_LEFT_DOWN, self.OnClickCheck)# works for the panel but, for some reason, not for the frame, so instead:
self.Bind(wx.EVT_ENTER_WINDOW,self.OnEnterWin)
self.host=''
self.filename=''
self.user=''
self.pw=''
self.readConfigure()
def OnHide(self, event):
self.Hide()
def OnGet(self, event):
url=self.urlText.GetValue().strip()
catalog=self.catalogText.GetValue().strip()
#the dir and name indicate where to save in the server
if(url==''):
wx.MessageBox('您还没输入网址','^0^')
return
try:
src=urllib.urlopen('http://'+self.host+'/doslim?url='+url+'&dir='+catalog+'&name=default'+'&uid='+self.user)
            #strangely, urllib2.urlopen does not work properly at times; is it because the server I wrote has a problem sending packet headers?
text=src.read()
src.close()
#print 'text:',text[0:40]
# flist=re.findall(r'^filename:(.*?)\n',text)
nm=re.search(r'(?<=filename:).+?(?=$)',text,re.M)
if nm!=None:
self.filename=nm.group(0)
# print 'filename(0):%s<<<'%self.filename[0]
self.filename=self.filename.strip()
print 'read size:',len(text)
print 'get filename:',self.filename #逗号变成空格
# text=re.sub('^filename:%s'%self.filename,'',text)
text=text.replace('filename:%s'%self.filename,'')
self.titleText.SetValue(self.filename)
self.showContent(text.strip()) #content text has some format such as URL
except Exception,e:
print e
wx.MessageBox('请检查您的网络', '网络连接出错')
    def showContent(self,content):# parse special markup in the content (e.g. image references) and render it into the rich text box
        #[[<img src="/image/imgFilename">]]; the server-side path should be /uid/category/image/filename
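        # A content line carrying an image is assumed to look like (hypothetical filename):
        #   ##<img src="/image/photo1.jpg"/>##
        # which is then fetched from http://<host>/dl?<uid>/<catalog>/image/photo1.jpg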
#self.richText.WriteText(content)
#self.richText.WriteText('-------------------\n')
self.richText.SetValue('')
lines=content.split('\n')
for ln in lines:
if ln.find('##<img src=') >=0:
print ln
pat=re.compile(r'##<img src="(.*?)"/>##')
try:
img_src=pat.findall(ln)[0]
print 'find img_src:',img_src
catalog=self.catalogText.GetValue().strip()
url='http://'+self.host+'/dl?'+self.user+'/'+catalog+img_src
img_str=urllib2.urlopen(url).read() #type str
print 'size:',len(img_str)
image_i = cStringIO.StringIO(img_str)
# print 'type of image_file:',type(image_file)
pil_image=Image.open(image_i)
wx_img=self.PILToWX(pil_image)
self.richText.WriteImage(wx_img)
# self.richText.AddImage(image)
except Exception,e:
print e
else :
self.richText.WriteText(ln)#AppendText(ln)
self.richText.Newline()
#self.richText.SetValue(content)
#self.richText.WriteImage(wx.Image('../core/image/UF3ui2.jpg',wx.BITMAP_TYPE_ANY))
def PILToWX(self, pil_image):
#"convert a PIL imageto a wxImage"
if pil_image.mode != 'RGB': # SetData() requires an RGB image
pil_image = pil_image.convert('RGB')
imageData = pil_image.tostring('raw', 'RGB')
imageWx = wx.EmptyImage(pil_image.size[0], pil_image.size[1])
imageWx.SetData(imageData)
return imageWx
#bitmap = wx.BitmapFromImage(image)
def OnIconfiy(self, event):
wx.MessageBox('好好学习,天天向上!', '*送你一句良言*')
event.Skip()
def OnClear(self,event):
self.urlText.Clear()
def OnHelp(self,event):
wx.MessageBox('1.复制粘帖网址到输入框,点击获取即可,内容会保存到云端\n2.您可以对获取到的内容进行编辑并重新保存至服务端\n3.您还可以导入导出文本文件', '*使用帮助*')
def OnQuit(self, event):
self.Destroy() #or close()
def OnSave2server(self, event):
text=self.richText.GetValue()
catalog=self.catalogText.GetValue().strip()
        if not text or not catalog:# GetValue() returns '' when empty, never None
wx.MessageBox('不能为空', '上传失败')
return
boundary='---------%s'%hex(int(time.time()*1000))
data=[] #a list
# data.append('\r\n')
data.append('--%s'%boundary)
data.append('uid=%s'%self.user)#username uid
data.append('dir=%s'%catalog)#= not : in my server
# print 'append data name:',self.filename
data.append('filename=%s'%self.filename)
data.append('\n')#因为是自己写的服务端,所以构造的这些数据比较随意了,按服务端的要求来写
data.append('%s'%(time.asctime()))#列表在转换为字符串后会在每一项后面加换行
#ignore the first line:filename
# body=''.join(data)
# body=body.join('%s'%content)
# body=body.join('\n--%s--\n'%boundary)
data.append(text.encode('utf-8'))
data.append('--%s--\n'%boundary)
body='\r\n'.join(data) #text in textCtrl is unicode
try:
conn=httplib.HTTPConnection(self.host)
conn.request(method="POST",url="/modify",body=body);
response=conn.getresponse();
if response.status==200: #302 etc
#self.richText.SetValue(response)
print '发布成功!^_^!';
wx.MessageBox('修改已保存至云端!', '恭喜')
else:
wx.MessageBox('请检查您的网络', '上传失败')
print "发布失败\^0^/"
conn.close()
except Exception,e:
wx.MessageBox('请检查您的网络', '网络连接出错')
print 'http error:',e
#self.Hide()
def OnCancel(self,event):
pass
def readConfigure(self):
try:
fh=open('server.conf')
size=len(fh.read())
fh.seek(0)
while(fh.tell()!=size):
data=fh.readline()
if(data[:4] == 'addr'):
self.host=data[5:].strip()#ip or domain,include port
elif(data[:7]=='catalog'):
self.catalogText.SetValue(data[8:].strip())
elif(data[:2]=='id'):
self.user=data[3:].strip()
elif(data[:2]=='pw'):
self.pw=data[3:].strip()
fh.close()
except:
self.host='configuration not found!'
def ReadFile(self,filepath):
if filepath:
try:
fh = open(filepath, 'r')
data = fh.read()
fh.close()
self.richText.SetValue(data)
except :
wx.MessageBox("%s is not a expected file."
% filepath, "error tip",
style = wx.OK | wx.ICON_EXCLAMATION)
def OnOpenFile(self,event):
file_wildcard="All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Open file...", style = wx.OPEN,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.ReadFile(filename)
dlg.Destroy()
def SaveFile(self,filepath):
text=self.richText.GetValue()
fh=open(filepath,'w')
fh.write(text)
fh.close()
def OnSaveAs(self, event):
# 弹出文件保存对话框
file_wildcard="txt files(*.txt)|*.txt|All files(*.*)|*.*"
dlg = wx.FileDialog(self,"Save file as ...", style = wx.SAVE | wx.OVERWRITE_PROMPT,wildcard = file_wildcard)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath().encode('utf-8')
#if not os.path.splitext(filename)[1]: #如果没有文件名后缀
# filename = filename + '.txt'
self.SaveFile(filename)
#self.SetTitle(self.title + '--' + self.savefilename)
dlg.Destroy()
def OnSet(self,event):
set_win = Setting(size=(476, 280)) #640,480 #1.618:1
set_win.Centre()
set_win.Show()
def OnEnterWin(self, evt):
#print 'on enter win'
text_obj = wx.TextDataObject()
if self.clip.IsOpened() or self.clip.Open():
if self.clip.GetData(text_obj):
text_str=text_obj.GetText()
#print 'get text from clipboard',text_str
#check if the text is formal URL
if text_str !='' and re.match(r'^https?:/{2}\w.+$', text_str): #OK
#compare with the URL in input
old_url=self.urlText.GetValue().strip()
if text_str !=old_url :
self.urlText.SetValue(text_str)
# dlg = MsgDialog('URL已粘贴到输入框', '提示', ttl=2)
# dlg.ShowModal()
self.clip.Close()
def showUp(self):
#app = wx.PySimpleApp()
self.Centre()
self.Show() #可以让它设置是否在程序启动时一起显示出来
#app.MainLoop()
class Setting(wx.Frame):
def __init__(
self, parent=None, id=wx.ID_ANY, title='设置', pos=wx.DefaultPosition,
size=wx.DEFAULT, style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
panel = wx.Panel(self, wx.ID_ANY)
ipLabel = wx.StaticText(panel, -1, "服务器:")
ipLabel.SetForegroundColour('blue')
self.ipText = wx.TextCtrl(panel, -1, "192.168.1.5",size=(250, 38)) #文本控件
portLabel = wx.StaticText(panel, -1, "端口 号:")
portLabel.SetForegroundColour('blue')
self.portText = wx.TextCtrl(panel, -1, "1366",size=(200, 30))
self.portText.SetInsertionPoint(0)
self.ipText.SetInsertionPoint(0)#设置插入点
catalogLabel = wx.StaticText(panel, -1, "归档目录:")
catalogLabel.SetForegroundColour('blue')
self.catalogText = wx.TextCtrl(panel, -1, "default",size=(200, 30))
button1 = wx.Button(panel, wx.ID_ANY, '保存')
button2 = wx.Button(panel, wx.ID_ANY, '取消')
button1.SetBackgroundColour("gray")
button1.SetForegroundColour("Navy")
self.Bind(wx.EVT_BUTTON, self.OnSa | button1)
self.Bind(wx.EVT_BUTTON, self.OnCancel, button2)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(ipLabel,flag=wx.LEFT,border=8)
hbox1.Add(self.ipText,proportion=1)
vbox.Add(hbox1,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,border=10)
vbox.Add((-1, 10))
hbox2.Add(portLabel,flag=wx.LEFT,border=8)
hbox2.Add(self.portText,proportion=1)
vbox.Add(hbox2,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 10))
hbox3.Add(catalogLabel,flag=wx.LEFT,border=8)
hbox3.Add(self.catalogText,proportion=1)
vbox.Add(hbox3,flag=wx.EXPAND|wx.LEFT|wx.RIGHT,border=10)
vbox.Add((-1, 50))
hbox4.Add(button1,flag=wx.LEFT,border=18)
hbox4.Add(button2,flag=wx.ALIGN_LEFT|wx.LEFT,border=8)
        vbox.Add(hbox4,flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,border=10)# wx.BOTTOM (wx.Bottom is not a valid flag)
panel.SetSizer(vbox)
self.loadConf() #加载配置文件,显示在界面
    def loadConf(self):
        host=''
        port=''
        self.user=''
        self.pw=''
        fh=open('server.conf')
        size=len(fh.read())
        fh.seek(0)
        while(fh.tell()!=size):
            data=fh.readline()
            if(data[:4] == 'addr'):
                host=data[5:].strip()#ip or domain, including port
            elif(data[:7]=='catalog'):
                self.catalogText.SetValue(data[8:].strip())
            elif(data[:2]=='id'):
                self.user=data[3:].strip()# keep id/pw so OnSaveConf can write them back
            elif(data[:2]=='pw'):
                self.pw=data[3:].strip()
        fh.close()
        if ':' in host:
            host,port=host.split(':',1)
        self.ipText.SetValue(host)
        self.portText.SetValue(port)
def OnSaveConf(self,event):
host=self.ipText.GetValue()
port=self.portText.GetValue()
catalog=self.catalogText.GetValue()
fh=open('server.conf','w')
fh.write('#pageSlimer server configuration\n#addr=(ip/domain):(port)\n')
fh.write('addr=%s:%s'%(host,port))#will not write \n to the end
fh.write('\ncatalog=%s'%catalog)
fh.write('\nid=%s'%self.user)
fh.write('\npw=%s'%self.pw)
fh.close()
self.Destroy()
def OnCancel(self,event):
self.Destroy()
#self.Hide()
#pass
def TestFrame():
app = wx.App() #wx.PySimpleApp()
frame = MainFrame(size=(776, 480)) #640,480 #1.618:1
frame.Centre()
frame.Show() #可以让它设置是否在程序启动时一起显示出来
#frame.OnHide(wx.Frame) #让它启动时隐藏
app.MainLoop()
if __name__ == '__main__':
#src=urllib2.urlopen('http://127.0.0.1:1366/doslim?url=http://www.linuxfan.com:1366/program/pageSlimer/linux-usage.html&dir=default&name=default')
# text=src.read()
# src.close()
# print 'text:',text[0:40]
TestFrame()
| veConf, | identifier_name |
ftp_manage.py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/14 7:36
import os
import time
import pickle
import hashlib
from conf import settings
from modules.files import Files
def get_md5(data, type="str"):
m = hashlib.md5()
if type == "str":
m.update(data.encode('utf-8'))
elif type == "file":
if not os.path.isfile(data):
return None
with open(data,'rb') as f:
for line in f:
m.update(line)
else:
exit("Type error !")
return m.hexdigest()
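# Example (illustrative): get_md5("abc") == '900150983cd24fb0d6963f7d28e17f72';
# get_md5("/path/to/file", type="file") returns that file's MD5 hex digest, or None if the path is not a file.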
def get_info(name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'rb') as user_obj:
user_info = pickle.load(user_obj)
return user_info
def save_info(user_info, name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'wb') as user_obj:
pickle.dump(user_info, user_obj)
# pickle.dump()
class FtpManage():
def __init__(self, conn):
# 读取数据库
# self.name = None
self.conn = conn
self.is_certified = False # 是否已经认证
self.current_path = None # 当前路径
# self.db_file = os.path.join(settings.db, name)
self.db_file = None
self.user_info = None
# 用户登陆
def login(self, request):
request = eval(request)
self.name = request['name']
password = request['password']
send_data = {}
send_data['action'] = 'login'
send_data['response'] = {}
if self.name in os.listdir(settings.db):
self.user_info = get_info(self.name)
# print(self.user_info.__dict__)
if self.user_info.password == password:
self.is_certified = True
self.current_path = self.user_info.current_path
send_data['response']['msg'] = "认证成功!"
else:
send_data['response']['msg'] = "认证失败!用户名密码错误!"
else:
send_data['response']['msg'] = "认证失败!无此用户!"
send_data['is_certified'] = self.is_certified
print(send_data)
# self.conn.send(pickle.dumps(send_data))
return pickle.dumps(send_data)
def pwd(self, request):
# print(request)
msg = "当前路径:%s" % self.current_path
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def mkdir(self, request):
print(request)
new_dir = request.split()[1]
abs_path = os.path.join(settings.BaseDir, self.current_path)
if new_dir in os.listdir(abs_path):
msg = "该目录名已经被占用!"
else:
os.makedirs(os.path.join(abs_path, new_dir))
msg = "目录【%s】创建成功" % new_dir
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def df(self, request):
print(self.user_info.__dict__)
space_info = self.user_info.space_info
print(space_info)
print("空间限额:【%s】MB 已使用空间:【%s】MB 剩余空间: 【%s】 MB" %(space_info[0], space_info[1], space_info[2]))
msg = {}
msg['quota'] = space_info[0]
msg['used_space'] = space_info[1]
msg['aviable_space'] = space_info[2]
return pickle.dumps(msg)
# 切换目录
def cd(self, request):
print(request)
if request == "cd":
self.current_path = '%s/%s' %(settings.Ftp_Base_Dir, self.name)
msg = "切换成功,当前目录:%s" % self.current_path
else:
to_path = request.split()[1]
current_path_list = self.current_path.split('\\')
if '/' in to_path:
new_path_list = to_path.split('/')
# print(new_path_list)
flag = True
while flag:
for path in new_path_list:
if path == '..':
tmp_path = current_path_list.pop()
# print(tmp_path)
# print(self.name)
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
flag = False
break
else:
current_path_list.append(path)
new_path = "\\".join(current_path_list)
break
if flag == True:
if os.path.isdir(os.path.join(settings.BaseDir, new_path)):
self.current_path = new_path
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
else:
pass
elif to_path == '..':
tmp_path = current_path_list.pop()
# if tmp_path == self.name:
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
else:
self.current_path = "\\".join(current_path_list)
msg = "切换成功,当前目录:%s" % self.current_path
else:
abs_path = os.path.join(settings.BaseDir, self.current_path)
# if to_path in os.listdir(abs_path):
if os.path.isdir(os.path.join(abs_path, to_path)):
self.current_path = os.path.join(self.current_path, to_path)
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
# self.conn.send(msg.encode('utf-8'))
self.user_info.current_path = self.current_path
return msg.encode('utf-8')
# 查看目录下文件
def ls(self, request):
print(request)
# print(settings.BaseDir)
# print(self.current_path)
abs_path = os.path.join(settings.BaseDir, self.current_path)
# print(abs_path)
files = os.listdir(abs_path)
if files:
print(files)
msg = "当前目录的文件情况如下:\n文件名 文件大小 创建时间 修改时间 类型:\n"
for file_name in files:
file = os.path.join(abs_path, file_name)
print(file)
file_size = os.path.getsize(file)
import time
create_time = time.strftime("%x %X", time.localtime(os.path.getctime(file)))
modify_time = time.strftime("%x %X", time.localtime(os.path.getmtime(file)))
file_type = "文件夹" if os.path.isdir(file) else "文件"
file_info = "【%s】 【%s】 【%s】 【%s】 【%s】 \n" % (file_name, file_size, create_time, modify_time, file_type)
print(file_info)
msg += file_info
else:
msg = "当前目录没有文件!"
print(msg)
# send_data = {}
# send_data['action'] = 'pwd'
# send_data['is_certified'] = self.is_certified
# send_data['response'] = {}
# send_data['response']['msg'] = msg
# print(send_data)
# self.conn.send(pickle.dumps(send_data))
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
#上传
def put(self, request):
"""
        2. On receiving the client's upload request, check whether the file already exists on the server by comparing MD5 values.
            2.1 If the MD5 values match, the file already exists.
            2.2 If they differ, tell the client the server-side file size; if the server MD5 is None, the file does not exist.
            2.3 Check whether server space is sufficient; if not, report that to the client in the same reply.
           send {"filename":'1.jpg', "server_file_md5":'123333', "filesize": 1111, "space_aviable": 2334556 }
        4. The server then receives the client's reply; if seek_size == -1, the upload is cancelled.
            4.1 If seek_size == 0, open the file in 'wb' mode.
            4.2 If seek_size > 0, open the file in 'ab' mode (resume a partial upload).
            4.3 Start receiving the client's data.
        6. When the transfer completes, return the MD5 check result for the received data.
"""
print(request)
filename = request.split()[-1]
recv_data = pickle.loads(self.conn.recv(8192))
if recv_data['status']:
abs_file_path = os.path.join(settings.BaseDir, self.current_path, filename)
server_file_md5 = get_md5(abs_file_path, "file")
if server_file_md5 == recv_data['file_md5']:
print("服务器已经有相同文件!")
send_msg = {"filename": filename,
"server_file_md5": server_file_md5 }
self.conn.send(pickle.dumps(send_msg))
else:
if server_file_md5:
filesize = os.path.getsize(abs_file_path)
else:
filesize = 0
space_aviable = pickle.loads(self.df(""))['aviable_space'] * 1024 * 1024 + filesize - recv_data['filesize']
send_msg = {"filename": filename,
"server_file_md5": server_file_md5,
"filesize": filesize,
"space_aviable": space_aviable }
self.conn.send(pickle.dumps(send_msg))
if space_aviable <= 0:
print("服务器空间不够")
else: #等待客户端响应
recv_data = pickle.loads(self.conn.recv(8192))
# print(recv_data)
if recv_data['seek_size'] == 0:
f = open(abs_file_path, 'wb')
else:
f = open(abs_file_path, 'ab')
# 开始接收数据
flag = True
while flag:
data = self.conn.recv(8192)
# print(data)
time.sleep(0.000001)
f.write(data)
if len(data)< 8192:
flag = False
f.close()
server_file_md5 = get_md5(abs_file_path, "file")
if recv_data['file_md5'] == server_file_md5:
print("传输完成,md5校验通过!")
send_msg['status'] = 1
else:
print("传输完成,md5校验失败!")
send_msg['status'] = 0
self.user_info.change_space_size(recv_data['filesize'] - filesize)
save_info(self.user_info, self.name )
self.conn.send(pickle.dumps(send_msg))
else: # 客户端没有对应的文件,则不做任何操作
pass
msg = ''
return msg.encode('utf-8')
#下载
def get(self, request):
# print(request)
filename = request.split()[-1]
abs_file = os.path.join(settings.BaseDir, self.current_path, filename)
if os.path.isfile(abs_file):
file_md5 = get_md5(abs_file, type='file')
file_size = os.path.getsize(abs_file)
# 判断文件是否存在
res = {"status":1, "msg":"准备就绪", "md5":file_md5, "file_si |
res2 = pickle.loads(self.conn.recv(8192))
# print(res2)
seek_place = res2['seek_place'] # 获取客户端让开始传输的位置
to_send_size = file_size - seek_place #需要发送的字节数
# print(to_send_size)
with open(abs_file,'rb') as f:
f.seek(seek_place)
while to_send_size > 0:
self.conn.send(f.read(8192))
time.sleep(0.000001)
to_send_size -= 8192
else:
return pickle.dumps({"status":-1,"msg":"文件不存在"})
msg = ''
return msg.encode('utf-8')
def logout(self,request):
# print(self.current_path)
save_info(self.user_info, self.name)
msg = ''
return msg.encode('utf-8')
if __name__ == "__main__":
pass | ze": file_size }
self.conn.send(pickle.dumps(res))
# 接收客户端开始传输的指令 | conditional_block |
ftp_manage.py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/14 7:36
import os
import time
import pickle
import hashlib
from conf import settings
from modules.files import Files
def get_md5(data, type="str"):
m = hashlib.md5()
if type == "str":
m.update(data.encode('utf-8'))
elif type == "file":
if not os.path.isfile(data):
return None
with open(data,'rb') as f:
for line in f:
m.update(line)
else:
exit("Type error !")
return m.hexdigest()
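# Example (illustrative): get_md5("abc") == '900150983cd24fb0d6963f7d28e17f72';
# get_md5("/path/to/file", type="file") returns that file's MD5 hex digest, or None if the path is not a file.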
def get_info(name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'rb') as user_obj:
user_info = pickle.load(user_obj)
return user_info
def save_info(user_info, name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'wb') as user_obj:
pickle.dump(user_info, user_obj)
# pickle.dump()
class FtpManage():
def __init__(self, conn):
# 读取数据库
# self.name = None
self.conn = conn
self.is_certified = False # 是否已经认证
self.current_path = None # 当前路径
# self.db_file = os.path.join(settings.db, name)
self.db_file = None
self.user_info = None
# 用户登陆
def login(self, request):
request = eval(request)
self.name = request['name']
password = request['password']
send_data = {}
send_data['action'] = 'login'
send_data['response'] = {}
if self.name in os.listdir(settings.db): | self.user_info = get_info(self.name)
# print(self.user_info.__dict__)
if self.user_info.password == password:
self.is_certified = True
self.current_path = self.user_info.current_path
send_data['response']['msg'] = "认证成功!"
else:
send_data['response']['msg'] = "认证失败!用户名密码错误!"
else:
send_data['response']['msg'] = "认证失败!无此用户!"
send_data['is_certified'] = self.is_certified
print(send_data)
# self.conn.send(pickle.dumps(send_data))
return pickle.dumps(send_data)
def pwd(self, request):
# print(request)
msg = "当前路径:%s" % self.current_path
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def mkdir(self, request):
print(request)
new_dir = request.split()[1]
abs_path = os.path.join(settings.BaseDir, self.current_path)
if new_dir in os.listdir(abs_path):
msg = "该目录名已经被占用!"
else:
os.makedirs(os.path.join(abs_path, new_dir))
msg = "目录【%s】创建成功" % new_dir
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def df(self, request):
print(self.user_info.__dict__)
space_info = self.user_info.space_info
print(space_info)
print("空间限额:【%s】MB 已使用空间:【%s】MB 剩余空间: 【%s】 MB" %(space_info[0], space_info[1], space_info[2]))
msg = {}
msg['quota'] = space_info[0]
msg['used_space'] = space_info[1]
msg['aviable_space'] = space_info[2]
return pickle.dumps(msg)
# 切换目录
def cd(self, request):
print(request)
if request == "cd":
self.current_path = '%s/%s' %(settings.Ftp_Base_Dir, self.name)
msg = "切换成功,当前目录:%s" % self.current_path
else:
to_path = request.split()[1]
current_path_list = self.current_path.split('\\')
if '/' in to_path:
new_path_list = to_path.split('/')
# print(new_path_list)
flag = True
while flag:
for path in new_path_list:
if path == '..':
tmp_path = current_path_list.pop()
# print(tmp_path)
# print(self.name)
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
flag = False
break
else:
current_path_list.append(path)
new_path = "\\".join(current_path_list)
break
if flag == True:
if os.path.isdir(os.path.join(settings.BaseDir, new_path)):
self.current_path = new_path
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
else:
pass
elif to_path == '..':
tmp_path = current_path_list.pop()
# if tmp_path == self.name:
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
else:
self.current_path = "\\".join(current_path_list)
msg = "切换成功,当前目录:%s" % self.current_path
else:
abs_path = os.path.join(settings.BaseDir, self.current_path)
# if to_path in os.listdir(abs_path):
if os.path.isdir(os.path.join(abs_path, to_path)):
self.current_path = os.path.join(self.current_path, to_path)
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
# self.conn.send(msg.encode('utf-8'))
self.user_info.current_path = self.current_path
return msg.encode('utf-8')
# 查看目录下文件
def ls(self, request):
print(request)
# print(settings.BaseDir)
# print(self.current_path)
abs_path = os.path.join(settings.BaseDir, self.current_path)
# print(abs_path)
files = os.listdir(abs_path)
if files:
print(files)
msg = "当前目录的文件情况如下:\n文件名 文件大小 创建时间 修改时间 类型:\n"
for file_name in files:
file = os.path.join(abs_path, file_name)
print(file)
file_size = os.path.getsize(file)
import time
create_time = time.strftime("%x %X", time.localtime(os.path.getctime(file)))
modify_time = time.strftime("%x %X", time.localtime(os.path.getmtime(file)))
file_type = "文件夹" if os.path.isdir(file) else "文件"
file_info = "【%s】 【%s】 【%s】 【%s】 【%s】 \n" % (file_name, file_size, create_time, modify_time, file_type)
print(file_info)
msg += file_info
else:
msg = "当前目录没有文件!"
print(msg)
# send_data = {}
# send_data['action'] = 'pwd'
# send_data['is_certified'] = self.is_certified
# send_data['response'] = {}
# send_data['response']['msg'] = msg
# print(send_data)
# self.conn.send(pickle.dumps(send_data))
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
#上传
def put(self, request):
"""
        2. On receiving the client's upload request, check whether the file already exists on the server by comparing MD5 values.
            2.1 If the MD5 values match, the file already exists.
            2.2 If they differ, tell the client the server-side file size; if the server MD5 is None, the file does not exist.
            2.3 Check whether server space is sufficient; if not, report that to the client in the same reply.
           send {"filename":'1.jpg', "server_file_md5":'123333', "filesize": 1111, "space_aviable": 2334556 }
        4. The server then receives the client's reply; if seek_size == -1, the upload is cancelled.
            4.1 If seek_size == 0, open the file in 'wb' mode.
            4.2 If seek_size > 0, open the file in 'ab' mode (resume a partial upload).
            4.3 Start receiving the client's data.
        6. When the transfer completes, return the MD5 check result for the received data.
"""
print(request)
filename = request.split()[-1]
recv_data = pickle.loads(self.conn.recv(8192))
if recv_data['status']:
abs_file_path = os.path.join(settings.BaseDir, self.current_path, filename)
server_file_md5 = get_md5(abs_file_path, "file")
if server_file_md5 == recv_data['file_md5']:
print("服务器已经有相同文件!")
send_msg = {"filename": filename,
"server_file_md5": server_file_md5 }
self.conn.send(pickle.dumps(send_msg))
else:
if server_file_md5:
filesize = os.path.getsize(abs_file_path)
else:
filesize = 0
space_aviable = pickle.loads(self.df(""))['aviable_space'] * 1024 * 1024 + filesize - recv_data['filesize']
send_msg = {"filename": filename,
"server_file_md5": server_file_md5,
"filesize": filesize,
"space_aviable": space_aviable }
self.conn.send(pickle.dumps(send_msg))
if space_aviable <= 0:
print("服务器空间不够")
else: #等待客户端响应
recv_data = pickle.loads(self.conn.recv(8192))
# print(recv_data)
if recv_data['seek_size'] == 0:
f = open(abs_file_path, 'wb')
else:
f = open(abs_file_path, 'ab')
# 开始接收数据
flag = True
while flag:
data = self.conn.recv(8192)
# print(data)
time.sleep(0.000001)
f.write(data)
if len(data)< 8192:
flag = False
f.close()
server_file_md5 = get_md5(abs_file_path, "file")
if recv_data['file_md5'] == server_file_md5:
print("传输完成,md5校验通过!")
send_msg['status'] = 1
else:
print("传输完成,md5校验失败!")
send_msg['status'] = 0
self.user_info.change_space_size(recv_data['filesize'] - filesize)
save_info(self.user_info, self.name )
self.conn.send(pickle.dumps(send_msg))
else: # 客户端没有对应的文件,则不做任何操作
pass
msg = ''
return msg.encode('utf-8')
#下载
def get(self, request):
# print(request)
filename = request.split()[-1]
abs_file = os.path.join(settings.BaseDir, self.current_path, filename)
if os.path.isfile(abs_file):
file_md5 = get_md5(abs_file, type='file')
file_size = os.path.getsize(abs_file)
# 判断文件是否存在
res = {"status":1, "msg":"准备就绪", "md5":file_md5, "file_size": file_size }
self.conn.send(pickle.dumps(res))
# 接收客户端开始传输的指令
res2 = pickle.loads(self.conn.recv(8192))
# print(res2)
seek_place = res2['seek_place'] # 获取客户端让开始传输的位置
to_send_size = file_size - seek_place #需要发送的字节数
# print(to_send_size)
with open(abs_file,'rb') as f:
f.seek(seek_place)
while to_send_size > 0:
self.conn.send(f.read(8192))
time.sleep(0.000001)
to_send_size -= 8192
else:
return pickle.dumps({"status":-1,"msg":"文件不存在"})
msg = ''
return msg.encode('utf-8')
def logout(self,request):
# print(self.current_path)
save_info(self.user_info, self.name)
msg = ''
return msg.encode('utf-8')
if __name__ == "__main__":
pass | random_line_split |
|
ftp_manage.py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/14 7:36
import os
import time
import pickle
import hashlib
from conf import settings
from modules.files import Files
def get_md5(data, type="str"):
m = hashlib.md5()
if type == "str":
m.update(data.encode('utf-8'))
elif type == "file":
if not os.path.isfile(data):
return None
with open(data,'rb') as f:
for line in f:
m.update(line)
else:
exit("Type error !")
return m.hexdigest()
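# Example (illustrative): get_md5("abc") == '900150983cd24fb0d6963f7d28e17f72';
# get_md5("/path/to/file", type="file") returns that file's MD5 hex digest, or None if the path is not a file.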
def get_info(name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'rb') as user_obj:
user_info = pickle.load(user_obj)
return user_info
def save_info(user_info, name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'wb') as user_obj:
pickle.dump(user_info, user_obj)
# pickle.dump()
class FtpManage():
def __init__(self, conn):
# 读取数据库
# self.name = None
self.conn = conn
self.is_certified = False # 是否已经认证
self.current_path = None # 当前路径
# self.db_file = os.path.join(settings.db, name)
self.db_file = None
self.user_info = None
# 用户登陆
def login(self, request):
request = eval(request)
self.n | ath
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def mkdir(self, request):
print(request)
new_dir = request.split()[1]
abs_path = os.path.join(settings.BaseDir, self.current_path)
if new_dir in os.listdir(abs_path):
msg = "该目录名已经被占用!"
else:
os.makedirs(os.path.join(abs_path, new_dir))
msg = "目录【%s】创建成功" % new_dir
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def df(self, request):
print(self.user_info.__dict__)
space_info = self.user_info.space_info
print(space_info)
print("空间限额:【%s】MB 已使用空间:【%s】MB 剩余空间: 【%s】 MB" %(space_info[0], space_info[1], space_info[2]))
msg = {}
msg['quota'] = space_info[0]
msg['used_space'] = space_info[1]
msg['aviable_space'] = space_info[2]
return pickle.dumps(msg)
# 切换目录
def cd(self, request):
print(request)
if request == "cd":
self.current_path = '%s/%s' %(settings.Ftp_Base_Dir, self.name)
msg = "切换成功,当前目录:%s" % self.current_path
else:
to_path = request.split()[1]
current_path_list = self.current_path.split('\\')
if '/' in to_path:
new_path_list = to_path.split('/')
# print(new_path_list)
flag = True
while flag:
for path in new_path_list:
if path == '..':
tmp_path = current_path_list.pop()
# print(tmp_path)
# print(self.name)
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
flag = False
break
else:
current_path_list.append(path)
new_path = "\\".join(current_path_list)
break
if flag == True:
if os.path.isdir(os.path.join(settings.BaseDir, new_path)):
self.current_path = new_path
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
else:
pass
elif to_path == '..':
tmp_path = current_path_list.pop()
# if tmp_path == self.name:
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
else:
self.current_path = "\\".join(current_path_list)
msg = "切换成功,当前目录:%s" % self.current_path
else:
abs_path = os.path.join(settings.BaseDir, self.current_path)
# if to_path in os.listdir(abs_path):
if os.path.isdir(os.path.join(abs_path, to_path)):
self.current_path = os.path.join(self.current_path, to_path)
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
# self.conn.send(msg.encode('utf-8'))
self.user_info.current_path = self.current_path
return msg.encode('utf-8')
# 查看目录下文件
def ls(self, request):
print(request)
# print(settings.BaseDir)
# print(self.current_path)
abs_path = os.path.join(settings.BaseDir, self.current_path)
# print(abs_path)
files = os.listdir(abs_path)
if files:
print(files)
msg = "当前目录的文件情况如下:\n文件名 文件大小 创建时间 修改时间 类型:\n"
for file_name in files:
file = os.path.join(abs_path, file_name)
print(file)
file_size = os.path.getsize(file)
import time
create_time = time.strftime("%x %X", time.localtime(os.path.getctime(file)))
modify_time = time.strftime("%x %X", time.localtime(os.path.getmtime(file)))
file_type = "文件夹" if os.path.isdir(file) else "文件"
file_info = "【%s】 【%s】 【%s】 【%s】 【%s】 \n" % (file_name, file_size, create_time, modify_time, file_type)
print(file_info)
msg += file_info
else:
msg = "当前目录没有文件!"
print(msg)
# send_data = {}
# send_data['action'] = 'pwd'
# send_data['is_certified'] = self.is_certified
# send_data['response'] = {}
# send_data['response']['msg'] = msg
# print(send_data)
# self.conn.send(pickle.dumps(send_data))
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
#上传
def put(self, request):
"""
        2. On receiving the client's upload request, check whether the file already exists on the server by comparing MD5 values.
            2.1 If the MD5 values match, the file already exists.
            2.2 If they differ, tell the client the server-side file size; if the server MD5 is None, the file does not exist.
            2.3 Check whether server space is sufficient; if not, report that to the client in the same reply.
           send {"filename":'1.jpg', "server_file_md5":'123333', "filesize": 1111, "space_aviable": 2334556 }
        4. The server then receives the client's reply; if seek_size == -1, the upload is cancelled.
            4.1 If seek_size == 0, open the file in 'wb' mode.
            4.2 If seek_size > 0, open the file in 'ab' mode (resume a partial upload).
            4.3 Start receiving the client's data.
        6. When the transfer completes, return the MD5 check result for the received data.
"""
print(request)
filename = request.split()[-1]
recv_data = pickle.loads(self.conn.recv(8192))
if recv_data['status']:
abs_file_path = os.path.join(settings.BaseDir, self.current_path, filename)
server_file_md5 = get_md5(abs_file_path, "file")
if server_file_md5 == recv_data['file_md5']:
print("服务器已经有相同文件!")
send_msg = {"filename": filename,
"server_file_md5": server_file_md5 }
self.conn.send(pickle.dumps(send_msg))
else:
if server_file_md5:
filesize = os.path.getsize(abs_file_path)
else:
filesize = 0
space_aviable = pickle.loads(self.df(""))['aviable_space'] * 1024 * 1024 + filesize - recv_data['filesize']
send_msg = {"filename": filename,
"server_file_md5": server_file_md5,
"filesize": filesize,
"space_aviable": space_aviable }
self.conn.send(pickle.dumps(send_msg))
if space_aviable <= 0:
print("服务器空间不够")
else: #等待客户端响应
recv_data = pickle.loads(self.conn.recv(8192))
# print(recv_data)
if recv_data['seek_size'] == 0:
f = open(abs_file_path, 'wb')
else:
f = open(abs_file_path, 'ab')
# 开始接收数据
flag = True
while flag:
data = self.conn.recv(8192)
# print(data)
time.sleep(0.000001)
f.write(data)
if len(data)< 8192:
flag = False
f.close()
server_file_md5 = get_md5(abs_file_path, "file")
if recv_data['file_md5'] == server_file_md5:
print("传输完成,md5校验通过!")
send_msg['status'] = 1
else:
print("传输完成,md5校验失败!")
send_msg['status'] = 0
self.user_info.change_space_size(recv_data['filesize'] - filesize)
save_info(self.user_info, self.name )
self.conn.send(pickle.dumps(send_msg))
else: # 客户端没有对应的文件,则不做任何操作
pass
msg = ''
return msg.encode('utf-8')
#下载
def get(self, request):
# print(request)
filename = request.split()[-1]
abs_file = os.path.join(settings.BaseDir, self.current_path, filename)
if os.path.isfile(abs_file):
file_md5 = get_md5(abs_file, type='file')
file_size = os.path.getsize(abs_file)
# 判断文件是否存在
res = {"status":1, "msg":"准备就绪", "md5":file_md5, "file_size": file_size }
self.conn.send(pickle.dumps(res))
# 接收客户端开始传输的指令
res2 = pickle.loads(self.conn.recv(8192))
# print(res2)
seek_place = res2['seek_place'] # 获取客户端让开始传输的位置
to_send_size = file_size - seek_place #需要发送的字节数
# print(to_send_size)
with open(abs_file,'rb') as f:
f.seek(seek_place)
while to_send_size > 0:
self.conn.send(f.read(8192))
time.sleep(0.000001)
to_send_size -= 8192
else:
return pickle.dumps({"status":-1,"msg":"文件不存在"})
msg = ''
return msg.encode('utf-8')
def logout(self,request):
# print(self.current_path)
save_info(self.user_info, self.name)
msg = ''
return msg.encode('utf-8')
if __name__ == "__main__":
pass | ame = request['name']
password = request['password']
send_data = {}
send_data['action'] = 'login'
send_data['response'] = {}
if self.name in os.listdir(settings.db):
self.user_info = get_info(self.name)
# print(self.user_info.__dict__)
if self.user_info.password == password:
self.is_certified = True
self.current_path = self.user_info.current_path
send_data['response']['msg'] = "认证成功!"
else:
send_data['response']['msg'] = "认证失败!用户名密码错误!"
else:
send_data['response']['msg'] = "认证失败!无此用户!"
send_data['is_certified'] = self.is_certified
print(send_data)
# self.conn.send(pickle.dumps(send_data))
return pickle.dumps(send_data)
def pwd(self, request):
# print(request)
msg = "当前路径:%s" % self.current_p | identifier_body |
ftp_manage.py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/14 7:36
import os
import time
import pickle
import hashlib
from conf import settings
from modules.files import Files
def get_md5(data, type="str"):
m = hashlib.md5()
if type == "str":
m.update(data.encode('utf-8'))
elif type == "file":
if not os.path.isfile(data):
return None
with open(data,'rb') as f:
for line in f:
m.update(line)
else:
exit("Type error !")
return m.hexdigest()
def get_info(name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'rb') as user_obj:
user_info = pickle.load(user_obj)
return user_info
def save_info(user_info, name):
user_file = os.path.join(settings.db, name)
with open(user_file, 'wb') as user_obj:
pickle.dump(user_info, user_obj)
# pickle.dump()
class | ():
def __init__(self, conn):
# 读取数据库
# self.name = None
self.conn = conn
self.is_certified = False # 是否已经认证
self.current_path = None # 当前路径
# self.db_file = os.path.join(settings.db, name)
self.db_file = None
self.user_info = None
# 用户登陆
def login(self, request):
request = eval(request)
self.name = request['name']
password = request['password']
send_data = {}
send_data['action'] = 'login'
send_data['response'] = {}
if self.name in os.listdir(settings.db):
self.user_info = get_info(self.name)
# print(self.user_info.__dict__)
if self.user_info.password == password:
self.is_certified = True
self.current_path = self.user_info.current_path
send_data['response']['msg'] = "认证成功!"
else:
send_data['response']['msg'] = "认证失败!用户名密码错误!"
else:
send_data['response']['msg'] = "认证失败!无此用户!"
send_data['is_certified'] = self.is_certified
print(send_data)
# self.conn.send(pickle.dumps(send_data))
return pickle.dumps(send_data)
def pwd(self, request):
# print(request)
msg = "当前路径:%s" % self.current_path
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def mkdir(self, request):
print(request)
new_dir = request.split()[1]
abs_path = os.path.join(settings.BaseDir, self.current_path)
if new_dir in os.listdir(abs_path):
msg = "该目录名已经被占用!"
else:
os.makedirs(os.path.join(abs_path, new_dir))
msg = "目录【%s】创建成功" % new_dir
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
def df(self, request):
print(self.user_info.__dict__)
space_info = self.user_info.space_info
print(space_info)
print("空间限额:【%s】MB 已使用空间:【%s】MB 剩余空间: 【%s】 MB" %(space_info[0], space_info[1], space_info[2]))
msg = {}
msg['quota'] = space_info[0]
msg['used_space'] = space_info[1]
msg['aviable_space'] = space_info[2]
return pickle.dumps(msg)
# 切换目录
def cd(self, request):
print(request)
if request == "cd":
self.current_path = '%s/%s' %(settings.Ftp_Base_Dir, self.name)
msg = "切换成功,当前目录:%s" % self.current_path
else:
to_path = request.split()[1]
current_path_list = self.current_path.split('\\')
if '/' in to_path:
new_path_list = to_path.split('/')
# print(new_path_list)
flag = True
while flag:
for path in new_path_list:
if path == '..':
tmp_path = current_path_list.pop()
# print(tmp_path)
# print(self.name)
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
flag = False
break
else:
current_path_list.append(path)
new_path = "\\".join(current_path_list)
break
if flag == True:
if os.path.isdir(os.path.join(settings.BaseDir, new_path)):
self.current_path = new_path
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
else:
pass
elif to_path == '..':
tmp_path = current_path_list.pop()
# if tmp_path == self.name:
if len(current_path_list) == 0:
msg = "没有权限切换到对应目录!"
else:
self.current_path = "\\".join(current_path_list)
msg = "切换成功,当前目录:%s" % self.current_path
else:
abs_path = os.path.join(settings.BaseDir, self.current_path)
# if to_path in os.listdir(abs_path):
if os.path.isdir(os.path.join(abs_path, to_path)):
self.current_path = os.path.join(self.current_path, to_path)
msg = "切换成功,当前目录:%s" % self.current_path
else:
msg = "要切换的目录【%s】在当前路径不存在" % to_path
# self.conn.send(msg.encode('utf-8'))
self.user_info.current_path = self.current_path
return msg.encode('utf-8')
# 查看目录下文件
def ls(self, request):
print(request)
# print(settings.BaseDir)
# print(self.current_path)
abs_path = os.path.join(settings.BaseDir, self.current_path)
# print(abs_path)
files = os.listdir(abs_path)
if files:
print(files)
msg = "当前目录的文件情况如下:\n文件名 文件大小 创建时间 修改时间 类型:\n"
for file_name in files:
file = os.path.join(abs_path, file_name)
print(file)
file_size = os.path.getsize(file)
import time
create_time = time.strftime("%x %X", time.localtime(os.path.getctime(file)))
modify_time = time.strftime("%x %X", time.localtime(os.path.getmtime(file)))
file_type = "文件夹" if os.path.isdir(file) else "文件"
file_info = "【%s】 【%s】 【%s】 【%s】 【%s】 \n" % (file_name, file_size, create_time, modify_time, file_type)
print(file_info)
msg += file_info
else:
msg = "当前目录没有文件!"
print(msg)
# send_data = {}
# send_data['action'] = 'pwd'
# send_data['is_certified'] = self.is_certified
# send_data['response'] = {}
# send_data['response']['msg'] = msg
# print(send_data)
# self.conn.send(pickle.dumps(send_data))
# self.conn.send(msg.encode('utf-8'))
return msg.encode('utf-8')
#上传
def put(self, request):
"""
2.收到客户端上传文件的请求,判断该文件是否在服务器端存在,直接查看文件md5值
2.1 如果md5值相同,则文件存在
2.2 如果md5值不同,则告知客户端文件大小,如果md5值为None,则文件不存在
2.3 校验服务器空间是否充足,如果不足,则在1.2中同时告知客户端文件不足的信息
send {"filename":'1.jpg', "server_file_md5":'123333', "filesize": 1111, "space_aviable": 2334556 }
4 服务端收到客户端数据,ruguo seek_size = -1则不上传
4.1 如果seek_size = 0 ,则 wb 模式打开文件
4.2 如果seek_size > 0 ,则ab 模式打开文件
4.3 开始接收客户端数据
6. 当数据接收完成时,返回接收到的数据md5校验结果
"""
print(request)
filename = request.split()[-1]
recv_data = pickle.loads(self.conn.recv(8192))
if recv_data['status']:
abs_file_path = os.path.join(settings.BaseDir, self.current_path, filename)
server_file_md5 = get_md5(abs_file_path, "file")
if server_file_md5 == recv_data['file_md5']:
print("服务器已经有相同文件!")
send_msg = {"filename": filename,
"server_file_md5": server_file_md5 }
self.conn.send(pickle.dumps(send_msg))
else:
if server_file_md5:
filesize = os.path.getsize(abs_file_path)
else:
filesize = 0
space_aviable = pickle.loads(self.df(""))['aviable_space'] * 1024 * 1024 + filesize - recv_data['filesize']
send_msg = {"filename": filename,
"server_file_md5": server_file_md5,
"filesize": filesize,
"space_aviable": space_aviable }
self.conn.send(pickle.dumps(send_msg))
if space_aviable <= 0:
print("服务器空间不够")
else: #等待客户端响应
recv_data = pickle.loads(self.conn.recv(8192))
# print(recv_data)
if recv_data['seek_size'] == 0:
f = open(abs_file_path, 'wb')
else:
f = open(abs_file_path, 'ab')
# 开始接收数据
flag = True
while flag:
data = self.conn.recv(8192)
# print(data)
time.sleep(0.000001)
f.write(data)
if len(data)< 8192:
flag = False
f.close()
server_file_md5 = get_md5(abs_file_path, "file")
if recv_data['file_md5'] == server_file_md5:
print("传输完成,md5校验通过!")
send_msg['status'] = 1
else:
print("传输完成,md5校验失败!")
send_msg['status'] = 0
self.user_info.change_space_size(recv_data['filesize'] - filesize)
save_info(self.user_info, self.name )
self.conn.send(pickle.dumps(send_msg))
else: # 客户端没有对应的文件,则不做任何操作
pass
msg = ''
return msg.encode('utf-8')
#下载
def get(self, request):
# print(request)
filename = request.split()[-1]
abs_file = os.path.join(settings.BaseDir, self.current_path, filename)
if os.path.isfile(abs_file):
file_md5 = get_md5(abs_file, type='file')
file_size = os.path.getsize(abs_file)
# 判断文件是否存在
res = {"status":1, "msg":"准备就绪", "md5":file_md5, "file_size": file_size }
self.conn.send(pickle.dumps(res))
# 接收客户端开始传输的指令
res2 = pickle.loads(self.conn.recv(8192))
# print(res2)
seek_place = res2['seek_place'] # 获取客户端让开始传输的位置
to_send_size = file_size - seek_place #需要发送的字节数
# print(to_send_size)
with open(abs_file,'rb') as f:
f.seek(seek_place)
while to_send_size > 0:
self.conn.send(f.read(8192))
time.sleep(0.000001)
to_send_size -= 8192
else:
return pickle.dumps({"status":-1,"msg":"文件不存在"})
msg = ''
return msg.encode('utf-8')
def logout(self,request):
# print(self.current_path)
save_info(self.user_info, self.name)
msg = ''
return msg.encode('utf-8')
if __name__ == "__main__":
pass | FtpManage | identifier_name |
rows.go | package pgx
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/jackc/pgx/v5/internal/stmtcache"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
// Rows is the result set returned from *Conn.Query. Rows must be closed before
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
// calling Next() until it returns false, or when a fatal error occurs.
//
// Once a Rows is closed the only methods that may be called are Close(), Err(),
// and CommandTag().
//
// Rows is an interface instead of a struct to allow tests to mock Query. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Rows interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Rows interface {
// Close closes the rows, making the connection ready for use again. It is safe
// to call Close after rows is already closed.
Close()
// Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
// calling Close or by Next returning false). If it is called early it may return nil even if there was an error
// executing the query.
Err() error
// CommandTag returns the command tag from this query. It is only available after Rows is closed.
CommandTag() pgconn.CommandTag
// FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
// when there was an error executing the query.
FieldDescriptions() []pgconn.FieldDescription
// Next prepares the next row for reading. It returns true if there is another
// row and false if no more rows are available or a fatal error has occurred.
// It automatically closes rows when all rows are read.
//
// Callers should check rows.Err() after rows.Next() returns false to detect
// whether result-set reading ended prematurely due to an error. See
// Conn.Query for details.
//
// For simpler error handling, consider using the higher-level pgx v5
// CollectRows() and ForEachRow() helpers instead.
Next() bool
// Scan reads the values from the current row into dest values positionally.
// dest can include pointers to core types, values implementing the Scanner
// interface, and nil. nil will skip the value entirely. It is an error to
// call Scan without first calling Next() and checking that it returned true.
Scan(dest ...any) error
// Values returns the decoded row values. As with Scan(), it is an error to
// call Values without first calling Next() and checking that it returned
// true.
Values() ([]any, error)
// RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
// call or the Rows is closed.
RawValues() [][]byte
// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
// *Conn (e.g. if it was created by RowsFromResultReader)
Conn() *Conn
}
// Row is a convenience wrapper over Rows that is returned by QueryRow.
//
// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Row interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Row interface {
// Scan works the same as Rows. with the following exceptions. If no
// rows were found it returns ErrNoRows. If multiple rows are returned it
// ignores all but the first.
Scan(dest ...any) error
}
// RowScanner scans an entire row at a time into the RowScanner.
type RowScanner interface {
// ScanRows scans the row.
ScanRow(rows Rows) error
}
// connRow implements the Row interface for Conn.QueryRow.
type connRow baseRows
func (r *connRow) Scan(dest ...any) (err error) {
rows := (*baseRows)(r)
if rows.Err() != nil {
return rows.Err()
}
for _, d := range dest {
if _, ok := d.(*pgtype.DriverBytes); ok {
rows.Close()
return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
}
}
if !rows.Next() {
if rows.Err() == nil {
return ErrNoRows
}
return rows.Err()
}
rows.Scan(dest...)
rows.Close()
return rows.Err()
}
// baseRows implements the Rows interface for Conn.Query.
type baseRows struct {
typeMap *pgtype.Map
resultReader *pgconn.ResultReader
values [][]byte
commandTag pgconn.CommandTag
err error
closed bool
scanPlans []pgtype.ScanPlan
scanTypes []reflect.Type
conn *Conn
multiResultReader *pgconn.MultiResultReader
queryTracer QueryTracer
batchTracer BatchTracer
ctx context.Context
startTime time.Time
sql string
args []any
rowCount int
}
func (rows *baseRows) FieldDescriptions() []pgconn.FieldDescription {
return rows.resultReader.FieldDescriptions()
}
func (rows *baseRows) Close() {
if rows.closed {
return
}
rows.closed = true
if rows.resultReader != nil {
var closeErr error
rows.commandTag, closeErr = rows.resultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.multiResultReader != nil {
closeErr := rows.multiResultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.err != nil && rows.conn != nil && rows.sql != "" {
if stmtcache.IsStatementInvalid(rows.err) {
if sc := rows.conn.statementCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.descriptionCache; sc != nil {
sc.Invalidate(rows.sql)
}
}
}
if rows.batchTracer != nil {
rows.batchTracer.TraceBatchQuery(rows.ctx, rows.conn, TraceBatchQueryData{SQL: rows.sql, Args: rows.args, CommandTag: rows.commandTag, Err: rows.err})
} else if rows.queryTracer != nil {
rows.queryTracer.TraceQueryEnd(rows.ctx, rows.conn, TraceQueryEndData{rows.commandTag, rows.err})
}
}
func (rows *baseRows) CommandTag() pgconn.CommandTag {
return rows.commandTag
}
func (rows *baseRows) Err() error {
return rows.err
}
// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
func (rows *baseRows) fatal(err error) {
if rows.err != nil {
return
}
rows.err = err
rows.Close()
}
func (rows *baseRows) Next() bool {
if rows.closed {
return false
}
if rows.resultReader.NextRow() {
rows.rowCount++
rows.values = rows.resultReader.Values()
return true
} else {
rows.Close()
return false
}
}
func (rows *baseRows) Scan(dest ...any) error {
m := rows.typeMap
fieldDescriptions := rows.FieldDescriptions()
values := rows.values
if len(fieldDescriptions) != len(values) {
err := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
rows.fatal(err)
return err
}
if len(dest) == 1 {
if rc, ok := dest[0].(RowScanner); ok {
err := rc.ScanRow(rows)
if err != nil {
rows.fatal(err)
}
return err
}
}
if len(fieldDescriptions) != len(dest) {
err := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
rows.fatal(err)
return err
}
if rows.scanPlans == nil {
rows.scanPlans = make([]pgtype.ScanPlan, len(values))
rows.scanTypes = make([]reflect.Type, len(values))
for i := range dest {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
}
for i, dst := range dest {
if dst == nil {
continue
}
if rows.scanTypes[i] != reflect.TypeOf(dst) {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
err := rows.scanPlans[i].Scan(values[i], dst)
if err != nil {
err = ScanArgError{ColumnIndex: i, Err: err}
rows.fatal(err)
return err
}
}
return nil
}
func (rows *baseRows) Values() ([]any, error) {
if rows.closed {
return nil, errors.New("rows is closed")
}
values := make([]any, 0, len(rows.FieldDescriptions()))
for i := range rows.FieldDescriptions() {
buf := rows.values[i]
fd := &rows.FieldDescriptions()[i]
if buf == nil {
values = append(values, nil)
continue
}
if dt, ok := rows.typeMap.TypeForOID(fd.DataTypeOID); ok {
value, err := dt.Codec.DecodeValue(rows.typeMap, fd.DataTypeOID, fd.Format, buf)
if err != nil {
rows.fatal(err)
}
values = append(values, value)
} else {
switch fd.Format {
case TextFormatCode:
values = append(values, string(buf))
case BinaryFormatCode:
newBuf := make([]byte, len(buf))
copy(newBuf, buf)
values = append(values, newBuf)
default:
rows.fatal(errors.New("unknown format code"))
}
}
if rows.Err() != nil {
return nil, rows.Err()
}
}
return values, rows.Err()
}
func (rows *baseRows) RawValues() [][]byte {
return rows.values
}
func (rows *baseRows) Conn() *Conn {
return rows.conn
}
type ScanArgError struct {
ColumnIndex int
Err error
}
func (e ScanArgError) Error() string {
return fmt.Sprintf("can't scan into dest[%d]: %v", e.ColumnIndex, e.Err)
}
func (e ScanArgError) Unwrap() error {
return e.Err
}
// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
//
// typeMap - OID to Go type mapping.
// fieldDescriptions - OID and format of values
// values - the raw data as returned from the PostgreSQL server
// dest - the destination that values will be decoded into
func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, values [][]byte, dest ...any) error {
if len(fieldDescriptions) != len(values) {
return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
}
if len(fieldDescriptions) != len(dest) {
return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
}
for i, d := range dest {
if d == nil {
continue
}
err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
if err != nil {
return ScanArgError{ColumnIndex: i, Err: err}
}
}
return nil
}
// RowsFromResultReader returns a Rows that will read from values resultReader and decode with typeMap. It can be used
// to read from the lower level pgconn interface.
func RowsFromResultReader(typeMap *pgtype.Map, resultReader *pgconn.ResultReader) Rows {
return &baseRows{
typeMap: typeMap,
resultReader: resultReader,
}
}
// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
// fails to scan or fn returns an error the query will be aborted and the error will be returned. Rows will be closed
// when ForEachRow returns.
func ForEachRow(rows Rows, scans []any, fn func() error) (pgconn.CommandTag, error) {
defer rows.Close()
for rows.Next() {
err := rows.Scan(scans...)
if err != nil {
return pgconn.CommandTag{}, err
}
err = fn()
if err != nil {
return pgconn.CommandTag{}, err
}
}
if err := rows.Err(); err != nil {
return pgconn.CommandTag{}, err
}
return rows.CommandTag(), nil
}
// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
type CollectableRow interface {
FieldDescriptions() []pgconn.FieldDescription
Scan(dest ...any) error
Values() ([]any, error)
RawValues() [][]byte
}
// RowToFunc is a function that scans or otherwise converts row to a T.
type RowToFunc[T any] func(row CollectableRow) (T, error)
// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
defer rows.Close()
slice := []T{}
for rows.Next() {
value, err := fn(rows)
if err != nil {
return nil, err
}
slice = append(slice, value)
}
if err := rows.Err(); err != nil {
return nil, err
}
return slice, nil
}
// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found returns an error where errors.Is(ErrNoRows) is true.
// CollectOneRow is to CollectRows as QueryRow is to Query.
func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
var value T
var err error
if !rows.Next() {
if err = rows.Err(); err != nil {
return value, err
}
return value, ErrNoRows
}
value, err = fn(rows)
if err != nil {
return value, err
}
rows.Close()
return value, rows.Err()
}
// RowTo returns a T scanned from row.
func RowTo[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&value)
return value, err
}
// RowTo returns a the address of a T scanned from row.
func RowToAddrOf[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&value)
return &value, err
}
// RowToMap returns a map scanned from row.
func RowToMap(row CollectableRow) (map[string]any, error) {
var value map[string]any
err := row.Scan((*mapRowScanner)(&value))
return value, err
}
type mapRowScanner map[string]any
func (rs *mapRowScanner) ScanRow(rows Rows) error {
values, err := rows.Values()
if err != nil {
return err
}
*rs = make(mapRowScanner, len(values))
for i := range values {
(*rs)[string(rows.FieldDescriptions()[i].Name)] = values[i]
}
return nil
}
// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number a public fields as row
// has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then the field will be
// ignored.
func RowToStructByPos[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number a
// public fields as row has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then
// the field will be ignored.
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return &value, err
}
type positionalStructRowScanner struct {
ptrToStruct any
}
func (rs *positionalStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets := rs.appendScanTargets(dstElemValue, nil)
if len(rows.RawValues()) > len(scanTargets) {
return fmt.Errorf("got %d values, but dst struct has only %d fields", len(rows.RawValues()), len(scanTargets))
}
return rows.Scan(scanTargets...)
}
func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any) []any {
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, 0, dstElemType.NumField())
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
} else if sf.PkgPath == "" {
dbTag, _ := sf.Tag.Lookup(structTagKey)
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
}
}
return scanTargets
}
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByName[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
// of named public fields as row has fields. The row and T fields will by matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return &value, err
}
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have greater than or equal number of named public
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return value, err
}
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have greater than or
// equal number of named public fields as row has fields. The row and T fields will by matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return &value, err
}
type namedStructRowScanner struct {
ptrToStruct any
lax bool
}
func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions())
if err != nil {
return err
}
for i, t := range scanTargets {
if t == nil {
return fmt.Errorf("struct doesn't have corresponding row field %s", rows.FieldDescriptions()[i].Name)
}
}
return rows.Scan(scanTargets...)
}
const structTagKey = "db"
func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
i = -1
for i, desc := range fldDescs {
if strings.EqualFold(desc.Name, field) {
return i
}
}
return
}
func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs []pgconn.FieldDescription) ([]any, error) {
var err error
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, len(fldDescs))
}
for i := 0; i < dstElemType.NumField(); i++ |
return scanTargets, err
}
| {
sf := dstElemType.Field(i)
if sf.PkgPath != "" && !sf.Anonymous {
// Field is unexported, skip it.
continue
}
// Handle anoymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
if err != nil {
return nil, err
}
} else {
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
if dbTagPresent {
dbTag = strings.Split(dbTag, ",")[0]
}
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
colName := dbTag
if !dbTagPresent {
colName = sf.Name
}
fpos := fieldPosByName(fldDescs, colName)
if fpos == -1 {
if rs.lax {
continue
}
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
if fpos >= len(scanTargets) && !rs.lax {
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface()
}
} | conditional_block |
rows.go | package pgx
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/jackc/pgx/v5/internal/stmtcache"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
// Rows is the result set returned from *Conn.Query. Rows must be closed before
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
// calling Next() until it returns false, or when a fatal error occurs.
//
// Once a Rows is closed the only methods that may be called are Close(), Err(),
// and CommandTag().
//
// Rows is an interface instead of a struct to allow tests to mock Query. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Rows interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Rows interface {
// Close closes the rows, making the connection ready for use again. It is safe
// to call Close after rows is already closed.
Close()
// Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
// calling Close or by Next returning false). If it is called early it may return nil even if there was an error
// executing the query.
Err() error
// CommandTag returns the command tag from this query. It is only available after Rows is closed.
CommandTag() pgconn.CommandTag
// FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
// when there was an error executing the query.
FieldDescriptions() []pgconn.FieldDescription
// Next prepares the next row for reading. It returns true if there is another
// row and false if no more rows are available or a fatal error has occurred.
// It automatically closes rows when all rows are read.
//
// Callers should check rows.Err() after rows.Next() returns false to detect
// whether result-set reading ended prematurely due to an error. See
// Conn.Query for details.
//
// For simpler error handling, consider using the higher-level pgx v5
// CollectRows() and ForEachRow() helpers instead.
Next() bool
// Scan reads the values from the current row into dest values positionally.
// dest can include pointers to core types, values implementing the Scanner
// interface, and nil. nil will skip the value entirely. It is an error to
// call Scan without first calling Next() and checking that it returned true.
Scan(dest ...any) error
// Values returns the decoded row values. As with Scan(), it is an error to
// call Values without first calling Next() and checking that it returned
// true.
Values() ([]any, error)
// RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
// call or the Rows is closed.
RawValues() [][]byte
// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
// *Conn (e.g. if it was created by RowsFromResultReader)
Conn() *Conn
}
// Row is a convenience wrapper over Rows that is returned by QueryRow.
//
// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Row interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Row interface {
// Scan works the same as Rows. with the following exceptions. If no
// rows were found it returns ErrNoRows. If multiple rows are returned it
// ignores all but the first.
Scan(dest ...any) error
}
// RowScanner scans an entire row at a time into the RowScanner.
type RowScanner interface {
// ScanRows scans the row.
ScanRow(rows Rows) error
}
// connRow implements the Row interface for Conn.QueryRow.
type connRow baseRows
func (r *connRow) Scan(dest ...any) (err error) {
rows := (*baseRows)(r)
if rows.Err() != nil {
return rows.Err()
}
for _, d := range dest {
if _, ok := d.(*pgtype.DriverBytes); ok {
rows.Close()
return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
}
}
if !rows.Next() {
if rows.Err() == nil {
return ErrNoRows
}
return rows.Err()
}
rows.Scan(dest...)
rows.Close()
return rows.Err()
}
// baseRows implements the Rows interface for Conn.Query.
type baseRows struct {
typeMap *pgtype.Map
resultReader *pgconn.ResultReader
values [][]byte
commandTag pgconn.CommandTag
err error
closed bool
scanPlans []pgtype.ScanPlan
scanTypes []reflect.Type
conn *Conn
multiResultReader *pgconn.MultiResultReader
queryTracer QueryTracer
batchTracer BatchTracer
ctx context.Context
startTime time.Time
sql string
args []any
rowCount int
}
func (rows *baseRows) FieldDescriptions() []pgconn.FieldDescription {
return rows.resultReader.FieldDescriptions()
}
func (rows *baseRows) Close() {
if rows.closed {
return
}
rows.closed = true
if rows.resultReader != nil {
var closeErr error
rows.commandTag, closeErr = rows.resultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.multiResultReader != nil {
closeErr := rows.multiResultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.err != nil && rows.conn != nil && rows.sql != "" {
if stmtcache.IsStatementInvalid(rows.err) {
if sc := rows.conn.statementCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.descriptionCache; sc != nil {
sc.Invalidate(rows.sql)
}
}
}
if rows.batchTracer != nil {
rows.batchTracer.TraceBatchQuery(rows.ctx, rows.conn, TraceBatchQueryData{SQL: rows.sql, Args: rows.args, CommandTag: rows.commandTag, Err: rows.err})
} else if rows.queryTracer != nil {
rows.queryTracer.TraceQueryEnd(rows.ctx, rows.conn, TraceQueryEndData{rows.commandTag, rows.err})
}
}
func (rows *baseRows) CommandTag() pgconn.CommandTag {
return rows.commandTag
}
func (rows *baseRows) Err() error {
return rows.err
}
// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
func (rows *baseRows) fatal(err error) {
if rows.err != nil {
return
}
rows.err = err
rows.Close()
}
func (rows *baseRows) Next() bool {
if rows.closed {
return false
}
if rows.resultReader.NextRow() {
rows.rowCount++
rows.values = rows.resultReader.Values()
return true
} else {
rows.Close()
return false
}
}
func (rows *baseRows) Scan(dest ...any) error {
m := rows.typeMap
fieldDescriptions := rows.FieldDescriptions()
values := rows.values
if len(fieldDescriptions) != len(values) {
err := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
rows.fatal(err)
return err
}
if len(dest) == 1 {
if rc, ok := dest[0].(RowScanner); ok {
err := rc.ScanRow(rows)
if err != nil {
rows.fatal(err)
}
return err
}
}
if len(fieldDescriptions) != len(dest) {
err := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
rows.fatal(err)
return err
}
if rows.scanPlans == nil {
rows.scanPlans = make([]pgtype.ScanPlan, len(values))
rows.scanTypes = make([]reflect.Type, len(values))
for i := range dest {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
}
for i, dst := range dest {
if dst == nil {
continue
}
if rows.scanTypes[i] != reflect.TypeOf(dst) {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
err := rows.scanPlans[i].Scan(values[i], dst)
if err != nil {
err = ScanArgError{ColumnIndex: i, Err: err}
rows.fatal(err)
return err
}
}
return nil
}
func (rows *baseRows) Values() ([]any, error) {
if rows.closed {
return nil, errors.New("rows is closed")
}
values := make([]any, 0, len(rows.FieldDescriptions()))
for i := range rows.FieldDescriptions() {
buf := rows.values[i]
fd := &rows.FieldDescriptions()[i]
if buf == nil {
values = append(values, nil)
continue
}
if dt, ok := rows.typeMap.TypeForOID(fd.DataTypeOID); ok {
value, err := dt.Codec.DecodeValue(rows.typeMap, fd.DataTypeOID, fd.Format, buf)
if err != nil {
rows.fatal(err)
}
values = append(values, value)
} else {
switch fd.Format {
case TextFormatCode:
values = append(values, string(buf))
case BinaryFormatCode:
newBuf := make([]byte, len(buf))
copy(newBuf, buf)
values = append(values, newBuf)
default:
rows.fatal(errors.New("unknown format code"))
}
}
if rows.Err() != nil {
return nil, rows.Err()
}
}
return values, rows.Err()
}
func (rows *baseRows) RawValues() [][]byte {
return rows.values
}
func (rows *baseRows) Conn() *Conn {
return rows.conn
}
type ScanArgError struct {
ColumnIndex int
Err error
}
func (e ScanArgError) Error() string {
return fmt.Sprintf("can't scan into dest[%d]: %v", e.ColumnIndex, e.Err)
}
func (e ScanArgError) Unwrap() error {
return e.Err
}
// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
//
// typeMap - OID to Go type mapping.
// fieldDescriptions - OID and format of values
// values - the raw data as returned from the PostgreSQL server
// dest - the destination that values will be decoded into
func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, values [][]byte, dest ...any) error {
if len(fieldDescriptions) != len(values) {
return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
}
if len(fieldDescriptions) != len(dest) {
return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
}
for i, d := range dest {
if d == nil {
continue
}
err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
if err != nil {
return ScanArgError{ColumnIndex: i, Err: err}
}
}
return nil
}
// RowsFromResultReader returns a Rows that will read from values resultReader and decode with typeMap. It can be used
// to read from the lower level pgconn interface.
func RowsFromResultReader(typeMap *pgtype.Map, resultReader *pgconn.ResultReader) Rows {
return &baseRows{
typeMap: typeMap,
resultReader: resultReader,
}
}
// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
// fails to scan or fn returns an error the query will be aborted and the error will be returned. Rows will be closed
// when ForEachRow returns.
func ForEachRow(rows Rows, scans []any, fn func() error) (pgconn.CommandTag, error) {
defer rows.Close()
for rows.Next() {
err := rows.Scan(scans...)
if err != nil {
return pgconn.CommandTag{}, err
}
err = fn()
if err != nil {
return pgconn.CommandTag{}, err
}
}
if err := rows.Err(); err != nil {
return pgconn.CommandTag{}, err
}
return rows.CommandTag(), nil
}
// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
type CollectableRow interface {
FieldDescriptions() []pgconn.FieldDescription
Scan(dest ...any) error
Values() ([]any, error)
RawValues() [][]byte
}
// RowToFunc is a function that scans or otherwise converts row to a T.
type RowToFunc[T any] func(row CollectableRow) (T, error)
// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
defer rows.Close()
slice := []T{}
for rows.Next() {
value, err := fn(rows)
if err != nil {
return nil, err
}
slice = append(slice, value)
}
if err := rows.Err(); err != nil {
return nil, err
}
return slice, nil
}
// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found returns an error where errors.Is(ErrNoRows) is true.
// CollectOneRow is to CollectRows as QueryRow is to Query.
func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
var value T
var err error
if !rows.Next() {
if err = rows.Err(); err != nil {
return value, err
}
return value, ErrNoRows
}
value, err = fn(rows)
if err != nil {
return value, err
}
rows.Close()
return value, rows.Err()
}
// RowTo returns a T scanned from row.
func RowTo[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&value)
return value, err
}
// RowTo returns a the address of a T scanned from row.
func RowToAddrOf[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&value)
return &value, err
}
// RowToMap returns a map scanned from row.
func RowToMap(row CollectableRow) (map[string]any, error) {
var value map[string]any
err := row.Scan((*mapRowScanner)(&value))
return value, err
}
type mapRowScanner map[string]any
func (rs *mapRowScanner) ScanRow(rows Rows) error {
values, err := rows.Values()
if err != nil {
return err
}
*rs = make(mapRowScanner, len(values))
for i := range values {
(*rs)[string(rows.FieldDescriptions()[i].Name)] = values[i]
}
return nil
}
// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number a public fields as row
// has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then the field will be
// ignored.
func RowToStructByPos[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number a
// public fields as row has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then
// the field will be ignored.
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return &value, err
}
type positionalStructRowScanner struct {
ptrToStruct any
}
func (rs *positionalStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets := rs.appendScanTargets(dstElemValue, nil)
if len(rows.RawValues()) > len(scanTargets) {
return fmt.Errorf("got %d values, but dst struct has only %d fields", len(rows.RawValues()), len(scanTargets))
}
return rows.Scan(scanTargets...)
}
func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any) []any {
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, 0, dstElemType.NumField())
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
} else if sf.PkgPath == "" {
dbTag, _ := sf.Tag.Lookup(structTagKey)
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
}
}
return scanTargets
}
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByName[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
// of named public fields as row has fields. The row and T fields will by matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return &value, err
}
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have greater than or equal number of named public
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return value, err
}
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have greater than or
// equal number of named public fields as row has fields. The row and T fields will by matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return &value, err
}
type namedStructRowScanner struct {
ptrToStruct any
lax bool
}
func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions())
if err != nil {
return err
}
for i, t := range scanTargets {
if t == nil {
return fmt.Errorf("struct doesn't have corresponding row field %s", rows.FieldDescriptions()[i].Name)
}
}
return rows.Scan(scanTargets...)
}
const structTagKey = "db"
func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
i = -1 | if strings.EqualFold(desc.Name, field) {
return i
}
}
return
}
func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs []pgconn.FieldDescription) ([]any, error) {
var err error
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, len(fldDescs))
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
if sf.PkgPath != "" && !sf.Anonymous {
// Field is unexported, skip it.
continue
}
// Handle anoymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
if err != nil {
return nil, err
}
} else {
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
if dbTagPresent {
dbTag = strings.Split(dbTag, ",")[0]
}
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
colName := dbTag
if !dbTagPresent {
colName = sf.Name
}
fpos := fieldPosByName(fldDescs, colName)
if fpos == -1 {
if rs.lax {
continue
}
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
if fpos >= len(scanTargets) && !rs.lax {
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface()
}
}
return scanTargets, err
} | for i, desc := range fldDescs { | random_line_split |
rows.go | package pgx
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/jackc/pgx/v5/internal/stmtcache"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
// Rows is the result set returned from *Conn.Query. Rows must be closed before
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
// calling Next() until it returns false, or when a fatal error occurs.
//
// Once a Rows is closed the only methods that may be called are Close(), Err(),
// and CommandTag().
//
// Rows is an interface instead of a struct to allow tests to mock Query. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Rows interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
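//
// A hedged sketch of typical use from a caller's perspective (the query, table,
// and variable names are hypothetical, not part of this package):
//
//	rows, err := conn.Query(ctx, "select id, name from widgets")
//	if err != nil {
//		return err
//	}
//	defer rows.Close()
//	for rows.Next() {
//		var id int32
//		var name string
//		if err := rows.Scan(&id, &name); err != nil {
//			return err
//		}
//	}
//	if err := rows.Err(); err != nil {
//		return err
//	}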
type Rows interface {
// Close closes the rows, making the connection ready for use again. It is safe
// to call Close after rows is already closed.
Close()
// Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
// calling Close or by Next returning false). If it is called early it may return nil even if there was an error
// executing the query.
Err() error
// CommandTag returns the command tag from this query. It is only available after Rows is closed.
CommandTag() pgconn.CommandTag
// FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
// when there was an error executing the query.
FieldDescriptions() []pgconn.FieldDescription
// Next prepares the next row for reading. It returns true if there is another
// row and false if no more rows are available or a fatal error has occurred.
// It automatically closes rows when all rows are read.
//
// Callers should check rows.Err() after rows.Next() returns false to detect
// whether result-set reading ended prematurely due to an error. See
// Conn.Query for details.
//
// For simpler error handling, consider using the higher-level pgx v5
// CollectRows() and ForEachRow() helpers instead.
Next() bool
// Scan reads the values from the current row into dest values positionally.
// dest can include pointers to core types, values implementing the Scanner
// interface, and nil. nil will skip the value entirely. It is an error to
// call Scan without first calling Next() and checking that it returned true.
Scan(dest ...any) error
// Values returns the decoded row values. As with Scan(), it is an error to
// call Values without first calling Next() and checking that it returned
// true.
Values() ([]any, error)
// RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
// call or the Rows is closed.
RawValues() [][]byte
// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
	// *Conn (e.g. if it was created by RowsFromResultReader).
Conn() *Conn
}
// Row is a convenience wrapper over Rows that is returned by QueryRow.
//
// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Row interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
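//
// A minimal caller-side sketch of use via QueryRow (query and variable names
// are hypothetical):
//
//	var name string
//	err := conn.QueryRow(ctx, "select name from widgets where id = $1", 42).Scan(&name)
//	if errors.Is(err, pgx.ErrNoRows) {
//		// no matching row
//	}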
type Row interface {
	// Scan works the same as Rows, with the following exceptions. If no
// rows were found it returns ErrNoRows. If multiple rows are returned it
// ignores all but the first.
Scan(dest ...any) error
}
// RowScanner scans an entire row at a time into the RowScanner.
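//
// A minimal caller-side sketch of a custom RowScanner (the widget type and the
// query are hypothetical):
//
//	type widget struct {
//		ID   int32
//		Name string
//	}
//
//	func (w *widget) ScanRow(rows pgx.Rows) error {
//		return rows.Scan(&w.ID, &w.Name)
//	}
//
//	// Passing a single RowScanner to Scan hands it the entire row:
//	var w widget
//	err := conn.QueryRow(ctx, "select id, name from widgets limit 1").Scan(&w)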
type RowScanner interface {
	// ScanRow scans the row.
ScanRow(rows Rows) error
}
// connRow implements the Row interface for Conn.QueryRow.
type connRow baseRows
func (r *connRow) Scan(dest ...any) (err error) {
rows := (*baseRows)(r)
if rows.Err() != nil {
return rows.Err()
}
for _, d := range dest {
if _, ok := d.(*pgtype.DriverBytes); ok {
rows.Close()
return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
}
}
if !rows.Next() {
if rows.Err() == nil {
return ErrNoRows
}
return rows.Err()
}
rows.Scan(dest...)
rows.Close()
return rows.Err()
}
// baseRows implements the Rows interface for Conn.Query.
type baseRows struct {
typeMap *pgtype.Map
resultReader *pgconn.ResultReader
values [][]byte
commandTag pgconn.CommandTag
err error
closed bool
scanPlans []pgtype.ScanPlan
scanTypes []reflect.Type
conn *Conn
multiResultReader *pgconn.MultiResultReader
queryTracer QueryTracer
batchTracer BatchTracer
ctx context.Context
startTime time.Time
sql string
args []any
rowCount int
}
func (rows *baseRows) FieldDescriptions() []pgconn.FieldDescription {
return rows.resultReader.FieldDescriptions()
}
func (rows *baseRows) Close() {
if rows.closed {
return
}
rows.closed = true
if rows.resultReader != nil {
var closeErr error
rows.commandTag, closeErr = rows.resultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.multiResultReader != nil {
closeErr := rows.multiResultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.err != nil && rows.conn != nil && rows.sql != "" {
if stmtcache.IsStatementInvalid(rows.err) {
if sc := rows.conn.statementCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.descriptionCache; sc != nil {
sc.Invalidate(rows.sql)
}
}
}
if rows.batchTracer != nil {
rows.batchTracer.TraceBatchQuery(rows.ctx, rows.conn, TraceBatchQueryData{SQL: rows.sql, Args: rows.args, CommandTag: rows.commandTag, Err: rows.err})
} else if rows.queryTracer != nil {
rows.queryTracer.TraceQueryEnd(rows.ctx, rows.conn, TraceQueryEndData{rows.commandTag, rows.err})
}
}
func (rows *baseRows) CommandTag() pgconn.CommandTag {
return rows.commandTag
}
func (rows *baseRows) Err() error {
return rows.err
}
// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
func (rows *baseRows) fatal(err error) {
if rows.err != nil {
return
}
rows.err = err
rows.Close()
}
func (rows *baseRows) Next() bool {
if rows.closed {
return false
}
if rows.resultReader.NextRow() {
rows.rowCount++
rows.values = rows.resultReader.Values()
return true
} else {
rows.Close()
return false
}
}
func (rows *baseRows) Scan(dest ...any) error {
m := rows.typeMap
fieldDescriptions := rows.FieldDescriptions()
values := rows.values
if len(fieldDescriptions) != len(values) {
err := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
rows.fatal(err)
return err
}
if len(dest) == 1 {
if rc, ok := dest[0].(RowScanner); ok {
err := rc.ScanRow(rows)
if err != nil {
rows.fatal(err)
}
return err
}
}
if len(fieldDescriptions) != len(dest) {
err := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
rows.fatal(err)
return err
}
if rows.scanPlans == nil {
rows.scanPlans = make([]pgtype.ScanPlan, len(values))
rows.scanTypes = make([]reflect.Type, len(values))
for i := range dest {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
}
for i, dst := range dest {
if dst == nil {
continue
}
if rows.scanTypes[i] != reflect.TypeOf(dst) {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
err := rows.scanPlans[i].Scan(values[i], dst)
if err != nil {
err = ScanArgError{ColumnIndex: i, Err: err}
rows.fatal(err)
return err
}
}
return nil
}
func (rows *baseRows) Values() ([]any, error) {
if rows.closed {
return nil, errors.New("rows is closed")
}
values := make([]any, 0, len(rows.FieldDescriptions()))
for i := range rows.FieldDescriptions() {
buf := rows.values[i]
fd := &rows.FieldDescriptions()[i]
if buf == nil {
values = append(values, nil)
continue
}
if dt, ok := rows.typeMap.TypeForOID(fd.DataTypeOID); ok {
value, err := dt.Codec.DecodeValue(rows.typeMap, fd.DataTypeOID, fd.Format, buf)
if err != nil {
rows.fatal(err)
}
values = append(values, value)
} else {
switch fd.Format {
case TextFormatCode:
values = append(values, string(buf))
case BinaryFormatCode:
newBuf := make([]byte, len(buf))
copy(newBuf, buf)
values = append(values, newBuf)
default:
rows.fatal(errors.New("unknown format code"))
}
}
if rows.Err() != nil {
return nil, rows.Err()
}
}
return values, rows.Err()
}
func (rows *baseRows) RawValues() [][]byte {
return rows.values
}
func (rows *baseRows) Conn() *Conn {
return rows.conn
}
type ScanArgError struct {
ColumnIndex int
Err error
}
func (e ScanArgError) Error() string {
return fmt.Sprintf("can't scan into dest[%d]: %v", e.ColumnIndex, e.Err)
}
func (e ScanArgError) Unwrap() error {
return e.Err
}
// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
//
// typeMap - OID to Go type mapping.
// fieldDescriptions - OID and format of values
// values - the raw data as returned from the PostgreSQL server
// dest - the destination that values will be decoded into
func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, values [][]byte, dest ...any) error {
if len(fieldDescriptions) != len(values) {
return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
}
if len(fieldDescriptions) != len(dest) {
return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
}
for i, d := range dest {
if d == nil {
continue
}
err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
if err != nil {
return ScanArgError{ColumnIndex: i, Err: err}
}
}
return nil
}
// RowsFromResultReader returns a Rows that will read the values from resultReader and decode them with typeMap. It can be used
// to read from the lower level pgconn interface.
func RowsFromResultReader(typeMap *pgtype.Map, resultReader *pgconn.ResultReader) Rows {
return &baseRows{
typeMap: typeMap,
resultReader: resultReader,
}
}
// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
// fails to scan or fn returns an error the query will be aborted and the error will be returned. Rows will be closed
// when ForEachRow returns.
func ForEachRow(rows Rows, scans []any, fn func() error) (pgconn.CommandTag, error) {
defer rows.Close()
for rows.Next() {
err := rows.Scan(scans...)
if err != nil {
return pgconn.CommandTag{}, err
}
err = fn()
if err != nil {
return pgconn.CommandTag{}, err
}
}
if err := rows.Err(); err != nil {
return pgconn.CommandTag{}, err
}
return rows.CommandTag(), nil
}
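// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` and a table `widgets(id int4, name text)` exist, and shows the
// ForEachRow pattern of scanning into a fixed set of variables and doing the
// per-row work inside the callback.
func exampleForEachRowUsage(ctx context.Context, conn *Conn) error {
	rows, err := conn.Query(ctx, "select id, name from widgets")
	if err != nil {
		return err
	}
	var id int32
	var name string
	// ForEachRow closes rows and surfaces any scan or callback error.
	_, err = ForEachRow(rows, []any{&id, &name}, func() error {
		fmt.Println(id, name)
		return nil
	})
	return err
}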
// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
type CollectableRow interface {
FieldDescriptions() []pgconn.FieldDescription
Scan(dest ...any) error
Values() ([]any, error)
RawValues() [][]byte
}
// RowToFunc is a function that scans or otherwise converts row to a T.
type RowToFunc[T any] func(row CollectableRow) (T, error)
// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
defer rows.Close()
slice := []T{}
for rows.Next() {
value, err := fn(rows)
if err != nil {
return nil, err
}
slice = append(slice, value)
}
if err := rows.Err(); err != nil {
return nil, err
}
return slice, nil
}
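// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` and a table `widgets(name text)` exist. CollectRows with RowTo
// gathers a single-column result into a slice; closing the rows and checking
// rows.Err are handled internally.
func exampleCollectRowsUsage(ctx context.Context, conn *Conn) ([]string, error) {
	rows, err := conn.Query(ctx, "select name from widgets")
	if err != nil {
		return nil, err
	}
	return CollectRows(rows, RowTo[string])
}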
// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found, it returns an error for which errors.Is(err, ErrNoRows) is true.
// CollectOneRow is to CollectRows as QueryRow is to Query.
func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
var value T
var err error
if !rows.Next() {
if err = rows.Err(); err != nil {
return value, err
}
return value, ErrNoRows
}
value, err = fn(rows)
if err != nil {
return value, err
}
rows.Close()
return value, rows.Err()
}
// RowTo returns a T scanned from row.
func RowTo[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&value)
return value, err
}
// RowToAddrOf returns the address of a T scanned from row.
func RowToAddrOf[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&value)
return &value, err
}
// RowToMap returns a map scanned from row.
func RowToMap(row CollectableRow) (map[string]any, error) {
var value map[string]any
err := row.Scan((*mapRowScanner)(&value))
return value, err
}
type mapRowScanner map[string]any
func (rs *mapRowScanner) ScanRow(rows Rows) error {
values, err := rows.Values()
if err != nil {
return err
}
*rs = make(mapRowScanner, len(values))
for i := range values {
(*rs)[string(rows.FieldDescriptions()[i].Name)] = values[i]
}
return nil
}
// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number of public fields as row
// has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will be
// ignored.
func RowToStructByPos[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number of
// public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then
// the field will be ignored.
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return &value, err
}
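// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` and a table `widgets(id int8, name text)` exist. With the ByPos
// variants the selected columns must line up with the exported struct fields
// by position, so the query lists columns in field order.
func exampleRowToStructByPosUsage(ctx context.Context, conn *Conn) error {
	type widget struct {
		ID   int64
		Name string
	}
	rows, err := conn.Query(ctx, "select id, name from widgets")
	if err != nil {
		return err
	}
	widgets, err := CollectRows(rows, RowToStructByPos[widget])
	if err != nil {
		return err
	}
	fmt.Println(len(widgets))
	return nil
}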
type positionalStructRowScanner struct {
ptrToStruct any
}
func (rs *positionalStructRowScanner) ScanRow(rows Rows) error |
func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any) []any {
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, 0, dstElemType.NumField())
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
} else if sf.PkgPath == "" {
dbTag, _ := sf.Tag.Lookup(structTagKey)
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
}
}
return scanTargets
}
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByName[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
// of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return &value, err
}
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have at least as many named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return value, err
}
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have at least as
// many named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return &value, err
}
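// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` and a table `users(user_id int8, first_name text)` exist. The
// ByName variants match columns to fields case-insensitively by name, and the
// "db" tag overrides the field name where the two differ.
func exampleRowToStructByNameUsage(ctx context.Context, conn *Conn) error {
	type user struct {
		UserID    int64  `db:"user_id"`
		FirstName string `db:"first_name"`
	}
	rows, err := conn.Query(ctx, "select user_id, first_name from users")
	if err != nil {
		return err
	}
	users, err := CollectRows(rows, RowToStructByName[user])
	if err != nil {
		return err
	}
	fmt.Println(len(users))
	return nil
}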
type namedStructRowScanner struct {
ptrToStruct any
lax bool
}
func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions())
if err != nil {
return err
}
for i, t := range scanTargets {
if t == nil {
return fmt.Errorf("struct doesn't have corresponding row field %s", rows.FieldDescriptions()[i].Name)
}
}
return rows.Scan(scanTargets...)
}
const structTagKey = "db"
func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
i = -1
for i, desc := range fldDescs {
if strings.EqualFold(desc.Name, field) {
return i
}
}
return
}
func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs []pgconn.FieldDescription) ([]any, error) {
var err error
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, len(fldDescs))
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
if sf.PkgPath != "" && !sf.Anonymous {
// Field is unexported, skip it.
continue
}
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
if err != nil {
return nil, err
}
} else {
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
if dbTagPresent {
dbTag = strings.Split(dbTag, ",")[0]
}
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
colName := dbTag
if !dbTagPresent {
colName = sf.Name
}
fpos := fieldPosByName(fldDescs, colName)
if fpos == -1 {
if rs.lax {
continue
}
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
if fpos >= len(scanTargets) && !rs.lax {
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface()
}
}
return scanTargets, err
}
| {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets := rs.appendScanTargets(dstElemValue, nil)
if len(rows.RawValues()) > len(scanTargets) {
return fmt.Errorf("got %d values, but dst struct has only %d fields", len(rows.RawValues()), len(scanTargets))
}
return rows.Scan(scanTargets...)
} | identifier_body |
rows.go | package pgx
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/jackc/pgx/v5/internal/stmtcache"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
// Rows is the result set returned from *Conn.Query. Rows must be closed before
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
// calling Next() until it returns false, or when a fatal error occurs.
//
// Once a Rows is closed the only methods that may be called are Close(), Err(),
// and CommandTag().
//
// Rows is an interface instead of a struct to allow tests to mock Query. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Rows interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Rows interface {
// Close closes the rows, making the connection ready for use again. It is safe
// to call Close after rows is already closed.
Close()
// Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
// calling Close or by Next returning false). If it is called early it may return nil even if there was an error
// executing the query.
Err() error
// CommandTag returns the command tag from this query. It is only available after Rows is closed.
CommandTag() pgconn.CommandTag
// FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
// when there was an error executing the query.
FieldDescriptions() []pgconn.FieldDescription
// Next prepares the next row for reading. It returns true if there is another
// row and false if no more rows are available or a fatal error has occurred.
// It automatically closes rows when all rows are read.
//
// Callers should check rows.Err() after rows.Next() returns false to detect
// whether result-set reading ended prematurely due to an error. See
// Conn.Query for details.
//
// For simpler error handling, consider using the higher-level pgx v5
// CollectRows() and ForEachRow() helpers instead.
Next() bool
// Scan reads the values from the current row into dest values positionally.
// dest can include pointers to core types, values implementing the Scanner
// interface, and nil. nil will skip the value entirely. It is an error to
// call Scan without first calling Next() and checking that it returned true.
Scan(dest ...any) error
// Values returns the decoded row values. As with Scan(), it is an error to
// call Values without first calling Next() and checking that it returned
// true.
Values() ([]any, error)
// RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
// call or the Rows is closed.
RawValues() [][]byte
// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
// *Conn (e.g. if it was created by RowsFromResultReader)
Conn() *Conn
}
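// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` and a table `widgets(id int4, name text)` exist, and shows the
// manual iteration pattern described above: Next, Scan, then a final Err check
// after Next returns false.
func exampleRowsIterationUsage(ctx context.Context, conn *Conn) error {
	rows, err := conn.Query(ctx, "select id, name from widgets")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var id int32
		var name string
		if err := rows.Scan(&id, &name); err != nil {
			return err
		}
		fmt.Println(id, name)
	}
	return rows.Err()
}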
// Row is a convenience wrapper over Rows that is returned by QueryRow.
//
// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
// adding a method to an interface is technically a breaking change. Because of this
// the Row interface is partially excluded from semantic version requirements.
// Methods will not be removed or changed, but new methods may be added.
type Row interface {
// Scan works the same as Rows, with the following exceptions. If no
// rows were found it returns ErrNoRows. If multiple rows are returned it
// ignores all but the first.
Scan(dest ...any) error
}
// RowScanner scans an entire row at a time into the RowScanner.
type RowScanner interface {
// ScanRow scans the row.
ScanRow(rows Rows) error
}
// connRow implements the Row interface for Conn.QueryRow.
type connRow baseRows
func (r *connRow) Scan(dest ...any) (err error) {
rows := (*baseRows)(r)
if rows.Err() != nil {
return rows.Err()
}
for _, d := range dest {
if _, ok := d.(*pgtype.DriverBytes); ok {
rows.Close()
return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
}
}
if !rows.Next() {
if rows.Err() == nil {
return ErrNoRows
}
return rows.Err()
}
rows.Scan(dest...)
rows.Close()
return rows.Err()
}
// baseRows implements the Rows interface for Conn.Query.
type baseRows struct {
typeMap *pgtype.Map
resultReader *pgconn.ResultReader
values [][]byte
commandTag pgconn.CommandTag
err error
closed bool
scanPlans []pgtype.ScanPlan
scanTypes []reflect.Type
conn *Conn
multiResultReader *pgconn.MultiResultReader
queryTracer QueryTracer
batchTracer BatchTracer
ctx context.Context
startTime time.Time
sql string
args []any
rowCount int
}
func (rows *baseRows) FieldDescriptions() []pgconn.FieldDescription {
return rows.resultReader.FieldDescriptions()
}
func (rows *baseRows) Close() {
if rows.closed {
return
}
rows.closed = true
if rows.resultReader != nil {
var closeErr error
rows.commandTag, closeErr = rows.resultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.multiResultReader != nil {
closeErr := rows.multiResultReader.Close()
if rows.err == nil {
rows.err = closeErr
}
}
if rows.err != nil && rows.conn != nil && rows.sql != "" {
if stmtcache.IsStatementInvalid(rows.err) {
if sc := rows.conn.statementCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.descriptionCache; sc != nil {
sc.Invalidate(rows.sql)
}
}
}
if rows.batchTracer != nil {
rows.batchTracer.TraceBatchQuery(rows.ctx, rows.conn, TraceBatchQueryData{SQL: rows.sql, Args: rows.args, CommandTag: rows.commandTag, Err: rows.err})
} else if rows.queryTracer != nil {
rows.queryTracer.TraceQueryEnd(rows.ctx, rows.conn, TraceQueryEndData{rows.commandTag, rows.err})
}
}
func (rows *baseRows) CommandTag() pgconn.CommandTag {
return rows.commandTag
}
func (rows *baseRows) Err() error {
return rows.err
}
// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
func (rows *baseRows) fatal(err error) {
if rows.err != nil {
return
}
rows.err = err
rows.Close()
}
func (rows *baseRows) Next() bool {
if rows.closed {
return false
}
if rows.resultReader.NextRow() {
rows.rowCount++
rows.values = rows.resultReader.Values()
return true
} else {
rows.Close()
return false
}
}
func (rows *baseRows) Scan(dest ...any) error {
m := rows.typeMap
fieldDescriptions := rows.FieldDescriptions()
values := rows.values
if len(fieldDescriptions) != len(values) {
err := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
rows.fatal(err)
return err
}
if len(dest) == 1 {
if rc, ok := dest[0].(RowScanner); ok {
err := rc.ScanRow(rows)
if err != nil {
rows.fatal(err)
}
return err
}
}
if len(fieldDescriptions) != len(dest) {
err := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
rows.fatal(err)
return err
}
if rows.scanPlans == nil {
rows.scanPlans = make([]pgtype.ScanPlan, len(values))
rows.scanTypes = make([]reflect.Type, len(values))
for i := range dest {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
}
for i, dst := range dest {
if dst == nil {
continue
}
if rows.scanTypes[i] != reflect.TypeOf(dst) {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
err := rows.scanPlans[i].Scan(values[i], dst)
if err != nil {
err = ScanArgError{ColumnIndex: i, Err: err}
rows.fatal(err)
return err
}
}
return nil
}
func (rows *baseRows) Values() ([]any, error) {
if rows.closed {
return nil, errors.New("rows is closed")
}
values := make([]any, 0, len(rows.FieldDescriptions()))
for i := range rows.FieldDescriptions() {
buf := rows.values[i]
fd := &rows.FieldDescriptions()[i]
if buf == nil {
values = append(values, nil)
continue
}
if dt, ok := rows.typeMap.TypeForOID(fd.DataTypeOID); ok {
value, err := dt.Codec.DecodeValue(rows.typeMap, fd.DataTypeOID, fd.Format, buf)
if err != nil {
rows.fatal(err)
}
values = append(values, value)
} else {
switch fd.Format {
case TextFormatCode:
values = append(values, string(buf))
case BinaryFormatCode:
newBuf := make([]byte, len(buf))
copy(newBuf, buf)
values = append(values, newBuf)
default:
rows.fatal(errors.New("unknown format code"))
}
}
if rows.Err() != nil {
return nil, rows.Err()
}
}
return values, rows.Err()
}
func (rows *baseRows) RawValues() [][]byte {
return rows.values
}
func (rows *baseRows) Conn() *Conn {
return rows.conn
}
type ScanArgError struct {
ColumnIndex int
Err error
}
func (e ScanArgError) Error() string {
return fmt.Sprintf("can't scan into dest[%d]: %v", e.ColumnIndex, e.Err)
}
func (e ScanArgError) Unwrap() error {
return e.Err
}
// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
//
// typeMap - OID to Go type mapping.
// fieldDescriptions - OID and format of values
// values - the raw data as returned from the PostgreSQL server
// dest - the destination that values will be decoded into
func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, values [][]byte, dest ...any) error {
if len(fieldDescriptions) != len(values) {
return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
}
if len(fieldDescriptions) != len(dest) {
return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
}
for i, d := range dest {
if d == nil {
continue
}
err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
if err != nil {
return ScanArgError{ColumnIndex: i, Err: err}
}
}
return nil
}
// RowsFromResultReader returns a Rows that will read the values from resultReader and decode them with typeMap. It can be used
// to read from the lower level pgconn interface.
func RowsFromResultReader(typeMap *pgtype.Map, resultReader *pgconn.ResultReader) Rows {
return &baseRows{
typeMap: typeMap,
resultReader: resultReader,
}
}
// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
// fails to scan or fn returns an error the query will be aborted and the error will be returned. Rows will be closed
// when ForEachRow returns.
func ForEachRow(rows Rows, scans []any, fn func() error) (pgconn.CommandTag, error) {
defer rows.Close()
for rows.Next() {
err := rows.Scan(scans...)
if err != nil {
return pgconn.CommandTag{}, err
}
err = fn()
if err != nil {
return pgconn.CommandTag{}, err
}
}
if err := rows.Err(); err != nil {
return pgconn.CommandTag{}, err
}
return rows.CommandTag(), nil
}
// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
type CollectableRow interface {
FieldDescriptions() []pgconn.FieldDescription
Scan(dest ...any) error
Values() ([]any, error)
RawValues() [][]byte
}
// RowToFunc is a function that scans or otherwise converts row to a T.
type RowToFunc[T any] func(row CollectableRow) (T, error)
// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
defer rows.Close()
slice := []T{}
for rows.Next() {
value, err := fn(rows)
if err != nil {
return nil, err
}
slice = append(slice, value)
}
if err := rows.Err(); err != nil {
return nil, err
}
return slice, nil
}
// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found, it returns an error for which errors.Is(err, ErrNoRows) is true.
// CollectOneRow is to CollectRows as QueryRow is to Query.
func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
var value T
var err error
if !rows.Next() {
if err = rows.Err(); err != nil {
return value, err
}
return value, ErrNoRows
}
value, err = fn(rows)
if err != nil {
return value, err
}
rows.Close()
return value, rows.Err()
}
// RowTo returns a T scanned from row.
func RowTo[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&value)
return value, err
}
// RowToAddrOf returns the address of a T scanned from row.
func RowToAddrOf[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&value)
return &value, err
}
// RowToMap returns a map scanned from row.
func RowToMap(row CollectableRow) (map[string]any, error) {
var value map[string]any
err := row.Scan((*mapRowScanner)(&value))
return value, err
}
type mapRowScanner map[string]any
func (rs *mapRowScanner) ScanRow(rows Rows) error {
values, err := rows.Values()
if err != nil {
return err
}
*rs = make(mapRowScanner, len(values))
for i := range values {
(*rs)[string(rows.FieldDescriptions()[i].Name)] = values[i]
}
return nil
}
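// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` exists. RowToMap suits ad hoc queries where no struct is
// defined; each row becomes a column-name-to-value map.
func exampleRowToMapUsage(ctx context.Context, conn *Conn) ([]map[string]any, error) {
	rows, err := conn.Query(ctx, "select 1 as id, 'a' as name")
	if err != nil {
		return nil, err
	}
	return CollectRows(rows, RowToMap)
}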
// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number of public fields as row
// has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will be
// ignored.
func RowToStructByPos[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number of
// public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then
// the field will be ignored.
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
return &value, err
}
type positionalStructRowScanner struct {
ptrToStruct any
}
func (rs *positionalStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets := rs.appendScanTargets(dstElemValue, nil)
if len(rows.RawValues()) > len(scanTargets) {
return fmt.Errorf("got %d values, but dst struct has only %d fields", len(rows.RawValues()), len(scanTargets))
}
return rows.Scan(scanTargets...)
}
func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any) []any {
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, 0, dstElemType.NumField())
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
} else if sf.PkgPath == "" {
dbTag, _ := sf.Tag.Lookup(structTagKey)
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
}
}
return scanTargets
}
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByName[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return value, err
}
// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
// of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
return &value, err
}
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have at least as many named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return value, err
}
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have at least as
// many named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return &value, err
}
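// Hypothetical usage sketch (not part of the original source): it assumes a
// *Conn `conn` and a table `users(user_id int8)` exist. The Lax variants
// tolerate struct fields with no matching column (LastLogin here), where the
// strict ByName variants would return an error.
func exampleRowToStructByNameLaxUsage(ctx context.Context, conn *Conn) error {
	type user struct {
		UserID    int64 `db:"user_id"`
		LastLogin time.Time
	}
	rows, err := conn.Query(ctx, "select user_id from users")
	if err != nil {
		return err
	}
	users, err := CollectRows(rows, RowToStructByNameLax[user])
	if err != nil {
		return err
	}
	fmt.Println(len(users))
	return nil
}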
type namedStructRowScanner struct {
ptrToStruct any
lax bool
}
func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions())
if err != nil {
return err
}
for i, t := range scanTargets {
if t == nil {
return fmt.Errorf("struct doesn't have corresponding row field %s", rows.FieldDescriptions()[i].Name)
}
}
return rows.Scan(scanTargets...)
}
const structTagKey = "db"
func | (fldDescs []pgconn.FieldDescription, field string) (i int) {
i = -1
for i, desc := range fldDescs {
if strings.EqualFold(desc.Name, field) {
return i
}
}
return
}
func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs []pgconn.FieldDescription) ([]any, error) {
var err error
dstElemType := dstElemValue.Type()
if scanTargets == nil {
scanTargets = make([]any, len(fldDescs))
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
if sf.PkgPath != "" && !sf.Anonymous {
// Field is unexported, skip it.
continue
}
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
if err != nil {
return nil, err
}
} else {
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
if dbTagPresent {
dbTag = strings.Split(dbTag, ",")[0]
}
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
colName := dbTag
if !dbTagPresent {
colName = sf.Name
}
fpos := fieldPosByName(fldDescs, colName)
if fpos == -1 {
if rs.lax {
continue
}
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
if fpos >= len(scanTargets) && !rs.lax {
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface()
}
}
return scanTargets, err
}
| fieldPosByName | identifier_name |
build.py | #!/usr/bin/python
# Script to build OpenVPN with support for OQS cipher suites
# Written/tested for Python 2.7
# Script assumes
# - it is being run from the openvpn/build directory
# - any necessary authentication tokens are already available to Git (if not using the public GitHub URLs)
# - Linux: all dependencies are installed
# - sudo apt-get install autoconf curl nsis libtool libssl-dev \
# liblz4-dev liblzo2-dev libpam0g-dev gcc-mingw-w64 man2html dos2unix unzip
# - Windows: Microsoft Visual Studio 2017 is installed in the default location on C:
# recent Perl is installed and in the system PATH
# - http://strawberryperl.com/releases.html (MSI and standalone ZIP versions available)
# Copyright (C) 2018 Microsoft Corporation
import os
import shutil
import subprocess
import re
import fileinput
import stat
import sys
import platform
OPENVPN_REPO = 'https://github.com/Microsoft/openvpn'
OPENVPN_BRANCH = 'pqcrypto'
OPENVPN_BUILD_REPO = 'https://github.com/Microsoft/openvpn-build'
OPENVPN_BUILD_BRANCH = 'pqcrypto'
OPENVPN_GUI_REPO = 'https://github.com/Microsoft/openvpn-gui'
OPENVPN_GUI_BRANCH = 'pqcrypto'
OPENSSL_OQS_REPO = 'https://github.com/open-quantum-safe/openssl'
OPENSSL_OQS_BRANCH = 'OpenSSL_1_0_2-stable'
OPENSSL_OQS_COMMIT = '01f211920aea41640c647f462e9d7c4c106e3240'
OPENVPN_TGZ_NAME = '/tmp/openvpn-2.4.4.tar.gz'
OPENVPN_GUI_TGZ_NAME = '/tmp/openvpn-gui-11.tar.gz'
OPENVPN_REPO_DIRNAME = 'openvpn-2.4.4'
OPENVPN_INSTALL_EXE_NAME = 'openvpn-install-2.4.4-I601.exe'
OPENVPN_GUI_REPO_DIRNAME = 'openvpn-gui'
OPENVPN_LINUX_PREFIX = '/usr/local/openvpn'
VCVARSALL = '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat"'
# Run an external command, block until it completes
def run_command(cmd):
print '***** Running command: %s' % ' '.join(map(str,cmd))
p = subprocess.Popen(cmd)
p.wait()
# Clone a git repo, using the default name, in the CWD
# If branch is specified, clone that branch
def git_clone(repo_url, branch, local_name, commit=None):
r = re.compile(".*/(.*)$")
m = r.match(repo_url)
repo_name = m.group(1)
print "Cloning %s ..." % repo_name
cmd = ['git', 'clone', '-q']
if branch:
|
cmd.append(repo_url)
if local_name:
cmd.append(local_name)
run_command(cmd)
if commit is not None:
if local_name:
os.chdir(local_name)
else:
print "git_clone with a commit ID only valid with a local_name"
sys.exit(1)
cmd = ['git', 'checkout', commit]
run_command(cmd)
os.chdir('..')
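# Hypothetical usage sketch (not part of the original script): shows how
# git_clone pins a repository to a known commit when a local directory name is
# given. The URL, branch, directory name, and commit hash below are
# placeholders, not real values used by this build.
def example_git_clone_usage():
    git_clone('https://github.com/example/repo', 'mybranch', 'repo-local',
              commit='0123456789abcdef')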
# Build oqs_openssl
def build_oqs_openssl():
if platform.system() == 'Windows':
# Create source trees for x86 and x64
# Note that there's no way to clean up one tree and re-use it for a different arch
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs-win-x86', OPENSSL_OQS_COMMIT)
shutil.copytree('openssl-oqs-win-x86', 'openssl-oqs-win-x64')
os.chdir('openssl-oqs-win-x86')
# Start the X86 build
run_command(['perl', 'Configure', 'VC-WIN32', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_ms.bat'])
# vcvarsall may change the current working directory. Remember where we were and cd back to it.
mycwd = os.getcwd()
os.system(VCVARSALL + ' x86 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
# TODO: is there a way to check that the other DLLs in
# oqs-openssl-win\x86 (e.g., vcruntime140.dll) have the right version to
# work with these openssl DLLs? somehow check that the dependencies of
# libeay32.dll and ssleay32.dll are present in the x86 folder.
# Start the x64 build
os.chdir('..')
os.chdir('openssl-oqs-win-x64')
run_command(['perl', 'Configure', 'VC-WIN64A', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_win64a.bat'])
# Before running nmake, we have to run vcvarsall.bat to set the x64 env vars, in the same shell
mycwd = os.getcwd()
os.system(VCVARSALL + ' amd64 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
if platform.system() == 'Linux':
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs', OPENSSL_OQS_COMMIT)
os.makedirs('oqs-openssl-output/openssl')
os.makedirs('oqs-openssl-output/ssl')
prefix = os.path.abspath('oqs-openssl-output/openssl')
openssldir = os.path.abspath('oqs-openssl-output/ssl')
os.chdir('openssl-oqs')
run_command(['./config', 'shared', '--prefix='+prefix, '--openssldir='+openssldir])
run_command(['make'])
run_command(['make', 'test'])
run_command(['make', 'install'])
os.chdir('..')
def on_error(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=on_error)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def build_openvpn_linux():
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, 'openvpn-pq')
if os.path.exists('stage'):
shutil.rmtree('stage')
os.makedirs('stage')
stagepath = os.path.abspath('stage')
os.chdir('openvpn-pq')
run_command(['autoreconf', '-i', '-f', '-v'])
if not os.path.exists("../oqs-openssl-output/"):
print "Didn't find oqs-openssl-output directory, exiting"
sys.exit(1)
lib_path = os.path.abspath('../oqs-openssl-output/openssl/lib')
inc_path = os.path.abspath('../oqs-openssl-output/openssl/include')
openssl_cflags = 'OPENSSL_CFLAGS="-I' + inc_path + '"'
openssl_libs = 'OPENSSL_LIBS="-L' + lib_path + ' -Wl,-rpath='+ OPENVPN_LINUX_PREFIX + '/lib ' + ' -lssl -lcrypto"'
# we need to use os.system here so that the env vars are set correctly
os.system('./configure --prefix=' + OPENVPN_LINUX_PREFIX + ' ' + openssl_cflags + ' ' + openssl_libs + ' && make && make DESTDIR=' + stagepath + ' install')
# We need to copy our versions of libcrypto and libssl into the staging area
shutil.copy('../oqs-openssl-output/openssl/lib/libcrypto.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
shutil.copy('../oqs-openssl-output/openssl/lib/libssl.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
os.chdir('..')
# Create a tarball for linux (needed to do Raspberry Pi builds)
os.makedirs('pq-openvpn-linux')
shutil.move('oqs-openssl-output', 'pq-openvpn-linux')
shutil.move('openvpn-pq', 'pq-openvpn-linux')
run_command(['tar', 'czf', 'pq-openvpn-linux.tgz', 'pq-openvpn-linux'])
shutil.move('pq-openvpn-linux.tgz', '../pq-openvpn-linux.tgz')
## Create a staged tarball for Linux
os.chdir('stage')
# Create placeholders for etc and log directories so they'll be created
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/etc')
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/log')
run_command(['touch', '.' + OPENVPN_LINUX_PREFIX + '/etc/.placeholder', '.' + OPENVPN_LINUX_PREFIX + '/log/.placeholder'])
# Copy initial setup script into sbin directory
shutil.copy('../../initialsetup.sh', '.' + OPENVPN_LINUX_PREFIX + '/sbin')
# Copy pointer to privacy statement into doc directory
shutil.copy('../../PRIVACY.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy Third Party notice into doc directory
shutil.copy('../../../../ThirdPartyNotice.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy service file for systemd into the appropriate place
os.makedirs('etc/systemd/system')
shutil.copy('../../pq-openvpn.service', 'etc/systemd/system')
# Create staged tarball
run_command(['tar', '-cz', '--group=root', '--owner=root', '-f', '../../pq-openvpn-linux-staged.tar.gz', '.'])
os.chdir('..')
def build_openvpn_windows():
# clone Walrus/openvpn
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, OPENVPN_REPO_DIRNAME)
os.chdir(OPENVPN_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
run_command(['./configure'])
os.chdir('..')
# the OpenVPN build scripts need a tarball of the same code
if os.path.exists(OPENVPN_TGZ_NAME):
os.remove(OPENVPN_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_TGZ_NAME, OPENVPN_REPO_DIRNAME])
# clone Walrus/openvpn-build
git_clone(OPENVPN_BUILD_REPO, OPENVPN_BUILD_BRANCH, "")
# clone Walrus/openvpn-gui
git_clone(OPENVPN_GUI_REPO, OPENVPN_GUI_BRANCH, OPENVPN_GUI_REPO_DIRNAME)
os.chdir(OPENVPN_GUI_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
os.chdir('..')
if os.path.exists(OPENVPN_GUI_TGZ_NAME):
os.remove(OPENVPN_GUI_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_GUI_TGZ_NAME, OPENVPN_GUI_REPO_DIRNAME])
# Start the build
os.chdir('openvpn-build')
run_command(['./windows-nsis/build-complete'])
shutil.move("windows-nsis/" + OPENVPN_INSTALL_EXE_NAME, "../../" + OPENVPN_INSTALL_EXE_NAME)
os.chdir('..')
######## main ##########
# (Re)create the scratch dir, switch to it
scratch_dir = "scratch"
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir, False, on_error)
os.makedirs(scratch_dir)
os.chdir(scratch_dir)
build_oqs_openssl()
# If this is Windows, we're done
if platform.system() == 'Windows':
print "Operating system detected as Windows, building OQS-OpenSSL only"
print "The binaries in Walrus/openvpn/build/oqs-openssl-win should now be updated"
sys.exit(0)
build_openvpn_linux()
build_openvpn_windows()
print "The staged tarball provides a readily deployable set of binaries on a Linux VM to quickly"
print "bring up a VPN server. It has been tested with the Ubuntu image currently provided by Azure."
print "This installation may be usable as a client with a client configuration file instead, but this"
print "is untested, and the automatic service startup is configured to look for server.ovpn as a config file."
print "To use the staged Linux tarball, do the following as root/using sudo in your VM:"
print "1. cd /"
print "2. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "3. Create /usr/local/openvpn/etc/server.ovpn and dependent cert/key files as"
print " needed."
print "4. /usr/local/openvpn/sbin/initialsetup.sh"
print ""
print "To upgrade an existing installation:"
print "1. systemctl stop pq-openvpn"
print "2. cd /"
print "3. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "4. systemctl start pq-openvpn"
| cmd.extend(['--branch', branch]) | conditional_block |
build.py | #!/usr/bin/python
# Script to build OpenVPN with support for OQS cipher suites
# Written/tested for Python 2.7
# Script assumes
# - it is being run from the openvpn/build directory
# - any necessary authentication tokens are already available to Git (if not using the public GitHub URLs)
# - Linux: all dependencies are installed
# - sudo apt-get install autoconf curl nsis libtool libssl-dev \
# liblz4-dev liblzo2-dev libpam0g-dev gcc-mingw-w64 man2html dos2unix unzip
# - Windows: Microsoft Visual Studio 2017 is installed in the default location on C:
# recent Perl is installed and in the system PATH
# - http://strawberryperl.com/releases.html (MSI and standalone ZIP versions available)
# Copyright (C) 2018 Microsoft Corporation
import os
import shutil
import subprocess
import re
import fileinput
import stat
import sys
import platform
OPENVPN_REPO = 'https://github.com/Microsoft/openvpn'
OPENVPN_BRANCH = 'pqcrypto'
OPENVPN_BUILD_REPO = 'https://github.com/Microsoft/openvpn-build'
OPENVPN_BUILD_BRANCH = 'pqcrypto'
OPENVPN_GUI_REPO = 'https://github.com/Microsoft/openvpn-gui'
OPENVPN_GUI_BRANCH = 'pqcrypto'
OPENSSL_OQS_REPO = 'https://github.com/open-quantum-safe/openssl'
OPENSSL_OQS_BRANCH = 'OpenSSL_1_0_2-stable'
OPENSSL_OQS_COMMIT = '01f211920aea41640c647f462e9d7c4c106e3240'
OPENVPN_TGZ_NAME = '/tmp/openvpn-2.4.4.tar.gz'
OPENVPN_GUI_TGZ_NAME = '/tmp/openvpn-gui-11.tar.gz'
OPENVPN_REPO_DIRNAME = 'openvpn-2.4.4'
OPENVPN_INSTALL_EXE_NAME = 'openvpn-install-2.4.4-I601.exe'
OPENVPN_GUI_REPO_DIRNAME = 'openvpn-gui'
OPENVPN_LINUX_PREFIX = '/usr/local/openvpn'
VCVARSALL = '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat"'
# Run an external command, block until it completes
def run_command(cmd):
print '***** Running command: %s' % ' '.join(map(str,cmd))
p = subprocess.Popen(cmd)
p.wait()
# Clone a git repo, using the default name, in the CWD
# If branch is specified, clone that branch
def git_clone(repo_url, branch, local_name, commit=None):
r = re.compile(".*/(.*)$")
m = r.match(repo_url)
repo_name = m.group(1)
print "Cloning %s ..." % repo_name
cmd = ['git', 'clone', '-q']
if branch:
cmd.extend(['--branch', branch])
cmd.append(repo_url)
if local_name:
cmd.append(local_name)
run_command(cmd)
if commit is not None:
if local_name:
os.chdir(local_name)
else:
print "git_clone with a commit ID only valid with a local_name"
sys.exit(1)
cmd = ['git', 'checkout', commit]
run_command(cmd)
os.chdir('..')
# Build oqs_openssl
def build_oqs_openssl():
if platform.system() == 'Windows':
# Create source trees for x86 and x64
# Note that there's no way to clean up one tree and re-use it for a different arch
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs-win-x86', OPENSSL_OQS_COMMIT)
shutil.copytree('openssl-oqs-win-x86', 'openssl-oqs-win-x64')
os.chdir('openssl-oqs-win-x86')
# Start the X86 build
run_command(['perl', 'Configure', 'VC-WIN32', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_ms.bat'])
# vcvarsall may change the current working directory. Remember where we were and cd back to it.
mycwd = os.getcwd()
os.system(VCVARSALL + ' x86 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
# TODO: is there a way to check that the other DLLs in
# oqs-openssl-win\x86 (e.g., vcruntime140.dll) have the right version to
# work with these openssl DLLs? somehow check that the dependencies of
# libeay32.dll and ssleay32.dll are present in the x86 folder.
# Start the x64 build
os.chdir('..')
os.chdir('openssl-oqs-win-x64')
run_command(['perl', 'Configure', 'VC-WIN64A', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_win64a.bat'])
# Before running nmake, we have to run vcvarsall.bat to set the x64 env vars, in the same shell
mycwd = os.getcwd()
os.system(VCVARSALL + ' amd64 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
if platform.system() == 'Linux':
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs', OPENSSL_OQS_COMMIT)
os.makedirs('oqs-openssl-output/openssl')
os.makedirs('oqs-openssl-output/ssl')
prefix = os.path.abspath('oqs-openssl-output/openssl')
openssldir = os.path.abspath('oqs-openssl-output/ssl')
os.chdir('openssl-oqs')
run_command(['./config', 'shared', '--prefix='+prefix, '--openssldir='+openssldir])
run_command(['make'])
run_command(['make', 'test'])
run_command(['make', 'install'])
os.chdir('..')
def on_error(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries. |
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def build_openvpn_linux():
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, 'openvpn-pq')
if os.path.exists('stage'):
shutil.rmtree('stage')
os.makedirs('stage')
stagepath = os.path.abspath('stage')
os.chdir('openvpn-pq')
run_command(['autoreconf', '-i', '-f', '-v'])
if not os.path.exists("../oqs-openssl-output/"):
print "Didn't find oqs-openssl-output directory, exiting"
sys.exit(1)
lib_path = os.path.abspath('../oqs-openssl-output/openssl/lib')
inc_path = os.path.abspath('../oqs-openssl-output/openssl/include')
openssl_cflags = 'OPENSSL_CFLAGS="-I' + inc_path + '"'
openssl_libs = 'OPENSSL_LIBS="-L' + lib_path + ' -Wl,-rpath='+ OPENVPN_LINUX_PREFIX + '/lib ' + ' -lssl -lcrypto"'
# we need to use os.system here so that the env vars are set correctly
os.system('./configure --prefix=' + OPENVPN_LINUX_PREFIX + ' ' + openssl_cflags + ' ' + openssl_libs + ' && make && make DESTDIR=' + stagepath + ' install')
# We need to copy our versions of libcrypto and libssl into the staging area
shutil.copy('../oqs-openssl-output/openssl/lib/libcrypto.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
shutil.copy('../oqs-openssl-output/openssl/lib/libssl.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
os.chdir('..')
# Create a tarball for linux (needed to do Raspberry Pi builds)
os.makedirs('pq-openvpn-linux')
shutil.move('oqs-openssl-output', 'pq-openvpn-linux')
shutil.move('openvpn-pq', 'pq-openvpn-linux')
run_command(['tar', 'czf', 'pq-openvpn-linux.tgz', 'pq-openvpn-linux'])
shutil.move('pq-openvpn-linux.tgz', '../pq-openvpn-linux.tgz')
## Create a staged tarball for Linux
os.chdir('stage')
# Create placeholders for etc and log directories so they'll be created
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/etc')
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/log')
run_command(['touch', '.' + OPENVPN_LINUX_PREFIX + '/etc/.placeholder', '.' + OPENVPN_LINUX_PREFIX + '/log/.placeholder'])
# Copy initial setup script into sbin directory
shutil.copy('../../initialsetup.sh', '.' + OPENVPN_LINUX_PREFIX + '/sbin')
# Copy pointer to privacy statement into doc directory
shutil.copy('../../PRIVACY.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy Third Party notice into doc directory
shutil.copy('../../../../ThirdPartyNotice.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy service file for systemd into the appropriate place
os.makedirs('etc/systemd/system')
shutil.copy('../../pq-openvpn.service', 'etc/systemd/system')
# Create staged tarball
run_command(['tar', '-cz', '--group=root', '--owner=root', '-f', '../../pq-openvpn-linux-staged.tar.gz', '.'])
os.chdir('..')
def build_openvpn_windows():
# clone Walrus/openvpn
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, OPENVPN_REPO_DIRNAME)
os.chdir(OPENVPN_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
run_command(['./configure'])
os.chdir('..')
# the OpenVPN build scripts need a tarball of the same code
if os.path.exists(OPENVPN_TGZ_NAME):
os.remove(OPENVPN_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_TGZ_NAME, OPENVPN_REPO_DIRNAME])
# clone Walrus/openvpn-build
git_clone(OPENVPN_BUILD_REPO, OPENVPN_BUILD_BRANCH, "")
# clone Walrus/openvpn-gui
git_clone(OPENVPN_GUI_REPO, OPENVPN_GUI_BRANCH, OPENVPN_GUI_REPO_DIRNAME)
os.chdir(OPENVPN_GUI_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
os.chdir('..')
if os.path.exists(OPENVPN_GUI_TGZ_NAME):
os.remove(OPENVPN_GUI_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_GUI_TGZ_NAME, OPENVPN_GUI_REPO_DIRNAME])
# Start the build
os.chdir('openvpn-build')
run_command(['./windows-nsis/build-complete'])
shutil.move("windows-nsis/" + OPENVPN_INSTALL_EXE_NAME, "../../" + OPENVPN_INSTALL_EXE_NAME)
os.chdir('..')
######## main ##########
# (Re)create the scratch dir, switch to it
scratch_dir = "scratch"
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir, False, on_error)
os.makedirs(scratch_dir)
os.chdir(scratch_dir)
build_oqs_openssl()
# If this is Windows, we're done
if platform.system() == 'Windows':
print "Operating system detected as Windows, building OQS-OpenSSL only"
print "The binaries in Walrus/openvpn/build/oqs-openssl-win should now be updated"
sys.exit(0)
build_openvpn_linux()
build_openvpn_windows()
print "The staged tarball provides a readily deployable set of binaries on a Linux VM to quickly"
print "bring up a VPN server. It has been tested with the Ubuntu image currently provided by Azure."
print "This installation may be usable as a client with a client configuration file instead, but this"
print "is untested, and the automatic service startup is configured to look for server.ovpn as a config file."
print "To use the staged Linux tarball, do the following as root/using sudo in your VM:"
print "1. cd /"
print "2. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "3. Create /usr/local/openvpn/etc/server.ovpn and dependent cert/key files as"
print " needed."
print "4. /usr/local/openvpn/sbin/initialsetup.sh"
print ""
print "To upgrade an existing installation:"
print "1. systemctl stop pq-openvpn"
print "2. cd /"
print "3. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "4. systemctl start pq-openvpn" | random_line_split |
|
build.py | #!/usr/bin/python
# Script to build OpenVPN with support for OQS cipher suites
# Written/tested for Python 2.7
# Script assumes
# - it is being run from the openvpn/build directory
# - any necessary authentication tokens are already available to Git (if not using the public GitHub URLs)
# - Linux: all dependencies are installed
# - sudo apt-get install autoconf curl nsis libtool libssl-dev \
# liblz4-dev liblzo2-dev libpam0g-dev gcc-mingw-w64 man2html dos2unix unzip
# - Windows: Microsoft Visual Studio 2017 is installed in the default location on C:
# recent Perl is installed and in the system PATH
# - http://strawberryperl.com/releases.html (MSI and standalone ZIP versions available)
# Copyright (C) 2018 Microsoft Corporation
import os
import shutil
import subprocess
import re
import fileinput
import stat
import sys
import platform
OPENVPN_REPO = 'https://github.com/Microsoft/openvpn'
OPENVPN_BRANCH = 'pqcrypto'
OPENVPN_BUILD_REPO = 'https://github.com/Microsoft/openvpn-build'
OPENVPN_BUILD_BRANCH = 'pqcrypto'
OPENVPN_GUI_REPO = 'https://github.com/Microsoft/openvpn-gui'
OPENVPN_GUI_BRANCH = 'pqcrypto'
OPENSSL_OQS_REPO = 'https://github.com/open-quantum-safe/openssl'
OPENSSL_OQS_BRANCH = 'OpenSSL_1_0_2-stable'
OPENSSL_OQS_COMMIT = '01f211920aea41640c647f462e9d7c4c106e3240'
OPENVPN_TGZ_NAME = '/tmp/openvpn-2.4.4.tar.gz'
OPENVPN_GUI_TGZ_NAME = '/tmp/openvpn-gui-11.tar.gz'
OPENVPN_REPO_DIRNAME = 'openvpn-2.4.4'
OPENVPN_INSTALL_EXE_NAME = 'openvpn-install-2.4.4-I601.exe'
OPENVPN_GUI_REPO_DIRNAME = 'openvpn-gui'
OPENVPN_LINUX_PREFIX = '/usr/local/openvpn'
VCVARSALL = '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat"'
# Run an external command, block until it completes
def | (cmd):
print '***** Running command: %s' % ' '.join(map(str,cmd))
p = subprocess.Popen(cmd)
p.wait()
# Clone a git repo, using the default name, in the CWD
# If branch is specified, clone that branch
def git_clone(repo_url, branch, local_name, commit=None):
r = re.compile(".*/(.*)$")
m = r.match(repo_url)
repo_name = m.group(1)
print "Cloning %s ..." % repo_name
cmd = ['git', 'clone', '-q']
if branch:
cmd.extend(['--branch', branch])
cmd.append(repo_url)
if local_name:
cmd.append(local_name)
run_command(cmd)
if commit is not None:
if local_name:
os.chdir(local_name)
else:
print "git_clone with a commit ID only valid with a local_name"
sys.exit(1)
cmd = ['git', 'checkout', commit]
run_command(cmd)
os.chdir('..')
# Build oqs_openssl
def build_oqs_openssl():
if platform.system() == 'Windows':
# Create source trees for x86 and x64
# Note that there's no way to clean up one tree and re-use it for a different arch
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs-win-x86', OPENSSL_OQS_COMMIT)
shutil.copytree('openssl-oqs-win-x86', 'openssl-oqs-win-x64')
os.chdir('openssl-oqs-win-x86')
# Start the X86 build
run_command(['perl', 'Configure', 'VC-WIN32', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_ms.bat'])
# vcvarsall may change the current working directory. Remember where we were and cd back to it.
mycwd = os.getcwd()
os.system(VCVARSALL + ' x86 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
# TODO: is there a way to check that the other DLLs in
# oqs-openssl-win\x86 (e.g., vcruntime140.dll) have the right version to
# work with these openssl DLLs? somehow check that the dependencies of
# libeay32.dll and ssleay32.dll are present in the x86 folder.
# Start the x64 build
os.chdir('..')
os.chdir('openssl-oqs-win-x64')
run_command(['perl', 'Configure', 'VC-WIN64A', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_win64a.bat'])
# Before running nmake, we have to run vcvarsall.bat to set the x64 env vars, in the same shell
mycwd = os.getcwd()
os.system(VCVARSALL + ' amd64 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
if platform.system() == 'Linux':
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs', OPENSSL_OQS_COMMIT)
os.makedirs('oqs-openssl-output/openssl')
os.makedirs('oqs-openssl-output/ssl')
prefix = os.path.abspath('oqs-openssl-output/openssl')
openssldir = os.path.abspath('oqs-openssl-output/ssl')
os.chdir('openssl-oqs')
run_command(['./config', 'shared', '--prefix='+prefix, '--openssldir='+openssldir])
run_command(['make'])
run_command(['make', 'test'])
run_command(['make', 'install'])
os.chdir('..')
def on_error(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=on_error)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def build_openvpn_linux():
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, 'openvpn-pq')
if os.path.exists('stage'):
shutil.rmtree('stage')
os.makedirs('stage')
stagepath = os.path.abspath('stage')
os.chdir('openvpn-pq')
run_command(['autoreconf', '-i', '-f', '-v'])
if not os.path.exists("../oqs-openssl-output/"):
print "Didn't find oqs-openssl-output directory, exiting"
sys.exit(1)
lib_path = os.path.abspath('../oqs-openssl-output/openssl/lib')
inc_path = os.path.abspath('../oqs-openssl-output/openssl/include')
openssl_cflags = 'OPENSSL_CFLAGS="-I' + inc_path + '"'
openssl_libs = 'OPENSSL_LIBS="-L' + lib_path + ' -Wl,-rpath='+ OPENVPN_LINUX_PREFIX + '/lib ' + ' -lssl -lcrypto"'
# we need to use os.system here so that the env vars are set correctly
os.system('./configure --prefix=' + OPENVPN_LINUX_PREFIX + ' ' + openssl_cflags + ' ' + openssl_libs + ' && make && make DESTDIR=' + stagepath + ' install')
# We need to copy our versions of libcrypto and libssl into the staging area
shutil.copy('../oqs-openssl-output/openssl/lib/libcrypto.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
shutil.copy('../oqs-openssl-output/openssl/lib/libssl.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
os.chdir('..')
# Create a tarball for linux (needed to do Raspberry Pi builds)
os.makedirs('pq-openvpn-linux')
shutil.move('oqs-openssl-output', 'pq-openvpn-linux')
shutil.move('openvpn-pq', 'pq-openvpn-linux')
run_command(['tar', 'czf', 'pq-openvpn-linux.tgz', 'pq-openvpn-linux'])
shutil.move('pq-openvpn-linux.tgz', '../pq-openvpn-linux.tgz')
## Create a staged tarball for Linux
os.chdir('stage')
# Create placeholders for etc and log directories so they'll be created
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/etc')
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/log')
run_command(['touch', '.' + OPENVPN_LINUX_PREFIX + '/etc/.placeholder', '.' + OPENVPN_LINUX_PREFIX + '/log/.placeholder'])
# Copy initial setup script into sbin directory
shutil.copy('../../initialsetup.sh', '.' + OPENVPN_LINUX_PREFIX + '/sbin')
# Copy pointer to privacy statement into doc directory
shutil.copy('../../PRIVACY.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy Third Party notice into doc directory
shutil.copy('../../../../ThirdPartyNotice.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy service file for systemd into the appropriate place
os.makedirs('etc/systemd/system')
shutil.copy('../../pq-openvpn.service', 'etc/systemd/system')
# Create staged tarball
run_command(['tar', '-cz', '--group=root', '--owner=root', '-f', '../../pq-openvpn-linux-staged.tar.gz', '.'])
os.chdir('..')
def build_openvpn_windows():
# clone Walrus/openvpn
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, OPENVPN_REPO_DIRNAME)
os.chdir(OPENVPN_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
run_command(['./configure'])
os.chdir('..')
# the OpenVPN build scripts need a tarball of the same code
if os.path.exists(OPENVPN_TGZ_NAME):
os.remove(OPENVPN_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_TGZ_NAME, OPENVPN_REPO_DIRNAME])
# clone Walrus/openvpn-build
git_clone(OPENVPN_BUILD_REPO, OPENVPN_BUILD_BRANCH, "")
# clone Walrus/openvpn-gui
git_clone(OPENVPN_GUI_REPO, OPENVPN_GUI_BRANCH, OPENVPN_GUI_REPO_DIRNAME)
os.chdir(OPENVPN_GUI_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
os.chdir('..')
if os.path.exists(OPENVPN_GUI_TGZ_NAME):
os.remove(OPENVPN_GUI_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_GUI_TGZ_NAME, OPENVPN_GUI_REPO_DIRNAME])
# Start the build
os.chdir('openvpn-build')
run_command(['./windows-nsis/build-complete'])
shutil.move("windows-nsis/" + OPENVPN_INSTALL_EXE_NAME, "../../" + OPENVPN_INSTALL_EXE_NAME)
os.chdir('..')
######## main ##########
# (Re)create the scratch dir, switch to it
scratch_dir = "scratch"
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir, False, on_error)
os.makedirs(scratch_dir)
os.chdir(scratch_dir)
build_oqs_openssl()
# If this is Windows, we're done
if platform.system() == 'Windows':
print "Operating system detected as Windows, building OQS-OpenSSL only"
print "The binaries in Walrus/openvpn/build/oqs-openssl-win should now be updated"
sys.exit(0)
build_openvpn_linux()
build_openvpn_windows()
print "The staged tarball provides a readily deployable set of binaries on a Linux VM to quickly"
print "bring up a VPN server. It has been tested with the Ubuntu image currently provided by Azure."
print "This installation may be usable as a client with a client configuration file instead, but this"
print "is untested, and the automatic service startup is configured to look for server.ovpn as a config file."
print "To use the staged Linux tarball, do the following as root/using sudo in your VM:"
print "1. cd /"
print "2. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "3. Create /usr/local/openvpn/etc/server.ovpn and dependent cert/key files as"
print " needed."
print "4. /usr/local/openvpn/sbin/initialsetup.sh"
print ""
print "To upgrade an existing installation:"
print "1. systemctl stop pq-openvpn"
print "2. cd /"
print "3. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "4. systemctl start pq-openvpn"
| run_command | identifier_name |
build.py | #!/usr/bin/python
# Script to build OpenVPN with support for OQS cipher suites
# Written/tested for Python 2.7
# Script assumes
# - it is being run from the openvpn/build directory
# - any necessary authentication tokens are already available to Git (if not using the public GitHub URLs)
# - Linux: all dependencies are installed
# - sudo apt-get install autoconf curl nsis libtool libssl-dev \
# liblz4-dev liblzo2-dev libpam0g-dev gcc-mingw-w64 man2html dos2unix unzip
# - Windows: Microsoft Visual Studio 2017 is installed in the default location on C:
# recent Perl is installed and in the system PATH
# - http://strawberryperl.com/releases.html (MSI and standalone ZIP versions available)
# Copyright (C) 2018 Microsoft Corporation
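# Illustrative invocation (not part of the original script; based on the assumptions listed
# above, with Python 2.7 on PATH):
#   cd openvpn/build && python build.py
# On Linux the resulting pq-openvpn-linux.tgz and pq-openvpn-linux-staged.tar.gz end up in the
# build directory; on Windows only the OQS-OpenSSL DLLs under oqs-openssl-win are rebuilt.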
import os
import shutil
import subprocess
import re
import fileinput
import stat
import sys
import platform
OPENVPN_REPO = 'https://github.com/Microsoft/openvpn'
OPENVPN_BRANCH = 'pqcrypto'
OPENVPN_BUILD_REPO = 'https://github.com/Microsoft/openvpn-build'
OPENVPN_BUILD_BRANCH = 'pqcrypto'
OPENVPN_GUI_REPO = 'https://github.com/Microsoft/openvpn-gui'
OPENVPN_GUI_BRANCH = 'pqcrypto'
OPENSSL_OQS_REPO = 'https://github.com/open-quantum-safe/openssl'
OPENSSL_OQS_BRANCH = 'OpenSSL_1_0_2-stable'
OPENSSL_OQS_COMMIT = '01f211920aea41640c647f462e9d7c4c106e3240'
OPENVPN_TGZ_NAME = '/tmp/openvpn-2.4.4.tar.gz'
OPENVPN_GUI_TGZ_NAME = '/tmp/openvpn-gui-11.tar.gz'
OPENVPN_REPO_DIRNAME = 'openvpn-2.4.4'
OPENVPN_INSTALL_EXE_NAME = 'openvpn-install-2.4.4-I601.exe'
OPENVPN_GUI_REPO_DIRNAME = 'openvpn-gui'
OPENVPN_LINUX_PREFIX = '/usr/local/openvpn'
VCVARSALL = '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat"'
# Run an external command, block until it completes
def run_command(cmd):
print '***** Running command: %s' % ' '.join(map(str,cmd))
p = subprocess.Popen(cmd)
p.wait()
# Clone a git repo, using the default name, in the CWD
# If branch is specified, clone that branch
def git_clone(repo_url, branch, local_name, commit=None):
r = re.compile(".*/(.*)$")
m = r.match(repo_url)
repo_name = m.group(1)
print "Cloning %s ..." % repo_name
cmd = ['git', 'clone', '-q']
if branch:
cmd.extend(['--branch', branch])
cmd.append(repo_url)
if local_name:
cmd.append(local_name)
run_command(cmd)
if commit is not None:
if local_name:
os.chdir(local_name)
else:
print "git_clone with a commit ID only valid with a local_name"
sys.exit(1)
cmd = ['git', 'checkout', commit]
run_command(cmd)
os.chdir('..')
# Build oqs_openssl
def build_oqs_openssl():
|
def on_error(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
	Usage : ``shutil.rmtree(path, onerror=on_error)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def build_openvpn_linux():
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, 'openvpn-pq')
if os.path.exists('stage'):
shutil.rmtree('stage')
os.makedirs('stage')
stagepath = os.path.abspath('stage')
os.chdir('openvpn-pq')
run_command(['autoreconf', '-i', '-f', '-v'])
if not os.path.exists("../oqs-openssl-output/"):
print "Didn't find oqs-openssl-output directory, exiting"
sys.exit(1)
lib_path = os.path.abspath('../oqs-openssl-output/openssl/lib')
inc_path = os.path.abspath('../oqs-openssl-output/openssl/include')
openssl_cflags = 'OPENSSL_CFLAGS="-I' + inc_path + '"'
openssl_libs = 'OPENSSL_LIBS="-L' + lib_path + ' -Wl,-rpath='+ OPENVPN_LINUX_PREFIX + '/lib ' + ' -lssl -lcrypto"'
# we need to use os.system here so that the env vars are set correctly
os.system('./configure --prefix=' + OPENVPN_LINUX_PREFIX + ' ' + openssl_cflags + ' ' + openssl_libs + ' && make && make DESTDIR=' + stagepath + ' install')
# We need to copy our versions of libcrypto and libssl into the staging area
shutil.copy('../oqs-openssl-output/openssl/lib/libcrypto.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
shutil.copy('../oqs-openssl-output/openssl/lib/libssl.so.1.0.0', stagepath + '/' + OPENVPN_LINUX_PREFIX + '/lib')
os.chdir('..')
# Create a tarball for linux (needed to do Raspberry Pi builds)
os.makedirs('pq-openvpn-linux')
shutil.move('oqs-openssl-output', 'pq-openvpn-linux')
shutil.move('openvpn-pq', 'pq-openvpn-linux')
run_command(['tar', 'czf', 'pq-openvpn-linux.tgz', 'pq-openvpn-linux'])
shutil.move('pq-openvpn-linux.tgz', '../pq-openvpn-linux.tgz')
## Create a staged tarball for Linux
os.chdir('stage')
# Create placeholders for etc and log directories so they'll be created
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/etc')
os.makedirs('.' + OPENVPN_LINUX_PREFIX + '/log')
run_command(['touch', '.' + OPENVPN_LINUX_PREFIX + '/etc/.placeholder', '.' + OPENVPN_LINUX_PREFIX + '/log/.placeholder'])
# Copy initial setup script into sbin directory
shutil.copy('../../initialsetup.sh', '.' + OPENVPN_LINUX_PREFIX + '/sbin')
# Copy pointer to privacy statement into doc directory
shutil.copy('../../PRIVACY.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy Third Party notice into doc directory
shutil.copy('../../../../ThirdPartyNotice.txt', '.' + OPENVPN_LINUX_PREFIX + '/share/doc/openvpn')
# Copy service file for systemd into the appropriate place
os.makedirs('etc/systemd/system')
shutil.copy('../../pq-openvpn.service', 'etc/systemd/system')
# Create staged tarball
run_command(['tar', '-cz', '--group=root', '--owner=root', '-f', '../../pq-openvpn-linux-staged.tar.gz', '.'])
os.chdir('..')
def build_openvpn_windows():
# clone Walrus/openvpn
git_clone(OPENVPN_REPO, OPENVPN_BRANCH, OPENVPN_REPO_DIRNAME)
os.chdir(OPENVPN_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
run_command(['./configure'])
os.chdir('..')
# the OpenVPN build scripts need a tarball of the same code
if os.path.exists(OPENVPN_TGZ_NAME):
os.remove(OPENVPN_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_TGZ_NAME, OPENVPN_REPO_DIRNAME])
# clone Walrus/openvpn-build
git_clone(OPENVPN_BUILD_REPO, OPENVPN_BUILD_BRANCH, "")
# clone Walrus/openvpn-gui
git_clone(OPENVPN_GUI_REPO, OPENVPN_GUI_BRANCH, OPENVPN_GUI_REPO_DIRNAME)
os.chdir(OPENVPN_GUI_REPO_DIRNAME)
run_command(['autoreconf', '-i', '-v', '-f'])
os.chdir('..')
if os.path.exists(OPENVPN_GUI_TGZ_NAME):
os.remove(OPENVPN_GUI_TGZ_NAME)
run_command(['tar', 'czvvf', OPENVPN_GUI_TGZ_NAME, OPENVPN_GUI_REPO_DIRNAME])
# Start the build
os.chdir('openvpn-build')
run_command(['./windows-nsis/build-complete'])
shutil.move("windows-nsis/" + OPENVPN_INSTALL_EXE_NAME, "../../" + OPENVPN_INSTALL_EXE_NAME)
os.chdir('..')
######## main ##########
# (Re)create the scratch dir, switch to it
scratch_dir = "scratch"
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir, False, on_error)
os.makedirs(scratch_dir)
os.chdir(scratch_dir)
build_oqs_openssl()
# If this is Windows, we're done
if platform.system() == 'Windows':
print "Operating system detected as Windows, building OQS-OpenSSL only"
print "The binaries in Walrus/openvpn/build/oqs-openssl-win should now be updated"
sys.exit(0)
build_openvpn_linux()
build_openvpn_windows()
print "The staged tarball provides a readily deployable set of binaries on a Linux VM to quickly"
print "bring up a VPN server. It has been tested with the Ubuntu image currently provided by Azure."
print "This installation may be usable as a client with a client configuration file instead, but this"
print "is untested, and the automatic service startup is configured to look for server.ovpn as a config file."
print "To use the staged Linux tarball, do the following as root/using sudo in your VM:"
print "1. cd /"
print "2. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "3. Create /usr/local/openvpn/etc/server.ovpn and dependent cert/key files as"
print " needed."
print "4. /usr/local/openvpn/sbin/initialsetup.sh"
print ""
print "To upgrade an existing installation:"
print "1. systemctl stop pq-openvpn"
print "2. cd /"
print "3. tar xvzf <path>/pq-openvpn-linux-staged.tar.gz"
print "4. systemctl start pq-openvpn"
| if platform.system() == 'Windows':
# Create source trees for x86 and x64
# Note that there's no way to clean up one tree and re-use it for a different arch
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs-win-x86', OPENSSL_OQS_COMMIT)
shutil.copytree('openssl-oqs-win-x86', 'openssl-oqs-win-x64')
os.chdir('openssl-oqs-win-x86')
# Start the X86 build
run_command(['perl', 'Configure', 'VC-WIN32', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_ms.bat'])
# vcvarsall may change the current working directory. Remember where we were and cd back to it.
mycwd = os.getcwd()
os.system(VCVARSALL + ' x86 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x86\\')
# TODO: is there a way to check that the other DLLs in
# oqs-openssl-win\x86 (e.g., vcruntime140.dll) have the right version to
# work with these openssl DLLs? somehow check that the dependencies of
# libeay32.dll and ssleay32.dll are present in the x86 folder.
# Start the x64 build
os.chdir('..')
os.chdir('openssl-oqs-win-x64')
run_command(['perl', 'Configure', 'VC-WIN64A', 'no-asm', 'enable-static-engine'])
run_command(['ms\\do_win64a.bat'])
mycwd = os.getcwd()
# Before running nmake, we have to run vcvarsall.bat to set the x64 env vars, in the same shell
mycwd = os.getcwd()
os.system(VCVARSALL + ' amd64 && cd /d ' + mycwd + ' && nmake -f ms\\ntdll.mak')
# Copy the binaries to ../oqs-openssl-win
shutil.copy('out32dll\\libeay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
shutil.copy('out32dll\\ssleay32.dll', '..\\..\\oqs-openssl-win\\x64\\')
if platform.system() == 'Linux':
git_clone(OPENSSL_OQS_REPO, OPENSSL_OQS_BRANCH, 'openssl-oqs', OPENSSL_OQS_COMMIT)
os.makedirs('oqs-openssl-output/openssl')
os.makedirs('oqs-openssl-output/ssl')
prefix = os.path.abspath('oqs-openssl-output/openssl')
openssldir = os.path.abspath('oqs-openssl-output/ssl')
os.chdir('openssl-oqs')
run_command(['./config', 'shared', '--prefix='+prefix, '--openssldir='+openssldir])
run_command(['make'])
run_command(['make', 'test'])
run_command(['make', 'install'])
os.chdir('..') | identifier_body |
utils.py | import copy
import os
from datetime import datetime
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
DATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')
TRAINING_FILE_NAME = os.path.join(
ROOT_DIR, 'data/trainingIndices.csv')
VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/validationIndices.csv')
VALIDATION_MASK_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')
AUX = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')
META_VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')
SAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \
'data/sampleSubmission.csv')
ENSEMBLE_INPUT_DIR = 'data/stacking/good_data'
ITEM_COUNT = 1000
USER_COUNT = 10000
WEIGHT_KNN = 0.001
N_NEIGHBORS = 3
USER_COUNT_WEIGHT = 10
SAVE_META_PREDICTIONS = False
def load_ratings(data_file=DATA_FILE):
ratings = []
with open(data_file, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
key, value_string = line.split(",")
rating = float(value_string)
row_string, col_string = key.split("_")
row = int(row_string[1:])
col = int(col_string[1:])
ratings.append((row - 1, col - 1, rating))
return ratings
def ratings_to_matrix(ratings):
matrix_rows = USER_COUNT
matrix_cols = ITEM_COUNT
matrix = np.zeros([matrix_rows, matrix_cols])
for row, col, rating in ratings:
matrix[row, col] = rating
return matrix
def | (data, use_three_way):
masked_data = np.copy(data)
if use_three_way:
mask_file = VALIDATION_MASK_FILE_NAME
else:
mask_file = VALIDATION_FILE_NAME
mask_indices = get_indices_from_file(mask_file)
for row_index, col_index in mask_indices:
masked_data[row_index][col_index] = 0
return masked_data
def get_validation_indices(use_three_way):
if use_three_way:
validation_indices = get_indices_from_file(AUX)
else:
validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)
return validation_indices
def get_meta_validation_indices():
return get_indices_from_file(META_VALIDATION_FILE_NAME)
def get_observed_indices(data):
row_indices, col_indices = np.where(data != 0)
return list(zip(row_indices, col_indices))
def get_unobserved_indices(data):
row_indices, col_indices = np.where(data == 0)
return list(zip(row_indices, col_indices))
def get_indices_from_file(file_name):
indices = []
with open(file_name, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
i, j = line.split(",")
indices.append((int(i), int(j)))
return indices
def get_indices_to_predict():
"""Get list of indices to predict from sample submission file.
Returns:
indices_to_predict: list of tuples with indices"""
indices_to_predict = []
with open(SAMPLE_SUBMISSION, 'r') as file:
_ = file.readline()
for line in file:
key, _ = line.split(",")
row_string, col_string = key.split("_")
i = int(row_string[1:]) - 1
j = int(col_string[1:]) - 1
indices_to_predict.append((i, j))
return indices_to_predict
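# Worked example for the parsing above (illustrative line, not taken from the real file): a
# sample-submission row "r44_c1,3.0" gives key "r44_c1", so row_string = "r44" and
# col_string = "c1", and the stored 0-based index pair is (43, 0).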
def write_ratings(predictions, submission_file):
with open(submission_file, 'w') as file:
file.write('Id,Prediction\n')
for i, j, prediction in predictions:
file.write('r%d_c%d,%f\n' % (i, j, prediction))
def reconstruction_to_predictions(
reconstruction, submission_file, indices_to_predict=None):
if indices_to_predict is None:
indices_to_predict = get_indices_to_predict()
enumerate_predictions = lambda t: (
t[0] + 1, t[1] + 1, reconstruction[t[0], t[1]])
predictions = list(map(enumerate_predictions, indices_to_predict))
write_ratings(predictions, submission_file)
def save_ensembling_predictions(reconstruction, name):
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_training_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_validation_indices(use_three_way=True))
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_validation_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_meta_validation_indices())
def clip(data):
data[data > 5] = 5
data[data < 1] = 1
return data
def ampute_reconstruction(reconstruction, data):
observed_indices = get_observed_indices(data)
for row_index, col_index in observed_indices:
reconstruction[row_index][col_index] = data[row_index][col_index]
def impute_by_avg(data, by_row):
data = data.T if by_row else data
for row in data:
empty = (row == 0)
row_sum = np.sum(row)
row[empty] = row_sum / np.count_nonzero(row)
return data.T if by_row else data
def impute_by_bias(data):
total_average = np.mean(data[np.nonzero(data)])
row_biases = np.zeros(data.shape[0])
col_biases = np.zeros(data.shape[1])
for row_index in range(data.shape[0]):
row_biases[row_index] = np.sum(data[row_index]) / \
np.count_nonzero(data[row_index]) - total_average
for col_index in range(data.shape[1]):
		col_biases[col_index] = np.sum(data[:, col_index]) / \
			np.count_nonzero(data[:, col_index]) - total_average
for row_index in range(data.shape[0]):
for col_index in range(data.shape[1]):
if data[row_index, col_index] == 0:
new_value = total_average + \
row_biases[row_index] + col_biases[col_index]
data[row_index, col_index] = new_value
return data
def impute_by_variance(data):
global_average = np.sum(data) / np.count_nonzero(data)
global_variance = np.var(data[data != 0])
adjusted_movie_means = np.zeros((data.shape[1],))
for i in range(data.shape[1]):
movie_ratings = data[:, i]
movie_ratings = movie_ratings[movie_ratings != 0]
movie_variance = np.var(movie_ratings)
relative_variance = movie_variance / global_variance
adjusted_movie_means[i] = (
global_average * relative_variance + np.sum(movie_ratings)) / (
relative_variance + np.count_nonzero(movie_ratings))
adjusted_user_deviation = np.zeros((data.shape[0],))
for i in range(data.shape[0]):
user_ratings = data[i]
user_deviations = adjusted_movie_means - user_ratings
user_deviations = user_deviations[user_ratings != 0]
user_deviation_variance = np.var(user_deviations)
relative_variance = user_deviation_variance / global_variance
adjusted_user_deviation[i] = (
global_average * relative_variance + sum(user_deviations)) / (
relative_variance + np.count_nonzero(user_deviations))
user_counts = np.count_nonzero(data, axis=1)
movie_counts = np.count_nonzero(data, axis=0)
movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))
user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T
combined_matrix = copy.copy(
movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)
d_matrix = np.divide(movie_count_matrix, combined_matrix)
m_matrix = np.tile(
adjusted_movie_means, (len(adjusted_user_deviation), 1))
u_matrix = np.tile(
adjusted_user_deviation, (len(adjusted_movie_means), 1)).T
data = np.multiply(m_matrix, d_matrix) + \
np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)
return data
def compute_rmse(data, prediction, indices=None):
if indices is None:
indices = get_indices_from_file(VALIDATION_FILE_NAME)
squared_error = 0
for i, j in indices:
squared_error += (data[i][j] - prediction[i][j]) ** 2
return np.sqrt(squared_error / len(indices))
def knn_smoothing(reconstruction, user_embeddings):
normalized_user_embeddings = normalize(user_embeddings)
knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)
knn.fit(normalized_user_embeddings)
distances, neighbors = knn.kneighbors(normalized_user_embeddings)
distances = distances[:, 1:]
neighbors = neighbors[:, 1:]
ones = np.ones(distances.shape)
similarities = ones - distances
weights = np.square(np.square(similarities))
smoothed_data = np.zeros(reconstruction.shape)
aggregated_neighbor_ratings = np.zeros(reconstruction.shape)
for i in range(reconstruction.shape[0]):
stacked_ratings = []
for neighbor in neighbors[i]:
stacked_ratings.append(reconstruction[neighbor])
stacked_ratings = np.asarray(stacked_ratings)
aggregated_neighbor_ratings[i] =\
np.matmul(weights[i], stacked_ratings) / sum(weights[i])
for i in range(reconstruction.shape[0]):
smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\
aggregated_neighbor_ratings[i]
smoothed_data = clip(smoothed_data)
return smoothed_data
def load_predictions_from_files(file_prefix='submission_'):
path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)
files = [os.path.join(path, i) for i in os.listdir(path) if \
os.path.isfile(os.path.join(path, i)) and file_prefix in i]
all_ratings = []
for file in files:
print("loading {}".format(file))
ratings = load_ratings(file)
ratings = ratings_to_matrix(ratings)
all_ratings.append(ratings)
return all_ratings
def compute_mean_predictions(all_ratings):
reconstruction = np.mean(np.array(all_ratings), axis=0)
reconstruction = impute_by_avg(reconstruction, by_row=False)
return reconstruction
| mask_validation | identifier_name |
utils.py | import copy
import os
from datetime import datetime
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
DATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')
TRAINING_FILE_NAME = os.path.join(
ROOT_DIR, 'data/trainingIndices.csv')
VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/validationIndices.csv')
VALIDATION_MASK_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')
AUX = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')
META_VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')
SAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \
'data/sampleSubmission.csv')
ENSEMBLE_INPUT_DIR = 'data/stacking/good_data'
ITEM_COUNT = 1000
USER_COUNT = 10000
WEIGHT_KNN = 0.001
N_NEIGHBORS = 3
USER_COUNT_WEIGHT = 10
SAVE_META_PREDICTIONS = False
def load_ratings(data_file=DATA_FILE):
ratings = []
with open(data_file, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
key, value_string = line.split(",")
rating = float(value_string)
row_string, col_string = key.split("_")
row = int(row_string[1:])
col = int(col_string[1:])
ratings.append((row - 1, col - 1, rating))
return ratings
def ratings_to_matrix(ratings):
matrix_rows = USER_COUNT
matrix_cols = ITEM_COUNT
matrix = np.zeros([matrix_rows, matrix_cols])
for row, col, rating in ratings:
matrix[row, col] = rating
return matrix
def mask_validation(data, use_three_way):
masked_data = np.copy(data)
if use_three_way:
mask_file = VALIDATION_MASK_FILE_NAME
else:
mask_file = VALIDATION_FILE_NAME
mask_indices = get_indices_from_file(mask_file)
for row_index, col_index in mask_indices:
masked_data[row_index][col_index] = 0
return masked_data
def get_validation_indices(use_three_way):
if use_three_way:
validation_indices = get_indices_from_file(AUX)
else:
validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)
return validation_indices
def get_meta_validation_indices():
return get_indices_from_file(META_VALIDATION_FILE_NAME)
def get_observed_indices(data):
row_indices, col_indices = np.where(data != 0)
return list(zip(row_indices, col_indices))
def get_unobserved_indices(data):
row_indices, col_indices = np.where(data == 0)
return list(zip(row_indices, col_indices))
def get_indices_from_file(file_name):
indices = []
with open(file_name, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
i, j = line.split(",")
indices.append((int(i), int(j)))
return indices
def get_indices_to_predict():
"""Get list of indices to predict from sample submission file.
Returns:
indices_to_predict: list of tuples with indices"""
indices_to_predict = []
with open(SAMPLE_SUBMISSION, 'r') as file:
_ = file.readline()
for line in file:
key, _ = line.split(",")
row_string, col_string = key.split("_")
i = int(row_string[1:]) - 1
j = int(col_string[1:]) - 1
indices_to_predict.append((i, j))
return indices_to_predict
def write_ratings(predictions, submission_file):
with open(submission_file, 'w') as file:
file.write('Id,Prediction\n')
for i, j, prediction in predictions:
file.write('r%d_c%d,%f\n' % (i, j, prediction))
def reconstruction_to_predictions(
reconstruction, submission_file, indices_to_predict=None):
if indices_to_predict is None:
indices_to_predict = get_indices_to_predict()
enumerate_predictions = lambda t: (
t[0] + 1, t[1] + 1, reconstruction[t[0], t[1]])
predictions = list(map(enumerate_predictions, indices_to_predict))
write_ratings(predictions, submission_file)
def save_ensembling_predictions(reconstruction, name):
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_training_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_validation_indices(use_three_way=True))
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_validation_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_meta_validation_indices())
def clip(data):
data[data > 5] = 5
data[data < 1] = 1
return data
def ampute_reconstruction(reconstruction, data):
observed_indices = get_observed_indices(data)
for row_index, col_index in observed_indices:
reconstruction[row_index][col_index] = data[row_index][col_index]
def impute_by_avg(data, by_row):
data = data.T if by_row else data
for row in data:
empty = (row == 0)
row_sum = np.sum(row)
row[empty] = row_sum / np.count_nonzero(row)
return data.T if by_row else data
def impute_by_bias(data):
total_average = np.mean(data[np.nonzero(data)])
row_biases = np.zeros(data.shape[0])
col_biases = np.zeros(data.shape[1])
for row_index in range(data.shape[0]):
row_biases[row_index] = np.sum(data[row_index]) / \
np.count_nonzero(data[row_index]) - total_average
for col_index in range(data.shape[1]):
		col_biases[col_index] = np.sum(data[:, col_index]) / \
			np.count_nonzero(data[:, col_index]) - total_average
for row_index in range(data.shape[0]):
for col_index in range(data.shape[1]):
if data[row_index, col_index] == 0:
new_value = total_average + \
row_biases[row_index] + col_biases[col_index]
data[row_index, col_index] = new_value
return data
def impute_by_variance(data):
global_average = np.sum(data) / np.count_nonzero(data)
global_variance = np.var(data[data != 0])
adjusted_movie_means = np.zeros((data.shape[1],))
for i in range(data.shape[1]):
movie_ratings = data[:, i]
movie_ratings = movie_ratings[movie_ratings != 0]
movie_variance = np.var(movie_ratings)
relative_variance = movie_variance / global_variance
adjusted_movie_means[i] = (
global_average * relative_variance + np.sum(movie_ratings)) / (
relative_variance + np.count_nonzero(movie_ratings))
adjusted_user_deviation = np.zeros((data.shape[0],))
for i in range(data.shape[0]):
user_ratings = data[i]
user_deviations = adjusted_movie_means - user_ratings
user_deviations = user_deviations[user_ratings != 0]
user_deviation_variance = np.var(user_deviations)
relative_variance = user_deviation_variance / global_variance
adjusted_user_deviation[i] = (
global_average * relative_variance + sum(user_deviations)) / (
relative_variance + np.count_nonzero(user_deviations))
user_counts = np.count_nonzero(data, axis=1)
movie_counts = np.count_nonzero(data, axis=0)
movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))
user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T
combined_matrix = copy.copy(
movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)
d_matrix = np.divide(movie_count_matrix, combined_matrix)
m_matrix = np.tile(
adjusted_movie_means, (len(adjusted_user_deviation), 1))
u_matrix = np.tile(
adjusted_user_deviation, (len(adjusted_movie_means), 1)).T
data = np.multiply(m_matrix, d_matrix) + \
np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)
return data
def compute_rmse(data, prediction, indices=None):
if indices is None:
indices = get_indices_from_file(VALIDATION_FILE_NAME)
squared_error = 0
for i, j in indices:
squared_error += (data[i][j] - prediction[i][j]) ** 2
return np.sqrt(squared_error / len(indices))
def knn_smoothing(reconstruction, user_embeddings):
normalized_user_embeddings = normalize(user_embeddings)
knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)
knn.fit(normalized_user_embeddings)
distances, neighbors = knn.kneighbors(normalized_user_embeddings)
distances = distances[:, 1:]
neighbors = neighbors[:, 1:]
ones = np.ones(distances.shape) | for i in range(reconstruction.shape[0]):
stacked_ratings = []
for neighbor in neighbors[i]:
stacked_ratings.append(reconstruction[neighbor])
stacked_ratings = np.asarray(stacked_ratings)
aggregated_neighbor_ratings[i] =\
np.matmul(weights[i], stacked_ratings) / sum(weights[i])
for i in range(reconstruction.shape[0]):
smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\
aggregated_neighbor_ratings[i]
smoothed_data = clip(smoothed_data)
return smoothed_data
def load_predictions_from_files(file_prefix='submission_'):
path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)
files = [os.path.join(path, i) for i in os.listdir(path) if \
os.path.isfile(os.path.join(path, i)) and file_prefix in i]
all_ratings = []
for file in files:
print("loading {}".format(file))
ratings = load_ratings(file)
ratings = ratings_to_matrix(ratings)
all_ratings.append(ratings)
return all_ratings
def compute_mean_predictions(all_ratings):
reconstruction = np.mean(np.array(all_ratings), axis=0)
reconstruction = impute_by_avg(reconstruction, by_row=False)
return reconstruction | similarities = ones - distances
weights = np.square(np.square(similarities))
smoothed_data = np.zeros(reconstruction.shape)
aggregated_neighbor_ratings = np.zeros(reconstruction.shape)
| random_line_split |
utils.py | import copy
import os
from datetime import datetime
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
DATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')
TRAINING_FILE_NAME = os.path.join(
ROOT_DIR, 'data/trainingIndices.csv')
VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/validationIndices.csv')
VALIDATION_MASK_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')
AUX = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')
META_VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')
SAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \
'data/sampleSubmission.csv')
ENSEMBLE_INPUT_DIR = 'data/stacking/good_data'
ITEM_COUNT = 1000
USER_COUNT = 10000
WEIGHT_KNN = 0.001
N_NEIGHBORS = 3
USER_COUNT_WEIGHT = 10
SAVE_META_PREDICTIONS = False
def load_ratings(data_file=DATA_FILE):
ratings = []
with open(data_file, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
key, value_string = line.split(",")
rating = float(value_string)
row_string, col_string = key.split("_")
row = int(row_string[1:])
col = int(col_string[1:])
ratings.append((row - 1, col - 1, rating))
return ratings
def ratings_to_matrix(ratings):
matrix_rows = USER_COUNT
matrix_cols = ITEM_COUNT
matrix = np.zeros([matrix_rows, matrix_cols])
for row, col, rating in ratings:
matrix[row, col] = rating
return matrix
def mask_validation(data, use_three_way):
masked_data = np.copy(data)
if use_three_way:
mask_file = VALIDATION_MASK_FILE_NAME
else:
mask_file = VALIDATION_FILE_NAME
mask_indices = get_indices_from_file(mask_file)
for row_index, col_index in mask_indices:
masked_data[row_index][col_index] = 0
return masked_data
def get_validation_indices(use_three_way):
if use_three_way:
validation_indices = get_indices_from_file(AUX)
else:
validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)
return validation_indices
def get_meta_validation_indices():
return get_indices_from_file(META_VALIDATION_FILE_NAME)
def get_observed_indices(data):
row_indices, col_indices = np.where(data != 0)
return list(zip(row_indices, col_indices))
def get_unobserved_indices(data):
row_indices, col_indices = np.where(data == 0)
return list(zip(row_indices, col_indices))
def get_indices_from_file(file_name):
indices = []
with open(file_name, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
i, j = line.split(",")
indices.append((int(i), int(j)))
return indices
def get_indices_to_predict():
"""Get list of indices to predict from sample submission file.
Returns:
indices_to_predict: list of tuples with indices"""
indices_to_predict = []
with open(SAMPLE_SUBMISSION, 'r') as file:
_ = file.readline()
for line in file:
|
return indices_to_predict
def write_ratings(predictions, submission_file):
with open(submission_file, 'w') as file:
file.write('Id,Prediction\n')
for i, j, prediction in predictions:
file.write('r%d_c%d,%f\n' % (i, j, prediction))
def reconstruction_to_predictions(
reconstruction, submission_file, indices_to_predict=None):
if indices_to_predict is None:
indices_to_predict = get_indices_to_predict()
enumerate_predictions = lambda t: (
t[0] + 1, t[1] + 1, reconstruction[t[0], t[1]])
predictions = list(map(enumerate_predictions, indices_to_predict))
write_ratings(predictions, submission_file)
def save_ensembling_predictions(reconstruction, name):
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_training_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_validation_indices(use_three_way=True))
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_validation_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_meta_validation_indices())
def clip(data):
data[data > 5] = 5
data[data < 1] = 1
return data
def ampute_reconstruction(reconstruction, data):
observed_indices = get_observed_indices(data)
for row_index, col_index in observed_indices:
reconstruction[row_index][col_index] = data[row_index][col_index]
def impute_by_avg(data, by_row):
data = data.T if by_row else data
for row in data:
empty = (row == 0)
row_sum = np.sum(row)
row[empty] = row_sum / np.count_nonzero(row)
return data.T if by_row else data
def impute_by_bias(data):
total_average = np.mean(data[np.nonzero(data)])
row_biases = np.zeros(data.shape[0])
col_biases = np.zeros(data.shape[1])
for row_index in range(data.shape[0]):
row_biases[row_index] = np.sum(data[row_index]) / \
np.count_nonzero(data[row_index]) - total_average
for col_index in range(data.shape[1]):
		col_biases[col_index] = np.sum(data[:, col_index]) / \
			np.count_nonzero(data[:, col_index]) - total_average
for row_index in range(data.shape[0]):
for col_index in range(data.shape[1]):
if data[row_index, col_index] == 0:
new_value = total_average + \
row_biases[row_index] + col_biases[col_index]
data[row_index, col_index] = new_value
return data
def impute_by_variance(data):
global_average = np.sum(data) / np.count_nonzero(data)
global_variance = np.var(data[data != 0])
adjusted_movie_means = np.zeros((data.shape[1],))
for i in range(data.shape[1]):
movie_ratings = data[:, i]
movie_ratings = movie_ratings[movie_ratings != 0]
movie_variance = np.var(movie_ratings)
relative_variance = movie_variance / global_variance
adjusted_movie_means[i] = (
global_average * relative_variance + np.sum(movie_ratings)) / (
relative_variance + np.count_nonzero(movie_ratings))
adjusted_user_deviation = np.zeros((data.shape[0],))
for i in range(data.shape[0]):
user_ratings = data[i]
user_deviations = adjusted_movie_means - user_ratings
user_deviations = user_deviations[user_ratings != 0]
user_deviation_variance = np.var(user_deviations)
relative_variance = user_deviation_variance / global_variance
adjusted_user_deviation[i] = (
global_average * relative_variance + sum(user_deviations)) / (
relative_variance + np.count_nonzero(user_deviations))
user_counts = np.count_nonzero(data, axis=1)
movie_counts = np.count_nonzero(data, axis=0)
movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))
user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T
combined_matrix = copy.copy(
movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)
d_matrix = np.divide(movie_count_matrix, combined_matrix)
m_matrix = np.tile(
adjusted_movie_means, (len(adjusted_user_deviation), 1))
u_matrix = np.tile(
adjusted_user_deviation, (len(adjusted_movie_means), 1)).T
data = np.multiply(m_matrix, d_matrix) + \
np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)
return data
def compute_rmse(data, prediction, indices=None):
if indices is None:
indices = get_indices_from_file(VALIDATION_FILE_NAME)
squared_error = 0
for i, j in indices:
squared_error += (data[i][j] - prediction[i][j]) ** 2
return np.sqrt(squared_error / len(indices))
def knn_smoothing(reconstruction, user_embeddings):
normalized_user_embeddings = normalize(user_embeddings)
knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)
knn.fit(normalized_user_embeddings)
distances, neighbors = knn.kneighbors(normalized_user_embeddings)
distances = distances[:, 1:]
neighbors = neighbors[:, 1:]
ones = np.ones(distances.shape)
similarities = ones - distances
weights = np.square(np.square(similarities))
smoothed_data = np.zeros(reconstruction.shape)
aggregated_neighbor_ratings = np.zeros(reconstruction.shape)
for i in range(reconstruction.shape[0]):
stacked_ratings = []
for neighbor in neighbors[i]:
stacked_ratings.append(reconstruction[neighbor])
stacked_ratings = np.asarray(stacked_ratings)
aggregated_neighbor_ratings[i] =\
np.matmul(weights[i], stacked_ratings) / sum(weights[i])
for i in range(reconstruction.shape[0]):
smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\
aggregated_neighbor_ratings[i]
smoothed_data = clip(smoothed_data)
return smoothed_data
def load_predictions_from_files(file_prefix='submission_'):
path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)
files = [os.path.join(path, i) for i in os.listdir(path) if \
os.path.isfile(os.path.join(path, i)) and file_prefix in i]
all_ratings = []
for file in files:
print("loading {}".format(file))
ratings = load_ratings(file)
ratings = ratings_to_matrix(ratings)
all_ratings.append(ratings)
return all_ratings
def compute_mean_predictions(all_ratings):
reconstruction = np.mean(np.array(all_ratings), axis=0)
reconstruction = impute_by_avg(reconstruction, by_row=False)
return reconstruction
| key, _ = line.split(",")
row_string, col_string = key.split("_")
i = int(row_string[1:]) - 1
j = int(col_string[1:]) - 1
indices_to_predict.append((i, j)) | conditional_block |
utils.py | import copy
import os
from datetime import datetime
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
DATA_FILE = os.path.join(ROOT_DIR, 'data/data_train.csv')
TRAINING_FILE_NAME = os.path.join(
ROOT_DIR, 'data/trainingIndices.csv')
VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/validationIndices.csv')
VALIDATION_MASK_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_mask.csv')
AUX = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_first.csv')
META_VALIDATION_FILE_NAME = os.path.join(
ROOT_DIR, 'data/train_valid_80_10_10/validationIndices_second.csv')
SAMPLE_SUBMISSION = os.path.join(ROOT_DIR, \
'data/sampleSubmission.csv')
ENSEMBLE_INPUT_DIR = 'data/stacking/good_data'
ITEM_COUNT = 1000
USER_COUNT = 10000
WEIGHT_KNN = 0.001
N_NEIGHBORS = 3
USER_COUNT_WEIGHT = 10
SAVE_META_PREDICTIONS = False
def load_ratings(data_file=DATA_FILE):
ratings = []
with open(data_file, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
key, value_string = line.split(",")
rating = float(value_string)
row_string, col_string = key.split("_")
row = int(row_string[1:])
col = int(col_string[1:])
ratings.append((row - 1, col - 1, rating))
return ratings
def ratings_to_matrix(ratings):
matrix_rows = USER_COUNT
matrix_cols = ITEM_COUNT
matrix = np.zeros([matrix_rows, matrix_cols])
for row, col, rating in ratings:
matrix[row, col] = rating
return matrix
def mask_validation(data, use_three_way):
masked_data = np.copy(data)
if use_three_way:
mask_file = VALIDATION_MASK_FILE_NAME
else:
mask_file = VALIDATION_FILE_NAME
mask_indices = get_indices_from_file(mask_file)
for row_index, col_index in mask_indices:
masked_data[row_index][col_index] = 0
return masked_data
def get_validation_indices(use_three_way):
if use_three_way:
validation_indices = get_indices_from_file(AUX)
else:
validation_indices = get_indices_from_file(VALIDATION_FILE_NAME)
return validation_indices
def get_meta_validation_indices():
return get_indices_from_file(META_VALIDATION_FILE_NAME)
def get_observed_indices(data):
row_indices, col_indices = np.where(data != 0)
return list(zip(row_indices, col_indices))
def get_unobserved_indices(data):
row_indices, col_indices = np.where(data == 0)
return list(zip(row_indices, col_indices))
def get_indices_from_file(file_name):
indices = []
with open(file_name, 'r') as file:
# Read header.
_ = file.readline()
for line in file:
i, j = line.split(",")
indices.append((int(i), int(j)))
return indices
def get_indices_to_predict():
"""Get list of indices to predict from sample submission file.
Returns:
indices_to_predict: list of tuples with indices"""
indices_to_predict = []
with open(SAMPLE_SUBMISSION, 'r') as file:
_ = file.readline()
for line in file:
key, _ = line.split(",")
row_string, col_string = key.split("_")
i = int(row_string[1:]) - 1
j = int(col_string[1:]) - 1
indices_to_predict.append((i, j))
return indices_to_predict
def write_ratings(predictions, submission_file):
with open(submission_file, 'w') as file:
file.write('Id,Prediction\n')
for i, j, prediction in predictions:
file.write('r%d_c%d,%f\n' % (i, j, prediction))
def reconstruction_to_predictions(
reconstruction, submission_file, indices_to_predict=None):
if indices_to_predict is None:
indices_to_predict = get_indices_to_predict()
enumerate_predictions = lambda t: (
t[0] + 1, t[1] + 1, reconstruction[t[0], t[1]])
predictions = list(map(enumerate_predictions, indices_to_predict))
write_ratings(predictions, submission_file)
def save_ensembling_predictions(reconstruction, name):
|
def clip(data):
data[data > 5] = 5
data[data < 1] = 1
return data
def ampute_reconstruction(reconstruction, data):
observed_indices = get_observed_indices(data)
for row_index, col_index in observed_indices:
reconstruction[row_index][col_index] = data[row_index][col_index]
def impute_by_avg(data, by_row):
data = data.T if by_row else data
for row in data:
empty = (row == 0)
row_sum = np.sum(row)
row[empty] = row_sum / np.count_nonzero(row)
return data.T if by_row else data
def impute_by_bias(data):
total_average = np.mean(data[np.nonzero(data)])
row_biases = np.zeros(data.shape[0])
col_biases = np.zeros(data.shape[1])
for row_index in range(data.shape[0]):
row_biases[row_index] = np.sum(data[row_index]) / \
np.count_nonzero(data[row_index]) - total_average
for col_index in range(data.shape[1]):
		col_biases[col_index] = np.sum(data[:, col_index]) / \
			np.count_nonzero(data[:, col_index]) - total_average
for row_index in range(data.shape[0]):
for col_index in range(data.shape[1]):
if data[row_index, col_index] == 0:
new_value = total_average + \
row_biases[row_index] + col_biases[col_index]
data[row_index, col_index] = new_value
return data
def impute_by_variance(data):
global_average = np.sum(data) / np.count_nonzero(data)
global_variance = np.var(data[data != 0])
adjusted_movie_means = np.zeros((data.shape[1],))
for i in range(data.shape[1]):
movie_ratings = data[:, i]
movie_ratings = movie_ratings[movie_ratings != 0]
movie_variance = np.var(movie_ratings)
relative_variance = movie_variance / global_variance
adjusted_movie_means[i] = (
global_average * relative_variance + np.sum(movie_ratings)) / (
relative_variance + np.count_nonzero(movie_ratings))
adjusted_user_deviation = np.zeros((data.shape[0],))
for i in range(data.shape[0]):
user_ratings = data[i]
user_deviations = adjusted_movie_means - user_ratings
user_deviations = user_deviations[user_ratings != 0]
user_deviation_variance = np.var(user_deviations)
relative_variance = user_deviation_variance / global_variance
adjusted_user_deviation[i] = (
global_average * relative_variance + sum(user_deviations)) / (
relative_variance + np.count_nonzero(user_deviations))
user_counts = np.count_nonzero(data, axis=1)
movie_counts = np.count_nonzero(data, axis=0)
movie_count_matrix = np.tile(movie_counts, (len(user_counts), 1))
user_count_matrix = np.tile(user_counts, (len(movie_counts), 1)).T
combined_matrix = copy.copy(
movie_count_matrix) + USER_COUNT_WEIGHT * copy.copy(user_count_matrix)
d_matrix = np.divide(movie_count_matrix, combined_matrix)
m_matrix = np.tile(
adjusted_movie_means, (len(adjusted_user_deviation), 1))
u_matrix = np.tile(
adjusted_user_deviation, (len(adjusted_movie_means), 1)).T
data = np.multiply(m_matrix, d_matrix) + \
np.multiply(u_matrix, np.ones(d_matrix.shape) - d_matrix)
return data
def compute_rmse(data, prediction, indices=None):
if indices is None:
indices = get_indices_from_file(VALIDATION_FILE_NAME)
squared_error = 0
for i, j in indices:
squared_error += (data[i][j] - prediction[i][j]) ** 2
return np.sqrt(squared_error / len(indices))
def knn_smoothing(reconstruction, user_embeddings):
normalized_user_embeddings = normalize(user_embeddings)
knn = NearestNeighbors(n_neighbors=N_NEIGHBORS + 1)
knn.fit(normalized_user_embeddings)
distances, neighbors = knn.kneighbors(normalized_user_embeddings)
distances = distances[:, 1:]
neighbors = neighbors[:, 1:]
ones = np.ones(distances.shape)
similarities = ones - distances
weights = np.square(np.square(similarities))
smoothed_data = np.zeros(reconstruction.shape)
aggregated_neighbor_ratings = np.zeros(reconstruction.shape)
for i in range(reconstruction.shape[0]):
stacked_ratings = []
for neighbor in neighbors[i]:
stacked_ratings.append(reconstruction[neighbor])
stacked_ratings = np.asarray(stacked_ratings)
aggregated_neighbor_ratings[i] =\
np.matmul(weights[i], stacked_ratings) / sum(weights[i])
for i in range(reconstruction.shape[0]):
smoothed_data[i] = (1 - WEIGHT_KNN) * reconstruction[i] + WEIGHT_KNN *\
aggregated_neighbor_ratings[i]
smoothed_data = clip(smoothed_data)
return smoothed_data
def load_predictions_from_files(file_prefix='submission_'):
path = os.path.join(ROOT_DIR, ENSEMBLE_INPUT_DIR)
files = [os.path.join(path, i) for i in os.listdir(path) if \
os.path.isfile(os.path.join(path, i)) and file_prefix in i]
all_ratings = []
for file in files:
print("loading {}".format(file))
ratings = load_ratings(file)
ratings = ratings_to_matrix(ratings)
all_ratings.append(ratings)
return all_ratings
def compute_mean_predictions(all_ratings):
reconstruction = np.mean(np.array(all_ratings), axis=0)
reconstruction = impute_by_avg(reconstruction, by_row=False)
return reconstruction
| reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_training_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_validation_indices(use_three_way=True))
reconstruction_to_predictions(
reconstruction, ROOT_DIR + 'data/meta_validation_' + name + '_stacking'
+ datetime.now().strftime('%Y-%b-%d-%H-%M-%S') + '.csv',
indices_to_predict=get_meta_validation_indices()) | identifier_body |
Q_Learner.py | #deep Q-learning
import time
import itertools
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from utils import Utility
from AgentBrain import Brain
from environment import Environment
from memory import ExperienceMemory
from StateProcessor import StateProcessor
from prioritizedExperienceMemory import PEM
from settings import AgentSetting, ArchitectureSetting, PerSettings
class DQN(object):
def __init__(self,env_name, doubleQ = False, dueling = False, perMemory = False, training = True, watch = False ):
pass
with tf.variable_scope('AgentEnvSteps'):
self.agentSteps = tf.get_variable(name='agentSteps',initializer= 0, trainable=False,dtype= tf.int32)
self.agentStepsUpdater = self.agentSteps.assign_add(1)
# keep in order
self.util = Utility(env_name, doubleQ, dueling, perMemory, training)
self.env = Environment(env_name, self.util.monitorDir)
self.state_process = StateProcessor()
self.num_action = self.env.VALID_ACTIONS
self.deepNet = Brain(self.num_action, dueling, training)
self.net_feed = self.deepNet.nn_input
self.onlineNet = self.deepNet.Q_nn(forSess=True)
#self.eee = self.add
self.actions = np.arange(self.num_action)
self.no_op_max = AgentSetting.no_op_max
self.startTime = 0.0
self.duration = 0.0
self.totalReward = 0.0
self.countR = 0
self.training = training
self.doubleQ = doubleQ
self.dueling = dueling
self.perMemory = perMemory
self.rendering = watch
pass
print ("POSSIBLE ACTIONS :", self.actions)
if training:
self.updates = 0
self.totalLoss = 0.0
self.countL = 0
self.minibatch = AgentSetting.minibatch
self.replay_memorySize = AgentSetting.replay_memory
self.t_net_update_freq = AgentSetting.t_net_update_freq
self.discount_factor = AgentSetting.discount_factor
self.update_freq = AgentSetting.update_freq
self.momentum = AgentSetting.momentum
self.e_greedy_init = AgentSetting.e_greedy_init
self.e_greedy_final = AgentSetting.e_greedy_final
self.e_final_at = AgentSetting.e_final_at
#self.e_decay_rate = (self.e_greedy_init - self.e_greedy_final) / self.e_final_at
self.epsilon = tf.Variable(0.0, trainable = False, dtype = tf.float32, name = "epsilon")
self.epsilonHolder = tf.placeholder(dtype = tf.float32)
self.epsilonUpdater = self.epsilon.assign(self.epsilonHolder)
self.replay_strt_size = AgentSetting.replay_strt_size
self.global_step = tf.Variable(0, trainable=False,name='global_step')
self.training_hrs = tf.Variable(0.0, trainable=False,name='training_hrs')
self.training_episodes = tf.Variable(0,trainable = False , name = "training_episodes")
self.training_hrsHolder = tf.placeholder(dtype = tf.float32)
self.training_hrsUpdater = self.training_hrs.assign_add((self.training_hrsHolder / 60.0) / 60.0)
self.training_episodesUpdater = self.training_episodes.assign_add(1)
self.targetNet = self.deepNet.T_nn(forSess=True)
if doubleQ:
'''DoubleQ aims to reduce overestimations of Q-values by decoupling action selection
from action evaluation in target calculation'''
# if double
# 1- action selection using Q-net(online net)
self.selectedActionIndices = tf.argmax(self.onlineNet, axis=1)
self.selectedAction = tf.one_hot(indices=self.selectedActionIndices, depth=self.num_action,
axis=-1, dtype=tf.float32, on_value=1.0, off_value=0.0)
# 2- action evaluation using T-net (target net)
self.nxtState_qValueSelected = tf.reduce_sum(tf.multiply(self.targetNet, self.selectedAction),
axis=1) # element wise
else:
# else
				# 1,2- make a one-step look-ahead and follow a greedy policy
self.nxtState_qValueSelected = tf.reduce_max(self.targetNet, axis=1)
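				# Worked comparison of the two targets built above (gamma = discount_factor),
				# added for clarity; both follow directly from the two code paths:
				#   vanilla DQN : y = r + gamma * max_a' Q_target(s', a')
				#   Double DQN  : y = r + gamma * Q_target(s', argmax_a' Q_online(s', a'))
				# Selecting the action with the online net but evaluating it with the target
				# net is what reduces the overestimation bias described in the docstring.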
#3- td-target
self.td_targetHolder = tf.placeholder(shape=[self.minibatch], name='td-target', dtype=tf.float32)
#4- current state chosen action value
self.actionBatchHolder = tf.placeholder(dtype=tf.uint8)
self.chosenAction = tf.one_hot(indices=self.actionBatchHolder, depth=self.num_action, axis=-1,
dtype=tf.float32, on_value=1.0,
off_value=0.0)
self.curState_qValueSelected = tf.reduce_sum(tf.multiply(self.onlineNet, self.chosenAction),
axis=1) # elementwise
pass
self.delta = tf.subtract(self.td_targetHolder, self.curState_qValueSelected)
#set learning rate
self._setLearningRate()
pass
#TODO Dueling (rescale and clipping of gradients)
pass
if perMemory:
self.replay_memory = PEM(ArchitectureSetting.in_shape, self.replay_memorySize)
self.weightedISHolder = tf.placeholder(shape=[self.minibatch], name='weighted-IS', dtype=tf.float32)
self.weightedDelta = tf.multiply(self.delta, self.weightedISHolder)
self.clipped_loss = tf.where(tf.abs(self.weightedDelta) < 1.0,
0.5 * tf.square(self.weightedDelta),
tf.abs(self.weightedDelta) - 0.5, name='clipped_loss')
else: #not dueling or per
self.replay_memory = ExperienceMemory(ArchitectureSetting.in_shape, self.replay_memorySize)
self.clipped_loss = tf.where(tf.abs(self.delta) < 1.0,
0.5 * tf.square(self.delta),
tf.abs(self.delta) - 0.5, name='clipped_loss')
pass
self.loss = tf.reduce_mean(self.clipped_loss, name='loss')
#$self.loss = tf.reduce_mean(tf.squared_difference(self.td_targetHolder, self.curState_qValueSelected))
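			# Note (added for clarity): the piecewise clipped_loss above is the Huber
			# (smooth-L1) loss with delta = 1, i.e. L(d) = 0.5*d^2 for |d| < 1 and
			# |d| - 0.5 otherwise, which keeps gradients bounded for large TD errors.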
pass
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum,
epsilon=1e-10)
self.train_step = self.optimizer.minimize(self.loss, global_step=self.global_step)
pass # https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
# self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum, epsilon=1e-10)
# self.train_step = self.optimizer.minimize(self.loss,global_step = self.global_step)
else:
self.epsilon = tf.constant(AgentSetting.epsilon_eval,dtype=tf.float32)
		# finalize
self.util.summANDsave(self.training)
'''sets the agent learning rate '''
def _setLearningRate(self):
if self.dueling: # regardless of anything else
self.learning_rate = AgentSetting.duel_learining_rate
elif self.perMemory and not self.dueling:
self.learning_rate = PerSettings.step_size
else:
self.learning_rate = AgentSetting.learning_rate
#fill memory
def | (self,sess,reloadM):
self.env.reset(sess)
if not reloadM:
print ('Initializing my experience memory...')
else:
print('Restoring my experience memory (naive solution!)...')
state = self.state_process.get_state(sess)
done = False
for v in tqdm(range(self.replay_strt_size)):
if not reloadM:
#select an action randomly
action = self.env.takeRandomAction()
else:
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if done:
self.env.reset(sess)
state = self.state_process.get_state(sess)
else:
state = nxt_state
pass
print ("Waiting for current episode to be terminated...")
while not done:
action = self.env.takeRandomAction()
reward , done = self.env.step(action,sess)
def _epsilonDecay(self,sess):
pass
eps = self.e_greedy_final + max(0,(self.e_greedy_init - self.e_greedy_final) * (self.e_final_at - self.agentSteps.eval()) / self.e_final_at)
sess.run(self.epsilonUpdater, feed_dict={self.epsilonHolder: eps})
#Return the chosen action!
def behaviour_e_policy(self,state,sess):
#decay eps and calc prob for actions
action_probs = (np.ones(self.num_action, dtype =float) * self.epsilon.eval() ) / self.num_action
q_val = sess.run(self.onlineNet, feed_dict = { self.net_feed : np.expand_dims(state,0)})
greedy_choice = np.argmax(q_val)
action_probs[greedy_choice] += 1.0 - self.epsilon.eval()
action = np.random.choice(self.actions, p=action_probs)
pass
#decay epsilon
#if self.training:
# self._epsilonDecay(sess)
return action
#Playing
def playing(self,sess):
self.totalReward = 0.0
self.countR = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
for t in itertools.count():
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
print("playing well as much as you trained me :)")
if done:
self.duration = round(time.time() - self.startTime, 3)
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if (self.rendering):
self.env.render()
def learning(self,sess):
#loop for one episode
#reset vars
self.totalLoss =0.0
self.countL = 0
self.totalReward = 0.0
self.countR = 0
self.updates = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
no_op = 0
for _ in itertools.count():
#take action
action = self.behaviour_e_policy(state,sess)
#step and observe
reward , done = self.env.step(action,sess)
#inc agent steps
sess.run(self.agentStepsUpdater)
#decay epsilon after every step
self._epsilonDecay(sess)
pass
if(action == 0):
no_op += 1
pass #can't force episode to end
#if(no_op == self.no_op_max): #end this boring episode
# done = True
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if( self.agentSteps.eval() % self.update_freq == 0):
#sample a minibatch
state_batch, action_batch, reward_batch, done_batch, nxt_state_batch = self.replay_memory.sample(self.minibatch)
nxtStateFeedDict = {self.net_feed : nxt_state_batch}
nxtQVal = sess.run(self.nxtState_qValueSelected, feed_dict = nxtStateFeedDict)
#compute td-target
td_target = reward_batch + np.invert(done_batch).astype(np.float32) * self.discount_factor * nxtQVal
curStateFeedDict = {self.net_feed: state_batch, self.actionBatchHolder : action_batch, self.td_targetHolder : td_target }
if self.perMemory:
# update priorities with new td_errors(deltas)
self.replay_memory.update(sess.run(self.delta, feed_dict =curStateFeedDict ))
#add to feedDict ISW
curStateFeedDict.update({self.weightedISHolder : self.replay_memory.getISW()})
# anneal beta
self.replay_memory.betaAnneal(sess)
pass
#run...run...run
loss, _ = sess.run([self.loss,self.train_step],feed_dict = curStateFeedDict)
#print ("loss %.5f at step %d" %(loss, self.global_step.eval()))
#stats
self.totalLoss += loss
self.countL +=1
self.updates +=1 #num of updates made per episode
pass #TRY self.global_step.eval()
if ( self.agentSteps.eval() % self.t_net_update_freq == 0 ):
sess.run(self.deepNet.updateTparas(True))
print("Target net parameters updated!")
pass
if done:
self.duration = round(time.time() - self.startTime, 3) #secs
sess.run([self.training_hrsUpdater, self.training_episodesUpdater], feed_dict = { self.training_hrsHolder : self.duration})
#update tf board every episode
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if(self.rendering):
self.env.render()
pass #TO DO -> sample of Q-action values summaries
def summaries(self,sess):
#print "in summaries!"
#basics
listy = {'totReward' : self.totalReward, 'avgReward' : (self.totalReward / self.countR) , 'epDur' : self.duration }
if self.training:
listy.update({"totLoss" : self.totalLoss , "avgLoss" : (self.totalLoss/self.countL), 'epUpdates' : self.updates })
self.util.summary_board(sess,self.agentSteps.eval(), listy, self.training)
| fill_memory | identifier_name |
Q_Learner.py | #deep Q-learning
import time
import itertools
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from utils import Utility
from AgentBrain import Brain
from environment import Environment
from memory import ExperienceMemory
from StateProcessor import StateProcessor
from prioritizedExperienceMemory import PEM
from settings import AgentSetting, ArchitectureSetting, PerSettings
class DQN(object):
def __init__(self,env_name, doubleQ = False, dueling = False, perMemory = False, training = True, watch = False ):
pass
with tf.variable_scope('AgentEnvSteps'):
self.agentSteps = tf.get_variable(name='agentSteps',initializer= 0, trainable=False,dtype= tf.int32)
self.agentStepsUpdater = self.agentSteps.assign_add(1)
# keep in order
self.util = Utility(env_name, doubleQ, dueling, perMemory, training)
self.env = Environment(env_name, self.util.monitorDir)
self.state_process = StateProcessor()
self.num_action = self.env.VALID_ACTIONS
self.deepNet = Brain(self.num_action, dueling, training)
self.net_feed = self.deepNet.nn_input
self.onlineNet = self.deepNet.Q_nn(forSess=True)
#self.eee = self.add
self.actions = np.arange(self.num_action)
self.no_op_max = AgentSetting.no_op_max
self.startTime = 0.0
self.duration = 0.0
self.totalReward = 0.0
self.countR = 0
self.training = training
self.doubleQ = doubleQ
self.dueling = dueling
self.perMemory = perMemory
self.rendering = watch
pass
print ("POSSIBLE ACTIONS :", self.actions)
if training:
self.updates = 0
self.totalLoss = 0.0
self.countL = 0
self.minibatch = AgentSetting.minibatch
self.replay_memorySize = AgentSetting.replay_memory
self.t_net_update_freq = AgentSetting.t_net_update_freq
self.discount_factor = AgentSetting.discount_factor
self.update_freq = AgentSetting.update_freq
self.momentum = AgentSetting.momentum
self.e_greedy_init = AgentSetting.e_greedy_init
self.e_greedy_final = AgentSetting.e_greedy_final
self.e_final_at = AgentSetting.e_final_at
#self.e_decay_rate = (self.e_greedy_init - self.e_greedy_final) / self.e_final_at
self.epsilon = tf.Variable(0.0, trainable = False, dtype = tf.float32, name = "epsilon")
self.epsilonHolder = tf.placeholder(dtype = tf.float32)
self.epsilonUpdater = self.epsilon.assign(self.epsilonHolder)
self.replay_strt_size = AgentSetting.replay_strt_size
self.global_step = tf.Variable(0, trainable=False,name='global_step')
self.training_hrs = tf.Variable(0.0, trainable=False,name='training_hrs')
self.training_episodes = tf.Variable(0,trainable = False , name = "training_episodes")
self.training_hrsHolder = tf.placeholder(dtype = tf.float32)
self.training_hrsUpdater = self.training_hrs.assign_add((self.training_hrsHolder / 60.0) / 60.0)
self.training_episodesUpdater = self.training_episodes.assign_add(1)
self.targetNet = self.deepNet.T_nn(forSess=True)
if doubleQ:
'''DoubleQ aims to reduce overestimations of Q-values by decoupling action selection
from action evaluation in target calculation'''
# if double
# 1- action selection using Q-net(online net)
self.selectedActionIndices = tf.argmax(self.onlineNet, axis=1)
self.selectedAction = tf.one_hot(indices=self.selectedActionIndices, depth=self.num_action,
axis=-1, dtype=tf.float32, on_value=1.0, off_value=0.0)
# 2- action evaluation using T-net (target net)
self.nxtState_qValueSelected = tf.reduce_sum(tf.multiply(self.targetNet, self.selectedAction),
axis=1) # element wise
else:
# else
# 1,2- take a one-step look-ahead and follow a greedy policy
self.nxtState_qValueSelected = tf.reduce_max(self.targetNet, axis=1)
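# The two branches above produce the bootstrap value max_a Q_target(s', a) in the
# vanilla case, or Q_target(s', argmax_a Q_online(s', a)) in the double-DQN case.
# A minimal NumPy sketch of the same selection logic (illustrative only, with
# made-up Q-value arrays rather than the network outputs):
#   q_online = np.array([[1.0, 3.0, 2.0]])      # online net, batch of 1
#   q_target = np.array([[0.5, 1.5, 4.0]])      # target net, same batch
#   vanilla  = q_target.max(axis=1)                              # -> [4.0]
#   double_q = q_target[np.arange(1), q_online.argmax(axis=1)]   # -> [1.5]
# Decoupling selection (online net) from evaluation (target net) is what damps
# the max-operator overestimation bias.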
#3- td-target
self.td_targetHolder = tf.placeholder(shape=[self.minibatch], name='td-target', dtype=tf.float32)
#4- current state chosen action value
self.actionBatchHolder = tf.placeholder(dtype=tf.uint8)
self.chosenAction = tf.one_hot(indices=self.actionBatchHolder, depth=self.num_action, axis=-1,
dtype=tf.float32, on_value=1.0,
off_value=0.0)
self.curState_qValueSelected = tf.reduce_sum(tf.multiply(self.onlineNet, self.chosenAction),
axis=1) # elementwise
pass
self.delta = tf.subtract(self.td_targetHolder, self.curState_qValueSelected)
#set learning rate
self._setLearningRate()
pass
#TODO Dueling (rescale and clipping of gradients)
pass
if perMemory:
self.replay_memory = PEM(ArchitectureSetting.in_shape, self.replay_memorySize)
self.weightedISHolder = tf.placeholder(shape=[self.minibatch], name='weighted-IS', dtype=tf.float32)
self.weightedDelta = tf.multiply(self.delta, self.weightedISHolder)
self.clipped_loss = tf.where(tf.abs(self.weightedDelta) < 1.0,
0.5 * tf.square(self.weightedDelta),
tf.abs(self.weightedDelta) - 0.5, name='clipped_loss')
else: #uniform replay memory (no prioritization)
self.replay_memory = ExperienceMemory(ArchitectureSetting.in_shape, self.replay_memorySize)
self.clipped_loss = tf.where(tf.abs(self.delta) < 1.0,
0.5 * tf.square(self.delta),
tf.abs(self.delta) - 0.5, name='clipped_loss')
pass
self.loss = tf.reduce_mean(self.clipped_loss, name='loss')
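# The clipped loss above is the Huber loss with threshold 1:
#   L(d) = 0.5 * d^2     if |d| < 1
#   L(d) = |d| - 0.5     otherwise
# e.g. d = 0.4 -> 0.08, d = 3.0 -> 2.5. It behaves like squared error near zero
# but grows only linearly for large TD errors, so a single outlier transition
# cannot produce a huge gradient.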
#$self.loss = tf.reduce_mean(tf.squared_difference(self.td_targetHolder, self.curState_qValueSelected))
pass
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum,
epsilon=1e-10)
self.train_step = self.optimizer.minimize(self.loss, global_step=self.global_step)
pass # https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
# self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum, epsilon=1e-10)
# self.train_step = self.optimizer.minimize(self.loss,global_step = self.global_step)
else:
self.epsilon = tf.constant(AgentSetting.epsilon_eval,dtype=tf.float32)
#finalize
self.util.summANDsave(self.training)
'''sets the agent learning rate '''
def _setLearningRate(self):
if self.dueling: # regardless of anything else
self.learning_rate = AgentSetting.duel_learining_rate
elif self.perMemory and not self.dueling:
self.learning_rate = PerSettings.step_size
else:
self.learning_rate = AgentSetting.learning_rate
#fill memory
def fill_memory(self,sess,reloadM):
self.env.reset(sess)
if not reloadM:
print ('Initializing my experience memory...')
else:
print('Restoring my experience memory (naive solution!)...')
state = self.state_process.get_state(sess)
done = False
for v in tqdm(range(self.replay_strt_size)):
if not reloadM:
#select an action randomly
action = self.env.takeRandomAction()
else:
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if done:
self.env.reset(sess)
state = self.state_process.get_state(sess)
else:
state = nxt_state
pass
print ("Waiting for current episode to be terminated...")
while not done:
action = self.env.takeRandomAction()
reward , done = self.env.step(action,sess)
def _epsilonDecay(self,sess):
pass
eps = self.e_greedy_final + max(0,(self.e_greedy_init - self.e_greedy_final) * (self.e_final_at - self.agentSteps.eval()) / self.e_final_at)
sess.run(self.epsilonUpdater, feed_dict={self.epsilonHolder: eps})
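# The schedule above anneals epsilon linearly from e_greedy_init down to
# e_greedy_final over the first e_final_at agent steps, then holds the final
# value. For example, with init = 1.0, final = 0.1 and e_final_at = 1e6 steps
# (illustrative numbers; the real constants live in AgentSetting), at step
# 250000 the exploration rate is
#   0.1 + max(0, (1.0 - 0.1) * (1e6 - 250000) / 1e6) = 0.775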
#Return the chosen action!
def behaviour_e_policy(self,state,sess):
#decay eps and calc prob for actions
action_probs = (np.ones(self.num_action, dtype =float) * self.epsilon.eval() ) / self.num_action
q_val = sess.run(self.onlineNet, feed_dict = { self.net_feed : np.expand_dims(state,0)})
greedy_choice = np.argmax(q_val)
action_probs[greedy_choice] += 1.0 - self.epsilon.eval()
action = np.random.choice(self.actions, p=action_probs)
pass
#decay epsilon
#if self.training:
# self._epsilonDecay(sess)
return action
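# Worked example of the distribution built above: with epsilon = 0.1 and 4
# valid actions (numbers chosen for illustration),
#   each non-greedy action : 0.1 / 4            = 0.025
#   the greedy action      : 0.025 + (1 - 0.1)  = 0.925
# so the probabilities sum to 1 and every action keeps a small floor of mass —
# standard epsilon-greedy over the online net's Q-values.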
#Playing
def playing(self,sess):
self.totalReward = 0.0
self.countR = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
for t in itertools.count():
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
print("playing well as much as you trained me :)")
if done:
self.duration = round(time.time() - self.startTime, 3)
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if (self.rendering):
self.env.render()
def learning(self,sess):
#loop for one episode
#reset vars
self.totalLoss =0.0
self.countL = 0
self.totalReward = 0.0
self.countR = 0
self.updates = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
no_op = 0
for _ in itertools.count():
#take action
action = self.behaviour_e_policy(state,sess)
#step and observe
reward , done = self.env.step(action,sess)
#inc agent steps
sess.run(self.agentStepsUpdater)
#decay epsilon after every step
self._epsilonDecay(sess)
pass
if(action == 0):
no_op += 1
pass #can't force episode to end
#if(no_op == self.no_op_max): #end this boring episode
# done = True
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if( self.agentSteps.eval() % self.update_freq == 0):
#sample a minibatch
state_batch, action_batch, reward_batch, done_batch, nxt_state_batch = self.replay_memory.sample(self.minibatch)
nxtStateFeedDict = {self.net_feed : nxt_state_batch}
nxtQVal = sess.run(self.nxtState_qValueSelected, feed_dict = nxtStateFeedDict)
#compute td-target
td_target = reward_batch + np.invert(done_batch).astype(np.float32) * self.discount_factor * nxtQVal
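# np.invert on the boolean done batch flips the flags, so terminal transitions
# keep only their immediate reward. A small illustrative batch (values invented):
#   reward_batch = [ 1.0, 0.0]
#   done_batch   = [True, False] -> np.invert -> [False, True] -> [0., 1.]
#   nxtQVal      = [ 5.0, 2.0], discount_factor = 0.99
#   td_target    = [ 1.0, 0.0 + 0.99 * 2.0] = [1.0, 1.98]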
curStateFeedDict = {self.net_feed: state_batch, self.actionBatchHolder : action_batch, self.td_targetHolder : td_target }
if self.perMemory:
# update priorities with new td_errors(deltas)
self.replay_memory.update(sess.run(self.delta, feed_dict =curStateFeedDict ))
#add to feedDict ISW
curStateFeedDict.update({self.weightedISHolder : self.replay_memory.getISW()})
# anneal beta
self.replay_memory.betaAnneal(sess)
pass
#run...run...run
loss, _ = sess.run([self.loss,self.train_step],feed_dict = curStateFeedDict)
#print ("loss %.5f at step %d" %(loss, self.global_step.eval()))
#stats
self.totalLoss += loss
self.countL +=1
self.updates +=1 #num of updates made per episode
pass #TRY self.global_step.eval()
if ( self.agentSteps.eval() % self.t_net_update_freq == 0 ):
sess.run(self.deepNet.updateTparas(True))
print("Target net parameters updated!")
pass
if done:
self.duration = round(time.time() - self.startTime, 3) #secs
sess.run([self.training_hrsUpdater, self.training_episodesUpdater], feed_dict = { self.training_hrsHolder : self.duration})
#update tf board every episode | else:
state = nxt_state
pass
if(self.rendering):
self.env.render()
pass #TO DO -> sample of Q-action values summaries
def summaries(self,sess):
#print "in summaries!"
#basics
listy = {'totReward' : self.totalReward, 'avgReward' : (self.totalReward / self.countR) , 'epDur' : self.duration }
if self.training:
listy.update({"totLoss" : self.totalLoss , "avgLoss" : (self.totalLoss/self.countL), 'epUpdates' : self.updates })
self.util.summary_board(sess,self.agentSteps.eval(), listy, self.training) | self.summaries(sess)
break #end of episode | random_line_split |
Q_Learner.py | #deep Q-learning
import time
import itertools
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from utils import Utility
from AgentBrain import Brain
from environment import Environment
from memory import ExperienceMemory
from StateProcessor import StateProcessor
from prioritizedExperienceMemory import PEM
from settings import AgentSetting, ArchitectureSetting, PerSettings
class DQN(object):
def __init__(self,env_name, doubleQ = False, dueling = False, perMemory = False, training = True, watch = False ):
pass
with tf.variable_scope('AgentEnvSteps'):
self.agentSteps = tf.get_variable(name='agentSteps',initializer= 0, trainable=False,dtype= tf.int32)
self.agentStepsUpdater = self.agentSteps.assign_add(1)
# keep in order
self.util = Utility(env_name, doubleQ, dueling, perMemory, training)
self.env = Environment(env_name, self.util.monitorDir)
self.state_process = StateProcessor()
self.num_action = self.env.VALID_ACTIONS
self.deepNet = Brain(self.num_action, dueling, training)
self.net_feed = self.deepNet.nn_input
self.onlineNet = self.deepNet.Q_nn(forSess=True)
#self.eee = self.add
self.actions = np.arange(self.num_action)
self.no_op_max = AgentSetting.no_op_max
self.startTime = 0.0
self.duration = 0.0
self.totalReward = 0.0
self.countR = 0
self.training = training
self.doubleQ = doubleQ
self.dueling = dueling
self.perMemory = perMemory
self.rendering = watch
pass
print ("POSSIBLE ACTIONS :", self.actions)
if training:
self.updates = 0
self.totalLoss = 0.0
self.countL = 0
self.minibatch = AgentSetting.minibatch
self.replay_memorySize = AgentSetting.replay_memory
self.t_net_update_freq = AgentSetting.t_net_update_freq
self.discount_factor = AgentSetting.discount_factor
self.update_freq = AgentSetting.update_freq
self.momentum = AgentSetting.momentum
self.e_greedy_init = AgentSetting.e_greedy_init
self.e_greedy_final = AgentSetting.e_greedy_final
self.e_final_at = AgentSetting.e_final_at
#self.e_decay_rate = (self.e_greedy_init - self.e_greedy_final) / self.e_final_at
self.epsilon = tf.Variable(0.0, trainable = False, dtype = tf.float32, name = "epsilon")
self.epsilonHolder = tf.placeholder(dtype = tf.float32)
self.epsilonUpdater = self.epsilon.assign(self.epsilonHolder)
self.replay_strt_size = AgentSetting.replay_strt_size
self.global_step = tf.Variable(0, trainable=False,name='global_step')
self.training_hrs = tf.Variable(0.0, trainable=False,name='training_hrs')
self.training_episodes = tf.Variable(0,trainable = False , name = "training_episodes")
self.training_hrsHolder = tf.placeholder(dtype = tf.float32)
self.training_hrsUpdater = self.training_hrs.assign_add((self.training_hrsHolder / 60.0) / 60.0)
self.training_episodesUpdater = self.training_episodes.assign_add(1)
self.targetNet = self.deepNet.T_nn(forSess=True)
if doubleQ:
'''DoubleQ aims to reduce overestimations of Q-values by decoupling action selection
from action evaluation in target calculation'''
# if double
# 1- action selection using Q-net(online net)
self.selectedActionIndices = tf.argmax(self.onlineNet, axis=1)
self.selectedAction = tf.one_hot(indices=self.selectedActionIndices, depth=self.num_action,
axis=-1, dtype=tf.float32, on_value=1.0, off_value=0.0)
# 2- action evaluation using T-net (target net)
self.nxtState_qValueSelected = tf.reduce_sum(tf.multiply(self.targetNet, self.selectedAction),
axis=1) # element wise
else:
# else
# 1,2- take a one-step look-ahead and follow a greedy policy
self.nxtState_qValueSelected = tf.reduce_max(self.targetNet, axis=1)
#3- td-target
self.td_targetHolder = tf.placeholder(shape=[self.minibatch], name='td-target', dtype=tf.float32)
#4- current state chosen action value
self.actionBatchHolder = tf.placeholder(dtype=tf.uint8)
self.chosenAction = tf.one_hot(indices=self.actionBatchHolder, depth=self.num_action, axis=-1,
dtype=tf.float32, on_value=1.0,
off_value=0.0)
self.curState_qValueSelected = tf.reduce_sum(tf.multiply(self.onlineNet, self.chosenAction),
axis=1) # elementwise
pass
self.delta = tf.subtract(self.td_targetHolder, self.curState_qValueSelected)
#set learning rate
self._setLearningRate()
pass
#TODO Dueling (rescale and clipping of gradients)
pass
if perMemory:
self.replay_memory = PEM(ArchitectureSetting.in_shape, self.replay_memorySize)
self.weightedISHolder = tf.placeholder(shape=[self.minibatch], name='weighted-IS', dtype=tf.float32)
self.weightedDelta = tf.multiply(self.delta, self.weightedISHolder)
self.clipped_loss = tf.where(tf.abs(self.weightedDelta) < 1.0,
0.5 * tf.square(self.weightedDelta),
tf.abs(self.weightedDelta) - 0.5, name='clipped_loss')
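# With prioritized replay the per-sample TD error is multiplied by an
# importance-sampling weight w_i before the Huber clipping. Conceptually
# (sketch only; the actual weights come from PEM.getISW()):
#   P(i) = p_i^alpha / sum_k p_k^alpha       # sampling probability
#   w_i  = (N * P(i))^(-beta) / max_j w_j    # IS correction, normalized
# beta is annealed towards 1 during training (see betaAnneal further down), so
# the bias introduced by non-uniform sampling vanishes as learning converges.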
else: #uniform replay memory (no prioritization)
self.replay_memory = ExperienceMemory(ArchitectureSetting.in_shape, self.replay_memorySize)
self.clipped_loss = tf.where(tf.abs(self.delta) < 1.0,
0.5 * tf.square(self.delta),
tf.abs(self.delta) - 0.5, name='clipped_loss')
pass
self.loss = tf.reduce_mean(self.clipped_loss, name='loss')
#$self.loss = tf.reduce_mean(tf.squared_difference(self.td_targetHolder, self.curState_qValueSelected))
pass
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum,
epsilon=1e-10)
self.train_step = self.optimizer.minimize(self.loss, global_step=self.global_step)
pass # https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
# self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum, epsilon=1e-10)
# self.train_step = self.optimizer.minimize(self.loss,global_step = self.global_step)
else:
self.epsilon = tf.constant(AgentSetting.epsilon_eval,dtype=tf.float32)
#finalize
self.util.summANDsave(self.training)
'''sets the agent learning rate '''
def _setLearningRate(self):
if self.dueling: # regardless of anything else
self.learning_rate = AgentSetting.duel_learining_rate
elif self.perMemory and not self.dueling:
self.learning_rate = PerSettings.step_size
else:
self.learning_rate = AgentSetting.learning_rate
#fill memory
def fill_memory(self,sess,reloadM):
self.env.reset(sess)
if not reloadM:
print ('Initializing my experience memory...')
else:
print('Restoring my experience memory (naive solution!)...')
state = self.state_process.get_state(sess)
done = False
for v in tqdm(range(self.replay_strt_size)):
if not reloadM:
#select an action randomly
action = self.env.takeRandomAction()
else:
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if done:
self.env.reset(sess)
state = self.state_process.get_state(sess)
else:
state = nxt_state
pass
print ("Waiting for current episode to be terminated...")
while not done:
action = self.env.takeRandomAction()
reward , done = self.env.step(action,sess)
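# fill_memory above only seeds the replay buffer: it plays replay_strt_size
# steps (randomly on a fresh run, epsilon-greedily when restoring a saved
# agent) so that minibatch sampling has something to draw from before the
# first gradient update. The trailing while-loop lets the current episode
# finish, so training later starts from env.reset() on a clean episode
# boundary.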
def _epsilonDecay(self,sess):
pass
eps = self.e_greedy_final + max(0,(self.e_greedy_init - self.e_greedy_final) * (self.e_final_at - self.agentSteps.eval()) / self.e_final_at)
sess.run(self.epsilonUpdater, feed_dict={self.epsilonHolder: eps})
#Return the chosen action!
def behaviour_e_policy(self,state,sess):
#decay eps and calc prob for actions
action_probs = (np.ones(self.num_action, dtype =float) * self.epsilon.eval() ) / self.num_action
q_val = sess.run(self.onlineNet, feed_dict = { self.net_feed : np.expand_dims(state,0)})
greedy_choice = np.argmax(q_val)
action_probs[greedy_choice] += 1.0 - self.epsilon.eval()
action = np.random.choice(self.actions, p=action_probs)
pass
#decay epsilon
#if self.training:
# self._epsilonDecay(sess)
return action
#Playing
def playing(self,sess):
self.totalReward = 0.0
self.countR = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
for t in itertools.count():
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
print("playing well as much as you trained me :)")
if done:
self.duration = round(time.time() - self.startTime, 3)
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if (self.rendering):
self.env.render()
def learning(self,sess):
#loop for one episode
#reset vars
|
pass #TO DO -> sample of Q-action values summaries
def summaries(self,sess):
#print "in summaries!"
#basics
listy = {'totReward' : self.totalReward, 'avgReward' : (self.totalReward / self.countR) , 'epDur' : self.duration }
if self.training:
listy.update({"totLoss" : self.totalLoss , "avgLoss" : (self.totalLoss/self.countL), 'epUpdates' : self.updates })
self.util.summary_board(sess,self.agentSteps.eval(), listy, self.training)
| self.totalLoss =0.0
self.countL = 0
self.totalReward = 0.0
self.countR = 0
self.updates = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
no_op = 0
for _ in itertools.count():
#take action
action = self.behaviour_e_policy(state,sess)
#step and observe
reward , done = self.env.step(action,sess)
#inc agent steps
sess.run(self.agentStepsUpdater)
#decay epsilon after every step
self._epsilonDecay(sess)
pass
if(action == 0):
no_op += 1
pass #can't force episode to end
#if(no_op == self.no_op_max): #end this boring episode
# done = True
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if( self.agentSteps.eval() % self.update_freq == 0):
#sample a minibatch
state_batch, action_batch, reward_batch, done_batch, nxt_state_batch = self.replay_memory.sample(self.minibatch)
nxtStateFeedDict = {self.net_feed : nxt_state_batch}
nxtQVal = sess.run(self.nxtState_qValueSelected, feed_dict = nxtStateFeedDict)
#compute td-target
td_target = reward_batch + np.invert(done_batch).astype(np.float32) * self.discount_factor * nxtQVal
curStateFeedDict = {self.net_feed: state_batch, self.actionBatchHolder : action_batch, self.td_targetHolder : td_target }
if self.perMemory:
# update priorities with new td_errors(deltas)
self.replay_memory.update(sess.run(self.delta, feed_dict =curStateFeedDict ))
#add to feedDict ISW
curStateFeedDict.update({self.weightedISHolder : self.replay_memory.getISW()})
# anneal beta
self.replay_memory.betaAnneal(sess)
pass
#run...run...run
loss, _ = sess.run([self.loss,self.train_step],feed_dict = curStateFeedDict)
#print ("loss %.5f at step %d" %(loss, self.global_step.eval()))
#stats
self.totalLoss += loss
self.countL +=1
self.updates +=1 #num of updates made per episode
pass #TRY self.global_step.eval()
if ( self.agentSteps.eval() % self.t_net_update_freq == 0 ):
sess.run(self.deepNet.updateTparas(True))
print("Target net parameters updated!")
pass
if done:
self.duration = round(time.time() - self.startTime, 3) #secs
sess.run([self.training_hrsUpdater, self.training_episodesUpdater], feed_dict = { self.training_hrsHolder : self.duration})
#update tf board every episode
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if(self.rendering):
self.env.render() | identifier_body |
Q_Learner.py | #deep Q-learning
import time
import itertools
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from utils import Utility
from AgentBrain import Brain
from environment import Environment
from memory import ExperienceMemory
from StateProcessor import StateProcessor
from prioritizedExperienceMemory import PEM
from settings import AgentSetting, ArchitectureSetting, PerSettings
class DQN(object):
def __init__(self,env_name, doubleQ = False, dueling = False, perMemory = False, training = True, watch = False ):
pass
with tf.variable_scope('AgentEnvSteps'):
self.agentSteps = tf.get_variable(name='agentSteps',initializer= 0, trainable=False,dtype= tf.int32)
self.agentStepsUpdater = self.agentSteps.assign_add(1)
# keep in order
self.util = Utility(env_name, doubleQ, dueling, perMemory, training)
self.env = Environment(env_name, self.util.monitorDir)
self.state_process = StateProcessor()
self.num_action = self.env.VALID_ACTIONS
self.deepNet = Brain(self.num_action, dueling, training)
self.net_feed = self.deepNet.nn_input
self.onlineNet = self.deepNet.Q_nn(forSess=True)
#self.eee = self.add
self.actions = np.arange(self.num_action)
self.no_op_max = AgentSetting.no_op_max
self.startTime = 0.0
self.duration = 0.0
self.totalReward = 0.0
self.countR = 0
self.training = training
self.doubleQ = doubleQ
self.dueling = dueling
self.perMemory = perMemory
self.rendering = watch
pass
print ("POSSIBLE ACTIONS :", self.actions)
if training:
self.updates = 0
self.totalLoss = 0.0
self.countL = 0
self.minibatch = AgentSetting.minibatch
self.replay_memorySize = AgentSetting.replay_memory
self.t_net_update_freq = AgentSetting.t_net_update_freq
self.discount_factor = AgentSetting.discount_factor
self.update_freq = AgentSetting.update_freq
self.momentum = AgentSetting.momentum
self.e_greedy_init = AgentSetting.e_greedy_init
self.e_greedy_final = AgentSetting.e_greedy_final
self.e_final_at = AgentSetting.e_final_at
#self.e_decay_rate = (self.e_greedy_init - self.e_greedy_final) / self.e_final_at
self.epsilon = tf.Variable(0.0, trainable = False, dtype = tf.float32, name = "epsilon")
self.epsilonHolder = tf.placeholder(dtype = tf.float32)
self.epsilonUpdater = self.epsilon.assign(self.epsilonHolder)
self.replay_strt_size = AgentSetting.replay_strt_size
self.global_step = tf.Variable(0, trainable=False,name='global_step')
self.training_hrs = tf.Variable(0.0, trainable=False,name='training_hrs')
self.training_episodes = tf.Variable(0,trainable = False , name = "training_episodes")
self.training_hrsHolder = tf.placeholder(dtype = tf.float32)
self.training_hrsUpdater = self.training_hrs.assign_add((self.training_hrsHolder / 60.0) / 60.0)
self.training_episodesUpdater = self.training_episodes.assign_add(1)
self.targetNet = self.deepNet.T_nn(forSess=True)
if doubleQ:
'''DoubleQ aims to reduce overestimations of Q-values by decoupling action selection
from action evaluation in target calculation'''
# if double
# 1- action selection using Q-net(online net)
self.selectedActionIndices = tf.argmax(self.onlineNet, axis=1)
self.selectedAction = tf.one_hot(indices=self.selectedActionIndices, depth=self.num_action,
axis=-1, dtype=tf.float32, on_value=1.0, off_value=0.0)
# 2- action evaluation using T-net (target net)
self.nxtState_qValueSelected = tf.reduce_sum(tf.multiply(self.targetNet, self.selectedAction),
axis=1) # element wise
else:
# else
# 1,2- take a one-step look-ahead and follow a greedy policy
self.nxtState_qValueSelected = tf.reduce_max(self.targetNet, axis=1)
#3- td-target
self.td_targetHolder = tf.placeholder(shape=[self.minibatch], name='td-target', dtype=tf.float32)
#4- current state chosen action value
self.actionBatchHolder = tf.placeholder(dtype=tf.uint8)
self.chosenAction = tf.one_hot(indices=self.actionBatchHolder, depth=self.num_action, axis=-1,
dtype=tf.float32, on_value=1.0,
off_value=0.0)
self.curState_qValueSelected = tf.reduce_sum(tf.multiply(self.onlineNet, self.chosenAction),
axis=1) # elementwise
pass
self.delta = tf.subtract(self.td_targetHolder, self.curState_qValueSelected)
#set learning rate
self._setLearningRate()
pass
#TODO Dueling (rescale and clipping of gradients)
pass
if perMemory:
self.replay_memory = PEM(ArchitectureSetting.in_shape, self.replay_memorySize)
self.weightedISHolder = tf.placeholder(shape=[self.minibatch], name='weighted-IS', dtype=tf.float32)
self.weightedDelta = tf.multiply(self.delta, self.weightedISHolder)
self.clipped_loss = tf.where(tf.abs(self.weightedDelta) < 1.0,
0.5 * tf.square(self.weightedDelta),
tf.abs(self.weightedDelta) - 0.5, name='clipped_loss')
else: #uniform replay memory (no prioritization)
self.replay_memory = ExperienceMemory(ArchitectureSetting.in_shape, self.replay_memorySize)
self.clipped_loss = tf.where(tf.abs(self.delta) < 1.0,
0.5 * tf.square(self.delta),
tf.abs(self.delta) - 0.5, name='clipped_loss')
pass
self.loss = tf.reduce_mean(self.clipped_loss, name='loss')
#$self.loss = tf.reduce_mean(tf.squared_difference(self.td_targetHolder, self.curState_qValueSelected))
pass
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum,
epsilon=1e-10)
self.train_step = self.optimizer.minimize(self.loss, global_step=self.global_step)
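# Schematically, RMSProp keeps a running average of squared gradients and
# scales each update by its square root:
#   v_t    = 0.9 * v_{t-1} + 0.1 * g_t^2           # decay = 0.9 as passed above
#   theta -= learning_rate * g_t / (sqrt(v_t) + 1e-10)
# with the momentum term additionally accumulating the scaled step. Because
# minimize() is given global_step, that counter tracks gradient updates rather
# than environment steps.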
pass # https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
# self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.9, momentum=self.momentum, epsilon=1e-10)
# self.train_step = self.optimizer.minimize(self.loss,global_step = self.global_step)
else:
self.epsilon = tf.constant(AgentSetting.epsilon_eval,dtype=tf.float32)
#finalize
self.util.summANDsave(self.training)
'''sets the agent learning rate '''
def _setLearningRate(self):
if self.dueling: # regardless of anything else
self.learning_rate = AgentSetting.duel_learining_rate
elif self.perMemory and not self.dueling:
self.learning_rate = PerSettings.step_size
else:
self.learning_rate = AgentSetting.learning_rate
#fill memory
def fill_memory(self,sess,reloadM):
self.env.reset(sess)
if not reloadM:
|
else:
print('Restoring my experience memory (naive solution!)...')
state = self.state_process.get_state(sess)
done = False
for v in tqdm(range(self.replay_strt_size)):
if not reloadM:
#select an action randomly
action = self.env.takeRandomAction()
else:
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if done:
self.env.reset(sess)
state = self.state_process.get_state(sess)
else:
state = nxt_state
pass
print ("Waiting for current episode to be terminated...")
while not done:
action = self.env.takeRandomAction()
reward , done = self.env.step(action,sess)
def _epsilonDecay(self,sess):
pass
eps = self.e_greedy_final + max(0,(self.e_greedy_init - self.e_greedy_final) * (self.e_final_at - self.agentSteps.eval()) / self.e_final_at)
sess.run(self.epsilonUpdater, feed_dict={self.epsilonHolder: eps})
#Return the chosen action!
def behaviour_e_policy(self,state,sess):
#decay eps and calc prob for actions
action_probs = (np.ones(self.num_action, dtype =float) * self.epsilon.eval() ) / self.num_action
q_val = sess.run(self.onlineNet, feed_dict = { self.net_feed : np.expand_dims(state,0)})
greedy_choice = np.argmax(q_val)
action_probs[greedy_choice] += 1.0 - self.epsilon.eval()
action = np.random.choice(self.actions, p=action_probs)
pass
#decay epsilon
#if self.training:
# self._epsilonDecay(sess)
return action
#Playing
def playing(self,sess):
self.totalReward = 0.0
self.countR = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
for t in itertools.count():
action = self.behaviour_e_policy(state,sess)
reward , done = self.env.step(action,sess)
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
print("playing well as much as you trained me :)")
if done:
self.duration = round(time.time() - self.startTime, 3)
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if (self.rendering):
self.env.render()
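# In evaluation mode (training=False) epsilon is the constant
# AgentSetting.epsilon_eval rather than the annealed variable, so playing()
# follows an almost-greedy policy while keeping a little randomness to avoid
# getting stuck in deterministic loops. Rendering is optional and controlled
# only by the `watch` flag passed to the constructor.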
def learning(self,sess):
#loop for one episode
#reset vars
self.totalLoss =0.0
self.countL = 0
self.totalReward = 0.0
self.countR = 0
self.updates = 0
self.startTime = time.time()
self.env.reset(sess)
state = self.state_process.get_state(sess)
no_op = 0
for _ in itertools.count():
#take action
action = self.behaviour_e_policy(state,sess)
#step and observe
reward , done = self.env.step(action,sess)
#inc agent steps
sess.run(self.agentStepsUpdater)
#decay epsilon after every step
self._epsilonDecay(sess)
pass
if(action == 0):
no_op += 1
pass #can't force episode to end
#if(no_op == self.no_op_max): #end this boring episode
# done = True
self.totalReward += reward
self.countR += 1
nxt_state = self.state_process.get_state(sess)
experience = (state , action , reward, done , nxt_state)
self.replay_memory.add(experience)
if( self.agentSteps.eval() % self.update_freq == 0):
#sample a minibatch
state_batch, action_batch, reward_batch, done_batch, nxt_state_batch = self.replay_memory.sample(self.minibatch)
nxtStateFeedDict = {self.net_feed : nxt_state_batch}
nxtQVal = sess.run(self.nxtState_qValueSelected, feed_dict = nxtStateFeedDict)
#compute td-target
td_target = reward_batch + np.invert(done_batch).astype(np.float32) * self.discount_factor * nxtQVal
curStateFeedDict = {self.net_feed: state_batch, self.actionBatchHolder : action_batch, self.td_targetHolder : td_target }
if self.perMemory:
# update priorities with new td_errors(deltas)
self.replay_memory.update(sess.run(self.delta, feed_dict =curStateFeedDict ))
#add to feedDict ISW
curStateFeedDict.update({self.weightedISHolder : self.replay_memory.getISW()})
# anneal beta
self.replay_memory.betaAnneal(sess)
pass
#run...run...run
loss, _ = sess.run([self.loss,self.train_step],feed_dict = curStateFeedDict)
#print ("loss %.5f at step %d" %(loss, self.global_step.eval()))
#stats
self.totalLoss += loss
self.countL +=1
self.updates +=1 #num of updates made per episode
pass #TRY self.global_step.eval()
if ( self.agentSteps.eval() % self.t_net_update_freq == 0 ):
sess.run(self.deepNet.updateTparas(True))
print("Target net parameters updated!")
pass
if done:
self.duration = round(time.time() - self.startTime, 3) #secs
sess.run([self.training_hrsUpdater, self.training_episodesUpdater], feed_dict = { self.training_hrsHolder : self.duration})
#update tf board every episode
self.summaries(sess)
break #end of episode
else:
state = nxt_state
pass
if(self.rendering):
self.env.render()
pass #TO DO -> sample of Q-action values summaries
def summaries(self,sess):
#print "in summaries!"
#basics
listy = {'totReward' : self.totalReward, 'avgReward' : (self.totalReward / self.countR) , 'epDur' : self.duration }
if self.training:
listy.update({"totLoss" : self.totalLoss , "avgLoss" : (self.totalLoss/self.countL), 'epUpdates' : self.updates })
self.util.summary_board(sess,self.agentSteps.eval(), listy, self.training)
| print ('Initializing my experience memory...') | conditional_block |
scripts.js | // v1
var $ = jQuery;
var WPTHEME = '/wp-content/themes/CorePaws-v2/';
var DOMAIN = location.protocol + "//" + location.host;
jQuery(document).ready(function ($) {
// Doc ready functions
GAtracking();
});
// ===================================================================
// Function with all of the general Google Analytics Tracking
// ===================================================================
function GAtracking() {
// Custom Google Analytics tracking code
// ...
}
// ===================================================================
// Function to gather all of the search criteria and submit the page
// ===================================================================
function petSearch() {
$('#sidebar .controls button').click(function () {
var search = {};
var url = '';
$("input[name='animal']:checked").each(function () {
if (search['animal'] === undefined) {
search['animal'] = $(this).val();
} else {
search['animal'] += ',' + $(this).val();
}
});
$("input[name='category']:checked").each(function () {
if (search['category'] === undefined) {
search['category'] = $(this).val();
} else {
search['category'] += ',' + $(this).val();
}
});
//Creates search URL
$.each(search, function (key, value) {
if (url.length === 0) {
url = '?' + key + '=' + value;
} else {
url += '&' + key + '=' + value;
}
});
// Use "search" variable to record events if desired
window.location = DOMAIN + '/adoption/' + url;
});
}
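// Example of the query string this builds (checkbox values here are invented
// for illustration): checking "dog" and "cat" under animal plus "senior"
// under category redirects to
//   <DOMAIN>/adoption/?animal=dog,cat&category=senior
// i.e. each filter group becomes one comma-separated query parameter.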
// ===================================================================
// Function to initialize Featured Pets Carousel
// ===================================================================
function initFeaturedCarousel() {
$('#featured .carousel').slick({
infinite: true,
slidesToShow: 4,
slidesToScroll: 1,
autoplay: true,
autoplaySpeed: 2000,
responsive: [
{breakpoint: 960, settings: {slidesToShow: 3}},
{breakpoint: 768, settings: {slidesToShow: 2}},
{breakpoint: 480, settings: {slidesToShow: 1}}
]
});
}
// ===================================================================
// Function to initialize Gallery Carousel
// ===================================================================
function initGalleryCarousel() {
$('#gallery .carousel').slick({
infinite: true,
slidesToShow: 1,
slidesToScroll: 1,
autoplay: false
});
$('#gallery .thumbnails .thumb').click(function () {
$('#gallery .carousel').slick('slickGoTo', $(this).attr('data-thumb'));
});
}
// ===================================================================
// Function for the FAQ show/hide feature
// ===================================================================
function initFAQ() {
$('.answer').hide();
$('h3.question').click(function () {
if ($(this).hasClass('active')) {
$(this).next('.answer').slideUp('fast', function () {
$(this).prev('h3.question').removeClass('active');
});
} else {
$(this).next('.answer').slideDown('slow', function () {
$(this).prev('h3.question').addClass('active');
});
}
});
}
// ===================================================================
// Global Function to determine what page is viewed based on main ID
// ===================================================================
function isPage(a) {
var array = a.split(',');
if (array.length === 2) {
return $("#" + array[0]).length && $("main").attr("data-sub") === array[1];
} else {
return $("#" + a).length;
}
}
// v2
function sizeElements(element) {
var maxHeight = 0;
console.log(element);
$(element).height('auto');
$(element).each(function () {
maxHeight = $(this).height() > maxHeight ? $(this).height() : maxHeight;
});
$(element).css('height', maxHeight);
}
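// Usage sketch: sizeElements('.preview-text') measures every matched element
// at its natural height, takes the tallest one, and applies that height to
// the whole set so card rows line up. It is re-run on window resize below;
// the console.log is presumably leftover debugging output.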
// basic slider initialization function
function initSlick(slider, args) {
$(slider).slick(args);
}
// slider with custom pagination thumbnails. defined args, reusable on same-structural elements
function infoSlider(blockID) {
gallery = $(blockID).find('.gallery');
thumbs = $(blockID).find('.thumbnails');
$(gallery).slick({
dots: true,
infinite: true,
arrows: false,
appendDots: $(thumbs),
customPaging: function (slider, i) {
var thumb = $(slider.$slides[i]).data('thumb');
return '<a><img src="' + thumb + '"></a>';
},
})
}
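// customPaging above turns each slick dot into a thumbnail: for slide i it
// reads the slide's data-thumb attribute and returns an <a><img></a> element,
// and appendDots places the generated list inside the block's .thumbnails
// container. Each .info-block gets its own thumbnail navigation, wired up
// per-block in the about-page loop further down.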
function sizeFooterColumns() {
$('#footer-center').height($('#footer-left').height())
}
// active video player button on homepage
// muted: show iframe embed and hide thumbnail + play button
function videoPlayer() {
$('a.video').click(function () {
$me = $(this);
$id = $me.attr('yt-id');
popVideo($id);
})
}
// resize iframe after play
function resizeVideo() {
var $frame = $('iframe');
var width = $('.video').width();
$frame.attr('width', width);
$frame.attr('height', (width * 3 / 5));
}
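// The 3/5 multiplier keeps the iframe at a fixed 5:3 aspect ratio, e.g. a
// 600px-wide .video container yields a 360px-tall frame. (YouTube's native
// 16:9 would use width * 9 / 16 instead; 5:3 appears to be a deliberate
// layout choice here.)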
// mobile menu
function menu() {
// mobile menu clicks
$('#burger').on('click', function () {
$('#menu').toggleClass('open');
$('#burger').toggleClass('open');
$('html').toggleClass('scroll-lock');
});
}
function popVideo(id) {
$tar = $('#videobox');
$tar.addClass('on');
$str = '<div class="video-frame"><div class="videowrapper"><iframe width="560" height="315" src="https://www.youtube.com/embed/' + id + '?autoplay=1&controls=0" frameborder="0" allowfullscreen></iframe></div></div>';
$tar.html($str);
}
function killVideo() {
$tar = $('#videobox');
$tar.removeClass('on');
$tar.html('');
}
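// popVideo()/killVideo() form a minimal lightbox: opening injects an
// autoplaying YouTube embed into #videobox and adds the .on class; closing
// removes the class and empties the markup, which also stops playback since
// the iframe is destroyed. The click and Escape-key bindings that call
// killVideo() are attached in the ready handler below.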
jQuery(document).ready(function ($) {
menu();
sizeFooterColumns();
$(window).resize(function () {
sizeFooterColumns();
});
if ($('header #navbar > li.current-menu-ancestor').length > 0) {
$('header #navbar > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
// MOBILE MENU SWITCH-A-ROO
$('header #navbar > li > a').each(function () {
$(this).on('click', function (e) {
if ($(window).width() < 980) {
e.preventDefault();
$it = $(this).parent();
console.log('hi');
if (!$it.hasClass('mobile-open show-sub')) {
if ($('#navbar.menu .mobile-open.show-sub').length > 0) {
$('#navbar.menu .mobile-open.show-sub').removeClass('mobile-open show-sub');
}
$it.addClass('mobile-open show-sub');
} else {
$it.removeClass('mobile-open show-sub');
$('#navbar.menu > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
}
});
});
// OFF SITE LINKS
$('a[href]').not('a[href*="' + DOMAIN + '"]').not('a[href*="mailto"]').each(function () { //
$(this).attr('target', '_blank');
});
// HOME LOAD FUNCTIONS
if ($('.page.home').length > 0) {
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
resizeVideo();
});
videoPlayer();
}
// YouTube lightbox link action
if ($('.yt-lb').length > 0) {
$('.yt-lb').each(function () {
$me = $(this);
$id = $me.attr('yt-id');
$me.on('click', function () {
popVideo($id);
});
});
$('.video-lightbox').on('click', function () {
killVideo();
});
$('body').keyup(function (event) {
if (event.which === 27) {
killVideo();
}
});
}
// Testimonial Carousel Functionality
if ($('#testimonial-slides').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="' + theme + '/img/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="' + theme + '/img/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
autoplay: true,
autoplaySpeed: 13000,
});
}
// Hero Carousel Functionality
if ($('#hero .bg-frame .caro').length > 0) {
initSlick($('#hero .bg-frame .caro'), {
autoplay: true,
arrows: false
});
}
// FAQ Page functionality
if ($('.page-frequently-asked-questions').length > 0) {
$('.page-frequently-asked-questions .faq').addClass('armed');
$('.faq .question').each(function () {
$i = $(this);
$j = $i.next();
$j.hide(); | $me = $(this);
if (!$me.hasClass('active')) {
if ($('.faq .question.active').length > 0) {
$('.faq .active').removeClass('active').next().hide();
}
$me.addClass('active').next().slideDown();
} else {
$me.removeClass('active').next().hide();
}
});
});
}
if ($('.page.about').length > 0) {
$('.info-block').each(function () {
ID = '#' + $(this).attr('id');
console.log(ID);
infoSlider(ID);
})
}
if ($('.row.map-body').length > 0) {
initMap();
toggleMapView();
}
if ($('.page.donate').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="images/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="images/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
});
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
})
}
if ($('.cloak-more').length > 0) {
$('.mobile-switch-more').on('click', function () {
$(this).prev().toggleClass('open');
});
}
}); |
$i.on('click', function () { | random_line_split |
scripts.js | // v1
var $ = jQuery;
var WPTHEME = '/wp-content/themes/CorePaws-v2/';
var DOMAIN = location.protocol + "//" + location.host;
jQuery(document).ready(function ($) {
// Doc ready functions
GAtracking();
});
// ===================================================================
// Function with all of the general Google Analytics Tracking
// ===================================================================
function GAtracking() {
// Custom Google Analytics tracking code
// ...
}
// ===================================================================
// Function to gather all of the search criteria and submit the page
// ===================================================================
function petSearch() {
$('#sidebar .controls button').click(function () {
var search = {};
var url = '';
$("input[name='animal']:checked").each(function () {
if (search['animal'] === undefined) {
search['animal'] = $(this).val();
} else {
search['animal'] += ',' + $(this).val();
}
});
$("input[name='category']:checked").each(function () {
if (search['category'] === undefined) {
search['category'] = $(this).val();
} else {
search['category'] += ',' + $(this).val();
}
});
//Creates search URL
$.each(search, function (key, value) {
if (url.length === 0) {
url = '?' + key + '=' + value;
} else {
url += '&' + key + '=' + value;
}
});
// Use "search" variable to record events if desired
window.location = DOMAIN + '/adoption/' + url;
});
}
// ===================================================================
// Function to initialize Featured Pets Carousel
// ===================================================================
function initFeaturedCarousel() {
$('#featured .carousel').slick({
infinite: true,
slidesToShow: 4,
slidesToScroll: 1,
autoplay: true,
autoplaySpeed: 2000,
responsive: [
{breakpoint: 960, settings: {slidesToShow: 3}},
{breakpoint: 768, settings: {slidesToShow: 2}},
{breakpoint: 480, settings: {slidesToShow: 1}}
]
});
}
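// slick applies each responsive settings object once the viewport is narrower
// than its breakpoint: below 960px the carousel shows 3 slides, below 768px
// it shows 2, and below 480px a single slide; the top-level settings (4
// slides) apply on wider screens. autoplaySpeed is the pause between
// automatic scrolls, in milliseconds.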
// ===================================================================
// Function to initialize Gallery Carousel
// ===================================================================
function initGalleryCarousel() {
$('#gallery .carousel').slick({
infinite: true,
slidesToShow: 1,
slidesToScroll: 1,
autoplay: false
});
$('#gallery .thumbnails .thumb').click(function () {
$('#gallery .carousel').slick('slickGoTo', $(this).attr('data-thumb'));
});
}
// ===================================================================
// Function for the FAQ show/hide feature
// ===================================================================
function initFAQ() {
$('.answer').hide();
$('h3.question').click(function () {
if ($(this).hasClass('active')) {
$(this).next('.answer').slideUp('fast', function () {
$(this).prev('h3.question').removeClass('active');
});
} else {
$(this).next('.answer').slideDown('slow', function () {
$(this).prev('h3.question').addClass('active');
});
}
});
}
// ===================================================================
// Global Function to determine what page is viewed based on main ID
// ===================================================================
function isPage(a) {
var array = a.split(',');
if (array.length === 2) {
return $("#" + array[0]).length && $("main").attr("data-sub") === array[1];
} else {
return $("#" + a).length;
}
}
// v2
function sizeElements(element) {
var maxHeight = 0;
console.log(element);
$(element).height('auto');
$(element).each(function () {
maxHeight = $(this).height() > maxHeight ? $(this).height() : maxHeight;
});
$(element).css('height', maxHeight);
}
// basic slider initialization function
function initSlick(slider, args) {
$(slider).slick(args);
}
// slider with custom pagination thumbnails. defined args, reusable on same-structural elements
function infoSlider(blockID) {
gallery = $(blockID).find('.gallery');
thumbs = $(blockID).find('.thumbnails');
$(gallery).slick({
dots: true,
infinite: true,
arrows: false,
appendDots: $(thumbs),
customPaging: function (slider, i) {
var thumb = $(slider.$slides[i]).data('thumb');
return '<a><img src="' + thumb + '"></a>';
},
})
}
function sizeFooterColumns() {
$('#footer-center').height($('#footer-left').height())
}
// active video player button on homepage
// muted: show iframe embed and hide thumbnail + play button
function videoPlayer() {
$('a.video').click(function () {
$me = $(this);
$id = $me.attr('yt-id');
popVideo($id);
})
}
// resize iframe after play
function resizeVideo() {
var $frame = $('iframe');
var width = $('.video').width();
$frame.attr('width', width);
$frame.attr('height', (width * 3 / 5));
}
// mobile menu
function | () {
// mobile menu clicks
$('#burger').on('click', function () {
$('#menu').toggleClass('open');
$('#burger').toggleClass('open');
$('html').toggleClass('scroll-lock');
});
}
function popVideo(id) {
$tar = $('#videobox');
$tar.addClass('on');
$str = '<div class="video-frame"><div class="videowrapper"><iframe width="560" height="315" src="https://www.youtube.com/embed/' + id + '?autoplay=1&controls=0" frameborder="0" allowfullscreen></iframe></div></div>';
$tar.html($str);
}
function killVideo() {
$tar = $('#videobox');
$tar.removeClass('on');
$tar.html('');
}
jQuery(document).ready(function ($) {
menu();
sizeFooterColumns();
$(window).resize(function () {
sizeFooterColumns();
});
if ($('header #navbar > li.current-menu-ancestor').length > 0) {
$('header #navbar > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
// MOBILE MENU SWITCH-A-ROO
$('header #navbar > li > a').each(function () {
$(this).on('click', function (e) {
if ($(window).width() < 980) {
e.preventDefault();
$it = $(this).parent();
console.log('hi');
if (!$it.hasClass('mobile-open show-sub')) {
if ($('#navbar.menu .mobile-open.show-sub').length > 0) {
$('#navbar.menu .mobile-open.show-sub').removeClass('mobile-open show-sub');
}
$it.addClass('mobile-open show-sub');
} else {
$it.removeClass('mobile-open show-sub');
$('#navbar.menu > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
}
});
});
// OFF SITE LINKS
$('a[href]').not('a[href*="' + DOMAIN + '"]').not('a[href*="mailto"]').each(function () { //
$(this).attr('target', '_blank');
});
// HOME LOAD FUNCTIONS
if ($('.page.home').length > 0) {
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
resizeVideo();
});
videoPlayer();
}
// YouTube lightbox link action
if ($('.yt-lb').length > 0) {
$('.yt-lb').each(function () {
$me = $(this);
$id = $me.attr('yt-id');
$me.on('click', function () {
popVideo($id);
});
});
$('.video-lightbox').on('click', function () {
killVideo();
});
$('body').keyup(function (event) {
if (event.which === 27) {
killVideo();
}
});
}
// Testimonial Carousel Functionality
if ($('#testimonial-slides').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="' + theme + '/img/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="' + theme + '/img/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
autoplay: true,
autoplaySpeed: 13000,
});
}
// Hero Carousel Functionality
if ($('#hero .bg-frame .caro').length > 0) {
initSlick($('#hero .bg-frame .caro'), {
autoplay: true,
arrows: false
});
}
// FAQ Page functionality
if ($('.page-frequently-asked-questions').length > 0) {
$('.page-frequently-asked-questions .faq').addClass('armed');
$('.faq .question').each(function () {
$i = $(this);
$j = $i.next();
$j.hide();
$i.on('click', function () {
$me = $(this);
if (!$me.hasClass('active')) {
if ($('.faq .question.active').length > 0) {
$('.faq .active').removeClass('active').next().hide();
}
$me.addClass('active').next().slideDown();
} else {
$me.removeClass('active').next().hide();
}
});
});
}
if ($('.page.about').length > 0) {
$('.info-block').each(function () {
ID = '#' + $(this).attr('id');
console.log(ID);
infoSlider(ID);
})
}
if ($('.row.map-body').length > 0) {
initMap();
toggleMapView();
}
if ($('.page.donate').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="images/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="images/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
});
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
})
}
if ($('.cloak-more').length > 0) {
$('.mobile-switch-more').on('click', function () {
$(this).prev().toggleClass('open');
});
}
});
| menu | identifier_name |
scripts.js | // v1
var $ = jQuery;
var WPTHEME = '/wp-content/themes/CorePaws-v2/';
var DOMAIN = location.protocol + "//" + location.host;
jQuery(document).ready(function ($) {
// Doc ready functions
GAtracking();
});
// ===================================================================
// Function with all of the general Google Analytics Tracking
// ===================================================================
function GAtracking() {
// Custom Google Analytics tracking code
// ...
}
// ===================================================================
// Function to gather all of the search criteria and submit the page
// ===================================================================
function petSearch() {
$('#sidebar .controls button').click(function () {
var search = {};
var url = '';
$("input[name='animal']:checked").each(function () {
if (search['animal'] === undefined) {
search['animal'] = $(this).val();
} else {
search['animal'] += ',' + $(this).val();
}
});
$("input[name='category']:checked").each(function () {
if (search['category'] === undefined) {
search['category'] = $(this).val();
} else {
search['category'] += ',' + $(this).val();
}
});
//Creates search URL
$.each(search, function (key, value) {
if (url.length === 0) {
url = '?' + key + '=' + value;
} else {
url += '&' + key + '=' + value;
}
});
// Use "search" variable to record events if desired
window.location = DOMAIN + '/adoption/' + url;
});
}
// ===================================================================
// Function to initialize Featured Pets Carousel
// ===================================================================
function initFeaturedCarousel() {
$('#featured .carousel').slick({
infinite: true,
slidesToShow: 4,
slidesToScroll: 1,
autoplay: true,
autoplaySpeed: 2000,
responsive: [
{breakpoint: 960, settings: {slidesToShow: 3}},
{breakpoint: 768, settings: {slidesToShow: 2}},
{breakpoint: 480, settings: {slidesToShow: 1}}
]
});
}
// ===================================================================
// Function to initialize Gallery Carousel
// ===================================================================
function initGalleryCarousel() {
$('#gallery .carousel').slick({
infinite: true,
slidesToShow: 1,
slidesToScroll: 1,
autoplay: false
});
$('#gallery .thumbnails .thumb').click(function () {
$('#gallery .carousel').slick('slickGoTo', $(this).attr('data-thumb'));
});
}
// ===================================================================
// Function for the FAQ show/hide feature
// ===================================================================
function initFAQ() {
$('.answer').hide();
$('h3.question').click(function () {
if ($(this).hasClass('active')) {
$(this).next('.answer').slideUp('fast', function () {
$(this).prev('h3.question').removeClass('active');
});
} else {
$(this).next('.answer').slideDown('slow', function () {
$(this).prev('h3.question').addClass('active');
});
}
});
}
// ===================================================================
// Global Function to determine what page is viewed based on main ID
// ===================================================================
function isPage(a) {
var array = a.split(',');
if (array.length === 2) {
return $("#" + array[0]).length && $("main").attr("data-sub") === array[1];
} else {
return $("#" + a).length;
}
}
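// Illustrative usage (hypothetical IDs): isPage('adoption') only checks that #adoption
// exists, while isPage('adoption,cats') additionally requires <main data-sub="cats">.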
// v2
function sizeElements(element) {
var maxHeight = 0;
console.log(element);
$(element).height('auto');
$(element).each(function () {
maxHeight = $(this).height() > maxHeight ? $(this).height() : maxHeight;
});
$(element).css('height', maxHeight);
}
// basic slider initialization function
function initSlick(slider, args) {
$(slider).slick(args);
}
// slider with custom pagination thumbnails. defined args, reusable on same-structural elements
function infoSlider(blockID) {
var gallery = $(blockID).find('.gallery');
var thumbs = $(blockID).find('.thumbnails');
$(gallery).slick({
dots: true,
infinite: true,
arrows: false,
appendDots: $(thumbs),
customPaging: function (slider, i) {
var thumb = $(slider.$slides[i]).data('thumb');
return '<a><img src="' + thumb + '"></a>';
},
})
}
function sizeFooterColumns() {
$('#footer-center').height($('#footer-left').height())
}
// active video player button on homepage
// on click: show iframe embed and hide thumbnail + play button
function videoPlayer() {
$('a.video').click(function () {
$me = $(this);
$id = $me.attr('yt-id');
popVideo($id);
})
}
// resize iframe after play
function resizeVideo() |
// mobile menu
function menu() {
// mobile menu clicks
$('#burger').on('click', function () {
$('#menu').toggleClass('open');
$('#burger').toggleClass('open');
$('html').toggleClass('scroll-lock');
});
}
function popVideo(id) {
$tar = $('#videobox');
$tar.addClass('on');
$str = '<div class="video-frame"><div class="videowrapper"><iframe width="560" height="315" src="https://www.youtube.com/embed/' + id + '?autoplay=1&controls=0" frameborder="0" allowfullscreen></iframe></div></div>';
$tar.html($str);
}
function killVideo() {
$tar = $('#videobox');
$tar.removeClass('on');
$tar.html('');
}
jQuery(document).ready(function ($) {
menu();
sizeFooterColumns();
$(window).resize(function () {
sizeFooterColumns();
});
if ($('header #navbar > li.current-menu-ancestor').length > 0) {
$('header #navbar > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
// MOBILE MENU SWITCH-A-ROO
$('header #navbar > li > a').each(function () {
$(this).on('click', function (e) {
if ($(window).width() < 980) {
e.preventDefault();
$it = $(this).parent();
console.log('hi');
if (!$it.hasClass('mobile-open show-sub')) {
if ($('#navbar.menu .mobile-open.show-sub').length > 0) {
$('#navbar.menu .mobile-open.show-sub').removeClass('mobile-open show-sub');
}
$it.addClass('mobile-open show-sub');
} else {
$it.removeClass('mobile-open show-sub');
$('#navbar.menu > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
}
});
});
// OFF SITE LINKS
$('a[href]').not('a[href*="' + DOMAIN + '"]').not('a[href*="mailto"]').each(function () { //
$(this).attr('target', '_blank');
});
// HOME LOAD FUNCTIONS
if ($('.page.home').length > 0) {
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
resizeVideo();
});
videoPlayer();
}
// YouTube lightbox link action
if ($('.yt-lb').length > 0) {
$('.yt-lb').each(function () {
$me = $(this);
$id = $me.attr('yt-id');
$me.on('click', function () {
popVideo($id);
});
});
$('.video-lightbox').on('click', function () {
killVideo();
});
$('body').keyup(function (event) {
if (event.which === 27) {
killVideo();
}
});
}
// Testimonial Carousel Functionality
if ($('#testimonial-slides').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="' + WPTHEME + 'img/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="' + WPTHEME + 'img/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
autoplay: true,
autoplaySpeed: 13000,
});
}
// Hero Carousel Functionality
if ($('#hero .bg-frame .caro').length > 0) {
initSlick($('#hero .bg-frame .caro'), {
autoplay: true,
arrows: false
});
}
// FAQ Page functionality
if ($('.page-frequently-asked-questions').length > 0) {
$('.page-frequently-asked-questions .faq').addClass('armed');
$('.faq .question').each(function () {
$i = $(this);
$j = $i.next();
$j.hide();
$i.on('click', function () {
$me = $(this);
if (!$me.hasClass('active')) {
if ($('.faq .question.active').length > 0) {
$('.faq .active').removeClass('active').next().hide();
}
$me.addClass('active').next().slideDown();
} else {
$me.removeClass('active').next().hide();
}
});
});
}
if ($('.page.about').length > 0) {
$('.info-block').each(function () {
ID = '#' + $(this).attr('id');
console.log(ID);
infoSlider(ID);
})
}
if ($('.row.map-body').length > 0) {
initMap();
toggleMapView();
}
if ($('.page.donate').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="images/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="images/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
});
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
})
}
if ($('.cloak-more').length > 0) {
$('.mobile-switch-more').on('click', function () {
$(this).prev().toggleClass('open');
});
}
});
| {
var $frame = $('iframe');
var width = $('.video').width();
$frame.attr('width', width);
$frame.attr('height', (width * 3 / 5));
} | identifier_body |
scripts.js | // v1
var $ = jQuery;
var WPTHEME = '/wp-content/themes/CorePaws-v2/';
var DOMAIN = location.protocol + "//" + location.host;
jQuery(document).ready(function ($) {
// Doc ready functions
GAtracking();
});
// ===================================================================
// Function with all of the general Google Analytics Tracking
// ===================================================================
function GAtracking() {
// Custom Google Analytics tracking code
// ...
}
// ===================================================================
// Function to gather all of the search criteria and submit the page
// ===================================================================
function petSearch() {
$('#sidebar .controls button').click(function () {
var search = {};
var url = '';
$("input[name='animal']:checked").each(function () {
if (search['animal'] === undefined) {
search['animal'] = $(this).val();
} else {
search['animal'] += ',' + $(this).val();
}
});
$("input[name='category']:checked").each(function () {
if (search['category'] === undefined) {
search['category'] = $(this).val();
} else {
search['category'] += ',' + $(this).val();
}
});
//Creates search URL
$.each(search, function (key, value) {
if (url.length === 0) | else {
url += '&' + key + '=' + value;
}
});
// Use "search" variable to record events if desired
window.location = DOMAIN + '/adoption/' + url;
});
}
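// Illustrative example (hypothetical values, not from the original markup): with a
// "dog" checkbox checked under animal and "senior" under category, the handler above
// builds '?animal=dog&category=senior' and redirects to DOMAIN + '/adoption/' + url.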
// ===================================================================
// Function to initialize Featured Pets Carousel
// ===================================================================
function initFeaturedCarousel() {
$('#featured .carousel').slick({
infinite: true,
slidesToShow: 4,
slidesToScroll: 1,
autoplay: true,
autoplaySpeed: 2000,
responsive: [
{breakpoint: 960, settings: {slidesToShow: 3}},
{breakpoint: 768, settings: {slidesToShow: 2}},
{breakpoint: 480, settings: {slidesToShow: 1}}
]
});
}
// ===================================================================
// Function to initialize Gallery Carousel
// ===================================================================
function initGalleryCarousel() {
$('#gallery .carousel').slick({
infinite: true,
slidesToShow: 1,
slidesToScroll: 1,
autoplay: false
});
$('#gallery .thumbnails .thumb').click(function () {
$('#gallery .carousel').slick('slickGoTo', $(this).attr('data-thumb'));
});
}
// ===================================================================
// Function for the FAQ show/hide feature
// ===================================================================
function initFAQ() {
$('.answer').hide();
$('h3.question').click(function () {
if ($(this).hasClass('active')) {
$(this).next('.answer').slideUp('fast', function () {
$(this).prev('h3.question').removeClass('active');
});
} else {
$(this).next('.answer').slideDown('slow', function () {
$(this).prev('h3.question').addClass('active');
});
}
});
}
// ===================================================================
// Global Function to determine what page is viewed based on main ID
// ===================================================================
function isPage(a) {
var array = a.split(',');
if (array.length === 2) {
return $("#" + array[0]).length && $("main").attr("data-sub") === array[1];
} else {
return $("#" + a).length;
}
}
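// Illustrative usage (hypothetical IDs): isPage('adoption') only checks that #adoption
// exists, while isPage('adoption,cats') additionally requires <main data-sub="cats">.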
// v2
function sizeElements(element) {
var maxHeight = 0;
console.log(element);
$(element).height('auto');
$(element).each(function () {
maxHeight = $(this).height() > maxHeight ? $(this).height() : maxHeight;
});
$(element).css('height', maxHeight);
}
// basic slider initialization function
function initSlick(slider, args) {
$(slider).slick(args);
}
// slider with custom pagination thumbnails. defined args, reusable on same-structural elements
function infoSlider(blockID) {
var gallery = $(blockID).find('.gallery');
var thumbs = $(blockID).find('.thumbnails');
$(gallery).slick({
dots: true,
infinite: true,
arrows: false,
appendDots: $(thumbs),
customPaging: function (slider, i) {
var thumb = $(slider.$slides[i]).data('thumb');
return '<a><img src="' + thumb + '"></a>';
},
})
}
function sizeFooterColumns() {
$('#footer-center').height($('#footer-left').height())
}
// active video player button on homepage
// on click: show iframe embed and hide thumbnail + play button
function videoPlayer() {
$('a.video').click(function () {
$me = $(this);
$id = $me.attr('yt-id');
popVideo($id);
})
}
// resize iframe after play
function resizeVideo() {
var $frame = $('iframe');
var width = $('.video').width();
$frame.attr('width', width);
$frame.attr('height', (width * 3 / 5));
}
// mobile menu
function menu() {
// mobile menu clicks
$('#burger').on('click', function () {
$('#menu').toggleClass('open');
$('#burger').toggleClass('open');
$('html').toggleClass('scroll-lock');
});
}
function popVideo(id) {
$tar = $('#videobox');
$tar.addClass('on');
$str = '<div class="video-frame"><div class="videowrapper"><iframe width="560" height="315" src="https://www.youtube.com/embed/' + id + '?autoplay=1&controls=0" frameborder="0" allowfullscreen></iframe></div></div>';
$tar.html($str);
}
function killVideo() {
$tar = $('#videobox');
$tar.removeClass('on');
$tar.html('');
}
jQuery(document).ready(function ($) {
menu();
sizeFooterColumns();
$(window).resize(function () {
sizeFooterColumns();
});
if ($('header #navbar > li.current-menu-ancestor').length > 0) {
$('header #navbar > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
// MOBILE MENU SWITCH-A-ROO
$('header #navbar > li > a').each(function () {
$(this).on('click', function (e) {
if ($(window).width() < 980) {
e.preventDefault();
$it = $(this).parent();
console.log('hi');
if (!$it.hasClass('mobile-open show-sub')) {
if ($('#navbar.menu .mobile-open.show-sub').length > 0) {
$('#navbar.menu .mobile-open.show-sub').removeClass('mobile-open show-sub');
}
$it.addClass('mobile-open show-sub');
} else {
$it.removeClass('mobile-open show-sub');
$('#navbar.menu > li.current-menu-ancestor').addClass('mobile-open').addClass('show-sub');
}
}
});
});
// OFF SITE LINKS
$('a[href]').not('a[href*="' + DOMAIN + '"]').not('a[href*="mailto"]').each(function () { //
$(this).attr('target', '_blank');
});
// HOME LOAD FUNCTIONS
if ($('.page.home').length > 0) {
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
resizeVideo();
});
videoPlayer();
}
// YouTube lightbox link action
if ($('.yt-lb').length > 0) {
$('.yt-lb').each(function () {
$me = $(this);
$id = $me.attr('yt-id');
$me.on('click', function () {
popVideo($id);
});
});
$('.video-lightbox').on('click', function () {
killVideo();
});
$('body').keyup(function (event) {
if (event.which === 27) {
killVideo();
}
});
}
// Testimonial Carousel Functionality
if ($('#testimonial-slides').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="' + WPTHEME + 'img/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="' + WPTHEME + 'img/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
autoplay: true,
autoplaySpeed: 13000,
});
}
// Hero Carousel Functionality
if ($('#hero .bg-frame .caro').length > 0) {
initSlick($('#hero .bg-frame .caro'), {
autoplay: true,
arrows: false
});
}
// FAQ Page functionality
if ($('.page-frequently-asked-questions').length > 0) {
$('.page-frequently-asked-questions .faq').addClass('armed');
$('.faq .question').each(function () {
$i = $(this);
$j = $i.next();
$j.hide();
$i.on('click', function () {
$me = $(this);
if (!$me.hasClass('active')) {
if ($('.faq .question.active').length > 0) {
$('.faq .active').removeClass('active').next().hide();
}
$me.addClass('active').next().slideDown();
} else {
$me.removeClass('active').next().hide();
}
});
});
}
if ($('.page.about').length > 0) {
$('.info-block').each(function () {
ID = '#' + $(this).attr('id');
console.log(ID);
infoSlider(ID);
})
}
if ($('.row.map-body').length > 0) {
initMap();
toggleMapView();
}
if ($('.page.donate').length > 0) {
initSlick($('#testimonial-slides'), {
nextArrow: '<button type="button" class="slick-next"><img src="images/arrow_r.png"></button>',
prevArrow: '<button type="button" class="slick-prev"><img src="images/arrow_l.png"></button>',
dots: true,
appendDots: $("#tesimonial-dots"),
});
// sizing function on load and on window resize
sizeElements('.preview-text');
$(window).resize(function () {
sizeElements('.preview-text');
})
}
if ($('.cloak-more').length > 0) {
$('.mobile-switch-more').on('click', function () {
$(this).prev().toggleClass('open');
});
}
});
| {
url = '?' + key + '=' + value;
} | conditional_block |
queued.rs | // Copyright (c) 2013-2016 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::any_pointer;
use capnp::capability::Promise;
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp, ResultsHook};
use capnp::Error;
use futures::{Future, FutureExt, TryFutureExt};
use std::cell::RefCell;
use std::rc::{Rc, Weak};
use crate::attach::Attach;
use crate::sender_queue::SenderQueue;
use crate::{broken, local};
pub struct PipelineInner {
// Once the promise resolves, this will become `Some` and point to the underlying object.
redirect: Option<Box<dyn PipelineHook>>,
promise_to_drive: futures::future::Shared<Promise<(), Error>>,
clients_to_resolve: SenderQueue<(Weak<RefCell<ClientInner>>, Vec<PipelineOp>), ()>,
}
impl PipelineInner {
fn resolve(this: &Rc<RefCell<Self>>, result: Result<Box<dyn PipelineHook>, Error>) {
assert!(this.borrow().redirect.is_none());
let pipeline = match result {
Ok(pipeline_hook) => pipeline_hook,
Err(e) => Box::new(broken::Pipeline::new(e)),
};
this.borrow_mut().redirect = Some(pipeline.add_ref());
for ((weak_client, ops), waiter) in this.borrow_mut().clients_to_resolve.drain() {
if let Some(client) = weak_client.upgrade() {
let clienthook = pipeline.get_pipelined_cap_move(ops);
ClientInner::resolve(&client, Ok(clienthook));
}
let _ = waiter.send(());
}
this.borrow_mut().promise_to_drive = Promise::ok(()).shared();
}
}
pub struct PipelineInnerSender {
inner: Option<Weak<RefCell<PipelineInner>>>,
}
impl Drop for PipelineInnerSender {
fn drop(&mut self) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
PipelineInner::resolve(
&pipeline_inner,
Ok(Box::new(crate::broken::Pipeline::new(Error::failed(
"PipelineInnerSender was canceled".into(),
)))),
);
}
}
}
}
impl PipelineInnerSender {
pub fn complete(mut self, pipeline: Box<dyn PipelineHook>) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
crate::queued::PipelineInner::resolve(&pipeline_inner, Ok(pipeline));
}
}
}
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new() -> (PipelineInnerSender, Self) {
let inner = Rc::new(RefCell::new(PipelineInner {
redirect: None,
promise_to_drive: Promise::ok(()).shared(),
clients_to_resolve: SenderQueue::new(),
}));
(
PipelineInnerSender {
inner: Some(Rc::downgrade(&inner)),
},
Self { inner },
)
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> + 'static + Unpin,
{
let new = Promise::from_future(
futures::future::try_join(self.inner.borrow_mut().promise_to_drive.clone(), promise)
.map_ok(|_| ()),
)
.shared();
self.inner.borrow_mut().promise_to_drive = new;
}
}
impl Clone for Pipeline {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl PipelineHook for Pipeline {
fn | (&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
self.get_pipelined_cap_move(ops.into())
}
fn get_pipelined_cap_move(&self, ops: Vec<PipelineOp>) -> Box<dyn ClientHook> {
if let Some(p) = &self.inner.borrow().redirect {
return p.get_pipelined_cap_move(ops);
}
let mut queued_client = Client::new(Some(self.inner.clone()));
queued_client.drive(self.inner.borrow().promise_to_drive.clone());
let weak_queued = Rc::downgrade(&queued_client.inner);
self.inner
.borrow_mut()
.clients_to_resolve
.push_detach((weak_queued, ops));
Box::new(queued_client)
}
}
pub struct ClientInner {
// Once the promise resolves, this will become `Some` and point to the underlying object.
redirect: Option<Box<dyn ClientHook>>,
// The queued::PipelineInner that this client is derived from, if any. We need to hold on
// to a reference to it so that it doesn't get canceled before the client is resolved.
pipeline_inner: Option<Rc<RefCell<PipelineInner>>>,
promise_to_drive: Option<futures::future::Shared<Promise<(), Error>>>,
// When this promise resolves, each queued call will be forwarded to the real client. This needs
// to occur *before* any 'whenMoreResolved()' promises resolve, because we want to make sure
// previously-queued calls are delivered before any new calls made in response to the resolution.
call_forwarding_queue:
SenderQueue<(u64, u16, Box<dyn ParamsHook>, Box<dyn ResultsHook>), Promise<(), Error>>,
// whenMoreResolved() returns forks of this promise. These must resolve *after* queued calls
// have been initiated (so that any calls made in the whenMoreResolved() handler are correctly
// delivered after calls made earlier), but *before* any queued calls return (because it might
// confuse the application if a queued call returns before the capability on which it was made
// resolves). Luckily, we know that queued calls will involve, at the very least, an
// eventLoop.evalLater.
client_resolution_queue: SenderQueue<(), Box<dyn ClientHook>>,
}
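// Ordering sketch: when `ClientInner::resolve` runs below, the entries in
// `call_forwarding_queue` are drained and forwarded first, and only then are the
// `client_resolution_queue` waiters completed, so a call issued from a
// `when_more_resolved()` handler can never be delivered ahead of a call that was
// queued before the resolution.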
impl ClientInner {
pub fn resolve(state: &Rc<RefCell<Self>>, result: Result<Box<dyn ClientHook>, Error>) {
assert!(state.borrow().redirect.is_none());
let client = match result {
Ok(clienthook) => clienthook,
Err(e) => broken::new_cap(e),
};
state.borrow_mut().redirect = Some(client.add_ref());
for (args, waiter) in state.borrow_mut().call_forwarding_queue.drain() {
let (interface_id, method_id, params, results) = args;
let result_promise = client.call(interface_id, method_id, params, results);
let _ = waiter.send(result_promise);
}
for ((), waiter) in state.borrow_mut().client_resolution_queue.drain() {
let _ = waiter.send(client.add_ref());
}
state.borrow_mut().promise_to_drive.take();
state.borrow_mut().pipeline_inner.take();
}
}
pub struct Client {
pub inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(pipeline_inner: Option<Rc<RefCell<PipelineInner>>>) -> Self {
let inner = Rc::new(RefCell::new(ClientInner {
promise_to_drive: None,
pipeline_inner,
redirect: None,
call_forwarding_queue: SenderQueue::new(),
client_resolution_queue: SenderQueue::new(),
}));
Self { inner }
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> + 'static + Unpin,
{
assert!(self.inner.borrow().promise_to_drive.is_none());
self.inner.borrow_mut().promise_to_drive = Some(Promise::from_future(promise).shared());
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(Self {
inner: self.inner.clone(),
})
}
fn new_call(
&self,
interface_id: u64,
method_id: u16,
size_hint: Option<::capnp::MessageSize>,
) -> ::capnp::capability::Request<any_pointer::Owned, any_pointer::Owned> {
::capnp::capability::Request::new(Box::new(local::Request::new(
interface_id,
method_id,
size_hint,
self.add_ref(),
)))
}
fn call(
&self,
interface_id: u64,
method_id: u16,
params: Box<dyn ParamsHook>,
results: Box<dyn ResultsHook>,
) -> Promise<(), Error> {
if let Some(client) = &self.inner.borrow().redirect {
return client.call(interface_id, method_id, params, results);
}
let inner_clone = self.inner.clone();
let promise = self
.inner
.borrow_mut()
.call_forwarding_queue
.push((interface_id, method_id, params, results))
.attach(inner_clone)
.and_then(|x| x);
match self.inner.borrow().promise_to_drive {
Some(ref p) => {
Promise::from_future(futures::future::try_join(p.clone(), promise).map_ok(|v| v.1))
}
None => Promise::from_future(promise),
}
}
fn get_ptr(&self) -> usize {
(&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
match &self.inner.borrow().redirect {
Some(inner) => Some(inner.clone()),
None => None,
}
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
if let Some(client) = &self.inner.borrow().redirect {
return Some(Promise::ok(client.add_ref()));
}
let promise = self.inner.borrow_mut().client_resolution_queue.push(());
match &self.inner.borrow().promise_to_drive {
Some(p) => Some(Promise::from_future(
futures::future::try_join(p.clone(), promise).map_ok(|v| v.1),
)),
None => Some(Promise::from_future(promise)),
}
}
fn when_resolved(&self) -> Promise<(), Error> {
crate::rpc::default_when_resolved_impl(self)
}
}
| add_ref | identifier_name |
queued.rs | // Copyright (c) 2013-2016 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::any_pointer;
use capnp::capability::Promise;
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp, ResultsHook};
use capnp::Error;
use futures::{Future, FutureExt, TryFutureExt};
use std::cell::RefCell;
use std::rc::{Rc, Weak};
use crate::attach::Attach;
use crate::sender_queue::SenderQueue;
use crate::{broken, local};
pub struct PipelineInner {
// Once the promise resolves, this will become `Some` and point to the underlying object.
redirect: Option<Box<dyn PipelineHook>>,
promise_to_drive: futures::future::Shared<Promise<(), Error>>,
clients_to_resolve: SenderQueue<(Weak<RefCell<ClientInner>>, Vec<PipelineOp>), ()>,
}
impl PipelineInner {
fn resolve(this: &Rc<RefCell<Self>>, result: Result<Box<dyn PipelineHook>, Error>) {
assert!(this.borrow().redirect.is_none());
let pipeline = match result {
Ok(pipeline_hook) => pipeline_hook,
Err(e) => Box::new(broken::Pipeline::new(e)),
};
this.borrow_mut().redirect = Some(pipeline.add_ref());
for ((weak_client, ops), waiter) in this.borrow_mut().clients_to_resolve.drain() {
if let Some(client) = weak_client.upgrade() {
let clienthook = pipeline.get_pipelined_cap_move(ops);
ClientInner::resolve(&client, Ok(clienthook));
}
let _ = waiter.send(());
}
this.borrow_mut().promise_to_drive = Promise::ok(()).shared();
}
}
pub struct PipelineInnerSender {
inner: Option<Weak<RefCell<PipelineInner>>>,
}
impl Drop for PipelineInnerSender {
fn drop(&mut self) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
PipelineInner::resolve(
&pipeline_inner,
Ok(Box::new(crate::broken::Pipeline::new(Error::failed(
"PipelineInnerSender was canceled".into(),
)))),
);
}
}
}
}
impl PipelineInnerSender {
pub fn complete(mut self, pipeline: Box<dyn PipelineHook>) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
crate::queued::PipelineInner::resolve(&pipeline_inner, Ok(pipeline));
}
}
}
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new() -> (PipelineInnerSender, Self) {
let inner = Rc::new(RefCell::new(PipelineInner {
redirect: None,
promise_to_drive: Promise::ok(()).shared(),
clients_to_resolve: SenderQueue::new(),
}));
(
PipelineInnerSender {
inner: Some(Rc::downgrade(&inner)),
},
Self { inner },
)
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> + 'static + Unpin,
{
let new = Promise::from_future(
futures::future::try_join(self.inner.borrow_mut().promise_to_drive.clone(), promise)
.map_ok(|_| ()),
)
.shared();
self.inner.borrow_mut().promise_to_drive = new;
}
}
impl Clone for Pipeline {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl PipelineHook for Pipeline {
fn add_ref(&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
self.get_pipelined_cap_move(ops.into())
}
fn get_pipelined_cap_move(&self, ops: Vec<PipelineOp>) -> Box<dyn ClientHook> {
if let Some(p) = &self.inner.borrow().redirect {
return p.get_pipelined_cap_move(ops);
}
let mut queued_client = Client::new(Some(self.inner.clone()));
queued_client.drive(self.inner.borrow().promise_to_drive.clone());
let weak_queued = Rc::downgrade(&queued_client.inner);
self.inner
.borrow_mut()
.clients_to_resolve
.push_detach((weak_queued, ops));
Box::new(queued_client)
}
}
pub struct ClientInner {
// Once the promise resolves, this will become `Some` and point to the underlying object.
redirect: Option<Box<dyn ClientHook>>,
// The queued::PipelineInner that this client is derived from, if any. We need to hold on
// to a reference to it so that it doesn't get canceled before the client is resolved.
pipeline_inner: Option<Rc<RefCell<PipelineInner>>>,
promise_to_drive: Option<futures::future::Shared<Promise<(), Error>>>,
// When this promise resolves, each queued call will be forwarded to the real client. This needs
// to occur *before* any 'whenMoreResolved()' promises resolve, because we want to make sure
// previously-queued calls are delivered before any new calls made in response to the resolution.
call_forwarding_queue:
SenderQueue<(u64, u16, Box<dyn ParamsHook>, Box<dyn ResultsHook>), Promise<(), Error>>,
// whenMoreResolved() returns forks of this promise. These must resolve *after* queued calls
// have been initiated (so that any calls made in the whenMoreResolved() handler are correctly | client_resolution_queue: SenderQueue<(), Box<dyn ClientHook>>,
}
impl ClientInner {
pub fn resolve(state: &Rc<RefCell<Self>>, result: Result<Box<dyn ClientHook>, Error>) {
assert!(state.borrow().redirect.is_none());
let client = match result {
Ok(clienthook) => clienthook,
Err(e) => broken::new_cap(e),
};
state.borrow_mut().redirect = Some(client.add_ref());
for (args, waiter) in state.borrow_mut().call_forwarding_queue.drain() {
let (interface_id, method_id, params, results) = args;
let result_promise = client.call(interface_id, method_id, params, results);
let _ = waiter.send(result_promise);
}
for ((), waiter) in state.borrow_mut().client_resolution_queue.drain() {
let _ = waiter.send(client.add_ref());
}
state.borrow_mut().promise_to_drive.take();
state.borrow_mut().pipeline_inner.take();
}
}
pub struct Client {
pub inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(pipeline_inner: Option<Rc<RefCell<PipelineInner>>>) -> Self {
let inner = Rc::new(RefCell::new(ClientInner {
promise_to_drive: None,
pipeline_inner,
redirect: None,
call_forwarding_queue: SenderQueue::new(),
client_resolution_queue: SenderQueue::new(),
}));
Self { inner }
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> + 'static + Unpin,
{
assert!(self.inner.borrow().promise_to_drive.is_none());
self.inner.borrow_mut().promise_to_drive = Some(Promise::from_future(promise).shared());
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(Self {
inner: self.inner.clone(),
})
}
fn new_call(
&self,
interface_id: u64,
method_id: u16,
size_hint: Option<::capnp::MessageSize>,
) -> ::capnp::capability::Request<any_pointer::Owned, any_pointer::Owned> {
::capnp::capability::Request::new(Box::new(local::Request::new(
interface_id,
method_id,
size_hint,
self.add_ref(),
)))
}
fn call(
&self,
interface_id: u64,
method_id: u16,
params: Box<dyn ParamsHook>,
results: Box<dyn ResultsHook>,
) -> Promise<(), Error> {
if let Some(client) = &self.inner.borrow().redirect {
return client.call(interface_id, method_id, params, results);
}
let inner_clone = self.inner.clone();
let promise = self
.inner
.borrow_mut()
.call_forwarding_queue
.push((interface_id, method_id, params, results))
.attach(inner_clone)
.and_then(|x| x);
match self.inner.borrow().promise_to_drive {
Some(ref p) => {
Promise::from_future(futures::future::try_join(p.clone(), promise).map_ok(|v| v.1))
}
None => Promise::from_future(promise),
}
}
fn get_ptr(&self) -> usize {
(&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
match &self.inner.borrow().redirect {
Some(inner) => Some(inner.clone()),
None => None,
}
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
if let Some(client) = &self.inner.borrow().redirect {
return Some(Promise::ok(client.add_ref()));
}
let promise = self.inner.borrow_mut().client_resolution_queue.push(());
match &self.inner.borrow().promise_to_drive {
Some(p) => Some(Promise::from_future(
futures::future::try_join(p.clone(), promise).map_ok(|v| v.1),
)),
None => Some(Promise::from_future(promise)),
}
}
fn when_resolved(&self) -> Promise<(), Error> {
crate::rpc::default_when_resolved_impl(self)
}
} | // delivered after calls made earlier), but *before* any queued calls return (because it might
// confuse the application if a queued call returns before the capability on which it was made
// resolves). Luckily, we know that queued calls will involve, at the very least, an
// eventLoop.evalLater. | random_line_split |
framework.rs | use std::future::Future;
#[cfg(target_arch = "wasm32")]
use std::str::FromStr;
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
#[cfg(target_arch = "wasm32")]
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
use std::{mem::size_of_val, slice::from_raw_parts};
unsafe { from_raw_parts(data.as_ptr() as *const u8, size_of_val(data)) }
}
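// Note: this byte view of `data` is only meant for the plain-old-data vertex and
// uniform structs used in these examples; for types with padding or non-POD fields,
// a checked cast (e.g. via the bytemuck crate) would be the safer choice.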
#[allow(dead_code)]
pub enum ShaderStage {
Vertex,
Fragment,
Compute,
}
pub trait Example: 'static + Sized {
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn render(
&mut self,
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
spawner: &Spawner,
);
}
struct Setup {
window: winit::window::Window,
event_loop: EventLoop<()>,
instance: wgpu::Instance,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
|
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// make sure to handle JS exceptions thrown inside start.
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
// This is required, because winit uses JS exception for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if !is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
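// Illustrative behaviour (hypothetical values): parse_url_query_string("?RUST_LOG=info&x=1", "RUST_LOG")
// returns Some("info"), while a query without that key (or without a leading '?') yields None.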
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned to 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let mut example = E::init(
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
width: params.width,
height: params.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![wgpu::TextureFormat::Rgba8UnormSrgb],
},
&ctx.adapter,
&ctx.device,
&ctx.queue,
);
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
// Handle specific case for bunnymark
#[allow(deprecated)]
if params.image_path == "/examples/bunnymark/screenshot.png" {
// Press spacebar to spawn bunnies
example.update(winit::event::WindowEvent::KeyboardInput {
input: winit::event::KeyboardInput {
scancode: 0,
state: winit::event::ElementState::Pressed,
virtual_keycode: Some(winit::event::VirtualKeyCode::Space),
modifiers: winit::event::ModifiersState::empty(),
},
device_id: unsafe { winit::event::DeviceId::dummy() },
is_synthetic: false,
});
// Step 3 extra frames
for _ in 0..3 {
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
}
}
let mut cmd_buf = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
cmd_buf.copy_texture_to_buffer(
wgpu::ImageCopyTexture {
texture: &dst_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::ImageCopyBuffer {
buffer: &dst_buffer,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(params.width * 4),
rows_per_image: None,
},
},
wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
);
ctx.queue.submit(Some(cmd_buf.finish()));
let dst_buffer_slice = dst_buffer.slice(..);
dst_buffer_slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.device.poll(wgpu::Maintain::Wait);
let bytes = dst_buffer_slice.get_mapped_range().to_vec();
wgpu_test::image::compare_image_output(
env!("CARGO_MANIFEST_DIR").to_string() + "/../../" + params.image_path,
&ctx.adapter_info,
params.width,
params.height,
&bytes,
params.comparisons,
);
},
);
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
#[allow(dead_code)]
fn main() {} | fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
} | random_line_split |
framework.rs | use std::future::Future;
#[cfg(target_arch = "wasm32")]
use std::str::FromStr;
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
#[cfg(target_arch = "wasm32")]
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
use std::{mem::size_of_val, slice::from_raw_parts};
unsafe { from_raw_parts(data.as_ptr() as *const u8, size_of_val(data)) }
}
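// Note: this byte view of `data` is only meant for the plain-old-data vertex and
// uniform structs used in these examples; for types with padding or non-POD fields,
// a checked cast (e.g. via the bytemuck crate) would be the safer choice.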
#[allow(dead_code)]
pub enum ShaderStage {
Vertex,
Fragment,
Compute,
}
pub trait Example: 'static + Sized {
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn render(
&mut self,
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
spawner: &Spawner,
);
}
struct Setup {
window: winit::window::Window,
event_loop: EventLoop<()>,
instance: wgpu::Instance,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup |
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// Make sure to handle JS exceptions thrown inside `start`.
// Otherwise the wasm_bindgen_futures task queue would break and never handle any tasks again.
// This is required because winit uses JS exceptions for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if !is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned to 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let mut example = E::init(
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
width: params.width,
height: params.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![wgpu::TextureFormat::Rgba8UnormSrgb],
},
&ctx.adapter,
&ctx.device,
&ctx.queue,
);
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
// Handle specific case for bunnymark
#[allow(deprecated)]
if params.image_path == "/examples/bunnymark/screenshot.png" {
// Press spacebar to spawn bunnies
example.update(winit::event::WindowEvent::KeyboardInput {
input: winit::event::KeyboardInput {
scancode: 0,
state: winit::event::ElementState::Pressed,
virtual_keycode: Some(winit::event::VirtualKeyCode::Space),
modifiers: winit::event::ModifiersState::empty(),
},
device_id: unsafe { winit::event::DeviceId::dummy() },
is_synthetic: false,
});
// Step 3 extra frames
for _ in 0..3 {
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
}
}
let mut cmd_buf = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
cmd_buf.copy_texture_to_buffer(
wgpu::ImageCopyTexture {
texture: &dst_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::ImageCopyBuffer {
buffer: &dst_buffer,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(params.width * 4),
rows_per_image: None,
},
},
wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
);
ctx.queue.submit(Some(cmd_buf.finish()));
let dst_buffer_slice = dst_buffer.slice(..);
dst_buffer_slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.device.poll(wgpu::Maintain::Wait);
let bytes = dst_buffer_slice.get_mapped_range().to_vec();
wgpu_test::image::compare_image_output(
env!("CARGO_MANIFEST_DIR").to_string() + "/../../" + params.image_path,
&ctx.adapter_info,
params.width,
params.height,
&bytes,
params.comparisons,
);
},
);
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
#[allow(dead_code)]
fn main() {}
| {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
} | identifier_body |
framework.rs | use std::future::Future;
#[cfg(target_arch = "wasm32")]
use std::str::FromStr;
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
#[cfg(target_arch = "wasm32")]
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
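// Reinterpret a slice of values as raw bytes; only sound for types whose in-memory
// representation has no uninitialized (padding) bytes.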
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
use std::{mem::size_of_val, slice::from_raw_parts};
unsafe { from_raw_parts(data.as_ptr() as *const u8, size_of_val(data)) }
}
#[allow(dead_code)]
pub enum ShaderStage {
Vertex,
Fragment,
Compute,
}
pub trait Example: 'static + Sized {
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn render(
&mut self,
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
spawner: &Spawner,
);
}
struct Setup {
window: winit::window::Window,
event_loop: EventLoop<()>,
instance: wgpu::Instance,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => |
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// Make sure to handle JS exceptions thrown inside `start`.
// Otherwise the wasm_bindgen_futures task queue would break and never handle any tasks again.
// This is required because winit uses JS exceptions for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if !is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned to 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let mut example = E::init(
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
width: params.width,
height: params.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![wgpu::TextureFormat::Rgba8UnormSrgb],
},
&ctx.adapter,
&ctx.device,
&ctx.queue,
);
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
// Handle specific case for bunnymark
#[allow(deprecated)]
if params.image_path == "/examples/bunnymark/screenshot.png" {
// Press spacebar to spawn bunnies
example.update(winit::event::WindowEvent::KeyboardInput {
input: winit::event::KeyboardInput {
scancode: 0,
state: winit::event::ElementState::Pressed,
virtual_keycode: Some(winit::event::VirtualKeyCode::Space),
modifiers: winit::event::ModifiersState::empty(),
},
device_id: unsafe { winit::event::DeviceId::dummy() },
is_synthetic: false,
});
// Step 3 extra frames
for _ in 0..3 {
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
}
}
let mut cmd_buf = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
cmd_buf.copy_texture_to_buffer(
wgpu::ImageCopyTexture {
texture: &dst_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::ImageCopyBuffer {
buffer: &dst_buffer,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(params.width * 4),
rows_per_image: None,
},
},
wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
);
ctx.queue.submit(Some(cmd_buf.finish()));
let dst_buffer_slice = dst_buffer.slice(..);
dst_buffer_slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.device.poll(wgpu::Maintain::Wait);
let bytes = dst_buffer_slice.get_mapped_range().to_vec();
wgpu_test::image::compare_image_output(
env!("CARGO_MANIFEST_DIR").to_string() + "/../../" + params.image_path,
&ctx.adapter_info,
params.width,
params.height,
&bytes,
params.comparisons,
);
},
);
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
#[allow(dead_code)]
fn main() {}
| {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
} | conditional_block |
framework.rs | use std::future::Future;
#[cfg(target_arch = "wasm32")]
use std::str::FromStr;
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
#[cfg(target_arch = "wasm32")]
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
use std::{mem::size_of_val, slice::from_raw_parts};
unsafe { from_raw_parts(data.as_ptr() as *const u8, size_of_val(data)) }
}
#[allow(dead_code)]
pub enum ShaderStage {
Vertex,
Fragment,
Compute,
}
pub trait Example: 'static + Sized {
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn render(
&mut self,
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
spawner: &Spawner,
);
}
struct Setup {
window: winit::window::Window,
event_loop: EventLoop<()>,
instance: wgpu::Instance,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn | <E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// Make sure to handle JS exceptions thrown inside `start`.
// Otherwise the wasm_bindgen_futures task queue would break and never handle any tasks again.
// This is required because winit uses JS exceptions for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if !is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned to 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let mut example = E::init(
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
width: params.width,
height: params.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![wgpu::TextureFormat::Rgba8UnormSrgb],
},
&ctx.adapter,
&ctx.device,
&ctx.queue,
);
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
// Handle specific case for bunnymark
#[allow(deprecated)]
if params.image_path == "/examples/bunnymark/screenshot.png" {
// Press spacebar to spawn bunnies
example.update(winit::event::WindowEvent::KeyboardInput {
input: winit::event::KeyboardInput {
scancode: 0,
state: winit::event::ElementState::Pressed,
virtual_keycode: Some(winit::event::VirtualKeyCode::Space),
modifiers: winit::event::ModifiersState::empty(),
},
device_id: unsafe { winit::event::DeviceId::dummy() },
is_synthetic: false,
});
// Step 3 extra frames
for _ in 0..3 {
example.render(&dst_view, &ctx.device, &ctx.queue, &spawner);
}
}
let mut cmd_buf = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
cmd_buf.copy_texture_to_buffer(
wgpu::ImageCopyTexture {
texture: &dst_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::ImageCopyBuffer {
buffer: &dst_buffer,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(params.width * 4),
rows_per_image: None,
},
},
wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
);
ctx.queue.submit(Some(cmd_buf.finish()));
let dst_buffer_slice = dst_buffer.slice(..);
dst_buffer_slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.device.poll(wgpu::Maintain::Wait);
let bytes = dst_buffer_slice.get_mapped_range().to_vec();
wgpu_test::image::compare_image_output(
env!("CARGO_MANIFEST_DIR").to_string() + "/../../" + params.image_path,
&ctx.adapter_info,
params.width,
params.height,
&bytes,
params.comparisons,
);
},
);
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
#[allow(dead_code)]
fn main() {}
| run | identifier_name |
main.rs | use std::convert::Infallible;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};
mod github;
mod zulip;
const BOT_NAME: &'static str = "bisect-bot ";
const USER_AGENT: &'static str = "https://github.com/bjorn3/cargo-bisect-rustc-bot";
const REPO_WHITELIST: &'static [&'static str] = &["bjorn3/cargo-bisect-rustc-bot", JOB_REPO];
const JOB_REPO: &'static str = "bjorn3/cargo-bisect-rustc-bot-jobs";
lazy_static::lazy_static! {
static ref GITHUB_USERNAME: String = std::env::var("GITHUB_USERNAME").expect("github username not defined");
static ref GITHUB_TOKEN: String = std::env::var("GITHUB_TOKEN").expect("github personal access token not defined");
static ref ZULIP_USER: String = std::env::var("ZULIP_USERNAME").expect("zulip username not defined");
static ref ZULIP_TOKEN: String = std::env::var("ZULIP_TOKEN").expect("zulip api token not defined");
}
#[tokio::main]
async fn main() {
let _zulip = tokio::spawn(crate::zulip::zulip_task());
let addr = (
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3000".to_string())
.parse::<u16>()
.unwrap(),
)
.into();
let make_svc = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(request_handler))
});
let server = Server::bind(&addr).serve(make_svc);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
async fn request_handler(req: Request<Body>) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
crate::github::web_hook(req).await.map_err(|err| {
println!("error: {}", err);
err
})
}
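// Where the bot should post its reply: a GitHub issue/PR, a public Zulip stream, or a private Zulip message.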
#[derive(Debug, PartialEq)]
enum ReplyTo {
Github {
repo: String,
issue_number: u64,
},
ZulipPublic {
stream_id: u64,
subject: String,
},
ZulipPrivate {
user_id: u64,
},
}
impl ReplyTo {
async fn comment(&self, body: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
crate::github::gh_post_comment(repo, issue_number, body).await?;
Ok(())
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
crate::zulip::zulip_post_public_message(stream_id, subject, body).await
}
ReplyTo::ZulipPrivate { user_id } => {
crate::zulip::zulip_post_private_message(user_id, body).await
}
}
}
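// The reply target is embedded as a header line in the job commit message, so the
// result can later be routed back to the conversation that requested the bisection.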
const COMMIT_HEADER: &'static str = "X-Bisectbot-Reply-To";
fn to_commit_header(&self) -> String {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
format!("{}: github {}#{}", Self::COMMIT_HEADER, repo, issue_number)
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
format!("{}: zulip-public {} | {}", Self::COMMIT_HEADER, stream_id, subject)
}
ReplyTo::ZulipPrivate { user_id } => {
format!("{}: zulip-private {}", Self::COMMIT_HEADER, user_id)
}
}
}
fn from_commit_message(message: &str) -> Result<Self, ()> {
for line in message.lines() {
let line = line.trim();
if !line.starts_with(Self::COMMIT_HEADER) {
continue;
}
let header = line[Self::COMMIT_HEADER.len()+1..].trim();
let mut split = header.split(" ");
let kind = split.next().ok_or(())?.trim();
let to = split.next().ok_or(())?.trim();
match kind {
"github" => {
if split.next().is_some() {
return Err(());
}
let mut split = to.split("#");
let repo = split.next().ok_or(())?.trim();
let issue_number = split.next().ok_or(())?.trim().parse().map_err(|_| ())?;
if split.next().is_some() {
return Err(());
}
return Ok(ReplyTo::Github {
repo: repo.to_string(),
issue_number,
});
}
"zulip-public" => {
let stream_id: u64 = to.parse().map_err(|_| ())?;
let subject = header[header.find("|").ok_or(())?+2..].to_string();
return Ok(ReplyTo::ZulipPublic {
stream_id,
subject,
})
}
"zulip-private" => {
if split.next().is_some() {
return Err(());
}
let user_id = to.parse().map_err(|_| ())?;
return Ok(ReplyTo::ZulipPrivate {
user_id,
});
}
_ => return Err(()),
}
}
Err(())
}
}
#[test]
fn test_reply_to_parsing() {
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: github a/b#5"),
Ok(ReplyTo::Github { repo: "a/b".to_string(), issue_number: 5}),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-public 123 | this is the #1 topic on this zulip instance!"),
Ok(ReplyTo::ZulipPublic { stream_id: 123, subject: "this is the #1 topic on this zulip instance!".to_string() }),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-private 123"),
Ok(ReplyTo::ZulipPrivate { user_id: 123 }),
);
}
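// Commands the bot accepts in comments, e.g.
// "bisect-bot bisect [start=<rev>] end=<rev>" followed by a fenced ```rust block with the repro.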
enum Command {
Bisect {
start: Option<String>,
end: String,
code: String,
},
}
impl Command {
fn parse_comment(comment: &str) -> Result<Option<Command>, String> {
let mut lines = comment.lines();
while let Some(line) = lines.next() {
let line = line.trim();
if !line.starts_with(BOT_NAME) {
continue;
}
let line = line[BOT_NAME.len()..].trim();
let mut parts = line.split(" ").map(|part| part.trim());
match parts.next() {
Some("bisect") => {
let mut start = None;
let mut end = None;
for part in parts {
if part.starts_with("start=") {
if start.is_some() {
return Err(format!("start range specified twice"));
}
start = Some(part["start=".len()..].to_string());
} else if part.starts_with("end=") {
if end.is_some() {
return Err(format!("end range specified twice"));
}
end = Some(part["end=".len()..].to_string());
} else {
return Err(format!("unknown command part {:?}", part));
}
}
let end = end.ok_or("missing end range")?;
loop {
match lines.next() {
Some(line) if line.trim() == "```rust" => break,
Some(_) => {}
None => {
return Err("didn't find repro code".to_string());
}
}
}
let code = lines.take_while(|line| line.trim() != "```").collect::<Vec<_>>().join("\n");
return Ok(Some(Command::Bisect {
start,
end,
code,
}));
}
cmd => {
return Err(format!("unknown command {:?}", cmd));
}
}
}
return Ok(None);
}
}
async fn parse_comment(reply_to: &ReplyTo, comment_id: &str, comment: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match Command::parse_comment(comment)? {
Some(Command::Bisect {
start,
end,
code,
}) => {
let mut cmds = Vec::new();
if let Some(start) = start {
cmds.push(format!("--start={}", start)); | cmds.push(format!("--end={}", end));
println!("{:?}", &cmds);
push_job(&reply_to, comment_id, &cmds, &code).await?;
}
None => {}
}
Ok(())
}
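// Build a single-commit branch in JOB_REPO containing the repro crate and a GitHub Actions
// workflow that runs cargo-bisect-rustc; pushing the branch triggers the bisection job.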
async fn push_job(reply_to: &ReplyTo, job_id: &str, bisect_cmds: &[String], repro: &str) -> reqwest::Result<()> {
// Escape commands and join with whitespace
let bisect_cmds = bisect_cmds.iter().map(|cmd| format!("{:?}", cmd)).collect::<Vec<_>>().join(" ");
let src_lib = create_blob(repro).await?;
let src = create_tree(&[TreeEntry {
path: "lib.rs".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: src_lib,
}]).await?;
let github_workflow_bisect = create_blob(&format!(
r#"
name: Bisect
on:
- push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Cache cargo installed crates
uses: actions/[email protected]
with:
path: ~/.cargo/bin
key: cargo-installed-crates-2
- run: cargo install cargo-bisect-rustc || true
- name: Bisect
run: cargo bisect-rustc {} --access=github | grep -v "for x86_64-unknown-linux-gnu" || true
"#,
bisect_cmds,
)).await?;
let github_workflow = create_tree(&[TreeEntry {
path: "bisect.yaml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: github_workflow_bisect,
}]).await?;
let github = create_tree(&[TreeEntry {
path: "workflows".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github_workflow,
}]).await?;
let cargo = create_blob(r#"[package]
name = "cargo-bisect-bot-job"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
"#).await?;
let root = create_tree(&[
TreeEntry {
path: "src".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: src,
},
TreeEntry {
path: ".github".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github,
},
TreeEntry {
path: "Cargo.toml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: cargo,
}
]).await?;
let commit = create_commit(
&format!("Bisect job for comment id {}\n\n{}", job_id, reply_to.to_commit_header()),
&root,
&[],
).await?;
push_branch(&format!("job-{}", job_id), &commit).await?;
Ok(())
}
async fn create_blob(content: &str) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/blobs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"content": content,
"encoding": "utf-8",
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created blob: {}", sha);
Ok(sha)
}
async fn create_tree(content: &[TreeEntry]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/trees", JOB_REPO), serde_json::to_string(&serde_json::json!({
"tree": content,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created tree: {}", sha);
Ok(sha)
}
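// Request types for the GitHub Git Data API tree-creation endpoint.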
#[derive(serde::Serialize)]
struct TreeEntry {
path: String,
mode: TreeEntryMode,
#[serde(rename = "type")]
type_: TreeEntryType,
sha: String,
}
#[derive(serde::Serialize)]
enum TreeEntryMode {
#[serde(rename = "100644")]
File,
#[serde(rename = "100755")]
Executable,
#[serde(rename = "040000")]
Subdirectory,
#[serde(rename = "160000")]
Submodule,
#[serde(rename = "120000")]
Symlink,
}
#[derive(serde::Serialize)]
enum TreeEntryType {
#[serde(rename = "blob")]
Blob,
#[serde(rename = "tree")]
Tree,
#[serde(rename = "commit")]
Commit,
}
async fn create_commit(message: &str, tree: &str, parents: &[&str]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/commits", JOB_REPO), serde_json::to_string(&serde_json::json!({
"message": message,
"tree": tree,
"parents": parents,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created commit: {}", sha);
Ok(sha)
}
async fn push_branch(branch: &str, commit: &str) -> reqwest::Result<()> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/refs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"ref": format!("refs/heads/{}", branch),
"sha": commit,
})).unwrap()).await?;
println!("pushed branch: {}", res);
Ok(())
} | } | random_line_split |
main.rs | use std::convert::Infallible;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};
mod github;
mod zulip;
const BOT_NAME: &'static str = "bisect-bot ";
const USER_AGENT: &'static str = "https://github.com/bjorn3/cargo-bisect-rustc-bot";
const REPO_WHITELIST: &'static [&'static str] = &["bjorn3/cargo-bisect-rustc-bot", JOB_REPO];
const JOB_REPO: &'static str = "bjorn3/cargo-bisect-rustc-bot-jobs";
lazy_static::lazy_static! {
static ref GITHUB_USERNAME: String = std::env::var("GITHUB_USERNAME").expect("github username not defined");
static ref GITHUB_TOKEN: String = std::env::var("GITHUB_TOKEN").expect("github personal access token not defined");
static ref ZULIP_USER: String = std::env::var("ZULIP_USERNAME").expect("zulip username not defined");
static ref ZULIP_TOKEN: String = std::env::var("ZULIP_TOKEN").expect("zulip api token not defined");
}
#[tokio::main]
async fn main() {
let _zulip = tokio::spawn(crate::zulip::zulip_task());
let addr = (
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3000".to_string())
.parse::<u16>()
.unwrap(),
)
.into();
let make_svc = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(request_handler))
});
let server = Server::bind(&addr).serve(make_svc);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
async fn request_handler(req: Request<Body>) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
crate::github::web_hook(req).await.map_err(|err| {
println!("error: {}", err);
err
})
}
#[derive(Debug, PartialEq)]
enum ReplyTo {
Github {
repo: String,
issue_number: u64,
},
ZulipPublic {
stream_id: u64,
subject: String,
},
ZulipPrivate {
user_id: u64,
},
}
impl ReplyTo {
async fn comment(&self, body: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
crate::github::gh_post_comment(repo, issue_number, body).await?;
Ok(())
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
crate::zulip::zulip_post_public_message(stream_id, subject, body).await
}
ReplyTo::ZulipPrivate { user_id } => {
crate::zulip::zulip_post_private_message(user_id, body).await
}
}
}
const COMMIT_HEADER: &'static str = "X-Bisectbot-Reply-To";
fn to_commit_header(&self) -> String {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
format!("{}: github {}#{}", Self::COMMIT_HEADER, repo, issue_number)
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
format!("{}: zulip-public {} | {}", Self::COMMIT_HEADER, stream_id, subject)
}
ReplyTo::ZulipPrivate { user_id } => {
format!("{}: zulip-private {}", Self::COMMIT_HEADER, user_id)
}
}
}
fn from_commit_message(message: &str) -> Result<Self, ()> {
for line in message.lines() {
let line = line.trim();
if !line.starts_with(Self::COMMIT_HEADER) {
continue;
}
let header = line[Self::COMMIT_HEADER.len()+1..].trim();
let mut split = header.split(" ");
let kind = split.next().ok_or(())?.trim();
let to = split.next().ok_or(())?.trim();
match kind {
"github" => {
if split.next().is_some() {
return Err(());
}
let mut split = to.split("#");
let repo = split.next().ok_or(())?.trim();
let issue_number = split.next().ok_or(())?.trim().parse().map_err(|_| ())?;
if split.next().is_some() {
return Err(());
}
return Ok(ReplyTo::Github {
repo: repo.to_string(),
issue_number,
});
}
"zulip-public" => {
let stream_id: u64 = to.parse().map_err(|_| ())?;
let subject = header[header.find("|").ok_or(())?+2..].to_string();
return Ok(ReplyTo::ZulipPublic {
stream_id,
subject,
})
}
"zulip-private" => {
if split.next().is_some() {
return Err(());
}
let user_id = to.parse().map_err(|_| ())?;
return Ok(ReplyTo::ZulipPrivate {
user_id,
});
}
_ => return Err(()),
}
}
Err(())
}
}
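// `to_commit_header` and `from_commit_message` (exercised by the test below)
// form a round trip: the reply target is embedded in the job commit message
// when a bisection job is pushed, and parsed back out of that commit later so
// the result can be delivered to the right GitHub issue or Zulip
// stream/private conversation.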
#[test]
fn test_reply_to_parsing() {
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: github a/b#5"),
Ok(ReplyTo::Github { repo: "a/b".to_string(), issue_number: 5}),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-public 123 | this is the #1 topic on this zulip instance!"),
Ok(ReplyTo::ZulipPublic { stream_id: 123, subject: "this is the #1 topic on this zulip instance!".to_string() }),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-private 123"),
Ok(ReplyTo::ZulipPrivate { user_id: 123 }),
);
}
enum Command {
Bisect {
start: Option<String>,
end: String,
code: String,
},
}
impl Command {
fn parse_comment(comment: &str) -> Result<Option<Command>, String> {
let mut lines = comment.lines();
while let Some(line) = lines.next() {
let line = line.trim();
if !line.starts_with(BOT_NAME) {
continue;
}
let line = line[BOT_NAME.len()..].trim();
let mut parts = line.split(" ").map(|part| part.trim());
match parts.next() {
Some("bisect") => {
let mut start = None;
let mut end = None;
for part in parts {
if part.starts_with("start=") | else if part.starts_with("end=") {
if end.is_some() {
return Err(format!("end range specified twice"));
}
end = Some(part["end=".len()..].to_string());
} else {
return Err(format!("unknown command part {:?}", part));
}
}
let end = end.ok_or("missing end range")?;
loop {
match lines.next() {
Some(line) if line.trim() == "```rust" => break,
Some(_) => {}
None => {
return Err("didn't find repro code".to_string());
}
}
}
let code = lines.take_while(|line| line.trim() != "```").collect::<Vec<_>>().join("\n");
return Ok(Some(Command::Bisect {
start,
end,
code,
}));
}
cmd => {
return Err(format!("unknown command {:?}", cmd));
}
}
}
return Ok(None);
}
}
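// Illustrative sketch (not part of the original bot): exercises
// `Command::parse_comment` with the comment syntax it expects -- a
// "bisect-bot bisect start=.. end=.." line followed by a ```rust fenced block.
// The dates below are arbitrary example values.
#[test]
fn test_command_parsing() {
    let comment = "bisect-bot bisect start=2020-01-01 end=2020-02-02\n```rust\nfn main() {}\n```";
    match Command::parse_comment(comment) {
        Ok(Some(Command::Bisect { start, end, code })) => {
            assert_eq!(start.as_deref(), Some("2020-01-01"));
            assert_eq!(end, "2020-02-02");
            assert_eq!(code, "fn main() {}");
        }
        _ => panic!("expected a bisect command"),
    }
}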
async fn parse_comment(reply_to: &ReplyTo, comment_id: &str, comment: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match Command::parse_comment(comment)? {
Some(Command::Bisect {
start,
end,
code,
}) => {
let mut cmds = Vec::new();
if let Some(start) = start {
cmds.push(format!("--start={}", start));
}
cmds.push(format!("--end={}", end));
println!("{:?}", &cmds);
push_job(&reply_to, comment_id, &cmds, &code).await?;
}
None => {}
}
Ok(())
}
async fn push_job(reply_to: &ReplyTo, job_id: &str, bisect_cmds: &[String], repro: &str) -> reqwest::Result<()> {
// Escape commands and join with whitespace
let bisect_cmds = bisect_cmds.iter().map(|cmd| format!("{:?}", cmd)).collect::<Vec<_>>().join(" ");
let src_lib = create_blob(repro).await?;
let src = create_tree(&[TreeEntry {
path: "lib.rs".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: src_lib,
}]).await?;
let github_workflow_bisect = create_blob(&format!(
r#"
name: Bisect
on:
- push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Cache cargo installed crates
uses: actions/[email protected]
with:
path: ~/.cargo/bin
key: cargo-installed-crates-2
- run: cargo install cargo-bisect-rustc || true
- name: Bisect
run: cargo bisect-rustc {} --access=github | grep -v "for x86_64-unknown-linux-gnu" || true
"#,
bisect_cmds,
)).await?;
let github_workflow = create_tree(&[TreeEntry {
path: "bisect.yaml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: github_workflow_bisect,
}]).await?;
let github = create_tree(&[TreeEntry {
path: "workflows".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github_workflow,
}]).await?;
let cargo = create_blob(r#"[package]
name = "cargo-bisect-bot-job"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
"#).await?;
let root = create_tree(&[
TreeEntry {
path: "src".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: src,
},
TreeEntry {
path: ".github".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github,
},
TreeEntry {
path: "Cargo.toml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: cargo,
}
]).await?;
let commit = create_commit(
&format!("Bisect job for comment id {}\n\n{}", job_id, reply_to.to_commit_header()),
&root,
&[],
).await?;
push_branch(&format!("job-{}", job_id), &commit).await?;
Ok(())
}
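// The generated job repository has this layout:
//   src/lib.rs                     - the reproduction code taken from the comment
//   .github/workflows/bisect.yaml  - the generated cargo-bisect-rustc workflow
//   Cargo.toml                     - a minimal package manifest
// Pushing the `job-<id>` branch is what triggers the workflow run (`on: push`).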
async fn create_blob(content: &str) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/blobs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"content": content,
"encoding": "utf-8",
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created blob: {}", sha);
Ok(sha)
}
async fn create_tree(content: &[TreeEntry]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/trees", JOB_REPO), serde_json::to_string(&serde_json::json!({
"tree": content,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created tree: {}", sha);
Ok(sha)
}
#[derive(serde::Serialize)]
struct TreeEntry {
path: String,
mode: TreeEntryMode,
#[serde(rename = "type")]
type_: TreeEntryType,
sha: String,
}
#[derive(serde::Serialize)]
enum TreeEntryMode {
#[serde(rename = "100644")]
File,
#[serde(rename = "100755")]
Executable,
#[serde(rename = "040000")]
Subdirectory,
#[serde(rename = "160000")]
Submodule,
#[serde(rename = "120000")]
Symlink,
}
#[derive(serde::Serialize)]
enum TreeEntryType {
#[serde(rename = "blob")]
Blob,
#[serde(rename = "tree")]
Tree,
#[serde(rename = "commit")]
Commit,
}
async fn create_commit(message: &str, tree: &str, parents: &[&str]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/commits", JOB_REPO), serde_json::to_string(&serde_json::json!({
"message": message,
"tree": tree,
"parents": parents,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created commit: {}", sha);
Ok(sha)
}
async fn push_branch(branch: &str, commit: &str) -> reqwest::Result<()> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/refs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"ref": format!("refs/heads/{}", branch),
"sha": commit,
})).unwrap()).await?;
println!("pushed branch: {}", res);
Ok(())
}
| {
if start.is_some() {
return Err(format!("start range specified twice"));
}
start = Some(part["start=".len()..].to_string());
} | conditional_block |
main.rs | use std::convert::Infallible;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};
mod github;
mod zulip;
const BOT_NAME: &'static str = "bisect-bot ";
const USER_AGENT: &'static str = "https://github.com/bjorn3/cargo-bisect-rustc-bot";
const REPO_WHITELIST: &'static [&'static str] = &["bjorn3/cargo-bisect-rustc-bot", JOB_REPO];
const JOB_REPO: &'static str = "bjorn3/cargo-bisect-rustc-bot-jobs";
lazy_static::lazy_static! {
static ref GITHUB_USERNAME: String = std::env::var("GITHUB_USERNAME").expect("github username not defined");
static ref GITHUB_TOKEN: String = std::env::var("GITHUB_TOKEN").expect("github personal access token not defined");
static ref ZULIP_USER: String = std::env::var("ZULIP_USERNAME").expect("zulip username not defined");
static ref ZULIP_TOKEN: String = std::env::var("ZULIP_TOKEN").expect("zulip api token not defined");
}
#[tokio::main]
async fn main() {
let _zulip = tokio::spawn(crate::zulip::zulip_task());
let addr = (
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3000".to_string())
.parse::<u16>()
.unwrap(),
)
.into();
let make_svc = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(request_handler))
});
let server = Server::bind(&addr).serve(make_svc);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
async fn request_handler(req: Request<Body>) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
crate::github::web_hook(req).await.map_err(|err| {
println!("error: {}", err);
err
})
}
#[derive(Debug, PartialEq)]
enum ReplyTo {
Github {
repo: String,
issue_number: u64,
},
ZulipPublic {
stream_id: u64,
subject: String,
},
ZulipPrivate {
user_id: u64,
},
}
impl ReplyTo {
async fn comment(&self, body: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
crate::github::gh_post_comment(repo, issue_number, body).await?;
Ok(())
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
crate::zulip::zulip_post_public_message(stream_id, subject, body).await
}
ReplyTo::ZulipPrivate { user_id } => {
crate::zulip::zulip_post_private_message(user_id, body).await
}
}
}
const COMMIT_HEADER: &'static str = "X-Bisectbot-Reply-To";
fn to_commit_header(&self) -> String {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
format!("{}: github {}#{}", Self::COMMIT_HEADER, repo, issue_number)
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
format!("{}: zulip-public {} | {}", Self::COMMIT_HEADER, stream_id, subject)
}
ReplyTo::ZulipPrivate { user_id } => {
format!("{}: zulip-private {}", Self::COMMIT_HEADER, user_id)
}
}
}
fn from_commit_message(message: &str) -> Result<Self, ()> {
for line in message.lines() {
let line = line.trim();
if !line.starts_with(Self::COMMIT_HEADER) {
continue;
}
let header = line[Self::COMMIT_HEADER.len()+1..].trim();
let mut split = header.split(" ");
let kind = split.next().ok_or(())?.trim();
let to = split.next().ok_or(())?.trim();
match kind {
"github" => {
if split.next().is_some() {
return Err(());
}
let mut split = to.split("#");
let repo = split.next().ok_or(())?.trim();
let issue_number = split.next().ok_or(())?.trim().parse().map_err(|_| ())?;
if split.next().is_some() {
return Err(());
}
return Ok(ReplyTo::Github {
repo: repo.to_string(),
issue_number,
});
}
"zulip-public" => {
let stream_id: u64 = to.parse().map_err(|_| ())?;
let subject = header[header.find("|").ok_or(())?+2..].to_string();
return Ok(ReplyTo::ZulipPublic {
stream_id,
subject,
})
}
"zulip-private" => {
if split.next().is_some() {
return Err(());
}
let user_id = to.parse().map_err(|_| ())?;
return Ok(ReplyTo::ZulipPrivate {
user_id,
});
}
_ => return Err(()),
}
}
Err(())
}
}
#[test]
fn test_reply_to_parsing() {
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: github a/b#5"),
Ok(ReplyTo::Github { repo: "a/b".to_string(), issue_number: 5}),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-public 123 | this is the #1 topic on this zulip instance!"),
Ok(ReplyTo::ZulipPublic { stream_id: 123, subject: "this is the #1 topic on this zulip instance!".to_string() }),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-private 123"),
Ok(ReplyTo::ZulipPrivate { user_id: 123 }),
);
}
enum Command {
Bisect {
start: Option<String>,
end: String,
code: String,
},
}
impl Command {
fn parse_comment(comment: &str) -> Result<Option<Command>, String> {
let mut lines = comment.lines();
while let Some(line) = lines.next() {
let line = line.trim();
if !line.starts_with(BOT_NAME) {
continue;
}
let line = line[BOT_NAME.len()..].trim();
let mut parts = line.split(" ").map(|part| part.trim());
match parts.next() {
Some("bisect") => {
let mut start = None;
let mut end = None;
for part in parts {
if part.starts_with("start=") {
if start.is_some() {
return Err(format!("start range specified twice"));
}
start = Some(part["start=".len()..].to_string());
} else if part.starts_with("end=") {
if end.is_some() {
return Err(format!("end range specified twice"));
}
end = Some(part["end=".len()..].to_string());
} else {
return Err(format!("unknown command part {:?}", part));
}
}
let end = end.ok_or("missing end range")?;
loop {
match lines.next() {
Some(line) if line.trim() == "```rust" => break,
Some(_) => {}
None => {
return Err("didn't find repro code".to_string());
}
}
}
let code = lines.take_while(|line| line.trim() != "```").collect::<Vec<_>>().join("\n");
return Ok(Some(Command::Bisect {
start,
end,
code,
}));
}
cmd => {
return Err(format!("unknown command {:?}", cmd));
}
}
}
return Ok(None);
}
}
async fn parse_comment(reply_to: &ReplyTo, comment_id: &str, comment: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match Command::parse_comment(comment)? {
Some(Command::Bisect {
start,
end,
code,
}) => {
let mut cmds = Vec::new();
if let Some(start) = start {
cmds.push(format!("--start={}", start));
}
cmds.push(format!("--end={}", end));
println!("{:?}", &cmds);
push_job(&reply_to, comment_id, &cmds, &code).await?;
}
None => {}
}
Ok(())
}
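// Dispatch point: a recognized `bisect` command is translated into
// cargo-bisect-rustc CLI flags and handed to `push_job`; comments without a
// recognized command are ignored. (Presumably invoked from the github/zulip
// webhook handlers, which live in modules not shown here.)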
async fn push_job(reply_to: &ReplyTo, job_id: &str, bisect_cmds: &[String], repro: &str) -> reqwest::Result<()> {
// Escape commands and join with whitespace
let bisect_cmds = bisect_cmds.iter().map(|cmd| format!("{:?}", cmd)).collect::<Vec<_>>().join(" ");
let src_lib = create_blob(repro).await?;
let src = create_tree(&[TreeEntry {
path: "lib.rs".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: src_lib,
}]).await?;
let github_workflow_bisect = create_blob(&format!(
r#"
name: Bisect
on:
- push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Cache cargo installed crates
uses: actions/[email protected]
with:
path: ~/.cargo/bin
key: cargo-installed-crates-2
- run: cargo install cargo-bisect-rustc || true
- name: Bisect
run: cargo bisect-rustc {} --access=github | grep -v "for x86_64-unknown-linux-gnu" || true
"#,
bisect_cmds,
)).await?;
let github_workflow = create_tree(&[TreeEntry {
path: "bisect.yaml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: github_workflow_bisect,
}]).await?;
let github = create_tree(&[TreeEntry {
path: "workflows".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github_workflow,
}]).await?;
let cargo = create_blob(r#"[package]
name = "cargo-bisect-bot-job"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
"#).await?;
let root = create_tree(&[
TreeEntry {
path: "src".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: src,
},
TreeEntry {
path: ".github".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github,
},
TreeEntry {
path: "Cargo.toml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: cargo,
}
]).await?;
let commit = create_commit(
&format!("Bisect job for comment id {}\n\n{}", job_id, reply_to.to_commit_header()),
&root,
&[],
).await?;
push_branch(&format!("job-{}", job_id), &commit).await?;
Ok(())
}
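// Note: the generated workflow runs `cargo bisect-rustc` with `--access=github`
// and pipes the output through `grep -v "for x86_64-unknown-linux-gnu"`,
// apparently to keep the per-nightly toolchain messages out of the job log.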
async fn create_blob(content: &str) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/blobs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"content": content,
"encoding": "utf-8",
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created blob: {}", sha);
Ok(sha)
}
async fn create_tree(content: &[TreeEntry]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/trees", JOB_REPO), serde_json::to_string(&serde_json::json!({
"tree": content,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created tree: {}", sha);
Ok(sha)
}
#[derive(serde::Serialize)]
struct | {
path: String,
mode: TreeEntryMode,
#[serde(rename = "type")]
type_: TreeEntryType,
sha: String,
}
#[derive(serde::Serialize)]
enum TreeEntryMode {
#[serde(rename = "100644")]
File,
#[serde(rename = "100755")]
Executable,
#[serde(rename = "040000")]
Subdirectory,
#[serde(rename = "160000")]
Submodule,
#[serde(rename = "120000")]
Symlink,
}
#[derive(serde::Serialize)]
enum TreeEntryType {
#[serde(rename = "blob")]
Blob,
#[serde(rename = "tree")]
Tree,
#[serde(rename = "commit")]
Commit,
}
async fn create_commit(message: &str, tree: &str, parents: &[&str]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/commits", JOB_REPO), serde_json::to_string(&serde_json::json!({
"message": message,
"tree": tree,
"parents": parents,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created commit: {}", sha);
Ok(sha)
}
async fn push_branch(branch: &str, commit: &str) -> reqwest::Result<()> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/refs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"ref": format!("refs/heads/{}", branch),
"sha": commit,
})).unwrap()).await?;
println!("pushed branch: {}", res);
Ok(())
}
| TreeEntry | identifier_name |
probe_bert.py | """ Finetuning the AFTER models for sequence classification (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
AdamW,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from after_models.after_bert import AfterBertForSequenceClassification
from after_models.bert_mean_pooled import BertForSequenceClassification
from sys_config import DATA_DIR
from utils.data_caching import cache_after_datasets, load_domain_examples
from utils.metrics import compute_metrics
from utils.general_processors import processors, output_modes
from utils.config import train_options
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"bert-ft": (BertConfig, BertForSequenceClassification, BertTokenizer),
"afterbert": (BertConfig, BertForSequenceClassification, BertTokenizer),
}
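# All three MODEL_CLASSES entries map to the same config/model/tokenizer classes;
# the model types differ only in which checkpoint is loaded (see the ckpt_file
# selection in the __main__ block below).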
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_domain_dataset, model, tokenizer):
""" Train the model """
# The batch size must be halved in order to fit one batch from each domain
args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
train_domain_sampler = RandomSampler(train_domain_dataset) if args.local_rank == -1 else DistributedSampler(
train_domain_dataset)
train_domain_dataloader = DataLoader(train_domain_dataset, sampler=train_domain_sampler,
batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_domain_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_domain_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# AfterBERT
args.logging_steps = len(train_domain_dataloader) // args.num_evals
args.warmup_steps = args.warmup_proportion * t_total
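    # logging_steps is derived from num_evals, so evaluation (and possible
    # checkpointing) runs num_evals times per epoch; warmup_steps covers
    # warmup_proportion of the total optimization steps.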
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,
correct_bias=args.bias_correction)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num of Combined examples = %d", len(train_domain_dataloader))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
        # set global_step to the global_step of the last saved checkpoint from the model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_domain_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_domain_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
# AfterBERT
best_val_loss = 1e5
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
)
    set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_domain_dataloader, desc="Iteration",
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# tr_loss += loss.item()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
print(json.dumps({**logs, **{"step": global_step}}))
# AfterBERT
if args.local_rank in [-1, 0] and results["loss"] < best_val_loss:
# AfterBERT
train_steps = global_step / len(train_domain_dataloader)
# Save model checkpoint
output_dir = os.path.join(args.output_dir,
"checkpoint".format(train_steps))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
# AfterBERT
best_val_loss = results["loss"]
output_ckpt_file = os.path.join(output_dir, "best_loss.txt")
with open(output_ckpt_file, "w+") as writer:
for key in sorted(results.keys()):
writer.write("%s = %s\n" % (key, str(results[key])))
writer.write("steps = %s\n" % (str(train_steps)))
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
def | (args, model, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_domain_examples(args, eval_task, args.aux_name, mode="dev")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics("domain", preds, out_label_ids)
# AfterBERT
result.update({"loss": eval_loss})
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "a+") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def main(args):
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
# Prepare auxiliary dataset
args.aux_name = args.auxiliary_name.lower()
if args.aux_name not in processors:
raise ValueError("Task not found: %s" % (args.aux_name))
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.ckpt_file if args.ckpt_file else args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
# mean_pool=args.mean_pool
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.classifier.reset_parameters()
model.to(args.device)
for param in model.bert.parameters():
param.requires_grad = False
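    # Freezing the BERT encoder (together with classifier.reset_parameters() above)
    # turns this run into a linear probe: only the freshly initialized
    # classification head is trained on the domain-discrimination labels.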
logger.info("Training/evaluation parameters %s", args)
# AfterBert
cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=False)
# cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=True)
# Training
if args.do_train:
train_domain_dataset = load_domain_examples(args, args.task_name, args.aux_name, mode="train")
global_step, tr_loss = train(args, train_domain_dataset, model, tokenizer)
# logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=False,
default='probe_bert_mrpc_pubmed.yaml',
help="config file of input data")
parser.add_argument("--seed", type=int, default=2319, help="random seed for initialization")
parser.add_argument("--lambd", type=float, default=0.01, help="lambda hyperparameter for adversarial loss")
parser.add_argument("--mean_pool", type=bool, default=False,
help="Whether to use mean pooling of the output hidden states insted of CLS token for the domain classifier")
parser.add_argument("--do_train", default=True, help="Whether to run training.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", type=bool, default=False, help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
args = parser.parse_args()
config = train_options(args.input)
# Merge the input arguments with the configuration yaml
args.__dict__.update(config)
if args.model_type == "bert-ft":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/seed_{}/checkpoint".format(args.seed)
args.lambd = "_"
elif args.model_type == "afterbert":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/AFTER_AG_News/seed_{}_lambda_{}/checkpoint".format(
args.seed, args.lambd
)
else:
args.ckpt_file = ""
args.lambd = "_"
# Create one folder per seed
args.output_dir = "".join(
(args.output_dir, "/Probe/{}/{}/seed_{}_lambda_{}/".format(args.auxiliary_name, args.model_type, args.seed, args.lambd)))
if args.mean_pool:
args.output_dir += "_mean"
args.data_dirs = [args.data_dir, "".join((DATA_DIR, config["auxiliary_name"]))]
main(args)
| evaluate | identifier_name |
probe_bert.py | """ Finetuning the AFTER models for sequence classification (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
AdamW,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from after_models.after_bert import AfterBertForSequenceClassification
from after_models.bert_mean_pooled import BertForSequenceClassification
from sys_config import DATA_DIR
from utils.data_caching import cache_after_datasets, load_domain_examples
from utils.metrics import compute_metrics
from utils.general_processors import processors, output_modes
from utils.config import train_options
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"bert-ft": (BertConfig, BertForSequenceClassification, BertTokenizer),
"afterbert": (BertConfig, BertForSequenceClassification, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_domain_dataset, model, tokenizer):
""" Train the model """
# The batch size must be halved in order to fit one batch from each domain
args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
train_domain_sampler = RandomSampler(train_domain_dataset) if args.local_rank == -1 else DistributedSampler(
train_domain_dataset)
train_domain_dataloader = DataLoader(train_domain_dataset, sampler=train_domain_sampler,
batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_domain_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_domain_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# AfterBERT
args.logging_steps = len(train_domain_dataloader) // args.num_evals
args.warmup_steps = args.warmup_proportion * t_total
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,
correct_bias=args.bias_correction)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num of Combined examples = %d", len(train_domain_dataloader))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
        # set global_step to the global_step of the last saved checkpoint from the model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_domain_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_domain_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
# AfterBERT
best_val_loss = 1e5
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
)
    set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_domain_dataloader, desc="Iteration",
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# tr_loss += loss.item()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
print(json.dumps({**logs, **{"step": global_step}}))
# AfterBERT
if args.local_rank in [-1, 0] and results["loss"] < best_val_loss:
# AfterBERT
|
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
def evaluate(args, model, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_domain_examples(args, eval_task, args.aux_name, mode="dev")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics("domain", preds, out_label_ids)
# AfterBERT
result.update({"loss": eval_loss})
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "a+") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
writer.write("%s = %s\n" % (key, str(result[key])))
return results
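# evaluate() reports domain-classification metrics plus the mean validation loss;
# train() compares that loss against best_val_loss to decide when to overwrite
# the saved checkpoint.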
def main(args):
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
# Prepare auxiliary dataset
args.aux_name = args.auxiliary_name.lower()
if args.aux_name not in processors:
raise ValueError("Task not found: %s" % (args.aux_name))
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.ckpt_file if args.ckpt_file else args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
# mean_pool=args.mean_pool
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.classifier.reset_parameters()
model.to(args.device)
for param in model.bert.parameters():
param.requires_grad = False
logger.info("Training/evaluation parameters %s", args)
# AfterBert
cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=False)
# cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=True)
# Training
if args.do_train:
train_domain_dataset = load_domain_examples(args, args.task_name, args.aux_name, mode="train")
global_step, tr_loss = train(args, train_domain_dataset, model, tokenizer)
# logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=False,
default='probe_bert_mrpc_pubmed.yaml',
help="config file of input data")
parser.add_argument("--seed", type=int, default=2319, help="random seed for initialization")
parser.add_argument("--lambd", type=float, default=0.01, help="lambda hyperparameter for adversarial loss")
parser.add_argument("--mean_pool", type=bool, default=False,
help="Whether to use mean pooling of the output hidden states insted of CLS token for the domain classifier")
parser.add_argument("--do_train", default=True, help="Whether to run training.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", type=bool, default=False, help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
args = parser.parse_args()
config = train_options(args.input)
# Merge the input arguments with the configuration yaml
args.__dict__.update(config)
if args.model_type == "bert-ft":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/seed_{}/checkpoint".format(args.seed)
args.lambd = "_"
elif args.model_type == "afterbert":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/AFTER_AG_News/seed_{}_lambda_{}/checkpoint".format(
args.seed, args.lambd
)
else:
args.ckpt_file = ""
args.lambd = "_"
# Create one folder per seed
args.output_dir = "".join(
(args.output_dir, "/Probe/{}/{}/seed_{}_lambda_{}/".format(args.auxiliary_name, args.model_type, args.seed, args.lambd)))
if args.mean_pool:
args.output_dir += "_mean"
args.data_dirs = [args.data_dir, "".join((DATA_DIR, config["auxiliary_name"]))]
main(args)
| train_steps = global_step / len(train_domain_dataloader)
# Save model checkpoint
output_dir = os.path.join(args.output_dir,
"checkpoint".format(train_steps))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
# AfterBERT
best_val_loss = results["loss"]
output_ckpt_file = os.path.join(output_dir, "best_loss.txt")
with open(output_ckpt_file, "w+") as writer:
for key in sorted(results.keys()):
writer.write("%s = %s\n" % (key, str(results[key])))
writer.write("steps = %s\n" % (str(train_steps))) | conditional_block |
probe_bert.py | """ Finetuning the AFTER models for sequence classification (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
AdamW,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from after_models.after_bert import AfterBertForSequenceClassification
from after_models.bert_mean_pooled import BertForSequenceClassification
from sys_config import DATA_DIR
from utils.data_caching import cache_after_datasets, load_domain_examples
from utils.metrics import compute_metrics
from utils.general_processors import processors, output_modes
from utils.config import train_options
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"bert-ft": (BertConfig, BertForSequenceClassification, BertTokenizer),
"afterbert": (BertConfig, BertForSequenceClassification, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
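# set_seed() seeds python, numpy and torch (including all GPUs) so that each
# probe run is reproducible for a given --seed value.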
def train(args, train_domain_dataset, model, tokenizer):
""" Train the model """
# The batch size must be halved in order to fit one batch from each domain
args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
train_domain_sampler = RandomSampler(train_domain_dataset) if args.local_rank == -1 else DistributedSampler(
train_domain_dataset)
train_domain_dataloader = DataLoader(train_domain_dataset, sampler=train_domain_sampler,
batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_domain_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_domain_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# AfterBERT
args.logging_steps = len(train_domain_dataloader) // args.num_evals
args.warmup_steps = args.warmup_proportion * t_total
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
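    # Standard BERT-style weight-decay split: biases and LayerNorm weights are
    # excluded from weight decay.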
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,
correct_bias=args.bias_correction)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num of Combined examples = %d", len(train_domain_dataloader))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_domain_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_domain_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
# AfterBERT
best_val_loss = 1e5
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
)
set_seed(args) # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_domain_dataloader, desc="Iteration",
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
print(json.dumps({**logs, **{"step": global_step}}))
# AfterBERT
if args.local_rank in [-1, 0] and results["loss"] < best_val_loss:
# AfterBERT
train_steps = global_step / len(train_domain_dataloader)
# Save model checkpoint
output_dir = os.path.join(args.output_dir,
"checkpoint".format(train_steps))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
# AfterBERT
best_val_loss = results["loss"]
output_ckpt_file = os.path.join(output_dir, "best_loss.txt")
with open(output_ckpt_file, "w+") as writer:
for key in sorted(results.keys()):
writer.write("%s = %s\n" % (key, str(results[key])))
writer.write("steps = %s\n" % (str(train_steps)))
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
def evaluate(args, model, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_domain_examples(args, eval_task, args.aux_name, mode="dev")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics("domain", preds, out_label_ids)
# AfterBERT
result.update({"loss": eval_loss})
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "a+") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def main(args):
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=False,
default='probe_bert_mrpc_pubmed.yaml',
help="config file of input data")
parser.add_argument("--seed", type=int, default=2319, help="random seed for initialization")
parser.add_argument("--lambd", type=float, default=0.01, help="lambda hyperparameter for adversarial loss")
parser.add_argument("--mean_pool", type=bool, default=False,
help="Whether to use mean pooling of the output hidden states insted of CLS token for the domain classifier")
parser.add_argument("--do_train", default=True, help="Whether to run training.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", type=bool, default=False, help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
args = parser.parse_args()
config = train_options(args.input)
# Merge the input arguments with the configuration yaml
args.__dict__.update(config)
if args.model_type == "bert-ft":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/seed_{}/checkpoint".format(args.seed)
args.lambd = "_"
elif args.model_type == "afterbert":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/AFTER_AG_News/seed_{}_lambda_{}/checkpoint".format(
args.seed, args.lambd
)
else:
args.ckpt_file = ""
args.lambd = "_"
# Create one folder per seed
args.output_dir = "".join(
(args.output_dir, "/Probe/{}/{}/seed_{}_lambda_{}/".format(args.auxiliary_name, args.model_type, args.seed, args.lambd)))
if args.mean_pool:
args.output_dir += "_mean"
args.data_dirs = [args.data_dir, "".join((DATA_DIR, config["auxiliary_name"]))]
main(args)
| if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
# Prepare auxiliary dataset
args.aux_name = args.auxiliary_name.lower()
if args.aux_name not in processors:
raise ValueError("Task not found: %s" % (args.aux_name))
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.ckpt_file if args.ckpt_file else args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
# mean_pool=args.mean_pool
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.classifier.reset_parameters()
model.to(args.device)
for param in model.bert.parameters():
param.requires_grad = False
logger.info("Training/evaluation parameters %s", args)
# AfterBert
cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=False)
# cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=True)
# Training
if args.do_train:
train_domain_dataset = load_domain_examples(args, args.task_name, args.aux_name, mode="train")
global_step, tr_loss = train(args, train_domain_dataset, model, tokenizer)
# logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) | identifier_body |
probe_bert.py | """ Finetuning the AFTER models for sequence classification (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
AdamW,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from after_models.after_bert import AfterBertForSequenceClassification
from after_models.bert_mean_pooled import BertForSequenceClassification
from sys_config import DATA_DIR
from utils.data_caching import cache_after_datasets, load_domain_examples
from utils.metrics import compute_metrics
from utils.general_processors import processors, output_modes
from utils.config import train_options
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"bert-ft": (BertConfig, BertForSequenceClassification, BertTokenizer),
"afterbert": (BertConfig, BertForSequenceClassification, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_domain_dataset, model, tokenizer):
""" Train the model """
# The batch size must be halved in order to fit one batch from each domain
args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
train_domain_sampler = RandomSampler(train_domain_dataset) if args.local_rank == -1 else DistributedSampler(
train_domain_dataset)
train_domain_dataloader = DataLoader(train_domain_dataset, sampler=train_domain_sampler,
batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_domain_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_domain_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# AfterBERT
args.logging_steps = len(train_domain_dataloader) // args.num_evals
args.warmup_steps = args.warmup_proportion * t_total
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
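# Note: parameters matching "bias" or "LayerNorm.weight" get weight_decay=0.0,
# the usual convention when fine-tuning BERT-style models with AdamW.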
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,
correct_bias=args.bias_correction)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num of Combined examples = %d", len(train_domain_dataloader))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_domain_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_domain_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
# AfterBERT
best_val_loss = 1e5
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
)
set_seed(args) # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_domain_dataloader, desc="Iteration",
disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
print(json.dumps({**logs, **{"step": global_step}}))
# AfterBERT
if args.local_rank in [-1, 0] and results["loss"] < best_val_loss:
# AfterBERT
train_steps = global_step / len(train_domain_dataloader)
# Save model checkpoint
output_dir = os.path.join(args.output_dir,
"checkpoint".format(train_steps))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
# AfterBERT
best_val_loss = results["loss"]
output_ckpt_file = os.path.join(output_dir, "best_loss.txt")
with open(output_ckpt_file, "w+") as writer:
for key in sorted(results.keys()):
writer.write("%s = %s\n" % (key, str(results[key])))
writer.write("steps = %s\n" % (str(train_steps)))
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
def evaluate(args, model, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_domain_examples(args, eval_task, args.aux_name, mode="dev")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics("domain", preds, out_label_ids)
# AfterBERT
result.update({"loss": eval_loss})
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "a+") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def main(args):
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
# Prepare auxiliary dataset
args.aux_name = args.auxiliary_name.lower()
if args.aux_name not in processors:
raise ValueError("Task not found: %s" % (args.aux_name))
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
# finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.ckpt_file if args.ckpt_file else args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
# mean_pool=args.mean_pool
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.classifier.reset_parameters()
model.to(args.device)
for param in model.bert.parameters():
param.requires_grad = False
logger.info("Training/evaluation parameters %s", args)
# AfterBert
cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=False)
# cache_after_datasets(args, args.task_name, args.aux_name, tokenizer, test=True)
# Training
if args.do_train:
train_domain_dataset = load_domain_examples(args, args.task_name, args.aux_name, mode="train")
global_step, tr_loss = train(args, train_domain_dataset, model, tokenizer)
# logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", required=False,
default='probe_bert_mrpc_pubmed.yaml',
help="config file of input data")
parser.add_argument("--seed", type=int, default=2319, help="random seed for initialization")
parser.add_argument("--lambd", type=float, default=0.01, help="lambda hyperparameter for adversarial loss")
parser.add_argument("--mean_pool", type=bool, default=False,
help="Whether to use mean pooling of the output hidden states insted of CLS token for the domain classifier")
parser.add_argument("--do_train", default=True, help="Whether to run training.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", type=bool, default=False, help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
| args.__dict__.update(config)
if args.model_type == "bert-ft":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/seed_{}/checkpoint".format(args.seed)
args.lambd = "_"
elif args.model_type == "afterbert":
args.ckpt_file = "/data/data1/users/gvernikos/After_v1.0/AfterBERT/MRPC/AFTER_AG_News/seed_{}_lambda_{}/checkpoint".format(
args.seed, args.lambd
)
else:
args.ckpt_file = ""
args.lambd = "_"
# Create one folder per seed
args.output_dir = "".join(
(args.output_dir, "/Probe/{}/{}/seed_{}_lambda_{}/".format(args.auxiliary_name, args.model_type, args.seed, args.lambd)))
if args.mean_pool:
args.output_dir += "_mean"
args.data_dirs = [args.data_dir, "".join((DATA_DIR, config["auxiliary_name"]))]
main(args) | args = parser.parse_args()
config = train_options(args.input)
# Merge the input arguments with the configuration yaml | random_line_split |
dri.go | // Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package graphics contains graphics-related utility functions for local tests.
package graphics
import (
"context"
"fmt"
"io/ioutil"
"math"
"os"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
// The debugfs file with the information on allocated framebuffers for Intel i915 GPUs.
i915FramebufferFile = "/sys/kernel/debug/dri/0/i915_gem_framebuffer"
// The debugfs file with the information on allocated framebuffers for generic
// implementations, e.g. AMD, modern Intel GPUs, ARM-based devices.
genericFramebufferFilePattern = "/sys/kernel/debug/dri/%d/framebuffer"
// Maximum DRM device minor number.
maxDRMDeviceNumber = 64
// Immediately after login there's a lot of graphics activity; wait for a
// minute until it subsides. TODO(crbug.com/1047840): Remove when not needed.
coolDownTimeAfterLogin = 30 * time.Second
// Amount of graphics objects for a given resolution considered bad, regardless of codec.
maxGraphicsObjects = 25
)
// Size represents a Width x Height pair, for example for a video resolution.
type Size struct {
Width int
Height int
}
// Backend contains the necessary methods to interact with the platform debug
// interface and getting readings.
type Backend interface {
// Round implements the platform-specific graphic- or codec- rounding.
Round(value int) int
// ReadFramebufferCount tries to retrieve the number of framebuffers of width
// and height dimensions allocated by the Backend.
ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, err error)
}
// I915Backend implements Backend for the Intel i915 case.
type I915Backend struct{}
func i915Backend() *I915Backend {
if _, err := os.Stat(i915FramebufferFile); err != nil {
return nil
}
return &I915Backend{}
}
// Round rounds up value for the Intel platforms and all codecs.
func (g I915Backend) Round(value int) int {
const i915Alignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
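// For example (illustrative): Round(1080) = (1080 + 15) &^ 15 = 1088, while
// Round(1920) stays 1920 since it is already 16-aligned.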
return (value + i915Alignment - 1) & ^(i915Alignment - 1)
}
// ReadFramebufferCount tries to open the i915FramebufferFile and count the
// amount of lines of dimensions width x height, which corresponds to the amount
// of framebuffers allocated in the system.
// See https://dri.freedesktop.org/docs/drm/gpu/i915.html
func (g I915Backend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(i915FramebufferFile)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "user size: 1920 x 1080,..."
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, "user size: %d x %d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GenericBackend implements Backend for the Generic case (Intel and AMD).
type GenericBackend struct {
// Index of the DRM card device file (X in /dev/dri/cardX).
index int
}
func genericBackend() *GenericBackend {
for i := 0; i < maxDRMDeviceNumber; i++ {
if _, err := os.Stat(fmt.Sprintf(genericFramebufferFilePattern, i)); err == nil {
return &GenericBackend{index: i}
}
}
return nil
}
// Round rounds up value for the Generic Debugfs platforms and all codecs.
func (g GenericBackend) Round(value int) int {
const genericAlignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
return (value + genericAlignment - 1) & ^(genericAlignment - 1)
}
// ReadFramebufferCount tries to open the DRM device file and count the amount
// of lines of dimensions width x height, which corresponds to the amount of
// framebuffers allocated in the system. See
// https://dri.freedesktop.org/docs/drm/gpu/amdgpu.html
func (g GenericBackend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(fmt.Sprintf(genericFramebufferFilePattern, g.index))
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "...size=1920x1080"
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, " size=%dx%d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GetBackend tries to get the appropriate platform graphics debug backend and
// returns it, or returns an error.
func GetBackend() (Backend, error) {
// TODO(mcasas): In the future we might want to support systems with several GPUs.
// Prefer the genericBackend.
if be := genericBackend(); be != nil {
return be, nil
}
if be := i915Backend(); be != nil {
return be, nil
}
return nil, errors.New("could not find any Graphics backend")
}
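// Illustrative usage (sketch, not part of the original file): a caller would typically do
//   backend, err := GetBackend()
//   if err != nil { /* handle missing debugfs support */ }
//   err = VerifyGraphicsMemory(ctx, playVideo, backend, []Size{{Width: 1920, Height: 1080}})
// where playVideo is a hypothetical func() error payload supplied by the test.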
// compareGraphicsMemoryBeforeAfter compares the graphics memory consumption
// before and after running the payload function, using the backend. The number
// of graphics buffers allocated during payload execution must also be non-zero.
func compareGraphicsMemoryBeforeAfter(ctx context.Context, payload func() error, backend Backend, roundedWidth, roundedHeight int) (err error) {
var before, during, after int
if before, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
// Note: We don't wait for the ReadFramebufferCount() to finish, just keep
// measuring until we get a non-zero value in during, for further comparison
// below.
go func() {
const pollTimeout = 10 * time.Second
const pollInterval = 100 * time.Millisecond
_ = testing.Poll(ctx, func(ctx context.Context) error {
// TODO(crbug.com/1047514): instead of blindly sampling the amount of
// objects during the test and comparing them further down, verify them
// here directly.
if during, _ = backend.ReadFramebufferCount(ctx, roundedWidth, roundedHeight); during == before {
return errors.New("Still waiting for graphics objects")
}
return nil
}, &testing.PollOptions{Timeout: pollTimeout, Interval: pollInterval})
}()
err = <-c
if err != nil {
return err
}
if after, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
if before != after {
return errors.Wrapf(err, "graphics objects of size %d x %d do not coincide: before=%d, after=%d", roundedWidth, roundedHeight, before, after)
}
if during == before {
return errors.Wrapf(err, "graphics objects of size %d x %d did not increase during play back: before=%d, during=%d", roundedWidth, roundedHeight, before, during)
}
testing.ContextLogf(ctx, "Graphics objects of size %d x %d before=%d, during=%d, after=%d", roundedWidth, roundedHeight, before, during, after)
return nil
}
// monitorGraphicsMemoryDuring verifies that the graphics memory consumption
// while running the payload function, using the backend, does not spiral out
// of control, by comparing it to the appropriate threshold.
func monitorGraphicsMemoryDuring(ctx context.Context, payload func() error, backend Backend, roundedSizes []Size, threshold int) (err error) {
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
const pollInterval = 1 * time.Second
ticker := time.NewTicker(pollInterval)
for {
select {
case <-ctx.Done():
ticker.Stop()
return errors.New("test timed out")
case pErr := <-c:
ticker.Stop()
return pErr
case <-ticker.C:
for _, roundedSize := range roundedSizes {
count, _ := backend.ReadFramebufferCount(ctx, roundedSize.Width, roundedSize.Height)
if count > threshold {
// TODO(mcasas): find a way to kill payload() at this point.
ticker.Stop()
err := errors.Errorf("too many objects of size %d x %d, got: %d, threshold: %d", roundedSize.Width, roundedSize.Height, count, threshold)
select {
case <-c:
case <-ctx.Done():
}
return err
}
}
}
}
}
// VerifyGraphicsMemory uses the backend to detect memory leaks during or after
// the execution of payload.
func VerifyGraphicsMemory(ctx context.Context, payload func() error, backend Backend, sizes []Size) (err error) {
testing.ContextLogf(ctx, "Cooling down %v after log in", coolDownTimeAfterLogin)
if err := testing.Sleep(ctx, coolDownTimeAfterLogin); err != nil {
return errors.Wrap(err, "error while cooling down after log in")
}
var roundedSizes []Size
for _, size := range sizes {
roundedSizes = append(roundedSizes, Size{Width: backend.Round(size.Width), Height: backend.Round(size.Height)})
}
if len(sizes) == 1 {
return compareGraphicsMemoryBeforeAfter(ctx, payload, backend, roundedSizes[0].Width, roundedSizes[0].Height)
}
return monitorGraphicsMemoryDuring(ctx, payload, backend, roundedSizes, maxGraphicsObjects)
}
// readStableObjectCount waits until a given graphics object count obtained with
// backend is stable, up to a certain timeout, progressively relaxing a
// similarity threshold criteria.
func | (ctx context.Context, backend Backend, width, height int) (objectCount int, err error) {
const (
pollingInterval = 1 * time.Second
// Time to wait for the object count to be stable.
waitTimeout = 120 * time.Second
// Threshold (in percentage) below which the object count is considered stable.
objectCountThresholdBase = 0.1
// Maximum threshold (in percentage) for the object count to be considered stable.
objectCountThresholdMax = 2.0
// Maximum steps of relaxing the object count similarity threshold.
relaxingThresholdSteps = 5
)
startTime := time.Now()
delta := (objectCountThresholdMax - objectCountThresholdBase) / (relaxingThresholdSteps - 1)
testing.ContextLogf(ctx, "Waiting at most %v for stable graphics object count, threshold will be gradually relaxed from %.1f%% to %.1f%%",
waitTimeout, objectCountThresholdBase, objectCountThresholdMax)
for i := 0; i < relaxingThresholdSteps; i++ {
idlePercent := objectCountThresholdBase + (delta * float64(i))
timeout := waitTimeout / relaxingThresholdSteps
testing.ContextLogf(ctx, "Waiting up to %v for object count to settle within %.1f%% (%d/%d)",
timeout.Round(time.Second), idlePercent, i+1, relaxingThresholdSteps)
objectCount, err = waitForStableReadings(ctx, backend, width, height, timeout, pollingInterval, idlePercent)
if err == nil {
testing.ContextLogf(ctx, "Waiting for object count stabilisation took %v (value %d, threshold: %.1f%%)",
time.Now().Sub(startTime).Round(time.Second), objectCount, idlePercent)
return objectCount, nil
}
}
return objectCount, err
}
// waitForStableReadings reads values using backend and waits for up to timeout
// for the moving average of the last numReadings to settle within threshold.
func waitForStableReadings(ctx context.Context, backend Backend, width, height int, timeout, interval time.Duration, threshold float64) (reading int, err error) {
// Keep the last numReadings for moving average purposes. Make it half the
// size that the current timeout and interval would allow.
numReadings := int(math.Floor(float64(timeout / (2.0 * interval))))
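// For example (illustrative): with the 24s per-attempt timeout passed in by
// readStableObjectCount (120s / 5 steps) and a 1s interval, this keeps
// floor(24 / 2) = 12 readings in the moving-average window.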
var currentNumReadings int
var values = make([]int, numReadings)
err = testing.Poll(ctx, func(ctx context.Context) error {
var e error
reading, e = backend.ReadFramebufferCount(ctx, width, height)
if e != nil {
return testing.PollBreak(errors.Wrap(e, "failed measuring"))
}
values[currentNumReadings%numReadings] = reading
currentNumReadings++
if currentNumReadings < numReadings {
return errors.Errorf("need more values (got: %d and want: %d)", currentNumReadings, numReadings)
}
average := mean(values)
if math.Abs(float64(reading)-average) > threshold {
return errors.Errorf("reading %d is not within %.1f of %.1f", reading, threshold, average)
}
return nil
}, &testing.PollOptions{Timeout: timeout, Interval: interval})
return reading, err
}
// mean returns the average of values.
func mean(values []int) float64 {
var sum float64
for _, v := range values {
sum += float64(v)
}
return sum / float64(len(values))
}
| readStableObjectCount | identifier_name |
dri.go | // Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package graphics contains graphics-related utility functions for local tests.
package graphics
import (
"context"
"fmt"
"io/ioutil"
"math"
"os"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
// The debugfs file with the information on allocated framebuffers for Intel i915 GPUs.
i915FramebufferFile = "/sys/kernel/debug/dri/0/i915_gem_framebuffer"
// The debugfs file with the information on allocated framebuffers for generic
// implementations, e.g. AMD, modern Intel GPUs, ARM-based devices.
genericFramebufferFilePattern = "/sys/kernel/debug/dri/%d/framebuffer"
// Maximum DRM device minor number.
maxDRMDeviceNumber = 64
// Immediately after login there's a lot of graphics activity; wait for a
// minute until it subsides. TODO(crbug.com/1047840): Remove when not needed.
coolDownTimeAfterLogin = 30 * time.Second
// Amount of graphics objects for a given resolution considered bad, regardless of codec.
maxGraphicsObjects = 25
)
// Size represents a Width x Height pair, for example for a video resolution.
type Size struct {
Width int
Height int
}
// Backend contains the necessary methods to interact with the platform debug
// interface and getting readings.
type Backend interface {
// Round implements the platform-specific graphic- or codec- rounding.
Round(value int) int
// ReadFramebufferCount tries to retrieve the number of framebuffers of width
// and height dimensions allocated by the Backend.
ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, err error)
}
// I915Backend implements Backend for the Intel i915 case.
type I915Backend struct{}
func i915Backend() *I915Backend |
// Round rounds up value for the Intel platforms and all codecs.
func (g I915Backend) Round(value int) int {
const i915Alignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
return (value + i915Alignment - 1) & ^(i915Alignment - 1)
}
// ReadFramebufferCount tries to open the i915FramebufferFile and count the
// amount of lines of dimensions width x height, which corresponds to the amount
// of framebuffers allocated in the system.
// See https://dri.freedesktop.org/docs/drm/gpu/i915.html
func (g I915Backend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(i915FramebufferFile)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "user size: 1920 x 1080,..."
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, "user size: %d x %d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GenericBackend implements Backend for the Generic case (Intel and AMD).
type GenericBackend struct {
// Index of the DRM card device file (X in /dev/dri/cardX).
index int
}
func genericBackend() *GenericBackend {
for i := 0; i < maxDRMDeviceNumber; i++ {
if _, err := os.Stat(fmt.Sprintf(genericFramebufferFilePattern, i)); err == nil {
return &GenericBackend{index: i}
}
}
return nil
}
// Round rounds up value for the Generic Debugfs platforms and all codecs.
func (g GenericBackend) Round(value int) int {
const genericAlignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
return (value + genericAlignment - 1) & ^(genericAlignment - 1)
}
// ReadFramebufferCount tries to open the DRM device file and count the amount
// of lines of dimensions width x height, which corresponds to the amount of
// framebuffers allocated in the system. See
// https://dri.freedesktop.org/docs/drm/gpu/amdgpu.html
func (g GenericBackend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(fmt.Sprintf(genericFramebufferFilePattern, g.index))
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "...size=1920x1080"
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, " size=%dx%d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GetBackend tries to get the appropriate platform graphics debug backend and
// returns it, or returns an error.
func GetBackend() (Backend, error) {
// TODO(mcasas): In the future we might want to support systems with several GPUs.
// Prefer the genericBackend.
if be := genericBackend(); be != nil {
return be, nil
}
if be := i915Backend(); be != nil {
return be, nil
}
return nil, errors.New("could not find any Graphics backend")
}
// compareGraphicsMemoryBeforeAfter compares the graphics memory consumption
// before and after running the payload function, using the backend. The number
// of graphics buffers allocated during payload execution must also be non-zero.
func compareGraphicsMemoryBeforeAfter(ctx context.Context, payload func() error, backend Backend, roundedWidth, roundedHeight int) (err error) {
var before, during, after int
if before, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
// Note: We don't wait for the ReadFramebufferCount() to finish, just keep
// measuring until we get a non-zero value in during, for further comparison
// below.
go func() {
const pollTimeout = 10 * time.Second
const pollInterval = 100 * time.Millisecond
_ = testing.Poll(ctx, func(ctx context.Context) error {
// TODO(crbug.com/1047514): instead of blindly sampling the amount of
// objects during the test and comparing them further down, verify them
// here directly.
if during, _ = backend.ReadFramebufferCount(ctx, roundedWidth, roundedHeight); during == before {
return errors.New("Still waiting for graphics objects")
}
return nil
}, &testing.PollOptions{Timeout: pollTimeout, Interval: pollInterval})
}()
err = <-c
if err != nil {
return err
}
if after, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
if before != after {
return errors.Wrapf(err, "graphics objects of size %d x %d do not coincide: before=%d, after=%d", roundedWidth, roundedHeight, before, after)
}
if during == before {
return errors.Wrapf(err, "graphics objects of size %d x %d did not increase during play back: before=%d, during=%d", roundedWidth, roundedHeight, before, during)
}
testing.ContextLogf(ctx, "Graphics objects of size %d x %d before=%d, during=%d, after=%d", roundedWidth, roundedHeight, before, during, after)
return nil
}
// monitorGraphicsMemoryDuring verifies that the graphics memory consumption
// while running the payload function, using the backend, does not spiral out
// of control, by comparing it to the appropriate threshold.
func monitorGraphicsMemoryDuring(ctx context.Context, payload func() error, backend Backend, roundedSizes []Size, threshold int) (err error) {
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
const pollInterval = 1 * time.Second
ticker := time.NewTicker(pollInterval)
for {
select {
case <-ctx.Done():
ticker.Stop()
return errors.New("test timed out")
case pErr := <-c:
ticker.Stop()
return pErr
case <-ticker.C:
for _, roundedSize := range roundedSizes {
count, _ := backend.ReadFramebufferCount(ctx, roundedSize.Width, roundedSize.Height)
if count > threshold {
// TODO(mcasas): find a way to kill payload() at this point.
ticker.Stop()
err := errors.Errorf("too many objects of size %d x %d, got: %d, threshold: %d", roundedSize.Width, roundedSize.Height, count, threshold)
select {
case <-c:
case <-ctx.Done():
}
return err
}
}
}
}
}
// VerifyGraphicsMemory uses the backend to detect memory leaks during or after
// the execution of payload.
func VerifyGraphicsMemory(ctx context.Context, payload func() error, backend Backend, sizes []Size) (err error) {
testing.ContextLogf(ctx, "Cooling down %v after log in", coolDownTimeAfterLogin)
if err := testing.Sleep(ctx, coolDownTimeAfterLogin); err != nil {
return errors.Wrap(err, "error while cooling down after log in")
}
var roundedSizes []Size
for _, size := range sizes {
roundedSizes = append(roundedSizes, Size{Width: backend.Round(size.Width), Height: backend.Round(size.Height)})
}
if len(sizes) == 1 {
return compareGraphicsMemoryBeforeAfter(ctx, payload, backend, roundedSizes[0].Width, roundedSizes[0].Height)
}
return monitorGraphicsMemoryDuring(ctx, payload, backend, roundedSizes, maxGraphicsObjects)
}
// readStableObjectCount waits until a given graphics object count obtained with
// backend is stable, up to a certain timeout, progressively relaxing a
// similarity threshold criteria.
func readStableObjectCount(ctx context.Context, backend Backend, width, height int) (objectCount int, err error) {
const (
pollingInterval = 1 * time.Second
// Time to wait for the object count to be stable.
waitTimeout = 120 * time.Second
// Threshold (in percentage) below which the object count is considered stable.
objectCountThresholdBase = 0.1
// Maximum threshold (in percentage) for the object count to be considered stable.
objectCountThresholdMax = 2.0
// Maximum steps of relaxing the object count similarity threshold.
relaxingThresholdSteps = 5
)
startTime := time.Now()
delta := (objectCountThresholdMax - objectCountThresholdBase) / (relaxingThresholdSteps - 1)
testing.ContextLogf(ctx, "Waiting at most %v for stable graphics object count, threshold will be gradually relaxed from %.1f%% to %.1f%%",
waitTimeout, objectCountThresholdBase, objectCountThresholdMax)
for i := 0; i < relaxingThresholdSteps; i++ {
idlePercent := objectCountThresholdBase + (delta * float64(i))
timeout := waitTimeout / relaxingThresholdSteps
testing.ContextLogf(ctx, "Waiting up to %v for object count to settle within %.1f%% (%d/%d)",
timeout.Round(time.Second), idlePercent, i+1, relaxingThresholdSteps)
objectCount, err = waitForStableReadings(ctx, backend, width, height, timeout, pollingInterval, idlePercent)
if err == nil {
testing.ContextLogf(ctx, "Waiting for object count stabilisation took %v (value %d, threshold: %.1f%%)",
time.Now().Sub(startTime).Round(time.Second), objectCount, idlePercent)
return objectCount, nil
}
}
return objectCount, err
}
// waitForStableReadings reads values using backend and waits for up to timeout
// for the moving average of the last numReadings to settle within threshold.
func waitForStableReadings(ctx context.Context, backend Backend, width, height int, timeout, interval time.Duration, threshold float64) (reading int, err error) {
// Keep the last numReadings for moving average purposes. Make it half the
// size that the current timeout and interval would allow.
numReadings := int(math.Floor(float64(timeout / (2.0 * interval))))
var currentNumReadings int
var values = make([]int, numReadings)
err = testing.Poll(ctx, func(ctx context.Context) error {
var e error
reading, e = backend.ReadFramebufferCount(ctx, width, height)
if e != nil {
return testing.PollBreak(errors.Wrap(e, "failed measuring"))
}
values[currentNumReadings%numReadings] = reading
currentNumReadings++
if currentNumReadings < numReadings {
return errors.Errorf("need more values (got: %d and want: %d)", currentNumReadings, numReadings)
}
average := mean(values)
if math.Abs(float64(reading)-average) > threshold {
return errors.Errorf("reading %d is not within %.1f of %.1f", reading, threshold, average)
}
return nil
}, &testing.PollOptions{Timeout: timeout, Interval: interval})
return reading, err
}
// mean returns the average of values.
func mean(values []int) float64 {
var sum float64
for _, v := range values {
sum += float64(v)
}
return sum / float64(len(values))
}
| {
if _, err := os.Stat(i915FramebufferFile); err != nil {
return nil
}
return &I915Backend{}
} | identifier_body |
dri.go | // Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package graphics contains graphics-related utility functions for local tests.
package graphics
import (
"context"
"fmt"
"io/ioutil"
"math"
"os"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
// The debugfs file with the information on allocated framebuffers for Intel i915 GPUs.
i915FramebufferFile = "/sys/kernel/debug/dri/0/i915_gem_framebuffer"
// The debugfs file with the information on allocated framebuffers for generic
// implementations, e.g. AMD, modern Intel GPUs, ARM-based devices.
genericFramebufferFilePattern = "/sys/kernel/debug/dri/%d/framebuffer"
// Maximum DRM device minor number.
maxDRMDeviceNumber = 64
// Immediately after login there's a lot of graphics activity; wait for it to
// subside. TODO(crbug.com/1047840): Remove when not needed.
coolDownTimeAfterLogin = 30 * time.Second
// Amount of graphics objects for a given resolution considered bad, regardless of codec.
maxGraphicsObjects = 25
)
// Size represents a Width x Height pair, for example for a video resolution.
type Size struct {
Width int
Height int
}
// Backend contains the necessary methods to interact with the platform debug
// interface and getting readings.
type Backend interface {
// Round implements the platform-specific graphics or codec rounding.
Round(value int) int
// ReadFramebufferCount tries to retrieve the number of framebuffers of width
// and height dimensions allocated by the Backend.
ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, err error)
}
// I915Backend implements Backend for the Intel i915 case.
type I915Backend struct{}
| if _, err := os.Stat(i915FramebufferFile); err != nil {
return nil
}
return &I915Backend{}
}
// Round rounds up value for the Intel platforms and all codecs.
func (g I915Backend) Round(value int) int {
const i915Alignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
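// For example, Round(1080) returns 1088, while multiples of 16 such as 1920 are unchanged.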
return (value + i915Alignment - 1) & ^(i915Alignment - 1)
}
// ReadFramebufferCount tries to open the i915FramebufferFile and count the
// amount of lines of dimensions width x height, which corresponds to the amount
// of framebuffers allocated in the system.
// See https://dri.freedesktop.org/docs/drm/gpu/i915.html
func (g I915Backend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(i915FramebufferFile)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
defer f.Close()
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "user size: 1920 x 1080,..."
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, "user size: %d x %d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GenericBackend implements Backend for the Generic case (Intel and AMD).
type GenericBackend struct {
// Index of the DRM card device file (X in /dev/dri/cardX).
index int
}
func genericBackend() *GenericBackend {
for i := 0; i < maxDRMDeviceNumber; i++ {
if _, err := os.Stat(fmt.Sprintf(genericFramebufferFilePattern, i)); err == nil {
return &GenericBackend{index: i}
}
}
return nil
}
// Round rounds up value for the Generic Debugfs platforms and all codecs.
func (g GenericBackend) Round(value int) int {
const genericAlignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
return (value + genericAlignment - 1) & ^(genericAlignment - 1)
}
// ReadFramebufferCount tries to open the DRM device file and count the amount
// of lines of dimensions width x height, which corresponds to the amount of
// framebuffers allocated in the system. See
// https://dri.freedesktop.org/docs/drm/gpu/amdgpu.html
func (g GenericBackend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(fmt.Sprintf(genericFramebufferFilePattern, g.index))
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
defer f.Close()
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "...size=1920x1080"
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, " size=%dx%d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GetBackend tries to get the appropriate platform graphics debug backend and
// returns it, or returns an error.
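// Typical use from a test (illustrative sketch; playVideo is a placeholder):
//   backend, err := graphics.GetBackend()
//   if err != nil { ... }
//   err = graphics.VerifyGraphicsMemory(ctx, playVideo, backend,
//       []graphics.Size{{Width: 1920, Height: 1080}})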
func GetBackend() (Backend, error) {
// TODO(mcasas): In the future we might want to support systems with several GPUs.
// Prefer the genericBackend.
if be := genericBackend(); be != nil {
return be, nil
}
if be := i915Backend(); be != nil {
return be, nil
}
return nil, errors.New("could not find any Graphics backend")
}
// compareGraphicsMemoryBeforeAfter compares the graphics memory consumption
// before and after running the payload function, using the backend. The number
// of graphics buffers observed while the payload runs must also change from the
// count measured before.
func compareGraphicsMemoryBeforeAfter(ctx context.Context, payload func() error, backend Backend, roundedWidth, roundedHeight int) (err error) {
var before, during, after int
if before, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
// Note: we don't wait for ReadFramebufferCount() to finish; we just keep
// sampling until during differs from before, and compare the values further
// below.
go func() {
const pollTimeout = 10 * time.Second
const pollInterval = 100 * time.Millisecond
_ = testing.Poll(ctx, func(ctx context.Context) error {
// TODO(crbug.com/1047514): instead of blindly sampling the amount of
// objects during the test and comparing them further down, verify them
// here directly.
if during, _ = backend.ReadFramebufferCount(ctx, roundedWidth, roundedHeight); during == before {
return errors.New("Still waiting for graphics objects")
}
return nil
}, &testing.PollOptions{Timeout: pollTimeout, Interval: pollInterval})
}()
err = <-c
if err != nil {
return err
}
if after, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
if before != after {
return errors.Wrapf(err, "graphics objects of size %d x %d do not coincide: before=%d, after=%d", roundedWidth, roundedHeight, before, after)
}
if during == before {
return errors.Wrapf(err, "graphics objects of size %d x %d did not increase during play back: before=%d, during=%d", roundedWidth, roundedHeight, before, during)
}
testing.ContextLogf(ctx, "Graphics objects of size %d x %d before=%d, during=%d, after=%d", roundedWidth, roundedHeight, before, during, after)
return nil
}
// monitorGraphicsMemoryDuring verifies that the graphics memory consumption
// while running the payload function, using the backend, does not spiral out
// of control, by comparing it to the appropriate threshold.
func monitorGraphicsMemoryDuring(ctx context.Context, payload func() error, backend Backend, roundedSizes []Size, threshold int) (err error) {
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
const pollInterval = 1 * time.Second
ticker := time.NewTicker(pollInterval)
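// Each tick samples every rounded size; the loop exits when payload() completes,
// the context deadline is hit, or any count exceeds the threshold.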
for {
select {
case <-ctx.Done():
ticker.Stop()
return errors.New("test timed out")
case pErr := <-c:
ticker.Stop()
return pErr
case <-ticker.C:
for _, roundedSize := range roundedSizes {
count, _ := backend.ReadFramebufferCount(ctx, roundedSize.Width, roundedSize.Height)
if count > threshold {
// TODO(mcasas): find a way to kill payload() at this point.
ticker.Stop()
err := errors.Errorf("too many objects of size %d x %d, got: %d, threshold: %d", roundedSize.Width, roundedSize.Height, count, threshold)
select {
case <-c:
case <-ctx.Done():
}
return err
}
}
}
}
}
// VerifyGraphicsMemory uses the backend to detect memory leaks during or after
// the execution of payload.
func VerifyGraphicsMemory(ctx context.Context, payload func() error, backend Backend, sizes []Size) (err error) {
testing.ContextLogf(ctx, "Cooling down %v after log in", coolDownTimeAfterLogin)
if err := testing.Sleep(ctx, coolDownTimeAfterLogin); err != nil {
return errors.Wrap(err, "error while cooling down after log in")
}
var roundedSizes []Size
for _, size := range sizes {
roundedSizes = append(roundedSizes, Size{Width: backend.Round(size.Width), Height: backend.Round(size.Height)})
}
if len(sizes) == 1 {
return compareGraphicsMemoryBeforeAfter(ctx, payload, backend, roundedSizes[0].Width, roundedSizes[0].Height)
}
return monitorGraphicsMemoryDuring(ctx, payload, backend, roundedSizes, maxGraphicsObjects)
}
// readStableObjectCount waits until a given graphics object count obtained with
// backend is stable, up to a certain timeout, progressively relaxing a
// similarity threshold criterion.
func readStableObjectCount(ctx context.Context, backend Backend, width, height int) (objectCount int, err error) {
const (
pollingInterval = 1 * time.Second
// Time to wait for the object count to be stable.
waitTimeout = 120 * time.Second
// Threshold (in percentage) below which the object count is considered stable.
objectCountThresholdBase = 0.1
// Maximum threshold (in percentage) for the object count to be considered stable.
objectCountThresholdMax = 2.0
// Maximum steps of relaxing the object count similarity threshold.
relaxingThresholdSteps = 5
)
startTime := time.Now()
delta := (objectCountThresholdMax - objectCountThresholdBase) / (relaxingThresholdSteps - 1)
testing.ContextLogf(ctx, "Waiting at most %v for stable graphics object count, threshold will be gradually relaxed from %.1f%% to %.1f%%",
waitTimeout, objectCountThresholdBase, objectCountThresholdMax)
for i := 0; i < relaxingThresholdSteps; i++ {
idlePercent := objectCountThresholdBase + (delta * float64(i))
timeout := waitTimeout / relaxingThresholdSteps
testing.ContextLogf(ctx, "Waiting up to %v for object count to settle within %.1f%% (%d/%d)",
timeout.Round(time.Second), idlePercent, i+1, relaxingThresholdSteps)
objectCount, err = waitForStableReadings(ctx, backend, width, height, timeout, pollingInterval, idlePercent)
if err == nil {
testing.ContextLogf(ctx, "Waiting for object count stabilisation took %v (value %d, threshold: %.1f%%)",
time.Since(startTime).Round(time.Second), objectCount, idlePercent)
return objectCount, nil
}
}
return objectCount, err
}
// waitForStableReadings reads values using backend and waits for up to timeout
// for the moving average of the last numReadings to settle within threshold.
func waitForStableReadings(ctx context.Context, backend Backend, width, height int, timeout, interval time.Duration, threshold float64) (reading int, err error) {
// Keep the last numReadings for moving average purposes. Make it half the
// size that the current timeout and interval would allow.
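// E.g. a 24s timeout with a 1s polling interval keeps the last 12 readings.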
numReadings := int(math.Floor(float64(timeout / (2.0 * interval))))
var currentNumReadings int
var values = make([]int, numReadings)
err = testing.Poll(ctx, func(ctx context.Context) error {
var e error
reading, e = backend.ReadFramebufferCount(ctx, width, height)
if e != nil {
return testing.PollBreak(errors.Wrap(e, "failed measuring"))
}
values[currentNumReadings%numReadings] = reading
currentNumReadings++
if currentNumReadings < numReadings {
return errors.Errorf("need more values (got: %d and want: %d)", currentNumReadings, numReadings)
}
average := mean(values)
if math.Abs(float64(reading)-average) > threshold {
return errors.Errorf("reading %d is not within %.1f of %.1f", reading, threshold, average)
}
return nil
}, &testing.PollOptions{Timeout: timeout, Interval: interval})
return reading, err
}
// mean returns the average of values.
func mean(values []int) float64 {
var sum float64
for _, v := range values {
sum += float64(v)
}
return sum / float64(len(values))
} | func i915Backend() *I915Backend { | random_line_split |
dri.go | // Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package graphics contains graphics-related utility functions for local tests.
package graphics
import (
"context"
"fmt"
"io/ioutil"
"math"
"os"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
// The debugfs file with the information on allocated framebuffers for Intel i915 GPUs.
i915FramebufferFile = "/sys/kernel/debug/dri/0/i915_gem_framebuffer"
// The debugfs file with the information on allocated framebuffers for generic
// implementations, e.g. AMD, modern Intel GPUs, ARM-based devices.
genericFramebufferFilePattern = "/sys/kernel/debug/dri/%d/framebuffer"
// Maximum DRM device minor number.
maxDRMDeviceNumber = 64
// Immediately after login there's a lot of graphics activity; wait for it to
// subside. TODO(crbug.com/1047840): Remove when not needed.
coolDownTimeAfterLogin = 30 * time.Second
// Amount of graphics objects for a given resolution considered bad, regardless of codec.
maxGraphicsObjects = 25
)
// Size represents a Width x Height pair, for example for a video resolution.
type Size struct {
Width int
Height int
}
// Backend contains the necessary methods to interact with the platform debug
// interface and getting readings.
type Backend interface {
// Round implements the platform-specific graphics or codec rounding.
Round(value int) int
// ReadFramebufferCount tries to retrieve the number of framebuffers of width
// and height dimensions allocated by the Backend.
ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, err error)
}
// I915Backend implements Backend for the Intel i915 case.
type I915Backend struct{}
func i915Backend() *I915Backend {
if _, err := os.Stat(i915FramebufferFile); err != nil {
return nil
}
return &I915Backend{}
}
// Round rounds up value for the Intel platforms and all codecs.
func (g I915Backend) Round(value int) int {
const i915Alignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
return (value + i915Alignment - 1) & ^(i915Alignment - 1)
}
// ReadFramebufferCount tries to open the i915FramebufferFile and count the
// amount of lines of dimensions width x height, which corresponds to the amount
// of framebuffers allocated in the system.
// See https://dri.freedesktop.org/docs/drm/gpu/i915.html
func (g I915Backend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(i915FramebufferFile)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to open dri file")
}
defer f.Close()
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "user size: 1920 x 1080,..."
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, "user size: %d x %d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GenericBackend implements Backend for the Generic case (Intel and AMD).
type GenericBackend struct {
// Index of the DRM card device file (X in /dev/dri/cardX).
index int
}
func genericBackend() *GenericBackend {
for i := 0; i < maxDRMDeviceNumber; i++ {
if _, err := os.Stat(fmt.Sprintf(genericFramebufferFilePattern, i)); err == nil {
return &GenericBackend{index: i}
}
}
return nil
}
// Round rounds up value for the Generic Debugfs platforms and all codecs.
func (g GenericBackend) Round(value int) int {
const genericAlignment = 16
// Inspired by Chromium's base/bits.h:Align() function.
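// For example, Round(1080) returns 1088; multiples of 16 such as 1920 are unchanged.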
return (value + genericAlignment - 1) & ^(genericAlignment - 1)
}
// ReadFramebufferCount tries to open the DRM device file and count the amount
// of lines of dimensions width x height, which corresponds to the amount of
// framebuffers allocated in the system. See
// https://dri.freedesktop.org/docs/drm/gpu/amdgpu.html
func (g GenericBackend) ReadFramebufferCount(ctx context.Context, width, height int) (framebuffers int, e error) {
f, err := os.Open(fmt.Sprintf(genericFramebufferFilePattern, g.index))
if err != nil |
text, err := ioutil.ReadAll(f)
if err != nil {
return framebuffers, errors.Wrap(err, "failed to read dri file")
}
lines := strings.Split(string(text), "\n")
for _, line := range lines {
// The line we're looking for looks like "...size=1920x1080"
var fbWidth, fbHeight int
if _, err := fmt.Sscanf(line, " size=%dx%d", &fbWidth, &fbHeight); err != nil {
continue
}
if fbWidth == width && fbHeight == height {
framebuffers++
}
}
return
}
// GetBackend tries to get the appropriate platform graphics debug backend and
// returns it, or returns an error.
func GetBackend() (Backend, error) {
// TODO(mcasas): In the future we might want to support systems with several GPUs.
// Prefer the genericBackend.
if be := genericBackend(); be != nil {
return be, nil
}
if be := i915Backend(); be != nil {
return be, nil
}
return nil, errors.New("could not find any Graphics backend")
}
// compareGraphicsMemoryBeforeAfter compares the graphics memory consumption
// before and after running the payload function, using the backend. The number
// of graphics buffers observed while the payload runs must also change from the
// count measured before.
func compareGraphicsMemoryBeforeAfter(ctx context.Context, payload func() error, backend Backend, roundedWidth, roundedHeight int) (err error) {
var before, during, after int
if before, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
// Note: we don't wait for ReadFramebufferCount() to finish; we just keep
// sampling until during differs from before, and compare the values further
// below.
go func() {
const pollTimeout = 10 * time.Second
const pollInterval = 100 * time.Millisecond
_ = testing.Poll(ctx, func(ctx context.Context) error {
// TODO(crbug.com/1047514): instead of blindly sampling the amount of
// objects during the test and comparing them further down, verify them
// here directly.
if during, _ = backend.ReadFramebufferCount(ctx, roundedWidth, roundedHeight); during == before {
return errors.New("Still waiting for graphics objects")
}
return nil
}, &testing.PollOptions{Timeout: pollTimeout, Interval: pollInterval})
}()
err = <-c
if err != nil {
return err
}
if after, err = readStableObjectCount(ctx, backend, roundedWidth, roundedHeight); err != nil {
return errors.Wrap(err, "failed to get the framebuffer object count")
}
if before != after {
return errors.Wrapf(err, "graphics objects of size %d x %d do not coincide: before=%d, after=%d", roundedWidth, roundedHeight, before, after)
}
if during == before {
return errors.Wrapf(err, "graphics objects of size %d x %d did not increase during play back: before=%d, during=%d", roundedWidth, roundedHeight, before, during)
}
testing.ContextLogf(ctx, "Graphics objects of size %d x %d before=%d, during=%d, after=%d", roundedWidth, roundedHeight, before, during, after)
return nil
}
// monitorGraphicsMemoryDuring verifies that the graphics memory consumption
// while running the payload function, using the backend, does not spiral out
// of control, by comparing it to the appropriate threshold.
func monitorGraphicsMemoryDuring(ctx context.Context, payload func() error, backend Backend, roundedSizes []Size, threshold int) (err error) {
testing.ContextLog(ctx, "Running the payload() and measuring the number of graphics objects during its execution")
c := make(chan error)
go func(c chan error) {
c <- payload()
}(c)
const pollInterval = 1 * time.Second
ticker := time.NewTicker(pollInterval)
for {
select {
case <-ctx.Done():
ticker.Stop()
return errors.New("test timed out")
case pErr := <-c:
ticker.Stop()
return pErr
case <-ticker.C:
for _, roundedSize := range roundedSizes {
count, _ := backend.ReadFramebufferCount(ctx, roundedSize.Width, roundedSize.Height)
if count > threshold {
// TODO(mcasas): find a way to kill payload() at this point.
ticker.Stop()
err := errors.Errorf("too many objects of size %d x %d, got: %d, threshold: %d", roundedSize.Width, roundedSize.Height, count, threshold)
select {
case <-c:
case <-ctx.Done():
}
return err
}
}
}
}
}
// VerifyGraphicsMemory uses the backend to detect memory leaks during or after
// the execution of payload.
func VerifyGraphicsMemory(ctx context.Context, payload func() error, backend Backend, sizes []Size) (err error) {
testing.ContextLogf(ctx, "Cooling down %v after log in", coolDownTimeAfterLogin)
if err := testing.Sleep(ctx, coolDownTimeAfterLogin); err != nil {
return errors.Wrap(err, "error while cooling down after log in")
}
var roundedSizes []Size
for _, size := range sizes {
roundedSizes = append(roundedSizes, Size{Width: backend.Round(size.Width), Height: backend.Round(size.Height)})
}
if len(sizes) == 1 {
return compareGraphicsMemoryBeforeAfter(ctx, payload, backend, roundedSizes[0].Width, roundedSizes[0].Height)
}
return monitorGraphicsMemoryDuring(ctx, payload, backend, roundedSizes, maxGraphicsObjects)
}
// readStableObjectCount waits until a given graphics object count obtained with
// backend is stable, up to a certain timeout, progressively relaxing a
// similarity threshold criterion.
func readStableObjectCount(ctx context.Context, backend Backend, width, height int) (objectCount int, err error) {
const (
pollingInterval = 1 * time.Second
// Time to wait for the object count to be stable.
waitTimeout = 120 * time.Second
// Threshold (in percentage) below which the object count is considered stable.
objectCountThresholdBase = 0.1
// Maximum threshold (in percentage) for the object count to be considered stable.
objectCountThresholdMax = 2.0
// Maximum steps of relaxing the object count similarity threshold.
relaxingThresholdSteps = 5
)
startTime := time.Now()
delta := (objectCountThresholdMax - objectCountThresholdBase) / (relaxingThresholdSteps - 1)
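// With the constants above the threshold steps are 0.1%, 0.575%, 1.05%, 1.525%
// and 2.0%, each tried for up to 24 seconds.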
testing.ContextLogf(ctx, "Waiting at most %v for stable graphics object count, threshold will be gradually relaxed from %.1f%% to %.1f%%",
waitTimeout, objectCountThresholdBase, objectCountThresholdMax)
for i := 0; i < relaxingThresholdSteps; i++ {
idlePercent := objectCountThresholdBase + (delta * float64(i))
timeout := waitTimeout / relaxingThresholdSteps
testing.ContextLogf(ctx, "Waiting up to %v for object count to settle within %.1f%% (%d/%d)",
timeout.Round(time.Second), idlePercent, i+1, relaxingThresholdSteps)
objectCount, err = waitForStableReadings(ctx, backend, width, height, timeout, pollingInterval, idlePercent)
if err == nil {
testing.ContextLogf(ctx, "Waiting for object count stabilisation took %v (value %d, threshold: %.1f%%)",
time.Since(startTime).Round(time.Second), objectCount, idlePercent)
return objectCount, nil
}
}
return objectCount, err
}
// waitForStableReadings reads values using backend and waits for up to timeout
// for the moving average of the last numReadings to settle within threshold.
func waitForStableReadings(ctx context.Context, backend Backend, width, height int, timeout, interval time.Duration, threshold float64) (reading int, err error) {
// Keep the last numReadings for moving average purposes. Make it half the
// size that the current timeout and interval would allow.
numReadings := int(math.Floor(float64(timeout / (2.0 * interval))))
var currentNumReadings int
var values = make([]int, numReadings)
err = testing.Poll(ctx, func(ctx context.Context) error {
var e error
reading, e = backend.ReadFramebufferCount(ctx, width, height)
if e != nil {
return testing.PollBreak(errors.Wrap(e, "failed measuring"))
}
values[currentNumReadings%numReadings] = reading
currentNumReadings++
if currentNumReadings < numReadings {
return errors.Errorf("need more values (got: %d and want: %d)", currentNumReadings, numReadings)
}
average := mean(values)
if math.Abs(float64(reading)-average) > threshold {
return errors.Errorf("reading %d is not within %.1f of %.1f", reading, threshold, average)
}
return nil
}, &testing.PollOptions{Timeout: timeout, Interval: interval})
return reading, err
}
// mean returns the average of values.
func mean(values []int) float64 {
var sum float64
for _, v := range values {
sum += float64(v)
}
return sum / float64(len(values))
}
| {
return framebuffers, errors.Wrap(err, "failed to open dri file")
} | conditional_block |
context.rs | use crate::*;
use winit::event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::platform::desktop::EventLoopExtDesktop;
const ENABLE_DEBUG_MESSENGER_CALLBACK: bool = true;
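// The *Handle types below are lightweight Copy wrappers around u64 identifiers.
// PassHandle and GraphHandle are hashes of the recorded pass/graph descriptions
// (see add_pass() and build_graph()); the others are issued by the shader, image
// and buffer lists.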
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct BufferHandle(pub u64);
#[derive(Copy, Clone)]
pub struct | (pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct PassHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ImageHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ShaderHandle(pub u64);
pub struct Context {
window: winit::window::Window,
event_loop: winit::event_loop::EventLoop<()>,
// Graph being built in the current frame
pub builder_passes: Vec<(PassHandle, BuilderPass)>,
pub shader_list: ShaderList,
// TODO: Move these to the graph builder instead?
pub image_list: ImageList,
pub buffer_list: BufferList,
graph_cache: Vec<(Graph, GraphHandle)>, // (graph, hash) // TODO: Make this a proper LRU and move it to its own file
pub command_pool: vk::CommandPool,
pub sync_idx: usize, // Index of the synchronization primitives
pub swapchain_idx: usize, // Index of the swapchain frame
_watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
pub command_buffers: Vec<vk::CommandBuffer>,
pub facade: Facade, // Resolution-dependent apparatus
pub debug_utils: DebugUtils,
pub gpu: Gpu,
pub basis: Basis,
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
self.gpu
.device
.free_command_buffers(self.command_pool, &self.command_buffers);
self.gpu
.device
.destroy_command_pool(self.command_pool, None);
self.facade.destroy(&mut self.image_list);
}
}
}
impl Context {
pub fn recreate_resolution_dependent_state(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle.")
};
// Recreate swapchain
self.facade.destroy(&mut self.image_list);
self.facade = Facade::new(
&self.basis,
&self.gpu,
&self.window,
&mut self.image_list,
&self.debug_utils,
);
// Recreate the images which depend on the resolution of the swapchain
for i in 0..self.image_list.list.len() {
let (_, internal_image) = &mut self.image_list.list[i];
if let ImageKind::RelativeSized { scale } = internal_image.kind {
let w = (self.facade.swapchain_width as f32 * scale) as u32;
let h = (self.facade.swapchain_height as f32 * scale) as u32;
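// For example (illustrative numbers): scale = 0.5 on a 1920x1080 swapchain
// recreates this image at 960x540.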
internal_image.image = Image::new(
&internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// Add expect messages to all these unwraps
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
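// build_graph() hashes the passes recorded so far and returns a handle to a
// cached Graph, building one on a cache miss. Illustrative frame loop (sketch
// only; the exact arguments are application-specific):
//   while ctx.begin_frame() {
//       let pass = ctx.add_pass(/* ... */)?;
//       let graph = ctx.build_graph();
//       ctx.begin_pass(graph, pass);
//       // record draw calls here
//       ctx.end_pass(graph);
//       ctx.end_frame();
//   }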
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width != physical_size.width
|| swapchain_height != physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// This mechanism is needed on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work when creating it, so we
name it on every begin_frame() instead. */
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
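// The submission below waits on image_available_semaphores at the
// color-attachment-output stage, signals render_finished_semaphores for the
// presentation engine, and signals the per-frame fence that begin_frame()
// waits on before reusing this command buffer.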
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine, when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
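// add_pass() records a BuilderPass for this frame and returns a hash-based
// PassHandle. Illustrative call (all names are placeholders):
//   let pass = ctx.add_pass("forward", vs, fs, &[color_img], Some(depth_img),
//       ubo, env_img, &sampler)?;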
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
uniform_buffer,
};
let pass_handle = {
let mut hasher = DefaultHasher::new();
pass.hash(&mut hasher);
PassHandle(hasher.finish())
};
self.builder_passes.push((pass_handle, pass));
Ok(pass_handle)
}
/* Shaders */
pub fn new_shader(
&mut self,
name: &str,
shader_stage: ShaderStage,
path: &str,
) -> Result<ShaderHandle, String> {
self.shader_list.new_shader(name, shader_stage, path)
}
/* Buffers */
pub fn new_buffer(
&mut self,
name: &str,
size: usize,
usage: vk::BufferUsageFlags,
) -> Result<BufferHandle, String> {
self.buffer_list
.new_buffer(name, size, usage, &self.gpu, &self.debug_utils)
}
pub fn upload_data<T>(&self, buffer_handle: BufferHandle, data: &[T]) {
self.buffer_list.upload_data(buffer_handle, data);
}
/* Images */
pub fn new_image_relative_size(
&mut self,
name: &str,
scale: f32,
format: vk::Format,
usage: vk::ImageUsageFlags,
aspect_flags: vk::ImageAspectFlags,
) -> Result<ImageHandle, String> {
self.image_list.new_image_relative_size(
name,
scale,
format,
usage,
aspect_flags,
&self.facade,
&self.gpu,
&self.debug_utils,
)
}
pub fn new_image_from_file(&mut self, name: &str, path: &str) -> Result<ImageHandle, String> {
self.image_list.new_image_from_file(
name,
path,
&self.gpu,
self.command_pool,
&self.debug_utils,
)
}
}
| GraphHandle | identifier_name |
context.rs | use crate::*;
use winit::event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::platform::desktop::EventLoopExtDesktop;
const ENABLE_DEBUG_MESSENGER_CALLBACK: bool = true;
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct BufferHandle(pub u64);
#[derive(Copy, Clone)]
pub struct GraphHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct PassHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ImageHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ShaderHandle(pub u64);
pub struct Context {
window: winit::window::Window,
event_loop: winit::event_loop::EventLoop<()>,
// Graph being built in the current frame
pub builder_passes: Vec<(PassHandle, BuilderPass)>,
pub shader_list: ShaderList,
// TODO: Move these to the graph builder instead?
pub image_list: ImageList,
pub buffer_list: BufferList,
graph_cache: Vec<(Graph, GraphHandle)>, // (graph, hash) // TODO: Make this a proper LRU and move it to its own file
pub command_pool: vk::CommandPool,
pub sync_idx: usize, // Index of the synchronization primitives
pub swapchain_idx: usize, // Index of the swapchain frame
_watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
pub command_buffers: Vec<vk::CommandBuffer>,
pub facade: Facade, // Resolution-dependent apparatus
pub debug_utils: DebugUtils,
pub gpu: Gpu,
pub basis: Basis,
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
self.gpu
.device
.free_command_buffers(self.command_pool, &self.command_buffers);
self.gpu
.device
.destroy_command_pool(self.command_pool, None);
self.facade.destroy(&mut self.image_list);
}
}
}
impl Context {
pub fn recreate_resolution_dependent_state(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle.")
};
// Recreate swapchain
self.facade.destroy(&mut self.image_list);
self.facade = Facade::new(
&self.basis,
&self.gpu,
&self.window, | &mut self.image_list,
&self.debug_utils,
);
// Recreate the images which depend on the resolution of the swapchain
for i in 0..self.image_list.list.len() {
let (_, internal_image) = &mut self.image_list.list[i];
if let ImageKind::RelativeSized { scale } = internal_image.kind {
let w = (self.facade.swapchain_width as f32 * scale) as u32;
let h = (self.facade.swapchain_height as f32 * scale) as u32;
internal_image.image = Image::new(
&internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// Add expect messages to all these unwraps
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width != physical_size.width
|| swapchain_height != physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// This mechanism is needed on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work when creating it, so we
name it on every begin_frame() instead. */
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine, when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
uniform_buffer,
};
let pass_handle = {
let mut hasher = DefaultHasher::new();
pass.hash(&mut hasher);
PassHandle(hasher.finish())
};
self.builder_passes.push((pass_handle, pass));
Ok(pass_handle)
}
/* Shaders */
pub fn new_shader(
&mut self,
name: &str,
shader_stage: ShaderStage,
path: &str,
) -> Result<ShaderHandle, String> {
self.shader_list.new_shader(name, shader_stage, path)
}
/* Buffers */
pub fn new_buffer(
&mut self,
name: &str,
size: usize,
usage: vk::BufferUsageFlags,
) -> Result<BufferHandle, String> {
self.buffer_list
.new_buffer(name, size, usage, &self.gpu, &self.debug_utils)
}
pub fn upload_data<T>(&self, buffer_handle: BufferHandle, data: &[T]) {
self.buffer_list.upload_data(buffer_handle, data);
}
/* Images */
pub fn new_image_relative_size(
&mut self,
name: &str,
scale: f32,
format: vk::Format,
usage: vk::ImageUsageFlags,
aspect_flags: vk::ImageAspectFlags,
) -> Result<ImageHandle, String> {
self.image_list.new_image_relative_size(
name,
scale,
format,
usage,
aspect_flags,
&self.facade,
&self.gpu,
&self.debug_utils,
)
}
pub fn new_image_from_file(&mut self, name: &str, path: &str) -> Result<ImageHandle, String> {
self.image_list.new_image_from_file(
name,
path,
&self.gpu,
self.command_pool,
&self.debug_utils,
)
}
} | random_line_split |
|
context.rs | use crate::*;
use winit::event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::platform::desktop::EventLoopExtDesktop;
const ENABLE_DEBUG_MESSENGER_CALLBACK: bool = true;
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct BufferHandle(pub u64);
#[derive(Copy, Clone)]
pub struct GraphHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct PassHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ImageHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ShaderHandle(pub u64);
pub struct Context {
window: winit::window::Window,
event_loop: winit::event_loop::EventLoop<()>,
// Graph being built in the current frame
pub builder_passes: Vec<(PassHandle, BuilderPass)>,
pub shader_list: ShaderList,
// TODO: Move these to the graph builder instead?
pub image_list: ImageList,
pub buffer_list: BufferList,
graph_cache: Vec<(Graph, GraphHandle)>, // (graph, hash) // TODO: Make this a proper LRU and move it to its own file
pub command_pool: vk::CommandPool,
pub sync_idx: usize, // Index of the synchronization primitives
pub swapchain_idx: usize, // Index of the swapchain frame
_watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
pub command_buffers: Vec<vk::CommandBuffer>,
pub facade: Facade, // Resolution-dependent apparatus
pub debug_utils: DebugUtils,
pub gpu: Gpu,
pub basis: Basis,
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
self.gpu
.device
.free_command_buffers(self.command_pool, &self.command_buffers);
self.gpu
.device
.destroy_command_pool(self.command_pool, None);
self.facade.destroy(&mut self.image_list);
}
}
}
impl Context {
pub fn recreate_resolution_dependent_state(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle.")
};
// Recreate swapchain
self.facade.destroy(&mut self.image_list);
self.facade = Facade::new(
&self.basis,
&self.gpu,
&self.window,
&mut self.image_list,
&self.debug_utils,
);
// Recreate the images which depend on the resolution of the swapchain
for i in 0..self.image_list.list.len() {
let (_, internal_image) = &mut self.image_list.list[i];
if let ImageKind::RelativeSized { scale } = internal_image.kind {
let w = (self.facade.swapchain_width as f32 * scale) as u32;
let h = (self.facade.swapchain_height as f32 * scale) as u32;
internal_image.image = Image::new(
&internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// Add expect messages to all these unwraps
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
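// Illustrative frame loop built on top of this type (sketch only):
//   while ctx.begin_frame() {
//       let graph = ctx.build_graph();
//       /* begin_pass / draw / end_pass */
//       ctx.end_frame();
//   }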
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width != physical_size.width
|| swapchain_height != physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// This mechanism is needed on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work when creating it, so we
name it on every begin_frame() instead. */
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
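// Advance to the next set of synchronization primitives, cycling over the frames in flight.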
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
uniform_buffer,
};
let pass_handle = {
let mut hasher = DefaultHasher::new();
pass.hash(&mut hasher);
PassHandle(hasher.finish())
};
self.builder_passes.push((pass_handle, pass));
Ok(pass_handle)
}
/* Shaders */
pub fn new_shader(
&mut self,
name: &str,
shader_stage: ShaderStage,
path: &str,
) -> Result<ShaderHandle, String> {
self.shader_list.new_shader(name, shader_stage, path)
}
/* Buffers */
pub fn new_buffer(
&mut self,
name: &str,
size: usize,
usage: vk::BufferUsageFlags,
) -> Result<BufferHandle, String> {
self.buffer_list
.new_buffer(name, size, usage, &self.gpu, &self.debug_utils)
}
pub fn upload_data<T>(&self, buffer_handle: BufferHandle, data: &[T]) {
self.buffer_list.upload_data(buffer_handle, data);
}
/* Images */
pub fn new_image_relative_size(
&mut self,
name: &str,
scale: f32,
format: vk::Format,
usage: vk::ImageUsageFlags,
aspect_flags: vk::ImageAspectFlags,
) -> Result<ImageHandle, String> {
self.image_list.new_image_relative_size(
name,
scale,
format,
usage,
aspect_flags,
&self.facade,
&self.gpu,
&self.debug_utils,
)
}
pub fn new_image_from_file(&mut self, name: &str, path: &str) -> Result<ImageHandle, String> |
}
| {
self.image_list.new_image_from_file(
name,
path,
&self.gpu,
self.command_pool,
&self.debug_utils,
)
} | identifier_body |
context.rs | use crate::*;
use winit::event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::platform::desktop::EventLoopExtDesktop;
const ENABLE_DEBUG_MESSENGER_CALLBACK: bool = true;
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct BufferHandle(pub u64);
#[derive(Copy, Clone)]
pub struct GraphHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct PassHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ImageHandle(pub u64);
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ShaderHandle(pub u64);
pub struct Context {
window: winit::window::Window,
event_loop: winit::event_loop::EventLoop<()>,
// Graph being built in the current frame
pub builder_passes: Vec<(PassHandle, BuilderPass)>,
pub shader_list: ShaderList,
// TODO: Move these to the graph builder instead?
pub image_list: ImageList,
pub buffer_list: BufferList,
graph_cache: Vec<(Graph, GraphHandle)>, // (graph, hash) // TODO: Make this a proper LRU and move it to its own file
pub command_pool: vk::CommandPool,
pub sync_idx: usize, // Index of the synchronization primitives
pub swapchain_idx: usize, // Index of the swapchain frame
_watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
pub command_buffers: Vec<vk::CommandBuffer>,
pub facade: Facade, // Resolution-dependent apparatus
pub debug_utils: DebugUtils,
pub gpu: Gpu,
pub basis: Basis,
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
self.gpu
.device
.free_command_buffers(self.command_pool, &self.command_buffers);
self.gpu
.device
.destroy_command_pool(self.command_pool, None);
self.facade.destroy(&mut self.image_list);
}
}
}
impl Context {
pub fn recreate_resolution_dependent_state(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle.")
};
// Recreate swapchain
self.facade.destroy(&mut self.image_list);
self.facade = Facade::new(
&self.basis,
&self.gpu,
&self.window,
&mut self.image_list,
&self.debug_utils,
);
// Recreate the images which depend on the resolution of the swapchain
for i in 0..self.image_list.list.len() {
let (_, internal_image) = &mut self.image_list.list[i];
if let ImageKind::RelativeSized { scale } = internal_image.kind {
let w = (self.facade.swapchain_width as f32 * scale) as u32;
let h = (self.facade.swapchain_height as f32 * scale) as u32;
internal_image.image = Image::new(
&internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// TODO: Add expect messages to all these unwraps
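// Watch the ./assets directory (2-second debounce); end_frame() drains these
// events and hot-reloads shaders when files change.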
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
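// A typical per-frame flow with this API looks roughly like the sketch below
// (hypothetical caller code, shown for illustration only):
//
//     while ctx.begin_frame() {
//         let pass = ctx.add_pass(/* name, shaders, output images, uniform buffer, sampler */)?;
//         let graph = ctx.build_graph();
//         ctx.begin_pass(graph, pass);
//         // ... record draw calls into ctx.command_buffers[ctx.swapchain_idx] ...
//         ctx.end_pass(graph);
//         ctx.end_frame();
//     }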
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width != physical_size.width
|| swapchain_height != physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => |
_ => (),
}
});
// This mechanism is needed on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work when creating it, so we
name it on every begin_frame() instead. */
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
uniform_buffer,
};
let pass_handle = {
let mut hasher = DefaultHasher::new();
pass.hash(&mut hasher);
PassHandle(hasher.finish())
};
self.builder_passes.push((pass_handle, pass));
Ok(pass_handle)
}
/* Shaders */
pub fn new_shader(
&mut self,
name: &str,
shader_stage: ShaderStage,
path: &str,
) -> Result<ShaderHandle, String> {
self.shader_list.new_shader(name, shader_stage, path)
}
/* Buffers */
pub fn new_buffer(
&mut self,
name: &str,
size: usize,
usage: vk::BufferUsageFlags,
) -> Result<BufferHandle, String> {
self.buffer_list
.new_buffer(name, size, usage, &self.gpu, &self.debug_utils)
}
pub fn upload_data<T>(&self, buffer_handle: BufferHandle, data: &[T]) {
self.buffer_list.upload_data(buffer_handle, data);
}
/* Images */
pub fn new_image_relative_size(
&mut self,
name: &str,
scale: f32,
format: vk::Format,
usage: vk::ImageUsageFlags,
aspect_flags: vk::ImageAspectFlags,
) -> Result<ImageHandle, String> {
self.image_list.new_image_relative_size(
name,
scale,
format,
usage,
aspect_flags,
&self.facade,
&self.gpu,
&self.debug_utils,
)
}
pub fn new_image_from_file(&mut self, name: &str, path: &str) -> Result<ImageHandle, String> {
self.image_list.new_image_from_file(
name,
path,
&self.gpu,
self.command_pool,
&self.debug_utils,
)
}
}
| {
*control_flow = ControlFlow::Exit;
} | conditional_block |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER model is an English BERT cased large model finetuned on CoNLL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in ./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrates how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )),
//! lower_case: false,
//! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** | **Model name**     |
//! |:------------:|:------------------:|
//! | English      | XLM_ROBERTA_NER_EN |
//! | German       | XLM_ROBERTA_NER_DE |
//! | Spanish      | XLM_ROBERTA_NER_ES |
//! | Dutch        | XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label != "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text by performing entity chunking. Follows the algorithm for entity
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{ |
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn handle_current_tag(
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => {
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label != label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
}
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
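// Positional tags follow the IOBES scheme: Begin/Inside/End mark multi-token entities,
// Single marks a one-token entity, and Outside marks non-entity tokens. Plain IOB
// labels (B-/I-/O) are also handled, since only the leading letter is inspected.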
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
}
|
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
| identifier_body |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER model is an English BERT cased large model finetuned on CoNLL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in ./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrates how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )),
//! lower_case: false,
//! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** | **Model name**     |
//! |:------------:|:------------------:|
//! | English      | XLM_ROBERTA_NER_EN |
//! | German       | XLM_ROBERTA_NER_DE |
//! | Spanish      | XLM_ROBERTA_NER_ES |
//! | Dutch        | XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label != "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text by performing entity chunking. Follows the algorithm for entity
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
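// Illustrative example: the tagged sequence [B-PER, I-PER, O, B-ORG] over the tokens
// ["John", "Smith", "visited", "Acme"] consolidates into two entities,
// "John Smith" (PER) and "Acme" (ORG).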
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn handle_current_tag(
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => { | }
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
}
|
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label != label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
| conditional_block |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER model is an English BERT cased large model finetuned on CoNLL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in ./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrates how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )),
//! lower_case: false,
//! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** | **Model name**     |
//! |:------------:|:------------------:|
//! | English      | XLM_ROBERTA_NER_EN |
//! | German       | XLM_ROBERTA_NER_DE |
//! | Spanish      | XLM_ROBERTA_NER_ES |
//! | Dutch        | XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label != "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text by performing entity chunking. Follows the algorithm for entity
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn h |
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => {
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label != label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
}
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
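// Illustrative walk-through (added for exposition; not part of the original library
// docs). Assuming the BIOES/IOBES-style labels handled above, a labelled sequence
// such as:
//
//     tokens:  "John"    "Smith"   "visited"  "Paris"
//     labels:  "B-PER"   "E-PER"   "O"        "S-LOC"
//
// is consolidated by `consolidate_entities` into two entities: "John Smith" (PER),
// started at the B- tag and flushed at the O tag, and "Paris" (LOC), emitted as a
// single-token S- entity. The consolidated score is the product of the member token
// scores, and the offset spans from the first token's begin to the last token's end.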
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
}
| andle_current_tag( | identifier_name |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER model is an English BERT cased large model fine-tuned on CoNLL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set up a Python virtual environment and install dependencies (in ./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrates how to run the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )), | //! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** | **Model name**     |
//! | :----------: | :----------------: |
//! | English      | XLM_ROBERTA_NER_EN |
//! | German       | XLM_ROBERTA_NER_DE |
//! | Spanish      | XLM_ROBERTA_NER_ES |
//! | Dutch        | XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label != "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
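// Note (added for exposition; a summary of the two prediction modes, not original
// library documentation): `predict` above returns one `Entity` per non-"O" token and
// keeps the raw scheme-prefixed label (for example "I-PER"), while
// `predict_full_entities` below merges consecutive tokens of the same entity and
// returns the bare label (for example "PER"). For "My name is Amy", `predict` yields
// a token-level entity for "Amy", whereas `predict_full_entities` returns that span
// as a single consolidated entity.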
/// Extract full entities from a text, performing entity chunking. Follows the entity
/// chunking algorithm described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing consolidated extracted entities, one `Vec<Entity>` per input text
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Output: \
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn handle_current_tag(
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => {
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label != label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
}
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
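// Sketch of the consolidation arithmetic (illustrative numbers, added for exposition):
// if an entity is built from two tokens scored 0.99 and 0.98, `flush_and_reset`
// reports the entity score as their product, roughly 0.970, and the entity offset
// runs from the first token's `begin` to the last token's `end`.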
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
} | //! lower_case: false, | random_line_split |
membership.go | /*
Copyright 2016 The Smudge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package smudge
import (
"errors"
"math"
"net"
"strconv"
"sync"
"time"
)
// A scalar value used to calculate a variety of limits
const lambda = 2.5
// How many standard deviations beyond the mean PING/ACK response time we
// allow before timing out an ACK.
const timeoutToleranceSigmas = 3.0
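// Illustrative note (added for exposition, not part of the original source): with the
// adaptive timeout below, an ACK is considered late once it exceeds roughly
// mean + 3 standard deviations of the observed ping times. For example, if pings have
// averaged 8ms with a standard deviation of 2ms, a response is allowed about 14ms
// before the pending ACK is treated as timed out (assumed figures).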
const defaultIPv4MulticastAddress = "224.0.0.0"
const defaultIPv6MulticastAddress = "[ff02::1]"
var currentHeartbeat uint32
var pendingAcks = struct {
sync.RWMutex
m map[string]*pendingAck
}{m: make(map[string]*pendingAck)}
var thisHostAddress string
var thisHost *Node
var ipLen = net.IPv4len
// This flag is set whenever a known node is added or removed.
var knownNodesModifiedFlag = false
var pingdata = newPingData(GetPingHistoryFrontload(), 50)
/******************************************************************************
* Exported functions (for public consumption)
*****************************************************************************/
// Begin starts the server by opening a UDP port and beginning the heartbeat.
// Note that this is a blocking function, so act appropriately.
func Begin() {
// Add this host.
logfInfo("Using listen IP: %s", listenIP)
// Use IPv6 address length if the listen IP is not an IPv4 address
if GetListenIP().To4() == nil {
ipLen = net.IPv6len
}
me := Node{
ip: GetListenIP(),
port: uint16(GetListenPort()),
timestamp: GetNowInMillis(),
pingMillis: PingNoData,
}
thisHostAddress = me.Address()
thisHost = &me
logInfo("My host address:", thisHostAddress)
// Add this node's status. Don't update any other node's statuses: they'll
// report those back to us.
updateNodeStatus(thisHost, StatusAlive, 0, thisHost)
AddNode(thisHost)
go listenUDP(GetListenPort())
// Add initial hosts as specified by the SMUDGE_INITIAL_HOSTS property
for _, address := range GetInitialHosts() {
n, err := CreateNodeByAddress(address)
if err != nil {
logfError("Could not create node %s: %v", address, err)
} else {
AddNode(n)
}
}
if GetMulticastEnabled() {
go listenUDPMulticast(GetMulticastPort())
go multicastAnnounce(GetMulticastAddress())
}
go startTimeoutCheckLoop()
// Loop over a randomized list of all known nodes (except for this host
// node), pinging one at a time. If the knownNodesModifiedFlag is set to
// true by AddNode() or RemoveNode(), then we get a fresh list and start
// again.
for {
var randomAllNodes = knownNodes.getRandomNodes(0, thisHost)
var pingCounter int
for _, node := range randomAllNodes {
// Exponential backoff of dead nodes, until such time as they are removed.
if node.status == StatusDead {
var dnc *deadNodeCounter
var ok bool
deadNodeRetries.Lock()
if dnc, ok = deadNodeRetries.m[node.Address()]; !ok {
dnc = &deadNodeCounter{retry: 1, retryCountdown: 2}
deadNodeRetries.m[node.Address()] = dnc
}
deadNodeRetries.Unlock()
dnc.retryCountdown--
if dnc.retryCountdown <= 0 {
dnc.retry++
dnc.retryCountdown = int(math.Pow(2.0, float64(dnc.retry)))
if dnc.retry > maxDeadNodeRetries |
} else {
continue
}
}
currentHeartbeat++
logfTrace("%d - hosts=%d (announce=%d forward=%d)",
currentHeartbeat,
len(randomAllNodes),
emitCount(),
pingRequestCount())
PingNode(node)
pingCounter++
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
if knownNodesModifiedFlag {
knownNodesModifiedFlag = false
break
}
}
if pingCounter == 0 {
logDebug("No nodes to ping. So lonely. :(")
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
}
}
}
// PingNode can be used to explicitly ping a node. Calls the low-level
// doPingNode(), and outputs a message (and returns an error) if it fails.
func PingNode(node *Node) error {
err := transmitVerbPingUDP(node, currentHeartbeat)
if err != nil {
logInfo("Failure to ping", node, "->", err)
}
return err
}
/******************************************************************************
* Private functions (for internal use only)
*****************************************************************************/
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func decodeMulticastAnnounceBytes(bytes []byte) (string, []byte, error) {
nameBytesLen := int(bytes[0])
if nameBytesLen+1 > len(bytes) {
return "", nil, errors.New("Invalid multicast message received")
}
nameBytes := bytes[1 : nameBytesLen+1]
name := string(nameBytes)
msgBytes := bytes[nameBytesLen+1 : len(bytes)]
return name, msgBytes, nil
}
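// decodeMulticastAnnounceExample is an illustrative sketch added for exposition; it
// is not part of the original Smudge source. It hand-builds a packet in the layout
// documented above - one length byte, the cluster name, then the message bytes - and
// feeds it through decodeMulticastAnnounceBytes. The cluster name "smudge" and the
// three payload bytes are arbitrary placeholder values; a real payload comes from
// message.encode().
func decodeMulticastAnnounceExample() (string, []byte, error) {
name := []byte("smudge")            // example cluster name, 6 bytes
payload := []byte{0x01, 0x02, 0x03} // placeholder for an encoded message
packet := make([]byte, 0, 1+len(name)+len(payload))
packet = append(packet, byte(len(name))) // Byte 0: name length N
packet = append(packet, name...)         // Bytes 1 to N: cluster name
packet = append(packet, payload...)      // Bytes N+1...: message bytes
// Expected result: ("smudge", []byte{0x01, 0x02, 0x03}, nil)
return decodeMulticastAnnounceBytes(packet)
}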
func doForwardOnTimeout(pack *pendingAck) {
filteredNodes := getTargetNodes(pingRequestCount(), thisHost, pack.node)
if len(filteredNodes) == 0 {
logDebug(thisHost.Address(), "Cannot forward ping request: no more nodes")
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
} else {
for i, n := range filteredNodes {
logfDebug("(%d/%d) Requesting indirect ping of %s via %s",
i+1,
len(filteredNodes),
pack.node.Address(),
n.Address())
transmitVerbForwardUDP(n, pack.node, currentHeartbeat)
}
}
}
// The number of times any node's new status should be emitted after changes.
// Currently set to (lambda * log(node count)).
func emitCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
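// emitCountForSize is an illustrative helper added for exposition; it is not part of
// the original Smudge source. It applies the same (lambda * ln(n)) + 0.5 heuristic
// used by emitCount and pingRequestCount to an arbitrary cluster size, which makes
// the scaling easy to see: roughly 6 for 10 nodes, 10 for 50 nodes, 17 for 1000 nodes.
func emitCountForSize(clusterSize int) int {
if clusterSize < 2 {
// Assumption made for this sketch: emit at least once in a trivially small cluster.
return 1
}
return int((lambda * math.Log(float64(clusterSize))) + 0.5)
}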
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func encodeMulticastAnnounceBytes() []byte {
nameBytes := []byte(GetClusterName())
nameBytesLen := len(nameBytes)
if nameBytesLen > 0xFF {
panic("Cluster name too long: " +
strconv.FormatInt(int64(nameBytesLen), 10) +
" bytes (max 254)")
}
msg := newMessage(verbPing, thisHost, currentHeartbeat)
msgBytes := msg.encode()
msgBytesLen := len(msgBytes)
totalByteCount := 1 + nameBytesLen + msgBytesLen
bytes := make([]byte, totalByteCount, totalByteCount)
// Add name length byte
bytes[0] = byte(nameBytesLen)
// Copy the name bytes
copy(bytes[1:nameBytesLen+1], nameBytes)
// Copy the message proper
copy(bytes[nameBytesLen+1:totalByteCount], msgBytes)
return bytes
}
func guessMulticastAddress() string {
if multicastAddress == "" {
if ipLen == net.IPv6len {
multicastAddress = defaultIPv6MulticastAddress
} else if ipLen == net.IPv4len {
multicastAddress = defaultIPv4MulticastAddress
} else {
logFatal("Failed to determine IPv4/IPv6")
}
}
return multicastAddress
}
// getListenInterface gets the network interface for the listen IP
func getListenInterface() (*net.Interface, error) {
ifaces, err := net.Interfaces()
if err == nil {
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
logfWarn("Can not get addresses of interface %s", iface.Name)
continue
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if ip.String() == GetListenIP().String() {
logfInfo("Found interface with listen IP: %s", iface.Name)
return &iface, nil
}
}
}
}
return nil, errors.New("Could not determine the interface of the listen IP address")
}
// Returns a random slice of valid ping/forward request targets; i.e., not
// this node, and not dead.
func getTargetNodes(count int, exclude ...*Node) []*Node {
randomNodes := knownNodes.getRandomNodes(0, exclude...)
filteredNodes := make([]*Node, 0, count)
for _, n := range randomNodes {
if len(filteredNodes) >= count {
break
}
if n.status == StatusDead {
continue
}
filteredNodes = append(filteredNodes, n)
}
return filteredNodes
}
func listenUDP(port int) error {
listenAddress, err := net.ResolveUDPAddr("udp", ":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
c, err := net.ListenUDP("udp", listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error: ", err)
}
go func(addr *net.UDPAddr, msg []byte) {
err = receiveMessageUDP(addr, buf[0:n])
if err != nil {
logError(err)
}
}(addr, buf[0:n])
}
}
func listenUDPMulticast(port int) error {
addr := GetMulticastAddress()
if addr == "" {
addr = guessMulticastAddress()
}
listenAddress, err := net.ResolveUDPAddr("udp", addr+":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
iface, err := getListenInterface()
if err != nil {
return err
}
c, err := net.ListenMulticastUDP("udp", iface, listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error:", err)
}
go func(addr *net.UDPAddr, bytes []byte) {
name, msgBytes, err := decodeMulticastAnnounceBytes(bytes)
if err != nil {
logDebug("Ignoring unexpected multicast message.")
} else {
if GetClusterName() == name {
msg, err := decodeMessage(addr.IP, msgBytes)
if err == nil {
logfTrace("Got multicast %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Update statuses of the sender.
updateStatusesFromMessage(msg)
} else {
logError(err)
}
}
}
}(addr, buf[0:n])
}
}
// multicastAnnounce is called when the server first starts to broadcast its
// presence to all listening servers within the specified subnet and continues
// to broadcast its presence every multicastAnnounceIntervalSeconds in case
// this value is larger than zero.
func multicastAnnounce(addr string) error {
if addr == "" {
addr = guessMulticastAddress()
}
fullAddr := addr + ":" + strconv.FormatInt(int64(GetMulticastPort()), 10)
logInfo("Announcing presence on", fullAddr)
address, err := net.ResolveUDPAddr("udp", fullAddr)
if err != nil {
logError(err)
return err
}
laddr := &net.UDPAddr{
IP: GetListenIP(),
Port: 0,
}
for {
c, err := net.DialUDP("udp", laddr, address)
if err != nil {
logError(err)
return err
}
// Compose and send the multicast announcement
msgBytes := encodeMulticastAnnounceBytes()
_, err = c.Write(msgBytes)
if err != nil {
logError(err)
return err
}
logfTrace("Sent announcement multicast from %v to %v", laddr, fullAddr)
if GetMulticastAnnounceIntervalSeconds() > 0 {
time.Sleep(time.Second * time.Duration(GetMulticastAnnounceIntervalSeconds()))
} else {
return nil
}
}
}
// The number of nodes to send a PINGREQ to when a PING times out.
// Currently set to (lambda * log(node count)).
func pingRequestCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
func receiveMessageUDP(addr *net.UDPAddr, msgBytes []byte) error {
msg, err := decodeMessage(addr.IP, msgBytes)
if err != nil {
return err
}
logfTrace("Got %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Synchronize heartbeats
if msg.senderHeartbeat > 0 && msg.senderHeartbeat-1 > currentHeartbeat {
logfTrace("Heartbeat advanced from %d to %d",
currentHeartbeat,
msg.senderHeartbeat-1)
currentHeartbeat = msg.senderHeartbeat - 1
}
// Update statuses of the sender and any members the message includes.
updateStatusesFromMessage(msg)
// If there are broadcast bytes in the message, handle them here.
receiveBroadcast(msg.broadcast)
// Handle the verb.
switch msg.verb {
case verbPing:
err = receiveVerbPingUDP(msg)
case verbAck:
err = receiveVerbAckUDP(msg)
case verbPingRequest:
err = receiveVerbForwardUDP(msg)
case verbNonForwardingPing:
err = receiveVerbNonForwardPingUDP(msg)
}
if err != nil {
return err
}
return nil
}
func receiveVerbAckUDP(msg message) error {
key := msg.sender.Address() + ":" + strconv.FormatInt(int64(msg.senderHeartbeat), 10)
pendingAcks.RLock()
_, ok := pendingAcks.m[key]
pendingAcks.RUnlock()
if ok {
msg.sender.Touch()
pendingAcks.Lock()
if pack, ok := pendingAcks.m[key]; ok {
// If this is a response to a requested ping, respond to the
// callback node
if pack.callback != nil {
go transmitVerbAckUDP(pack.callback, pack.callbackCode)
} else {
// Note the ping response time.
notePingResponseTime(pack)
}
}
delete(pendingAcks.m, key)
pendingAcks.Unlock()
}
return nil
}
func notePingResponseTime(pack *pendingAck) {
// Note the elapsed time
elapsedMillis := pack.elapsed()
pack.node.pingMillis = int(elapsedMillis)
// For the purposes of timeout tolerance, we treat all pings less than
// the ping lower bound as that lower bound.
minMillis := uint32(GetMinPingTime())
if elapsedMillis < minMillis {
elapsedMillis = minMillis
}
pingdata.add(elapsedMillis)
mean, stddev := pingdata.data()
sigmas := pingdata.nSigma(timeoutToleranceSigmas)
logfTrace("Got ACK in %dms (mean=%.02f stddev=%.02f sigmas=%.02f)",
elapsedMillis,
mean,
stddev,
sigmas)
}
func receiveVerbForwardUDP(msg message) error {
// We don't forward to a node that we don't know.
if len(msg.members) > 0 &&
msg.members[0].status == StatusForwardTo {
member := msg.members[0]
node := member.node
code := member.heartbeat
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: msg.sender,
callbackCode: code,
packType: packNFP}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbNonForwardingPing, code)
}
return nil
}
func receiveVerbPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func receiveVerbNonForwardPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func startTimeoutCheckLoop() {
for {
pendingAcks.Lock()
for k, pack := range pendingAcks.m {
elapsed := pack.elapsed()
timeoutMillis := uint32(pingdata.nSigma(timeoutToleranceSigmas))
// Ping requests are expected to take quite a bit longer.
// Just call it 2x for now.
if pack.packType == packPingReq {
timeoutMillis *= 2
}
// This pending ACK has taken longer than expected. Mark it as
// timed out.
if elapsed > timeoutMillis {
switch pack.packType {
case packPing:
go doForwardOnTimeout(pack)
case packPingReq:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped PINGREQ)")
if knownNodes.contains(pack.callback) {
switch pack.callback.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.callback, StatusDead, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.callback, StatusSuspected, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
}
}
case packNFP:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped NFP)")
if knownNodes.contains(pack.node) {
switch pack.node.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.node, StatusSuspected, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
}
}
}
delete(pendingAcks.m, k)
}
}
pendingAcks.Unlock()
time.Sleep(time.Millisecond * 100)
}
}
func transmitVerbGenericUDP(node *Node, forwardTo *Node, verb messageVerb, code uint32) error {
// Open a UDP connection to the target node and transmit the message.
remoteAddr, err := net.ResolveUDPAddr("udp", node.Address())
if err != nil {
return err
}
c, err := net.DialUDP("udp", nil, remoteAddr)
if err != nil {
return err
}
defer c.Close()
msg := newMessage(verb, thisHost, code)
if forwardTo != nil {
msg.addMember(forwardTo, StatusForwardTo, code, forwardTo.statusSource)
}
// Add members for update.
nodes := getRandomUpdatedNodes(pingRequestCount(), node, thisHost)
// No updates to distribute? Send out a few updates on other known nodes.
if len(nodes) == 0 {
nodes = knownNodes.getRandomNodes(pingRequestCount(), node, thisHost)
}
for _, n := range nodes {
err = msg.addMember(n, n.status, n.heartbeat, n.statusSource)
if err != nil {
return err
}
n.emitCounter--
}
// Emit counters for broadcasts can be less than 0. We transmit positive
// numbers, and decrement all the others. At some value < 0, the broadcast
// is removed from the map all together.
broadcast := getBroadcastToEmit()
if broadcast != nil {
if broadcast.emitCounter > 0 {
msg.addBroadcast(broadcast)
}
broadcast.emitCounter--
}
_, err = c.Write(msg.encode())
if err != nil {
return err
}
// Decrement the update counters on those nodes
for _, m := range msg.members {
m.node.emitCounter--
}
logfTrace("Sent %v to %v", verb, node.Address())
return nil
}
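// Piggybacking summary (added for exposition): every outgoing packet built above
// carries the verb itself, up to pingRequestCount() member status updates (gossip),
// and at most one pending broadcast, with the relevant emit counters decremented so
// each update is eventually retired after enough transmissions.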
func transmitVerbForwardUDP(node *Node, downstream *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: downstream,
packType: packPingReq}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, downstream, verbPingRequest, code)
}
func transmitVerbAckUDP(node *Node, code uint32) error {
return transmitVerbGenericUDP(node, nil, verbAck, code)
}
func transmitVerbPingUDP(node *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
packType: packPing}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbPing, code)
}
func updateStatusesFromMessage(msg message) {
for _, m := range msg.members {
// If the heartbeat in the message is less than the heartbeat
// associated with the last known status, then we conclude that the
// message is old and we drop it.
if m.heartbeat < m.node.heartbeat {
logfDebug("Message is old (%d vs %d): dropping",
m.node.heartbeat, m.heartbeat)
continue
}
switch m.status {
case StatusForwardTo:
// The FORWARD_TO status isn't useful here, so we ignore those.
continue
case StatusDead:
// Don't tell ME I'm dead.
if m.node.Address() != thisHost.Address() {
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
default:
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
}
// Obviously, we know the sender is alive. Report it as such.
if msg.senderHeartbeat > msg.sender.heartbeat {
updateNodeStatus(msg.sender, StatusAlive, msg.senderHeartbeat, thisHost)
}
// Finally, if we don't know the sender we add it to the known hosts map.
if !knownNodes.contains(msg.sender) {
AddNode(msg.sender)
}
}
// pendingAckType represents an expectation of a response to a previously
// emitted PING, PINGREQ, or NFP.
type pendingAck struct {
startTime uint32
node *Node
callback *Node
callbackCode uint32
packType pendingAckType
}
func (a *pendingAck) elapsed() uint32 {
return GetNowInMillis() - a.startTime
}
// pendingAckType represents the type of PING that a pendingAckType is waiting
// for a response for: PING, PINGREQ, or NFP.
type pendingAckType byte
const (
packPing pendingAckType = iota
packPingReq
packNFP
)
func (p pendingAckType) String() string {
switch p {
case packPing:
return "PING"
case packPingReq:
return "PINGREQ"
case packNFP:
return "NFP"
default:
return "UNDEFINED"
}
}
| {
logDebug("Forgetting dead node", node.Address())
deadNodeRetries.Lock()
delete(deadNodeRetries.m, node.Address())
deadNodeRetries.Unlock()
RemoveNode(node)
continue
} | conditional_block |
membership.go | /*
Copyright 2016 The Smudge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package smudge
import (
"errors"
"math"
"net"
"strconv"
"sync"
"time"
)
// A scalar value used to calculate a variety of limits
const lambda = 2.5
// How many standard deviations beyond the mean PING/ACK response time we
// allow before timing out an ACK.
const timeoutToleranceSigmas = 3.0
const defaultIPv4MulticastAddress = "224.0.0.0"
const defaultIPv6MulticastAddress = "[ff02::1]"
var currentHeartbeat uint32
var pendingAcks = struct {
sync.RWMutex
m map[string]*pendingAck
}{m: make(map[string]*pendingAck)}
var thisHostAddress string
var thisHost *Node
var ipLen = net.IPv4len
// This flag is set whenever a known node is added or removed.
var knownNodesModifiedFlag = false
var pingdata = newPingData(GetPingHistoryFrontload(), 50)
/******************************************************************************
* Exported functions (for public consumption)
*****************************************************************************/
// Begin starts the server by opening a UDP port and beginning the heartbeat.
// Note that this is a blocking function, so act appropriately.
func Begin() {
// Add this host.
logfInfo("Using listen IP: %s", listenIP)
// Use IPv6 address length if the listen IP is not an IPv4 address
if GetListenIP().To4() == nil {
ipLen = net.IPv6len
}
me := Node{
ip: GetListenIP(),
port: uint16(GetListenPort()),
timestamp: GetNowInMillis(),
pingMillis: PingNoData,
}
thisHostAddress = me.Address()
thisHost = &me
logInfo("My host address:", thisHostAddress)
// Add this node's status. Don't update any other node's statuses: they'll
// report those back to us.
updateNodeStatus(thisHost, StatusAlive, 0, thisHost)
AddNode(thisHost)
go listenUDP(GetListenPort())
// Add initial hosts as specified by the SMUDGE_INITIAL_HOSTS property
for _, address := range GetInitialHosts() {
n, err := CreateNodeByAddress(address)
if err != nil {
logfError("Could not create node %s: %v", address, err)
} else {
AddNode(n)
}
}
if GetMulticastEnabled() {
go listenUDPMulticast(GetMulticastPort())
go multicastAnnounce(GetMulticastAddress())
}
go startTimeoutCheckLoop()
// Loop over a randomized list of all known nodes (except for this host
// node), pinging one at a time. If the knownNodesModifiedFlag is set to
// true by AddNode() or RemoveNode(), then we get a fresh list and start
// again.
for {
var randomAllNodes = knownNodes.getRandomNodes(0, thisHost)
var pingCounter int
for _, node := range randomAllNodes {
// Exponential backoff of dead nodes, until such time as they are removed.
if node.status == StatusDead {
var dnc *deadNodeCounter
var ok bool
deadNodeRetries.Lock()
if dnc, ok = deadNodeRetries.m[node.Address()]; !ok {
dnc = &deadNodeCounter{retry: 1, retryCountdown: 2}
deadNodeRetries.m[node.Address()] = dnc
}
deadNodeRetries.Unlock()
dnc.retryCountdown--
if dnc.retryCountdown <= 0 {
dnc.retry++
dnc.retryCountdown = int(math.Pow(2.0, float64(dnc.retry)))
if dnc.retry > maxDeadNodeRetries {
logDebug("Forgetting dead node", node.Address())
deadNodeRetries.Lock()
delete(deadNodeRetries.m, node.Address())
deadNodeRetries.Unlock()
RemoveNode(node)
continue
}
} else {
continue
}
}
currentHeartbeat++
logfTrace("%d - hosts=%d (announce=%d forward=%d)",
currentHeartbeat,
len(randomAllNodes),
emitCount(),
pingRequestCount())
PingNode(node)
pingCounter++
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
if knownNodesModifiedFlag {
knownNodesModifiedFlag = false
break
}
}
if pingCounter == 0 {
logDebug("No nodes to ping. So lonely. :(")
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
}
}
}
// PingNode can be used to explicitly ping a node. Calls the low-level
// doPingNode(), and outputs a message (and returns an error) if it fails.
func PingNode(node *Node) error {
err := transmitVerbPingUDP(node, currentHeartbeat)
if err != nil {
logInfo("Failure to ping", node, "->", err)
}
return err
}
/******************************************************************************
* Private functions (for internal use only)
*****************************************************************************/
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func decodeMulticastAnnounceBytes(bytes []byte) (string, []byte, error) {
nameBytesLen := int(bytes[0])
if nameBytesLen+1 > len(bytes) {
return "", nil, errors.New("Invalid multicast message received")
}
nameBytes := bytes[1 : nameBytesLen+1]
name := string(nameBytes)
msgBytes := bytes[nameBytesLen+1 : len(bytes)]
return name, msgBytes, nil
}
func doForwardOnTimeout(pack *pendingAck) {
filteredNodes := getTargetNodes(pingRequestCount(), thisHost, pack.node)
if len(filteredNodes) == 0 {
logDebug(thisHost.Address(), "Cannot forward ping request: no more nodes")
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
} else {
for i, n := range filteredNodes {
logfDebug("(%d/%d) Requesting indirect ping of %s via %s",
i+1,
len(filteredNodes),
pack.node.Address(),
n.Address())
transmitVerbForwardUDP(n, pack.node, currentHeartbeat)
}
}
}
// The number of times any node's new status should be emitted after changes.
// Currently set to (lambda * log(node count)).
func emitCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func encodeMulticastAnnounceBytes() []byte {
nameBytes := []byte(GetClusterName())
nameBytesLen := len(nameBytes)
if nameBytesLen > 0xFF {
panic("Cluster name too long: " +
strconv.FormatInt(int64(nameBytesLen), 10) +
" bytes (max 254)")
}
msg := newMessage(verbPing, thisHost, currentHeartbeat)
msgBytes := msg.encode()
msgBytesLen := len(msgBytes)
totalByteCount := 1 + nameBytesLen + msgBytesLen
bytes := make([]byte, totalByteCount, totalByteCount)
// Add name length byte
bytes[0] = byte(nameBytesLen)
// Copy the name bytes
copy(bytes[1:nameBytesLen+1], nameBytes)
// Copy the message proper
copy(bytes[nameBytesLen+1:totalByteCount], msgBytes)
return bytes
}
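// Worked size example (added for exposition, with assumed figures): for the cluster
// name "smudge" (6 bytes) and an encoded PING message of M bytes, the announcement
// packet built above is 1 + 6 + M bytes long, with bytes[0] == 6, bytes[1:7] holding
// the name, and the remainder holding the message produced by msg.encode().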
func guessMulticastAddress() string {
if multicastAddress == "" {
if ipLen == net.IPv6len {
multicastAddress = defaultIPv6MulticastAddress
} else if ipLen == net.IPv4len {
multicastAddress = defaultIPv4MulticastAddress
} else {
logFatal("Failed to determine IPv4/IPv6")
}
}
return multicastAddress
}
// getListenInterface gets the network interface for the listen IP
func getListenInterface() (*net.Interface, error) {
ifaces, err := net.Interfaces()
if err == nil {
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
logfWarn("Can not get addresses of interface %s", iface.Name)
continue
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if ip.String() == GetListenIP().String() {
logfInfo("Found interface with listen IP: %s", iface.Name)
return &iface, nil
}
}
}
}
return nil, errors.New("Could not determine the interface of the listen IP address")
}
// Returns a random slice of valid ping/forward request targets; i.e., not
// this node, and not dead.
func getTargetNodes(count int, exclude ...*Node) []*Node {
randomNodes := knownNodes.getRandomNodes(0, exclude...)
filteredNodes := make([]*Node, 0, count)
for _, n := range randomNodes {
if len(filteredNodes) >= count {
break
}
if n.status == StatusDead {
continue
}
filteredNodes = append(filteredNodes, n)
}
return filteredNodes
}
func listenUDP(port int) error {
listenAddress, err := net.ResolveUDPAddr("udp", ":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
c, err := net.ListenUDP("udp", listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error: ", err)
}
go func(addr *net.UDPAddr, msg []byte) {
err = receiveMessageUDP(addr, buf[0:n])
if err != nil {
logError(err)
}
}(addr, buf[0:n])
}
}
func listenUDPMulticast(port int) error {
addr := GetMulticastAddress()
if addr == "" {
addr = guessMulticastAddress()
}
listenAddress, err := net.ResolveUDPAddr("udp", addr+":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
iface, err := getListenInterface()
if err != nil {
return err
}
c, err := net.ListenMulticastUDP("udp", iface, listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error:", err)
}
go func(addr *net.UDPAddr, bytes []byte) {
name, msgBytes, err := decodeMulticastAnnounceBytes(bytes)
if err != nil {
logDebug("Ignoring unexpected multicast message.")
} else {
if GetClusterName() == name {
msg, err := decodeMessage(addr.IP, msgBytes)
if err == nil {
logfTrace("Got multicast %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Update statuses of the sender.
updateStatusesFromMessage(msg)
} else {
logError(err)
}
}
}
}(addr, buf[0:n])
}
}
// multicastAnnounce is called when the server first starts to broadcast its
// presence to all listening servers within the specified subnet and continues
// to broadcast its presence every multicastAnnounceIntervalSeconds in case
// this value is larger than zero.
func multicastAnnounce(addr string) error {
if addr == "" {
addr = guessMulticastAddress()
}
fullAddr := addr + ":" + strconv.FormatInt(int64(GetMulticastPort()), 10)
logInfo("Announcing presence on", fullAddr)
address, err := net.ResolveUDPAddr("udp", fullAddr)
if err != nil {
logError(err)
return err
}
laddr := &net.UDPAddr{
IP: GetListenIP(),
Port: 0,
}
for {
c, err := net.DialUDP("udp", laddr, address)
if err != nil {
logError(err)
return err
}
// Compose and send the multicast announcement
msgBytes := encodeMulticastAnnounceBytes()
_, err = c.Write(msgBytes)
if err != nil {
logError(err)
return err
}
logfTrace("Sent announcement multicast from %v to %v", laddr, fullAddr)
if GetMulticastAnnounceIntervalSeconds() > 0 {
time.Sleep(time.Second * time.Duration(GetMulticastAnnounceIntervalSeconds()))
} else {
return nil
}
}
}
// The number of nodes to send a PINGREQ to when a PING times out.
// Currently set to (lambda * log(node count)).
func pingRequestCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
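// Scaling note (illustrative, added for exposition): because math.Log is the natural
// logarithm, this works out to roughly 6 indirect-ping targets in a 10-node cluster,
// 10 in a 50-node cluster, and 17 in a 1000-node cluster.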
func receiveMessageUDP(addr *net.UDPAddr, msgBytes []byte) error {
msg, err := decodeMessage(addr.IP, msgBytes)
if err != nil {
return err
}
logfTrace("Got %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Synchronize heartbeats
if msg.senderHeartbeat > 0 && msg.senderHeartbeat-1 > currentHeartbeat {
logfTrace("Heartbeat advanced from %d to %d",
currentHeartbeat,
msg.senderHeartbeat-1)
currentHeartbeat = msg.senderHeartbeat - 1
}
// Update statuses of the sender and any members the message includes.
updateStatusesFromMessage(msg)
// If there are broadcast bytes in the message, handle them here.
receiveBroadcast(msg.broadcast)
// Handle the verb.
switch msg.verb {
case verbPing:
err = receiveVerbPingUDP(msg)
case verbAck:
err = receiveVerbAckUDP(msg)
case verbPingRequest:
err = receiveVerbForwardUDP(msg)
case verbNonForwardingPing:
err = receiveVerbNonForwardPingUDP(msg)
}
if err != nil {
return err
}
return nil
}
func receiveVerbAckUDP(msg message) error {
key := msg.sender.Address() + ":" + strconv.FormatInt(int64(msg.senderHeartbeat), 10)
pendingAcks.RLock()
_, ok := pendingAcks.m[key]
pendingAcks.RUnlock()
if ok {
msg.sender.Touch()
pendingAcks.Lock()
if pack, ok := pendingAcks.m[key]; ok {
// If this is a response to a requested ping, respond to the
// callback node
if pack.callback != nil {
go transmitVerbAckUDP(pack.callback, pack.callbackCode)
} else {
// Note the ping response time.
notePingResponseTime(pack)
}
}
delete(pendingAcks.m, key)
pendingAcks.Unlock()
}
return nil
}
func notePingResponseTime(pack *pendingAck) {
// Note the elapsed time
elapsedMillis := pack.elapsed()
pack.node.pingMillis = int(elapsedMillis)
// For the purposes of timeout tolerance, we treat all pings less than
// the ping lower bound as that lower bound.
minMillis := uint32(GetMinPingTime())
if elapsedMillis < minMillis {
elapsedMillis = minMillis
}
pingdata.add(elapsedMillis)
mean, stddev := pingdata.data()
sigmas := pingdata.nSigma(timeoutToleranceSigmas)
logfTrace("Got ACK in %dms (mean=%.02f stddev=%.02f sigmas=%.02f)",
elapsedMillis,
mean,
stddev,
sigmas)
}
func receiveVerbForwardUDP(msg message) error {
// We don't forward to a node that we don't know.
if len(msg.members) > 0 &&
msg.members[0].status == StatusForwardTo {
member := msg.members[0]
node := member.node
code := member.heartbeat
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: msg.sender,
callbackCode: code,
packType: packNFP}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbNonForwardingPing, code)
}
return nil
}
func receiveVerbPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func receiveVerbNonForwardPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func startTimeoutCheckLoop() {
for {
pendingAcks.Lock()
for k, pack := range pendingAcks.m {
elapsed := pack.elapsed()
timeoutMillis := uint32(pingdata.nSigma(timeoutToleranceSigmas))
// Ping requests are expected to take quite a bit longer.
// Just call it 2x for now.
if pack.packType == packPingReq {
timeoutMillis *= 2
}
// This pending ACK has taken longer than expected. Mark it as
// timed out.
if elapsed > timeoutMillis {
switch pack.packType {
case packPing:
go doForwardOnTimeout(pack)
case packPingReq:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped PINGREQ)")
if knownNodes.contains(pack.callback) {
switch pack.callback.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.callback, StatusDead, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.callback, StatusSuspected, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
}
}
case packNFP:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped NFP)")
if knownNodes.contains(pack.node) {
switch pack.node.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.node, StatusSuspected, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
}
}
}
delete(pendingAcks.m, k)
}
}
pendingAcks.Unlock()
time.Sleep(time.Millisecond * 100)
}
}
func transmitVerbGenericUDP(node *Node, forwardTo *Node, verb messageVerb, code uint32) error {
// Open a UDP connection to the target node and transmit the message.
remoteAddr, err := net.ResolveUDPAddr("udp", node.Address())
if err != nil {
return err
}
c, err := net.DialUDP("udp", nil, remoteAddr)
if err != nil {
return err
}
defer c.Close()
msg := newMessage(verb, thisHost, code)
if forwardTo != nil {
msg.addMember(forwardTo, StatusForwardTo, code, forwardTo.statusSource)
}
// Add members for update.
nodes := getRandomUpdatedNodes(pingRequestCount(), node, thisHost)
// No updates to distribute? Send out a few updates on other known nodes.
if len(nodes) == 0 {
nodes = knownNodes.getRandomNodes(pingRequestCount(), node, thisHost)
}
for _, n := range nodes {
err = msg.addMember(n, n.status, n.heartbeat, n.statusSource)
if err != nil {
return err
}
n.emitCounter--
}
// Emit counters for broadcasts can be less than 0. We transmit positive
// numbers, and decrement all the others. At some value < 0, the broadcast
// is removed from the map all together.
broadcast := getBroadcastToEmit()
if broadcast != nil {
if broadcast.emitCounter > 0 {
msg.addBroadcast(broadcast)
}
broadcast.emitCounter--
}
_, err = c.Write(msg.encode())
if err != nil {
return err
}
// Decrement the update counters on those nodes
for _, m := range msg.members {
m.node.emitCounter--
}
logfTrace("Sent %v to %v", verb, node.Address())
return nil
}
func transmitVerbForwardUDP(node *Node, downstream *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: downstream,
packType: packPingReq}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, downstream, verbPingRequest, code)
}
func transmitVerbAckUDP(node *Node, code uint32) error |
func transmitVerbPingUDP(node *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
packType: packPing}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbPing, code)
}
func updateStatusesFromMessage(msg message) {
for _, m := range msg.members {
// If the heartbeat in the message is less than the heartbeat
// associated with the last known status, then we conclude that the
// message is old and we drop it.
if m.heartbeat < m.node.heartbeat {
logfDebug("Message is old (%d vs %d): dropping",
m.node.heartbeat, m.heartbeat)
continue
}
switch m.status {
case StatusForwardTo:
// The FORWARD_TO status isn't useful here, so we ignore those.
continue
case StatusDead:
// Don't tell ME I'm dead.
if m.node.Address() != thisHost.Address() {
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
default:
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
}
// Obviously, we know the sender is alive. Report it as such.
if msg.senderHeartbeat > msg.sender.heartbeat {
updateNodeStatus(msg.sender, StatusAlive, msg.senderHeartbeat, thisHost)
}
// Finally, if we don't know the sender we add it to the known hosts map.
if !knownNodes.contains(msg.sender) {
AddNode(msg.sender)
}
}
// pendingAckType represents an expectation of a response to a previously
// emitted PING, PINGREQ, or NFP.
type pendingAck struct {
startTime uint32
node *Node
callback *Node
callbackCode uint32
packType pendingAckType
}
func (a *pendingAck) elapsed() uint32 {
return GetNowInMillis() - a.startTime
}
// pendingAckType represents the type of PING that a pendingAckType is waiting
// for a response for: PING, PINGREQ, or NFP.
type pendingAckType byte
const (
packPing pendingAckType = iota
packPingReq
packNFP
)
func (p pendingAckType) String() string {
switch p {
case packPing:
return "PING"
case packPingReq:
return "PINGREQ"
case packNFP:
return "NFP"
default:
return "UNDEFINED"
}
}
| {
return transmitVerbGenericUDP(node, nil, verbAck, code)
} | identifier_body |
membership.go | /*
Copyright 2016 The Smudge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package smudge
import (
"errors"
"math"
"net"
"strconv"
"sync"
"time"
)
// A scalar value used to calculate a variety of limits
const lambda = 2.5
// How many standard deviations beyond the mean PING/ACK response time we
// allow before timing out an ACK.
const timeoutToleranceSigmas = 3.0
const defaultIPv4MulticastAddress = "224.0.0.0"
const defaultIPv6MulticastAddress = "[ff02::1]"
var currentHeartbeat uint32
var pendingAcks = struct {
sync.RWMutex
m map[string]*pendingAck
}{m: make(map[string]*pendingAck)}
var thisHostAddress string
var thisHost *Node
var ipLen = net.IPv4len
// This flag is set whenever a known node is added or removed.
var knownNodesModifiedFlag = false
var pingdata = newPingData(GetPingHistoryFrontload(), 50)
/******************************************************************************
* Exported functions (for public consumption)
*****************************************************************************/
// Begin starts the server by opening a UDP port and beginning the heartbeat.
// Note that this is a blocking function, so act appropriately.
func Begin() {
// Add this host.
logfInfo("Using listen IP: %s", listenIP)
// Use IPv6 address length if the listen IP is not an IPv4 address
if GetListenIP().To4() == nil {
ipLen = net.IPv6len
}
me := Node{
ip: GetListenIP(),
port: uint16(GetListenPort()),
timestamp: GetNowInMillis(),
pingMillis: PingNoData,
}
thisHostAddress = me.Address()
thisHost = &me
logInfo("My host address:", thisHostAddress)
// Add this node's status. Don't update any other node's statuses: they'll
// report those back to us.
updateNodeStatus(thisHost, StatusAlive, 0, thisHost)
AddNode(thisHost)
go listenUDP(GetListenPort())
// Add initial hosts as specified by the SMUDGE_INITIAL_HOSTS property
for _, address := range GetInitialHosts() {
n, err := CreateNodeByAddress(address)
if err != nil {
logfError("Could not create node %s: %v", address, err)
} else {
AddNode(n)
}
}
if GetMulticastEnabled() {
go listenUDPMulticast(GetMulticastPort())
go multicastAnnounce(GetMulticastAddress())
}
go startTimeoutCheckLoop()
// Loop over a randomized list of all known nodes (except for this host
// node), pinging one at a time. If the knownNodesModifiedFlag is set to
// true by AddNode() or RemoveNode(), then we get a fresh list and start
// again.
for {
var randomAllNodes = knownNodes.getRandomNodes(0, thisHost)
var pingCounter int
for _, node := range randomAllNodes {
// Exponential backoff of dead nodes, until such time as they are removed.
if node.status == StatusDead {
var dnc *deadNodeCounter
var ok bool
deadNodeRetries.Lock()
if dnc, ok = deadNodeRetries.m[node.Address()]; !ok {
dnc = &deadNodeCounter{retry: 1, retryCountdown: 2}
deadNodeRetries.m[node.Address()] = dnc
}
deadNodeRetries.Unlock()
dnc.retryCountdown--
if dnc.retryCountdown <= 0 {
dnc.retry++
dnc.retryCountdown = int(math.Pow(2.0, float64(dnc.retry)))
if dnc.retry > maxDeadNodeRetries {
logDebug("Forgetting dead node", node.Address())
deadNodeRetries.Lock()
delete(deadNodeRetries.m, node.Address())
deadNodeRetries.Unlock()
RemoveNode(node)
continue
}
} else {
continue
}
}
currentHeartbeat++
logfTrace("%d - hosts=%d (announce=%d forward=%d)",
currentHeartbeat,
len(randomAllNodes),
emitCount(),
pingRequestCount())
PingNode(node)
pingCounter++
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
if knownNodesModifiedFlag {
knownNodesModifiedFlag = false
break
}
}
if pingCounter == 0 {
logDebug("No nodes to ping. So lonely. :(")
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
}
}
}
// PingNode can be used to explicitly ping a node. Calls the low-level
// doPingNode(), and outputs a message (and returns an error) if it fails.
func PingNode(node *Node) error {
err := transmitVerbPingUDP(node, currentHeartbeat)
if err != nil {
logInfo("Failure to ping", node, "->", err)
}
return err
}
/******************************************************************************
* Private functions (for internal use only)
*****************************************************************************/
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func decodeMulticastAnnounceBytes(bytes []byte) (string, []byte, error) {
nameBytesLen := int(bytes[0])
if nameBytesLen+1 > len(bytes) {
return "", nil, errors.New("Invalid multicast message received")
}
nameBytes := bytes[1 : nameBytesLen+1]
name := string(nameBytes)
msgBytes := bytes[nameBytesLen+1 : len(bytes)] | filteredNodes := getTargetNodes(pingRequestCount(), thisHost, pack.node)
if len(filteredNodes) == 0 {
logDebug(thisHost.Address(), "Cannot forward ping request: no more nodes")
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
} else {
for i, n := range filteredNodes {
logfDebug("(%d/%d) Requesting indirect ping of %s via %s",
i+1,
len(filteredNodes),
pack.node.Address(),
n.Address())
transmitVerbForwardUDP(n, pack.node, currentHeartbeat)
}
}
}
// The number of times any node's new status should be emitted after changes.
// Currently set to (lambda * log(node count)).
func emitCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
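// Worked example (added for clarity, not part of the original source): with 30
// known nodes this returns int((2.5 * ln(30)) + 0.5) = int(9.0) = 9, so a node's
// status change is gossiped roughly nine times before its emit counter runs out.
// The +0.5 term simply rounds the product to the nearest integer before truncation.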
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func encodeMulticastAnnounceBytes() []byte {
nameBytes := []byte(GetClusterName())
nameBytesLen := len(nameBytes)
if nameBytesLen > 0xFF {
panic("Cluster name too long: " +
strconv.FormatInt(int64(nameBytesLen), 10) +
" bytes (max 254)")
}
msg := newMessage(verbPing, thisHost, currentHeartbeat)
msgBytes := msg.encode()
msgBytesLen := len(msgBytes)
totalByteCount := 1 + nameBytesLen + msgBytesLen
bytes := make([]byte, totalByteCount, totalByteCount)
// Add name length byte
bytes[0] = byte(nameBytesLen)
// Copy the name bytes
copy(bytes[1:nameBytesLen+1], nameBytes)
// Copy the message proper
copy(bytes[nameBytesLen+1:totalByteCount], msgBytes)
return bytes
}
func guessMulticastAddress() string {
if multicastAddress == "" {
if ipLen == net.IPv6len {
multicastAddress = defaultIPv6MulticastAddress
} else if ipLen == net.IPv4len {
multicastAddress = defaultIPv4MulticastAddress
} else {
logFatal("Failed to determine IPv4/IPv6")
}
}
return multicastAddress
}
// getListenInterface gets the network interface for the listen IP
func getListenInterface() (*net.Interface, error) {
ifaces, err := net.Interfaces()
if err == nil {
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
logfWarn("Can not get addresses of interface %s", iface.Name)
continue
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if ip.String() == GetListenIP().String() {
logfInfo("Found interface with listen IP: %s", iface.Name)
return &iface, nil
}
}
}
}
return nil, errors.New("Could not determine the interface of the listen IP address")
}
// Returns a random slice of valid ping/forward request targets; i.e., not
// this node, and not dead.
func getTargetNodes(count int, exclude ...*Node) []*Node {
randomNodes := knownNodes.getRandomNodes(0, exclude...)
filteredNodes := make([]*Node, 0, count)
for _, n := range randomNodes {
if len(filteredNodes) >= count {
break
}
if n.status == StatusDead {
continue
}
filteredNodes = append(filteredNodes, n)
}
return filteredNodes
}
func listenUDP(port int) error {
listenAddress, err := net.ResolveUDPAddr("udp", ":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
c, err := net.ListenUDP("udp", listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error: ", err)
}
go func(addr *net.UDPAddr, msg []byte) {
if err := receiveMessageUDP(addr, msg); err != nil {
logError(err)
}
}(addr, buf[0:n])
}
}
func listenUDPMulticast(port int) error {
addr := GetMulticastAddress()
if addr == "" {
addr = guessMulticastAddress()
}
listenAddress, err := net.ResolveUDPAddr("udp", addr+":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
iface, err := getListenInterface()
if err != nil {
return err
}
c, err := net.ListenMulticastUDP("udp", iface, listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error:", err)
}
go func(addr *net.UDPAddr, bytes []byte) {
name, msgBytes, err := decodeMulticastAnnounceBytes(bytes)
if err != nil {
logDebug("Ignoring unexpected multicast message.")
} else {
if GetClusterName() == name {
msg, err := decodeMessage(addr.IP, msgBytes)
if err == nil {
logfTrace("Got multicast %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Update statuses of the sender.
updateStatusesFromMessage(msg)
} else {
logError(err)
}
}
}
}(addr, buf[0:n])
}
}
// multicastAnnounce is called when the server first starts to broadcast its
// presence to all listening servers within the specified subnet and continues
// to broadcast its presence every multicastAnnounceIntervalSeconds in case
// this value is larger than zero.
func multicastAnnounce(addr string) error {
if addr == "" {
addr = guessMulticastAddress()
}
fullAddr := addr + ":" + strconv.FormatInt(int64(GetMulticastPort()), 10)
logInfo("Announcing presence on", fullAddr)
address, err := net.ResolveUDPAddr("udp", fullAddr)
if err != nil {
logError(err)
return err
}
laddr := &net.UDPAddr{
IP: GetListenIP(),
Port: 0,
}
for {
c, err := net.DialUDP("udp", laddr, address)
if err != nil {
logError(err)
return err
}
// Compose and send the multicast announcement
msgBytes := encodeMulticastAnnounceBytes()
_, err = c.Write(msgBytes)
if err != nil {
logError(err)
return err
}
logfTrace("Sent announcement multicast from %v to %v", laddr, fullAddr)
if GetMulticastAnnounceIntervalSeconds() > 0 {
time.Sleep(time.Second * time.Duration(GetMulticastAnnounceIntervalSeconds()))
} else {
return nil
}
}
}
// The number of nodes to send a PINGREQ to when a PING times out.
// Currently set to (lambda * log(node count)).
func pingRequestCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
func receiveMessageUDP(addr *net.UDPAddr, msgBytes []byte) error {
msg, err := decodeMessage(addr.IP, msgBytes)
if err != nil {
return err
}
logfTrace("Got %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Synchronize heartbeats
if msg.senderHeartbeat > 0 && msg.senderHeartbeat-1 > currentHeartbeat {
logfTrace("Heartbeat advanced from %d to %d",
currentHeartbeat,
msg.senderHeartbeat-1)
currentHeartbeat = msg.senderHeartbeat - 1
}
// Update statuses of the sender and any members the message includes.
updateStatusesFromMessage(msg)
// If there are broadcast bytes in the message, handle them here.
receiveBroadcast(msg.broadcast)
// Handle the verb.
switch msg.verb {
case verbPing:
err = receiveVerbPingUDP(msg)
case verbAck:
err = receiveVerbAckUDP(msg)
case verbPingRequest:
err = receiveVerbForwardUDP(msg)
case verbNonForwardingPing:
err = receiveVerbNonForwardPingUDP(msg)
}
if err != nil {
return err
}
return nil
}
func receiveVerbAckUDP(msg message) error {
key := msg.sender.Address() + ":" + strconv.FormatInt(int64(msg.senderHeartbeat), 10)
pendingAcks.RLock()
_, ok := pendingAcks.m[key]
pendingAcks.RUnlock()
if ok {
msg.sender.Touch()
pendingAcks.Lock()
if pack, ok := pendingAcks.m[key]; ok {
// If this is a response to a requested ping, respond to the
// callback node
if pack.callback != nil {
go transmitVerbAckUDP(pack.callback, pack.callbackCode)
} else {
// Note the ping response time.
notePingResponseTime(pack)
}
}
delete(pendingAcks.m, key)
pendingAcks.Unlock()
}
return nil
}
func notePingResponseTime(pack *pendingAck) {
// Note the elapsed time
elapsedMillis := pack.elapsed()
pack.node.pingMillis = int(elapsedMillis)
// For the purposes of timeout tolerance, we treat all pings less than
// the ping lower bound as that lower bound.
minMillis := uint32(GetMinPingTime())
if elapsedMillis < minMillis {
elapsedMillis = minMillis
}
pingdata.add(elapsedMillis)
mean, stddev := pingdata.data()
sigmas := pingdata.nSigma(timeoutToleranceSigmas)
logfTrace("Got ACK in %dms (mean=%.02f stddev=%.02f sigmas=%.02f)",
elapsedMillis,
mean,
stddev,
sigmas)
}
func receiveVerbForwardUDP(msg message) error {
// We don't forward to a node that we don't know.
if len(msg.members) > 0 &&
msg.members[0].status == StatusForwardTo {
member := msg.members[0]
node := member.node
code := member.heartbeat
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: msg.sender,
callbackCode: code,
packType: packNFP}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbNonForwardingPing, code)
}
return nil
}
func receiveVerbPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func receiveVerbNonForwardPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func startTimeoutCheckLoop() {
for {
pendingAcks.Lock()
for k, pack := range pendingAcks.m {
elapsed := pack.elapsed()
timeoutMillis := uint32(pingdata.nSigma(timeoutToleranceSigmas))
// Ping requests are expected to take quite a bit longer.
// Just call it 2x for now.
if pack.packType == packPingReq {
timeoutMillis *= 2
}
// This pending ACK has taken longer than expected. Mark it as
// timed out.
if elapsed > timeoutMillis {
switch pack.packType {
case packPing:
go doForwardOnTimeout(pack)
case packPingReq:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped PINGREQ)")
if knownNodes.contains(pack.callback) {
switch pack.callback.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.callback, StatusDead, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.callback, StatusSuspected, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
}
}
case packNFP:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped NFP)")
if knownNodes.contains(pack.node) {
switch pack.node.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
pack.node.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.node, StatusSuspected, currentHeartbeat, thisHost)
pack.node.pingMillis = PingTimedOut
}
}
}
delete(pendingAcks.m, k)
}
}
pendingAcks.Unlock()
time.Sleep(time.Millisecond * 100)
}
}
func transmitVerbGenericUDP(node *Node, forwardTo *Node, verb messageVerb, code uint32) error {
// Resolve the target address and open a UDP connection for the outgoing verb.
remoteAddr, err := net.ResolveUDPAddr("udp", node.Address())
if err != nil {
return err
}
c, err := net.DialUDP("udp", nil, remoteAddr)
if err != nil {
return err
}
defer c.Close()
msg := newMessage(verb, thisHost, code)
if forwardTo != nil {
msg.addMember(forwardTo, StatusForwardTo, code, forwardTo.statusSource)
}
// Add members for update.
nodes := getRandomUpdatedNodes(pingRequestCount(), node, thisHost)
// No updates to distribute? Send out a few updates on other known nodes.
if len(nodes) == 0 {
nodes = knownNodes.getRandomNodes(pingRequestCount(), node, thisHost)
}
for _, n := range nodes {
err = msg.addMember(n, n.status, n.heartbeat, n.statusSource)
if err != nil {
return err
}
n.emitCounter--
}
// Emit counters for broadcasts can be less than 0. We transmit positive
// numbers, and decrement all the others. At some value < 0, the broadcast
// is removed from the map altogether.
broadcast := getBroadcastToEmit()
if broadcast != nil {
if broadcast.emitCounter > 0 {
msg.addBroadcast(broadcast)
}
broadcast.emitCounter--
}
_, err = c.Write(msg.encode())
if err != nil {
return err
}
// Decrement the update counters on those nodes
for _, m := range msg.members {
m.node.emitCounter--
}
logfTrace("Sent %v to %v", verb, node.Address())
return nil
}
func transmitVerbForwardUDP(node *Node, downstream *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: downstream,
packType: packPingReq}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, downstream, verbPingRequest, code)
}
func transmitVerbAckUDP(node *Node, code uint32) error {
return transmitVerbGenericUDP(node, nil, verbAck, code)
}
func transmitVerbPingUDP(node *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
packType: packPing}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbPing, code)
}
func updateStatusesFromMessage(msg message) {
for _, m := range msg.members {
// If the heartbeat in the message is less than the heartbeat
// associated with the last known status, then we conclude that the
// message is old and we drop it.
if m.heartbeat < m.node.heartbeat {
logfDebug("Message is old (%d vs %d): dropping",
m.node.heartbeat, m.heartbeat)
continue
}
switch m.status {
case StatusForwardTo:
// The FORWARD_TO status isn't useful here, so we ignore those.
continue
case StatusDead:
// Don't tell ME I'm dead.
if m.node.Address() != thisHost.Address() {
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
default:
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
}
// Obviously, we know the sender is alive. Report it as such.
if msg.senderHeartbeat > msg.sender.heartbeat {
updateNodeStatus(msg.sender, StatusAlive, msg.senderHeartbeat, thisHost)
}
// Finally, if we don't know the sender we add it to the known hosts map.
if !knownNodes.contains(msg.sender) {
AddNode(msg.sender)
}
}
// pendingAck represents an expectation of a response to a previously
// emitted PING, PINGREQ, or NFP.
type pendingAck struct {
startTime uint32
node *Node
callback *Node
callbackCode uint32
packType pendingAckType
}
func (a *pendingAck) elapsed() uint32 {
return GetNowInMillis() - a.startTime
}
// pendingAckType represents the type of PING that a pendingAck is waiting
// for a response for: PING, PINGREQ, or NFP.
type pendingAckType byte
const (
packPing pendingAckType = iota
packPingReq
packNFP
)
func (p pendingAckType) String() string {
switch p {
case packPing:
return "PING"
case packPingReq:
return "PINGREQ"
case packNFP:
return "NFP"
default:
return "UNDEFINED"
}
} |
return name, msgBytes, nil
}
func doForwardOnTimeout(pack *pendingAck) { | random_line_split |
membership.go | /*
Copyright 2016 The Smudge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package smudge
import (
"errors"
"math"
"net"
"strconv"
"sync"
"time"
)
// A scalar value used to calculate a variety of limits
const lambda = 2.5
// How many standard deviations beyond the mean PING/ACK response time we
// allow before timing out an ACK.
const timeoutToleranceSigmas = 3.0
const defaultIPv4MulticastAddress = "224.0.0.0"
const defaultIPv6MulticastAddress = "[ff02::1]"
var currentHeartbeat uint32
var pendingAcks = struct {
sync.RWMutex
m map[string]*pendingAck
}{m: make(map[string]*pendingAck)}
var thisHostAddress string
var thisHost *Node
var ipLen = net.IPv4len
// This flag is set whenever a known node is added or removed.
var knownNodesModifiedFlag = false
var pingdata = newPingData(GetPingHistoryFrontload(), 50)
/******************************************************************************
* Exported functions (for public consumption)
*****************************************************************************/
// Begin starts the server by opening a UDP port and beginning the heartbeat.
// Note that this is a blocking function, so act appropriately.
func Begin() {
// Add this host.
logfInfo("Using listen IP: %s", listenIP)
// Use IPv6 address length if the listen IP is not an IPv4 address
if GetListenIP().To4() == nil {
ipLen = net.IPv6len
}
me := Node{
ip: GetListenIP(),
port: uint16(GetListenPort()),
timestamp: GetNowInMillis(),
pingMillis: PingNoData,
}
thisHostAddress = me.Address()
thisHost = &me
logInfo("My host address:", thisHostAddress)
// Add this node's status. Don't update any other node's statuses: they'll
// report those back to us.
updateNodeStatus(thisHost, StatusAlive, 0, thisHost)
AddNode(thisHost)
go listenUDP(GetListenPort())
// Add initial hosts as specified by the SMUDGE_INITIAL_HOSTS property
for _, address := range GetInitialHosts() {
n, err := CreateNodeByAddress(address)
if err != nil {
logfError("Could not create node %s: %v", address, err)
} else {
AddNode(n)
}
}
if GetMulticastEnabled() {
go listenUDPMulticast(GetMulticastPort())
go multicastAnnounce(GetMulticastAddress())
}
go startTimeoutCheckLoop()
// Loop over a randomized list of all known nodes (except for this host
// node), pinging one at a time. If the knownNodesModifiedFlag is set to
// true by AddNode() or RemoveNode(), then we get a fresh list and start
// again.
for {
var randomAllNodes = knownNodes.getRandomNodes(0, thisHost)
var pingCounter int
for _, node := range randomAllNodes {
// Exponential backoff of dead nodes, until such time as they are removed.
if node.status == StatusDead {
var dnc *deadNodeCounter
var ok bool
deadNodeRetries.Lock()
if dnc, ok = deadNodeRetries.m[node.Address()]; !ok {
dnc = &deadNodeCounter{retry: 1, retryCountdown: 2}
deadNodeRetries.m[node.Address()] = dnc
}
deadNodeRetries.Unlock()
dnc.retryCountdown--
if dnc.retryCountdown <= 0 {
dnc.retry++
dnc.retryCountdown = int(math.Pow(2.0, float64(dnc.retry)))
if dnc.retry > maxDeadNodeRetries {
logDebug("Forgetting dead node", node.Address())
deadNodeRetries.Lock()
delete(deadNodeRetries.m, node.Address())
deadNodeRetries.Unlock()
RemoveNode(node)
continue
}
} else {
continue
}
}
currentHeartbeat++
logfTrace("%d - hosts=%d (announce=%d forward=%d)",
currentHeartbeat,
len(randomAllNodes),
emitCount(),
pingRequestCount())
PingNode(node)
pingCounter++
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
if knownNodesModifiedFlag {
knownNodesModifiedFlag = false
break
}
}
if pingCounter == 0 {
logDebug("No nodes to ping. So lonely. :(")
time.Sleep(time.Millisecond * time.Duration(GetHeartbeatMillis()))
}
}
}
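// Worked example of the dead-node backoff above (added for clarity, not part of
// the original source): a dead node starts at retry=1 with a countdown of 2
// passes; once that expires the countdown becomes 2^2 = 4, then 2^3 = 8, and so
// on, roughly doubling the gap between retries until retry exceeds
// maxDeadNodeRetries and the node is forgotten entirely.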
// PingNode can be used to explicitly ping a node. Calls the low-level
// doPingNode(), and outputs a message (and returns an error) if it fails.
func PingNode(node *Node) error {
err := transmitVerbPingUDP(node, currentHeartbeat)
if err != nil {
logInfo("Failure to ping", node, "->", err)
}
return err
}
/******************************************************************************
* Private functions (for internal use only)
*****************************************************************************/
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func decodeMulticastAnnounceBytes(bytes []byte) (string, []byte, error) {
nameBytesLen := int(bytes[0])
if nameBytesLen+1 > len(bytes) {
return "", nil, errors.New("Invalid multicast message received")
}
nameBytes := bytes[1 : nameBytesLen+1]
name := string(nameBytes)
msgBytes := bytes[nameBytesLen+1 : len(bytes)]
return name, msgBytes, nil
}
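// Example layout (added for clarity, not part of the original source): for a
// cluster named "mycluster" an announcement decodes as
//   byte 0      -> 0x09               (cluster name length)
//   bytes 1..9  -> "mycluster"        (cluster name)
//   bytes 10..  -> encoded PING message with no member entries
// encodeMulticastAnnounceBytes below produces exactly this framing.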
func doForwardOnTimeout(pack *pendingAck) {
filteredNodes := getTargetNodes(pingRequestCount(), thisHost, pack.node)
if len(filteredNodes) == 0 {
logDebug(thisHost.Address(), "Cannot forward ping request: no more nodes")
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
} else {
for i, n := range filteredNodes {
logfDebug("(%d/%d) Requesting indirect ping of %s via %s",
i+1,
len(filteredNodes),
pack.node.Address(),
n.Address())
transmitVerbForwardUDP(n, pack.node, currentHeartbeat)
}
}
}
// The number of times any node's new status should be emitted after changes.
// Currently set to (lambda * log(node count)).
func emitCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
// Multicast announcements are constructed as:
// Byte 0 - 1 byte character byte length N
// Bytes 1 to N - Cluster name bytes
// Bytes N+1... - A message (without members)
func encodeMulticastAnnounceBytes() []byte {
nameBytes := []byte(GetClusterName())
nameBytesLen := len(nameBytes)
if nameBytesLen > 0xFF {
panic("Cluster name too long: " +
strconv.FormatInt(int64(nameBytesLen), 10) +
" bytes (max 254)")
}
msg := newMessage(verbPing, thisHost, currentHeartbeat)
msgBytes := msg.encode()
msgBytesLen := len(msgBytes)
totalByteCount := 1 + nameBytesLen + msgBytesLen
bytes := make([]byte, totalByteCount, totalByteCount)
// Add name length byte
bytes[0] = byte(nameBytesLen)
// Copy the name bytes
copy(bytes[1:nameBytesLen+1], nameBytes)
// Copy the message proper
copy(bytes[nameBytesLen+1:totalByteCount], msgBytes)
return bytes
}
func guessMulticastAddress() string {
if multicastAddress == "" {
if ipLen == net.IPv6len {
multicastAddress = defaultIPv6MulticastAddress
} else if ipLen == net.IPv4len {
multicastAddress = defaultIPv4MulticastAddress
} else {
logFatal("Failed to determine IPv4/IPv6")
}
}
return multicastAddress
}
// getListenInterface gets the network interface for the listen IP
func getListenInterface() (*net.Interface, error) {
ifaces, err := net.Interfaces()
if err == nil {
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
logfWarn("Can not get addresses of interface %s", iface.Name)
continue
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if ip.String() == GetListenIP().String() {
logfInfo("Found interface with listen IP: %s", iface.Name)
return &iface, nil
}
}
}
}
return nil, errors.New("Could not determine the interface of the listen IP address")
}
// Returns a random slice of valid ping/forward request targets; i.e., not
// this node, and not dead.
func getTargetNodes(count int, exclude ...*Node) []*Node {
randomNodes := knownNodes.getRandomNodes(0, exclude...)
filteredNodes := make([]*Node, 0, count)
for _, n := range randomNodes {
if len(filteredNodes) >= count {
break
}
if n.status == StatusDead {
continue
}
filteredNodes = append(filteredNodes, n)
}
return filteredNodes
}
func listenUDP(port int) error {
listenAddress, err := net.ResolveUDPAddr("udp", ":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
c, err := net.ListenUDP("udp", listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error: ", err)
}
go func(addr *net.UDPAddr, msg []byte) {
if err := receiveMessageUDP(addr, msg); err != nil {
logError(err)
}
}(addr, buf[0:n])
}
}
func listenUDPMulticast(port int) error {
addr := GetMulticastAddress()
if addr == "" {
addr = guessMulticastAddress()
}
listenAddress, err := net.ResolveUDPAddr("udp", addr+":"+strconv.FormatInt(int64(port), 10))
if err != nil {
return err
}
/* Now listen at selected port */
iface, err := getListenInterface()
if err != nil {
return err
}
c, err := net.ListenMulticastUDP("udp", iface, listenAddress)
if err != nil {
return err
}
defer c.Close()
for {
buf := make([]byte, 2048) // big enough to fit 1280 IPv6 UDP message
n, addr, err := c.ReadFromUDP(buf)
if err != nil {
logError("UDP read error:", err)
}
go func(addr *net.UDPAddr, bytes []byte) {
name, msgBytes, err := decodeMulticastAnnounceBytes(bytes)
if err != nil {
logDebug("Ignoring unexpected multicast message.")
} else {
if GetClusterName() == name {
msg, err := decodeMessage(addr.IP, msgBytes)
if err == nil {
logfTrace("Got multicast %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Update statuses of the sender.
updateStatusesFromMessage(msg)
} else {
logError(err)
}
}
}
}(addr, buf[0:n])
}
}
// multicastAnnounce is called when the server first starts to broadcast its
// presence to all listening servers within the specified subnet and continues
// to broadcast its presence every multicastAnnounceIntervalSeconds in case
// this value is larger than zero.
func multicastAnnounce(addr string) error {
if addr == "" {
addr = guessMulticastAddress()
}
fullAddr := addr + ":" + strconv.FormatInt(int64(GetMulticastPort()), 10)
logInfo("Announcing presence on", fullAddr)
address, err := net.ResolveUDPAddr("udp", fullAddr)
if err != nil {
logError(err)
return err
}
laddr := &net.UDPAddr{
IP: GetListenIP(),
Port: 0,
}
for {
c, err := net.DialUDP("udp", laddr, address)
if err != nil {
logError(err)
return err
}
// Compose and send the multicast announcement
msgBytes := encodeMulticastAnnounceBytes()
_, err = c.Write(msgBytes)
if err != nil {
logError(err)
return err
}
logfTrace("Sent announcement multicast from %v to %v", laddr, fullAddr)
if GetMulticastAnnounceIntervalSeconds() > 0 {
time.Sleep(time.Second * time.Duration(GetMulticastAnnounceIntervalSeconds()))
} else {
return nil
}
}
}
// The number of nodes to send a PINGREQ to when a PING times out.
// Currently set to (lambda * log(node count)).
func pingRequestCount() int {
logn := math.Log(float64(knownNodes.length()))
mult := (lambda * logn) + 0.5
return int(mult)
}
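// Illustrative note (added for clarity, not in the original source): like
// emitCount, this grows with cluster size; a 100-node cluster yields
// int((2.5 * ln(100)) + 0.5) = 12 helpers asked to ping indirectly on a timeout.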
func receiveMessageUDP(addr *net.UDPAddr, msgBytes []byte) error {
msg, err := decodeMessage(addr.IP, msgBytes)
if err != nil {
return err
}
logfTrace("Got %v from %v code=%d",
msg.verb,
msg.sender.Address(),
msg.senderHeartbeat)
// Synchronize heartbeats
if msg.senderHeartbeat > 0 && msg.senderHeartbeat-1 > currentHeartbeat {
logfTrace("Heartbeat advanced from %d to %d",
currentHeartbeat,
msg.senderHeartbeat-1)
currentHeartbeat = msg.senderHeartbeat - 1
}
// Update statuses of the sender and any members the message includes.
updateStatusesFromMessage(msg)
// If there are broadcast bytes in the message, handle them here.
receiveBroadcast(msg.broadcast)
// Handle the verb.
switch msg.verb {
case verbPing:
err = receiveVerbPingUDP(msg)
case verbAck:
err = receiveVerbAckUDP(msg)
case verbPingRequest:
err = receiveVerbForwardUDP(msg)
case verbNonForwardingPing:
err = receiveVerbNonForwardPingUDP(msg)
}
if err != nil {
return err
}
return nil
}
func receiveVerbAckUDP(msg message) error {
key := msg.sender.Address() + ":" + strconv.FormatInt(int64(msg.senderHeartbeat), 10)
pendingAcks.RLock()
_, ok := pendingAcks.m[key]
pendingAcks.RUnlock()
if ok {
msg.sender.Touch()
pendingAcks.Lock()
if pack, ok := pendingAcks.m[key]; ok {
// If this is a response to a requested ping, respond to the
// callback node
if pack.callback != nil {
go transmitVerbAckUDP(pack.callback, pack.callbackCode)
} else {
// Note the ping response time.
notePingResponseTime(pack)
}
}
delete(pendingAcks.m, key)
pendingAcks.Unlock()
}
return nil
}
func notePingResponseTime(pack *pendingAck) {
// Note the elapsed time
elapsedMillis := pack.elapsed()
pack.node.pingMillis = int(elapsedMillis)
// For the purposes of timeout tolerance, we treat all pings less than
// the ping lower bound as that lower bound.
minMillis := uint32(GetMinPingTime())
if elapsedMillis < minMillis {
elapsedMillis = minMillis
}
pingdata.add(elapsedMillis)
mean, stddev := pingdata.data()
sigmas := pingdata.nSigma(timeoutToleranceSigmas)
logfTrace("Got ACK in %dms (mean=%.02f stddev=%.02f sigmas=%.02f)",
elapsedMillis,
mean,
stddev,
sigmas)
}
func receiveVerbForwardUDP(msg message) error {
// We don't forward to a node that we don't know.
if len(msg.members) > 0 &&
msg.members[0].status == StatusForwardTo {
member := msg.members[0]
node := member.node
code := member.heartbeat
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: msg.sender,
callbackCode: code,
packType: packNFP}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbNonForwardingPing, code)
}
return nil
}
func receiveVerbPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func receiveVerbNonForwardPingUDP(msg message) error {
return transmitVerbAckUDP(msg.sender, msg.senderHeartbeat)
}
func startTimeoutCheckLoop() {
for {
pendingAcks.Lock()
for k, pack := range pendingAcks.m {
elapsed := pack.elapsed()
timeoutMillis := uint32(pingdata.nSigma(timeoutToleranceSigmas))
// Ping requests are expected to take quite a bit longer.
// Just call it 2x for now.
if pack.packType == packPingReq {
timeoutMillis *= 2
}
// This pending ACK has taken longer than expected. Mark it as
// timed out.
if elapsed > timeoutMillis {
switch pack.packType {
case packPing:
go doForwardOnTimeout(pack)
case packPingReq:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped PINGREQ)")
if knownNodes.contains(pack.callback) {
switch pack.callback.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.callback, StatusDead, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.callback, StatusSuspected, currentHeartbeat, thisHost)
pack.callback.pingMillis = PingTimedOut
}
}
case packNFP:
logDebug(k, "timed out after", timeoutMillis, "milliseconds (dropped NFP)")
if knownNodes.contains(pack.node) {
switch pack.node.Status() {
case StatusDead:
break
case StatusSuspected:
updateNodeStatus(pack.node, StatusDead, currentHeartbeat, thisHost)
pack.node.pingMillis = PingTimedOut
default:
updateNodeStatus(pack.node, StatusSuspected, currentHeartbeat, thisHost)
pack.node.pingMillis = PingTimedOut
}
}
}
delete(pendingAcks.m, k)
}
}
pendingAcks.Unlock()
time.Sleep(time.Millisecond * 100)
}
}
func | (node *Node, forwardTo *Node, verb messageVerb, code uint32) error {
// Resolve the target address and open a UDP connection for the outgoing verb.
remoteAddr, err := net.ResolveUDPAddr("udp", node.Address())
if err != nil {
return err
}
c, err := net.DialUDP("udp", nil, remoteAddr)
if err != nil {
return err
}
defer c.Close()
msg := newMessage(verb, thisHost, code)
if forwardTo != nil {
msg.addMember(forwardTo, StatusForwardTo, code, forwardTo.statusSource)
}
// Add members for update.
nodes := getRandomUpdatedNodes(pingRequestCount(), node, thisHost)
// No updates to distribute? Send out a few updates on other known nodes.
if len(nodes) == 0 {
nodes = knownNodes.getRandomNodes(pingRequestCount(), node, thisHost)
}
for _, n := range nodes {
err = msg.addMember(n, n.status, n.heartbeat, n.statusSource)
if err != nil {
return err
}
n.emitCounter--
}
// Emit counters for broadcasts can be less than 0. We transmit positive
// numbers, and decrement all the others. At some value < 0, the broadcast
// is removed from the map altogether.
broadcast := getBroadcastToEmit()
if broadcast != nil {
if broadcast.emitCounter > 0 {
msg.addBroadcast(broadcast)
}
broadcast.emitCounter--
}
_, err = c.Write(msg.encode())
if err != nil {
return err
}
// Decrement the update counters on those nodes
for _, m := range msg.members {
m.node.emitCounter--
}
logfTrace("Sent %v to %v", verb, node.Address())
return nil
}
func transmitVerbForwardUDP(node *Node, downstream *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
callback: downstream,
packType: packPingReq}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, downstream, verbPingRequest, code)
}
func transmitVerbAckUDP(node *Node, code uint32) error {
return transmitVerbGenericUDP(node, nil, verbAck, code)
}
func transmitVerbPingUDP(node *Node, code uint32) error {
key := node.Address() + ":" + strconv.FormatInt(int64(code), 10)
pack := pendingAck{
node: node,
startTime: GetNowInMillis(),
packType: packPing}
pendingAcks.Lock()
pendingAcks.m[key] = &pack
pendingAcks.Unlock()
return transmitVerbGenericUDP(node, nil, verbPing, code)
}
func updateStatusesFromMessage(msg message) {
for _, m := range msg.members {
// If the heartbeat in the message is less than the heartbeat
// associated with the last known status, then we conclude that the
// message is old and we drop it.
if m.heartbeat < m.node.heartbeat {
logfDebug("Message is old (%d vs %d): dropping",
m.node.heartbeat, m.heartbeat)
continue
}
switch m.status {
case StatusForwardTo:
// The FORWARD_TO status isn't useful here, so we ignore those.
continue
case StatusDead:
// Don't tell ME I'm dead.
if m.node.Address() != thisHost.Address() {
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
default:
updateNodeStatus(m.node, m.status, m.heartbeat, m.source)
AddNode(m.node)
}
}
// Obviously, we know the sender is alive. Report it as such.
if msg.senderHeartbeat > msg.sender.heartbeat {
updateNodeStatus(msg.sender, StatusAlive, msg.senderHeartbeat, thisHost)
}
// Finally, if we don't know the sender we add it to the known hosts map.
if !knownNodes.contains(msg.sender) {
AddNode(msg.sender)
}
}
// pendingAck represents an expectation of a response to a previously
// emitted PING, PINGREQ, or NFP.
type pendingAck struct {
startTime uint32
node *Node
callback *Node
callbackCode uint32
packType pendingAckType
}
func (a *pendingAck) elapsed() uint32 {
return GetNowInMillis() - a.startTime
}
// pendingAckType represents the type of PING that a pendingAck is waiting
// for a response for: PING, PINGREQ, or NFP.
type pendingAckType byte
const (
packPing pendingAckType = iota
packPingReq
packNFP
)
func (p pendingAckType) String() string {
switch p {
case packPing:
return "PING"
case packPingReq:
return "PINGREQ"
case packNFP:
return "NFP"
default:
return "UNDEFINED"
}
}
| transmitVerbGenericUDP | identifier_name |
connection.go | package ayame
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
"github.com/oklog/ulid/v2"
"github.com/rs/zerolog"
"github.com/shiguredo/websocket"
)
type connection struct {
ID string
roomID string
clientID string
authnMetadata *interface{}
signalingKey *string
// Client information
ayameClient *string
environment *string
libwebrtc *string
authzMetadata *interface{}
// WebSocket connection
wsConn *websocket.Conn
// Whether this connection has been registered
registered bool
// Channel for forwarding messages
forwardChannel chan forward
// config
config Config
signalingLogger zerolog.Logger
webhookLogger zerolog.Logger
// standalone mode
standalone bool
metrics *Metrics
}
const (
// How long to wait on the socket before a read times out (seconds)
readTimeout = 90
// How long to wait for a pong before timing out (seconds)
pongTimeout = 60
// Interval between ping transmissions (seconds)
pingInterval = 5
)
func (c *connection) SendJSON(v interface{}) error {
if err := c.wsConn.WriteJSON(v); err != nil {
c.errLog().Err(err).Interface("msg", v).Msg("FailedToSendMsg")
return err
}
return nil
}
func (c *connection) sendPingMessage() error {
msg := &pingMessage{
Type: "ping",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
// If reason looks too long to fit in a close frame, switch to sending a TextMessage instead of a CloseMessage
func (c *connection) sendCloseMessage(code int, reason string) error {
deadline := time.Now().Add(writeWait)
closeMessage := websocket.FormatCloseMessage(code, reason)
return c.wsConn.WriteControl(websocket.CloseMessage, closeMessage, deadline)
}
func (c *connection) sendAcceptMessage(isExistClient bool, iceServers *[]iceServer, authzMetadata *interface{}) error {
msg := &acceptMessage{
Type: "accept",
ConnectionID: c.ID,
IsExistClient: isExistClient,
// Backward compatibility
IsExistUser: isExistClient,
AuthzMetadata: authzMetadata,
IceServers: iceServers,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendRejectMessage(reason string) error {
msg := &rejectMessage{
Type: "reject",
Reason: reason,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendByeMessage() error {
msg := &byeMessage{
Type: "bye",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) closeWs() {
c.wsConn.Close()
c.debugLog().Msg("CLOSED-WS")
}
func (c *connection) register() int {
resultChannel := make(chan int)
registerChannel <- ®ister{
connection: c,
resultChannel: resultChannel,
}
// This blocks; candidates and the like may be arriving concurrently, but that is not handled here
result := <-resultChannel
// The server will not touch this channel again, so close it here
close(resultChannel)
return result
}
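// Illustrative note (added for clarity, not part of the original source): the
// returned int is interpreted by handleWsMessage as one (first client in the
// room), two (second client joining an existing room) or full (the room already
// has two clients and this registration is rejected).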
func (c *connection) unregister() {
if c.registered {
unregisterChannel <- &unregister{
connection: c,
}
}
}
func (c *connection) forward(msg []byte) {
// Push the message onto the global channel
forwardChannel <- forward{
connection: c,
rawMessage: msg,
}
}
func (c *connection) main(cancel context.CancelFunc, messageChannel chan []byte) {
pongTimeoutTimer := time.NewTimer(pongTimeout * time.Second)
pingTimer := time.NewTimer(pingInterval * time.Second)
defer func() {
timerStop(pongTimeoutTimer)
timerStop(pingTimer)
// Call cancel
cancel()
c.debugLog().Msg("CANCEL")
// Unregister here
c.unregister()
c.debugLog().Msg("UNREGISTER")
c.debugLog().Msg("EXIT-MAIN")
}()
loop:
for {
select {
case <-pingTimer.C:
if !c.standalone {
if err := c.sendPingMessage(); err != nil {
break loop
}
}
pingTimer.Reset(pingInterval * time.Second)
case <-pongTimeoutTimer.C:
if !c.standalone {
// The timer fired, so disconnect
c.errLog().Msg("PongTimeout")
break loop
}
case rawMessage, ok := <-messageChannel:
// The message channel was closed; waiting for main to finish
if !ok {
c.debugLog().Msg("CLOSED-MESSAGE-CHANNEL")
// The message channel is already closed, so just return here
return
}
if err := c.handleWsMessage(rawMessage, pongTimeoutTimer); err != nil {
// No logging needed here; handleWsMessage has already logged the error
// Break out on error
break loop
}
case forward, ok := <-c.forwardChannel:
if !ok {
// The server side closed forwardChannel
c.debugLog().Msg("UNREGISTERED")
if !c.standalone {
if err := c.sendByeMessage(); err != nil {
c.errLog().Err(err).Msg("FailedSendByeMessage")
// If this send failed, the close message cannot be sent either, so return
return
}
c.debugLog().Msg("SENT-BYE-MESSAGE")
}
break loop
}
if err := c.wsConn.WriteMessage(websocket.TextMessage, forward.rawMessage); err != nil {
c.errLog().Err(err).Msg("FailedWriteMessage")
// If this send failed, the close message cannot be sent either, so return
return
}
}
}
// We are closing for our own reasons, so notify the peer that the WebSocket is ending
if err := c.sendCloseMessage(websocket.CloseNormalClosure, ""); err != nil {
c.debugLog().Err(err).Msg("FAILED-SEND-CLOSE-MESSAGE")
// If it could not be sent, just return
return
}
c.debugLog().Msg("SENT-CLOSE-MESSAGE")
}
func (c *connection) wsRecv(ctx context.Context, messageChannel chan []byte) {
loop:
for {
readDeadline := time.Now().Add(time.Duration(readTimeout) * time.Second)
if err := c.wsConn.SetReadDeadline(readDeadline); err != nil {
c.errLog().Err(err).Msg("FailedSetReadDeadLine")
break loop
}
_, rawMessage, err := c.wsConn.ReadMessage()
if err != nil {
// We almost always get here because the WebSocket was disconnected
c.debugLog().Err(err).Msg("WS-READ-MESSAGE-ERROR")
break loop
}
messageChannel <- rawMessage
}
close(messageChannel)
c.debugLog().Msg("CLOSE-MESSAGE-CHANNEL")
// Wait until main exits
<-ctx.Done()
c.debugLog().Msg("EXITED-MAIN")
if !c.standalone {
c.closeWs()
}
c.debugLog().Msg("EXIT-WS-RECV")
if err := c.disconnectWebhook(); err != nil {
c.errLog().Err(err).Caller().Msg("DisconnectWebhookError")
return
}
}
// All message-related error logging is done here
func (c *connection) handleWsMessage(rawMessage []byte, pongTimeoutTimer *time.Timer) error {
message := &message{}
if err := json.Unmarshal(rawMessage, &message); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidJSON")
return errInvalidJSON
}
if message == nil {
c.errLog().Bytes("rawMessage", rawMessage).Msg("UnexpectedJSON")
return errUnexpectedJSON
}
// Log received messages whose message type could be parsed
c.signalingLog(*message, rawMessage)
switch message.Type {
case "pong":
timerStop(pongTimeoutTimer)
pongTimeoutTimer.Reset(pongTimeout * time.Second)
case "register":
// Already registered, but the client tried to register again
if c.registered {
c.errLog().Bytes("rawMessage", rawMessage).Msg("InternalServer")
return errInternalServer
}
c.ID = getULID()
registerMessage := ®isterMessage{}
if err := json.Unmarshal(rawMessage, ®isterMessage); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidRegisterMessageJSON")
return errInvalidJSON
}
if registerMessage.RoomID == "" {
c.errLog().Bytes("rawMessage", rawMessage).Msg("MissingRoomID")
return errMissingRoomID
}
c.roomID = registerMessage.RoomID
c.clientID = registerMessage.ClientID
if registerMessage.ClientID == "" {
c.clientID = c.ID
}
// Backward compatibility
if registerMessage.Key != nil {
c.signalingKey = registerMessage.Key
}
if registerMessage.SignalingKey != nil {
c.signalingKey = registerMessage.SignalingKey
}
c.authnMetadata = registerMessage.AuthnMetadata
c.standalone = registerMessage.Standalone
// Record the client information
c.ayameClient = registerMessage.AyameClient
c.environment = registerMessage.Environment
c.libwebrtc = registerMessage.Libwebrtc
// Attach Caller to webhook-related error logs
resp, err := c.authnWebhook()
if err != nil {
c.errLog().Err(err).Caller().Msg("AuthnWebhookError")
if err := c.sendRejectMessage("InternalServerError"); err != nil { | }
// If the authentication webhook's response is malformed, treat everything as an error
if resp.Allowed == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if !*resp.Allowed {
if resp.Reason == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if err := c.sendRejectMessage(*resp.Reason); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookReject
}
c.authzMetadata = resp.AuthzMetadata
// The return value handling is deliberately rough
switch c.register() {
case one:
c.registered = true
// The room did not exist yet; return accept
c.debugLog().Msg("REGISTERED-ONE")
if err := c.sendAcceptMessage(false, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case two:
c.registered = true
// The room already existed with one client; this is the second one
c.debugLog().Msg("REGISTERED-TWO")
if err := c.sendAcceptMessage(true, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case full:
// The room was full
c.errLog().Msg("RoomFilled")
if err := c.sendRejectMessage("full"); err != nil {
c.errLog().Err(err).Msg("FailedSendRejectMessage")
return err
}
return errRoomFull
}
case "offer", "answer", "candidate":
// Registration has not completed
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
c.forward(rawMessage)
case "connected":
// Registration has not completed
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
// TODO: consider treating a type: connected message received while c.standalone == false as an error
if c.standalone {
err := fmt.Errorf("WS-CONNECTED")
c.errLog().Err(err).Send()
return err
}
default:
c.errLog().Msg("InvalidMessageType")
return errInvalidMessageType
}
return nil
}
func timerStop(timer *time.Timer) {
// Stopping and then resetting the timer follows the approach described here:
// https://www.kaoriya.net/blog/2019/12/19/
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
}
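// Minimal usage sketch (added for clarity, not part of the original source),
// mirroring how handleWsMessage rearms the pong timer:
//
//	timerStop(pongTimeoutTimer)
//	pongTimeoutTimer.Reset(pongTimeout * time.Second)
//
// Draining timer.C before Reset prevents a stale expiry from firing immediately
// after the timer is rearmed.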
func getULID() string {
t := time.Now()
entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
return ulid.MustNew(ulid.Timestamp(t), entropy).String()
} | c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return err | random_line_split |
connection.go | package ayame
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
"github.com/oklog/ulid/v2"
"github.com/rs/zerolog"
"github.com/shiguredo/websocket"
)
type connection struct {
ID string
roomID string
clientID string
authnMetadata *interface{}
signalingKey *string
// Client information
ayameClient *string
environment *string
libwebrtc *string
authzMetadata *interface{}
// WebSocket connection
wsConn *websocket.Conn
// Whether this connection has been registered
registered bool
// Channel for forwarding messages
forwardChannel chan forward
// config
config Config
signalingLogger zerolog.Logger
webhookLogger zerolog.Logger
// standalone mode
standalone bool
metrics *Metrics
}
const (
// How long to wait on the socket before a read times out (seconds)
readTimeout = 90
// How long to wait for a pong before timing out (seconds)
pongTimeout = 60
// Interval between ping transmissions (seconds)
pingInterval = 5
)
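// Illustrative timing note (added for clarity, not part of the original source):
// with these values the server pings every 5 seconds, drops the connection if no
// pong arrives within 60 seconds, and abandons a blocked read after 90 seconds,
// so a healthy client only needs to answer roughly one in every twelve pings to
// keep the pong timer from expiring.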
func (c *connection) SendJSON(v interface{}) error {
if err := c.wsConn.WriteJSON(v); err != nil {
c.errLog().Err(err).Interface("msg", v).Msg("FailedToSendMsg")
re |
}
return nil
}
func (c *connection) sendPingMessage() error {
msg := &pingMessage{
Type: "ping",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
// If reason looks too long to fit in a close frame, switch to sending a TextMessage instead of a CloseMessage
func (c *connection) sendCloseMessage(code int, reason string) error {
deadline := time.Now().Add(writeWait)
closeMessage := websocket.FormatCloseMessage(code, reason)
return c.wsConn.WriteControl(websocket.CloseMessage, closeMessage, deadline)
}
func (c *connection) sendAcceptMessage(isExistClient bool, iceServers *[]iceServer, authzMetadata *interface{}) error {
msg := &acceptMessage{
Type: "accept",
ConnectionID: c.ID,
IsExistClient: isExistClient,
// Backward compatibility
IsExistUser: isExistClient,
AuthzMetadata: authzMetadata,
IceServers: iceServers,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendRejectMessage(reason string) error {
msg := &rejectMessage{
Type: "reject",
Reason: reason,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendByeMessage() error {
msg := &byeMessage{
Type: "bye",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) closeWs() {
c.wsConn.Close()
c.debugLog().Msg("CLOSED-WS")
}
func (c *connection) register() int {
resultChannel := make(chan int)
registerChannel <- ®ister{
connection: c,
resultChannel: resultChannel,
}
// This blocks; candidates and the like may be arriving concurrently, but that is not handled here
result := <-resultChannel
// The server will not touch this channel again, so close it here
close(resultChannel)
return result
}
func (c *connection) unregister() {
if c.registered {
unregisterChannel <- &unregister{
connection: c,
}
}
}
func (c *connection) forward(msg []byte) {
// Push the message onto the global channel
forwardChannel <- forward{
connection: c,
rawMessage: msg,
}
}
func (c *connection) main(cancel context.CancelFunc, messageChannel chan []byte) {
pongTimeoutTimer := time.NewTimer(pongTimeout * time.Second)
pingTimer := time.NewTimer(pingInterval * time.Second)
defer func() {
timerStop(pongTimeoutTimer)
timerStop(pingTimer)
// Call cancel
cancel()
c.debugLog().Msg("CANCEL")
// Unregister here
c.unregister()
c.debugLog().Msg("UNREGISTER")
c.debugLog().Msg("EXIT-MAIN")
}()
loop:
for {
select {
case <-pingTimer.C:
if !c.standalone {
if err := c.sendPingMessage(); err != nil {
break loop
}
}
pingTimer.Reset(pingInterval * time.Second)
case <-pongTimeoutTimer.C:
if !c.standalone {
// The timer fired, so disconnect
c.errLog().Msg("PongTimeout")
break loop
}
case rawMessage, ok := <-messageChannel:
// The message channel was closed; waiting for main to finish
if !ok {
c.debugLog().Msg("CLOSED-MESSAGE-CHANNEL")
// The message channel is already closed, so just return here
return
}
if err := c.handleWsMessage(rawMessage, pongTimeoutTimer); err != nil {
// No logging needed here; handleWsMessage has already logged the error
// Break out on error
break loop
}
case forward, ok := <-c.forwardChannel:
if !ok {
// The server side closed forwardChannel
c.debugLog().Msg("UNREGISTERED")
if !c.standalone {
if err := c.sendByeMessage(); err != nil {
c.errLog().Err(err).Msg("FailedSendByeMessage")
// If this send failed, the close message cannot be sent either, so return
return
}
c.debugLog().Msg("SENT-BYE-MESSAGE")
}
break loop
}
if err := c.wsConn.WriteMessage(websocket.TextMessage, forward.rawMessage); err != nil {
c.errLog().Err(err).Msg("FailedWriteMessage")
// If this send failed, the close message cannot be sent either, so return
return
}
}
}
// We are closing for our own reasons, so notify the peer that the WebSocket is ending
if err := c.sendCloseMessage(websocket.CloseNormalClosure, ""); err != nil {
c.debugLog().Err(err).Msg("FAILED-SEND-CLOSE-MESSAGE")
// If it could not be sent, just return
return
}
c.debugLog().Msg("SENT-CLOSE-MESSAGE")
}
func (c *connection) wsRecv(ctx context.Context, messageChannel chan []byte) {
loop:
for {
readDeadline := time.Now().Add(time.Duration(readTimeout) * time.Second)
if err := c.wsConn.SetReadDeadline(readDeadline); err != nil {
c.errLog().Err(err).Msg("FailedSetReadDeadLine")
break loop
}
_, rawMessage, err := c.wsConn.ReadMessage()
if err != nil {
// We almost always get here because the WebSocket was disconnected
c.debugLog().Err(err).Msg("WS-READ-MESSAGE-ERROR")
break loop
}
messageChannel <- rawMessage
}
close(messageChannel)
c.debugLog().Msg("CLOSE-MESSAGE-CHANNEL")
// Wait until main exits
<-ctx.Done()
c.debugLog().Msg("EXITED-MAIN")
if !c.standalone {
c.closeWs()
}
c.debugLog().Msg("EXIT-WS-RECV")
if err := c.disconnectWebhook(); err != nil {
c.errLog().Err(err).Caller().Msg("DisconnectWebhookError")
return
}
}
// All message-related error logging is done here
func (c *connection) handleWsMessage(rawMessage []byte, pongTimeoutTimer *time.Timer) error {
message := &message{}
if err := json.Unmarshal(rawMessage, &message); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidJSON")
return errInvalidJSON
}
if message == nil {
c.errLog().Bytes("rawMessage", rawMessage).Msg("UnexpectedJSON")
return errUnexpectedJSON
}
// Log received messages whose message type could be parsed
c.signalingLog(*message, rawMessage)
switch message.Type {
case "pong":
timerStop(pongTimeoutTimer)
pongTimeoutTimer.Reset(pongTimeout * time.Second)
case "register":
// Already registered, but the client tried to register again
if c.registered {
c.errLog().Bytes("rawMessage", rawMessage).Msg("InternalServer")
return errInternalServer
}
c.ID = getULID()
registerMessage := ®isterMessage{}
if err := json.Unmarshal(rawMessage, ®isterMessage); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidRegisterMessageJSON")
return errInvalidJSON
}
if registerMessage.RoomID == "" {
c.errLog().Bytes("rawMessage", rawMessage).Msg("MissingRoomID")
return errMissingRoomID
}
c.roomID = registerMessage.RoomID
c.clientID = registerMessage.ClientID
if registerMessage.ClientID == "" {
c.clientID = c.ID
}
// Backward compatibility
if registerMessage.Key != nil {
c.signalingKey = registerMessage.Key
}
if registerMessage.SignalingKey != nil {
c.signalingKey = registerMessage.SignalingKey
}
c.authnMetadata = registerMessage.AuthnMetadata
c.standalone = registerMessage.Standalone
// Record the client information
c.ayameClient = registerMessage.AyameClient
c.environment = registerMessage.Environment
c.libwebrtc = registerMessage.Libwebrtc
// Attach Caller to webhook-related error logs
resp, err := c.authnWebhook()
if err != nil {
c.errLog().Err(err).Caller().Msg("AuthnWebhookError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return err
}
// If the authentication webhook's response is malformed, treat everything as an error
if resp.Allowed == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if !*resp.Allowed {
if resp.Reason == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if err := c.sendRejectMessage(*resp.Reason); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookReject
}
c.authzMetadata = resp.AuthzMetadata
// The return value handling is deliberately rough
switch c.register() {
case one:
c.registered = true
// The room did not exist yet; return accept
c.debugLog().Msg("REGISTERED-ONE")
if err := c.sendAcceptMessage(false, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case two:
c.registered = true
// The room already existed with one client; this is the second one
c.debugLog().Msg("REGISTERED-TWO")
if err := c.sendAcceptMessage(true, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case full:
// The room was full
c.errLog().Msg("RoomFilled")
if err := c.sendRejectMessage("full"); err != nil {
c.errLog().Err(err).Msg("FailedSendRejectMessage")
return err
}
return errRoomFull
}
case "offer", "answer", "candidate":
// Registration has not completed
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
c.forward(rawMessage)
case "connected":
// Registration has not completed
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
// TODO: consider treating a type: connected message received while c.standalone == false as an error
if c.standalone {
err := fmt.Errorf("WS-CONNECTED")
c.errLog().Err(err).Send()
return err
}
default:
c.errLog().Msg("InvalidMessageType")
return errInvalidMessageType
}
return nil
}
func timerStop(timer *time.Timer) {
// Stopping and then resetting the timer follows the approach described here:
// https://www.kaoriya.net/blog/2019/12/19/
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
}
func getULID() string {
t := time.Now()
entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
return ulid.MustNew(ulid.Timestamp(t), entropy).String()
}
| turn err | identifier_name |
connection.go | package ayame
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
"github.com/oklog/ulid/v2"
"github.com/rs/zerolog"
"github.com/shiguredo/websocket"
)
type connection struct {
ID string
roomID string
clientID string
authnMetadata *interface{}
signalingKey *string
// Client information
ayameClient *string
environment *string
libwebrtc *string
authzMetadata *interface{}
// WebSocket connection
wsConn *websocket.Conn
// Whether this connection has been registered
registered bool
// Channel for forwarding messages
forwardChannel chan forward
// config
config Config
signalingLogger zerolog.Logger
webhookLogger zerolog.Logger
// standalone mode
standalone bool
metrics *Metrics
}
const (
// How long to wait on the socket before a read times out (seconds)
readTimeout = 90
// How long to wait for a pong before timing out (seconds)
pongTimeout = 60
// Interval between ping transmissions (seconds)
pingInterval = 5
)
func (c *connection) SendJSON(v interface{}) error {
if err := c.wsConn.WriteJSON(v); err != nil {
c.errLog().Err(err).Interface("msg", v).Msg("FailedToSendMsg")
return err
}
return nil
}
fun |
return nil
}
// If reason looks too long to fit in a close frame, switch to sending a TextMessage instead of a CloseMessage
func (c *connection) sendCloseMessage(code int, reason string) error {
deadline := time.Now().Add(writeWait)
closeMessage := websocket.FormatCloseMessage(code, reason)
return c.wsConn.WriteControl(websocket.CloseMessage, closeMessage, deadline)
}
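// Editor's note (illustrative sketch, not part of the original file): a WebSocket
// close frame payload is limited to 125 bytes (2-byte status code plus at most
// 123 bytes of reason), which is why the comment above suggests falling back to a
// text message for long reasons. A guard along those lines could look like this;
// sendCloseOrText is a hypothetical helper name:
func (c *connection) sendCloseOrText(code int, reason string) error {
	const maxCloseReason = 123
	if len(reason) > maxCloseReason {
		// too long for a close frame; send the reason as a regular text message first
		if err := c.wsConn.WriteMessage(websocket.TextMessage, []byte(reason)); err != nil {
			return err
		}
		reason = ""
	}
	return c.sendCloseMessage(code, reason)
}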
func (c *connection) sendAcceptMessage(isExistClient bool, iceServers *[]iceServer, authzMetadata *interface{}) error {
msg := &acceptMessage{
Type: "accept",
ConnectionID: c.ID,
IsExistClient: isExistClient,
// backward compatibility
IsExistUser: isExistClient,
AuthzMetadata: authzMetadata,
IceServers: iceServers,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendRejectMessage(reason string) error {
msg := &rejectMessage{
Type: "reject",
Reason: reason,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendByeMessage() error {
msg := &byeMessage{
Type: "bye",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) closeWs() {
c.wsConn.Close()
c.debugLog().Msg("CLOSED-WS")
}
func (c *connection) register() int {
resultChannel := make(chan int)
registerChannel <- ®ister{
connection: c,
resultChannel: resultChannel,
}
// this blocks; candidates and the like may be arriving in parallel, but that is not dealt with here
result := <-resultChannel
// the server will not touch this channel again, so close it here
close(resultChannel)
return result
}
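// Editor's note: the one/two/full values returned via register() are package-level
// constants defined elsewhere in ayame and are not shown in this file. A plausible
// shape (assumption, given only to make the switch in handleWsMessage readable):
//
//	const (
//		one  = iota + 1 // first client, room newly created
//		two             // second client, peer already present
//		full            // room already holds two clients
//	)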
func (c *connection) unregister() {
if c.registered {
unregisterChannel <- &unregister{
connection: c,
}
}
}
func (c *connection) forward(msg []byte) {
// push the message onto the global channel
forwardChannel <- forward{
connection: c,
rawMessage: msg,
}
}
func (c *connection) main(cancel context.CancelFunc, messageChannel chan []byte) {
pongTimeoutTimer := time.NewTimer(pongTimeout * time.Second)
pingTimer := time.NewTimer(pingInterval * time.Second)
defer func() {
timerStop(pongTimeoutTimer)
timerStop(pingTimer)
// call cancel
cancel()
c.debugLog().Msg("CANCEL")
// unregistering is done here
c.unregister()
c.debugLog().Msg("UNREGISTER")
c.debugLog().Msg("EXIT-MAIN")
}()
loop:
for {
select {
case <-pingTimer.C:
if !c.standalone {
if err := c.sendPingMessage(); err != nil {
break loop
}
}
pingTimer.Reset(pingInterval * time.Second)
case <-pongTimeoutTimer.C:
if !c.standalone {
// the timer fired, so disconnect
c.errLog().Msg("PongTimeout")
break loop
}
case rawMessage, ok := <-messageChannel:
// the message channel was closed; waiting for main to finish
if !ok {
c.debugLog().Msg("CLOSED-MESSAGE-CHANNEL")
// the message channel is closed, so just return and exit
return
}
if err := c.handleWsMessage(rawMessage, pongTimeoutTimer); err != nil {
// no need to log the error here; handleWsMessage already logged it
// break out of the loop on error
break loop
}
case forward, ok := <-c.forwardChannel:
if !ok {
// the server side closed forwardChannel
c.debugLog().Msg("UNREGISTERED")
if !c.standalone {
if err := c.sendByeMessage(); err != nil {
c.errLog().Err(err).Msg("FailedSendByeMessage")
// if this cannot be sent, the close message cannot be sent either, so return
return
}
c.debugLog().Msg("SENT-BYE-MESSAGE")
}
break loop
}
if err := c.wsConn.WriteMessage(websocket.TextMessage, forward.rawMessage); err != nil {
c.errLog().Err(err).Msg("FailedWriteMessage")
// if this cannot be sent, the close message cannot be sent either, so return
return
}
}
}
// we are terminating on our own initiative, so send a WebSocket close notification
if err := c.sendCloseMessage(websocket.CloseNormalClosure, ""); err != nil {
c.debugLog().Err(err).Msg("FAILED-SEND-CLOSE-MESSAGE")
// if it cannot be sent, just return
return
}
c.debugLog().Msg("SENT-CLOSE-MESSAGE")
}
func (c *connection) wsRecv(ctx context.Context, messageChannel chan []byte) {
loop:
for {
readDeadline := time.Now().Add(time.Duration(readTimeout) * time.Second)
if err := c.wsConn.SetReadDeadline(readDeadline); err != nil {
c.errLog().Err(err).Msg("FailedSetReadDeadLine")
break loop
}
_, rawMessage, err := c.wsConn.ReadMessage()
if err != nil {
// reaching this point almost always means the WebSocket was disconnected
c.debugLog().Err(err).Msg("WS-READ-MESSAGE-ERROR")
break loop
}
messageChannel <- rawMessage
}
close(messageChannel)
c.debugLog().Msg("CLOSE-MESSAGE-CHANNEL")
// wait until main exits
<-ctx.Done()
c.debugLog().Msg("EXITED-MAIN")
if !c.standalone {
c.closeWs()
}
c.debugLog().Msg("EXIT-WS-RECV")
if err := c.disconnectWebhook(); err != nil {
c.errLog().Err(err).Caller().Msg("DisconnectWebhookError")
return
}
}
// all message-related error logging is done here
func (c *connection) handleWsMessage(rawMessage []byte, pongTimeoutTimer *time.Timer) error {
message := &message{}
if err := json.Unmarshal(rawMessage, &message); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidJSON")
return errInvalidJSON
}
if message == nil {
c.errLog().Bytes("rawMessage", rawMessage).Msg("UnexpectedJSON")
return errUnexpectedJSON
}
// log received messages whose message type could be parsed
c.signalingLog(*message, rawMessage)
switch message.Type {
case "pong":
timerStop(pongTimeoutTimer)
pongTimeoutTimer.Reset(pongTimeout * time.Second)
case "register":
// already registered, but the client tried to register again
if c.registered {
c.errLog().Bytes("rawMessage", rawMessage).Msg("InternalServer")
return errInternalServer
}
c.ID = getULID()
registerMessage := ®isterMessage{}
if err := json.Unmarshal(rawMessage, ®isterMessage); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidRegisterMessageJSON")
return errInvalidJSON
}
if registerMessage.RoomID == "" {
c.errLog().Bytes("rawMessage", rawMessage).Msg("MissingRoomID")
return errMissingRoomID
}
c.roomID = registerMessage.RoomID
c.clientID = registerMessage.ClientID
if registerMessage.ClientID == "" {
c.clientID = c.ID
}
// backward compatibility
if registerMessage.Key != nil {
c.signalingKey = registerMessage.Key
}
if registerMessage.SignalingKey != nil {
c.signalingKey = registerMessage.SignalingKey
}
c.authnMetadata = registerMessage.AuthnMetadata
c.standalone = registerMessage.Standalone
// record the client information
c.ayameClient = registerMessage.AyameClient
c.environment = registerMessage.Environment
c.libwebrtc = registerMessage.Libwebrtc
// webhook-related error logs include Caller
resp, err := c.authnWebhook()
if err != nil {
c.errLog().Err(err).Caller().Msg("AuthnWebhookError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return err
}
// if the authentication server returns an unexpected value, treat everything as an error
if resp.Allowed == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if !*resp.Allowed {
if resp.Reason == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if err := c.sendRejectMessage(*resp.Reason); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookReject
}
c.authzMetadata = resp.AuthzMetadata
// the return value handling here is deliberately minimal
switch c.register() {
case one:
c.registered = true
// the room did not exist yet; return accept
c.debugLog().Msg("REGISTERED-ONE")
if err := c.sendAcceptMessage(false, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case two:
c.registered = true
// the room already existed with one client in it; this is the second one
c.debugLog().Msg("REGISTERED-TWO")
if err := c.sendAcceptMessage(true, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case full:
// the room was full
c.errLog().Msg("RoomFilled")
if err := c.sendRejectMessage("full"); err != nil {
c.errLog().Err(err).Msg("FailedSendRejectMessage")
return err
}
return errRoomFull
}
case "offer", "answer", "candidate":
// register has not completed yet
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
c.forward(rawMessage)
case "connected":
// register has not completed yet
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
// TODO: consider returning an error when a type: connected message is received while c.standalone == false
if c.standalone {
err := fmt.Errorf("WS-CONNECTED")
c.errLog().Err(err).Send()
return err
}
default:
c.errLog().Msg("InvalidMessageType")
return errInvalidMessageType
}
return nil
}
func timerStop(timer *time.Timer) {
// stopping and then resetting the timer follows the reference below
// https://www.kaoriya.net/blog/2019/12/19/
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
}
func getULID() string {
t := time.Now()
entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
return ulid.MustNew(ulid.Timestamp(t), entropy).String()
}
| c (c *connection) sendPingMessage() error {
msg := &pingMessage{
Type: "ping",
}
if err := c.SendJSON(msg); err != nil {
return err
}
| identifier_body |
connection.go | package ayame
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
"github.com/oklog/ulid/v2"
"github.com/rs/zerolog"
"github.com/shiguredo/websocket"
)
type connection struct {
ID string
roomID string
clientID string
authnMetadata *interface{}
signalingKey *string
// client information
ayameClient *string
environment *string
libwebrtc *string
authzMetadata *interface{}
// WebSocket connection
wsConn *websocket.Conn
// whether this connection has been registered
registered bool
// channel used for forwarding messages
forwardChannel chan forward
// config
config Config
signalingLogger zerolog.Logger
webhookLogger zerolog.Logger
// standalone mode
standalone bool
metrics *Metrics
}
const (
// how long the socket waits for a read
readTimeout = 90
// how long to wait before timing out because no pong has arrived
pongTimeout = 60
// interval between ping transmissions
pingInterval = 5
)
func (c *connection) SendJSON(v interface{}) error {
if err := c.wsConn.WriteJSON(v); err != nil {
c.errLog().Err(err).Interface("msg", v).Msg("FailedToSendMsg")
return err
}
return nil
}
func (c *connection) sendPingMessage() error {
msg := &pingMessage{
Type: "ping",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
// if the close frame cannot hold the full reason, switch to sending a TextMessage instead of a CloseMessage
func (c *connection) sendCloseMessage(code int, reason string) error {
deadline := time.Now().Add(writeWait)
closeMessage := websocket.FormatCloseMessage(code, reason)
return c.wsConn.WriteControl(websocket.CloseMessage, closeMessage, deadline)
}
func (c *connection) sendAcceptMessage(isExistClient bool, iceServers *[]iceServer, authzMetadata *interface{}) error {
msg := &acceptMessage{
Type: "accept",
ConnectionID: c.ID,
IsExistClient: isExistClient,
// backward compatibility
IsExistUser: isExistClient,
AuthzMetadata: authzMetadata,
IceServers: iceServers,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendRejectMessage(reason string) error {
msg := &rejectMessage{
Type: "reject",
Reason: reason,
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) sendByeMessage() error {
msg := &byeMessage{
Type: "bye",
}
if err := c.SendJSON(msg); err != nil {
return err
}
return nil
}
func (c *connection) closeWs() {
c.wsConn.Close()
c.debugLog().Msg("CLOSED-WS")
}
func (c *connection) register() int {
resultChannel := make(chan int)
registerChannel <- ®ister{
connection: c,
resultChannel: resultChannel,
}
// this blocks; candidates and the like may be arriving in parallel, but that is not dealt with here
result := <-resultChannel
// the server will not touch this channel again, so close it here
close(resultChannel)
return result
}
func (c *connection) unregister() {
if c.registered {
unregisterChannel <- &unregister{
connection: c,
}
}
}
func (c *connection) forward(msg []byte) {
// push the message onto the global channel
forwardChannel <- forward{
connection: c,
rawMessage: msg,
}
}
func (c *connection) main(cancel context.CancelFunc, messageChannel chan []byte) {
pongTimeoutTimer := time.NewTimer(pongTimeout * time.Second)
pingTimer := time.NewTimer(pingInterval * time.Second)
defer func() {
timerStop(pongTimeoutTimer)
timerStop(pingTimer)
// call cancel
cancel()
c.debugLog().Msg("CANCEL")
// unregistering is done here
c.unregister()
c.debugLog().Msg("UNREGISTER")
c.debugLog().Msg("EXIT-MAIN")
}()
loop:
for {
select {
case <-pingTimer.C:
if !c.standalone {
if err := c.sendPingMessage(); err != nil {
break loop
}
}
pingTimer.Reset(pingInterval * time.Second)
case <-pongTimeoutTimer.C:
if !c.standalone {
// the timer fired, so disconnect
c.errLog().Msg("PongTimeout")
break loop
}
case rawMessage, ok := <-messageChannel:
// the message channel was closed; waiting for main to finish
if !ok {
c.debugLog().Msg("CLOSED-MESSAGE-CHANNEL")
// the message channel is closed, so just return and exit
return
}
if err := c.handleWsMessage(rawMessage, pongTimeoutTimer); err != nil {
// no need to log the error here; handleWsMessage already logged it
// break out of the loop on error
break loop
}
case forward, ok := <-c.forwardChannel:
if !ok {
// the server side closed forwardChannel
c.debugLog().Msg("UNREGISTERED")
if !c.standalone {
if err := c.sendByeMessage(); er | -MESSAGE")
}
break loop
}
if err := c.wsConn.WriteMessage(websocket.TextMessage, forward.rawMessage); err != nil {
c.errLog().Err(err).Msg("FailedWriteMessage")
// if this cannot be sent, the close message cannot be sent either, so return
return
}
}
}
// we are terminating on our own initiative, so send a WebSocket close notification
if err := c.sendCloseMessage(websocket.CloseNormalClosure, ""); err != nil {
c.debugLog().Err(err).Msg("FAILED-SEND-CLOSE-MESSAGE")
// if it cannot be sent, just return
return
}
c.debugLog().Msg("SENT-CLOSE-MESSAGE")
}
func (c *connection) wsRecv(ctx context.Context, messageChannel chan []byte) {
loop:
for {
readDeadline := time.Now().Add(time.Duration(readTimeout) * time.Second)
if err := c.wsConn.SetReadDeadline(readDeadline); err != nil {
c.errLog().Err(err).Msg("FailedSetReadDeadLine")
break loop
}
_, rawMessage, err := c.wsConn.ReadMessage()
if err != nil {
// reaching this point almost always means the WebSocket was disconnected
c.debugLog().Err(err).Msg("WS-READ-MESSAGE-ERROR")
break loop
}
messageChannel <- rawMessage
}
close(messageChannel)
c.debugLog().Msg("CLOSE-MESSAGE-CHANNEL")
// wait until main exits
<-ctx.Done()
c.debugLog().Msg("EXITED-MAIN")
if !c.standalone {
c.closeWs()
}
c.debugLog().Msg("EXIT-WS-RECV")
if err := c.disconnectWebhook(); err != nil {
c.errLog().Err(err).Caller().Msg("DisconnectWebhookError")
return
}
}
// all message-related error logging is done here
func (c *connection) handleWsMessage(rawMessage []byte, pongTimeoutTimer *time.Timer) error {
message := &message{}
if err := json.Unmarshal(rawMessage, &message); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidJSON")
return errInvalidJSON
}
if message == nil {
c.errLog().Bytes("rawMessage", rawMessage).Msg("UnexpectedJSON")
return errUnexpectedJSON
}
// log received messages whose message type could be parsed
c.signalingLog(*message, rawMessage)
switch message.Type {
case "pong":
timerStop(pongTimeoutTimer)
pongTimeoutTimer.Reset(pongTimeout * time.Second)
case "register":
// already registered, but the client tried to register again
if c.registered {
c.errLog().Bytes("rawMessage", rawMessage).Msg("InternalServer")
return errInternalServer
}
c.ID = getULID()
registerMessage := ®isterMessage{}
if err := json.Unmarshal(rawMessage, ®isterMessage); err != nil {
c.errLog().Err(err).Bytes("rawMessage", rawMessage).Msg("InvalidRegisterMessageJSON")
return errInvalidJSON
}
if registerMessage.RoomID == "" {
c.errLog().Bytes("rawMessage", rawMessage).Msg("MissingRoomID")
return errMissingRoomID
}
c.roomID = registerMessage.RoomID
c.clientID = registerMessage.ClientID
if registerMessage.ClientID == "" {
c.clientID = c.ID
}
// backward compatibility
if registerMessage.Key != nil {
c.signalingKey = registerMessage.Key
}
if registerMessage.SignalingKey != nil {
c.signalingKey = registerMessage.SignalingKey
}
c.authnMetadata = registerMessage.AuthnMetadata
c.standalone = registerMessage.Standalone
// record the client information
c.ayameClient = registerMessage.AyameClient
c.environment = registerMessage.Environment
c.libwebrtc = registerMessage.Libwebrtc
// webhook-related error logs include Caller
resp, err := c.authnWebhook()
if err != nil {
c.errLog().Err(err).Caller().Msg("AuthnWebhookError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return err
}
// if the authentication server returns an unexpected value, treat everything as an error
if resp.Allowed == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if !*resp.Allowed {
if resp.Reason == nil {
c.errLog().Caller().Msg("AuthnWebhookResponseError")
if err := c.sendRejectMessage("InternalServerError"); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookResponse
}
if err := c.sendRejectMessage(*resp.Reason); err != nil {
c.errLog().Err(err).Caller().Msg("FailedSendRejectMessage")
return err
}
return errAuthnWebhookReject
}
c.authzMetadata = resp.AuthzMetadata
// the return value handling here is deliberately minimal
switch c.register() {
case one:
c.registered = true
// the room did not exist yet; return accept
c.debugLog().Msg("REGISTERED-ONE")
if err := c.sendAcceptMessage(false, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case two:
c.registered = true
// the room already existed with one client in it; this is the second one
c.debugLog().Msg("REGISTERED-TWO")
if err := c.sendAcceptMessage(true, resp.IceServers, resp.AuthzMetadata); err != nil {
c.errLog().Err(err).Msg("FailedSendAcceptMessage")
return err
}
case full:
// the room was full
c.errLog().Msg("RoomFilled")
if err := c.sendRejectMessage("full"); err != nil {
c.errLog().Err(err).Msg("FailedSendRejectMessage")
return err
}
return errRoomFull
}
case "offer", "answer", "candidate":
// register has not completed yet
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
c.forward(rawMessage)
case "connected":
// register has not completed yet
if !c.registered {
c.errLog().Msg("RegistrationIncomplete")
return errRegistrationIncomplete
}
// TODO: consider returning an error when a type: connected message is received while c.standalone == false
if c.standalone {
err := fmt.Errorf("WS-CONNECTED")
c.errLog().Err(err).Send()
return err
}
default:
c.errLog().Msg("InvalidMessageType")
return errInvalidMessageType
}
return nil
}
func timerStop(timer *time.Timer) {
// stopping and then resetting the timer follows the reference below
// https://www.kaoriya.net/blog/2019/12/19/
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
}
func getULID() string {
t := time.Now()
entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
return ulid.MustNew(ulid.Timestamp(t), entropy).String()
}
| r != nil {
c.errLog().Err(err).Msg("FailedSendByeMessage")
// if this cannot be sent, the close message cannot be sent either, so return
return
}
c.debugLog().Msg("SENT-BYE | conditional_block |
images.go | // Copyright 2016 Mender Software AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"io"
"io/ioutil"
"time"
"github.com/mendersoftware/deployments/resources/images"
"github.com/mendersoftware/deployments/resources/images/controller"
"github.com/mendersoftware/mender-artifact/metadata"
"github.com/mendersoftware/mender-artifact/parser"
"github.com/mendersoftware/mender-artifact/reader"
"github.com/pkg/errors"
"github.com/satori/go.uuid"
)
const (
ImageContentType = "application/vnd.mender-artifact"
)
type ImagesModel struct {
fileStorage FileStorage
deployments ImageUsedIn
imagesStorage SoftwareImagesStorage
}
func NewImagesModel(
fileStorage FileStorage,
checker ImageUsedIn,
imagesStorage SoftwareImagesStorage,
) *ImagesModel {
return &ImagesModel{
fileStorage: fileStorage,
deployments: checker,
imagesStorage: imagesStorage,
}
}
// CreateImage parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID and nil on success.
func (i *ImagesModel) CreateImage(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
if metaConstructor == nil {
return "", controller.ErrModelMissingInputMetadata
}
if imageReader == nil {
return "", controller.ErrModelMissingInputArtifact
}
artifactID, err := i.handleArtifact(metaConstructor, imageReader)
// try to remove artifact file from file storage on error
if err != nil {
if cleanupErr := i.fileStorage.Delete(artifactID); cleanupErr != nil {
return "", errors.Wrap(err, cleanupErr.Error())
}
}
return artifactID, err
}
// handleArtifact parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID, artifact file ID and nil on success.
func (i *ImagesModel) handleArtifact(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
// limit just for safety
// max image size - 10G
const MaxImageSize = 1024 * 1024 * 1024 * 10
// create pipe
pR, pW := io.Pipe()
// limit reader to max image size
lr := io.LimitReader(imageReader, MaxImageSize)
tee := io.TeeReader(lr, pW)
artifactID := uuid.NewV4().String()
ch := make(chan error)
// create goroutine for artifact upload
//
// reading from the pipe (which is done in UploadArtifact method) is a blocking operation
// and cannot be done in the same goroutine as writing to the pipe
//
// uploading and parsing the artifact in the same goroutine would cause a deadlock!
go func() {
err := i.fileStorage.UploadArtifact(artifactID, pR, ImageContentType)
if err != nil {
pR.CloseWithError(err)
}
ch <- err
}()
// parse artifact
// artifact library reads all the data from the given reader
metaArtifactConstructor, err := getMetaFromArchive(&tee, MaxImageSize)
if err != nil {
pW.Close()
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", controller.ErrModelArtifactUploadFailed
}
return "", controller.ErrModelInvalidMetadata
}
// read the rest of the data,
// just in case the artifact library did not read all the data from the reader
_, err = io.Copy(ioutil.Discard, tee)
if err != nil {
pW.Close()
_ = <-ch
return "", err
}
// close the pipe
pW.Close()
// collect output from the goroutine
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", uploadResponseErr
}
// validate artifact metadata
if err = metaArtifactConstructor.Validate(); err != nil {
return "", controller.ErrModelInvalidMetadata
}
// check if artifact is unique
// artifact is considered to be unique if there is no artifact with the same name
// and supporting the same platform in the system
isArtifactUnique, err := i.imagesStorage.IsArtifactUnique(
metaArtifactConstructor.ArtifactName, metaArtifactConstructor.DeviceTypesCompatible)
if err != nil {
return "", errors.Wrap(err, "Fail to check if artifact is unique")
}
if !isArtifactUnique {
return "", controller.ErrModelArtifactNotUnique
}
image := images.NewSoftwareImage(artifactID, metaConstructor, metaArtifactConstructor)
// save image structure in the system
if err = i.imagesStorage.Insert(image); err != nil {
return "", errors.Wrap(err, "Fail to store the metadata")
}
return artifactID, nil
}
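// Editor's note: the pipe/tee arrangement above is the generic "consume one reader
// twice in parallel" pattern. A self-contained sketch of just that pattern follows;
// uploadAndParse and its parameters are hypothetical names used for illustration only.
func uploadAndParse(src io.Reader, upload func(io.Reader) error, parse func(io.Reader) error) error {
	pr, pw := io.Pipe()
	errc := make(chan error, 1)
	go func() {
		// the upload side blocks reading from the pipe until the writer is closed
		errc <- upload(pr)
	}()
	// the parse side drives the pipe by reading through the tee
	err := parse(io.TeeReader(src, pw))
	pw.Close()
	uploadErr := <-errc
	if err != nil {
		return err
	}
	return uploadErr
}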
// GetImage allows fetching the image object with the specified id
// Nil if not found
func (i *ImagesModel) GetImage(id string) (*images.SoftwareImage, error) {
image, err := i.imagesStorage.FindByID(id)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if image == nil {
return nil, nil
}
return image, nil
}
// DeleteImage removes the image metadata and the image file.
// Noop for non-existing images.
// Removing an image is only allowed if it is not scheduled or in progress for an update - in that case the image file is still needed.
// For already finished updates only the image file is no longer needed; the metadata is attached directly to the device deployment,
// so we still keep some information about images that have been used (but not the file).
func (i *ImagesModel) DeleteImage(imageID string) error {
found, err := i.GetImage(imageID)
if err != nil {
return errors.Wrap(err, "Getting image metadata")
}
if found == nil {
return controller.ErrImageMetaNotFound
}
inUse, err := i.deployments.ImageUsedInActiveDeployment(imageID)
if err != nil {
return errors.Wrap(err, "Checking if image is used in active deployment")
}
// Image is in use, not allowed to delete
if inUse {
return controller.ErrModelImageInActiveDeployment
}
// Delete image file (call to external service)
// Noop for not existing file
if err := i.fileStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image file")
}
// Delete metadata
if err := i.imagesStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image metadata")
}
return nil
}
// ListImages lists images according to the specified filters.
func (i *ImagesModel) ListImages(filters map[string]string) ([]*images.SoftwareImage, error) {
imageList, err := i.imagesStorage.FindAll()
if err != nil {
return nil, errors.Wrap(err, "Searching for image metadata")
}
if imageList == nil {
return make([]*images.SoftwareImage, 0), nil
}
return imageList, nil
}
// EditImage allows editing only if the image has not yet been used in any deployment.
func (i *ImagesModel) EditImage(imageID string, constructor *images.SoftwareImageMetaConstructor) (bool, error) {
if err := constructor.Validate(); err != nil {
return false, errors.Wrap(err, "Validating image metadata")
}
found, err := i.deployments.ImageUsedInDeployment(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for usage of the image among deployments")
}
if found {
return false, controller.ErrModelImageUsedInAnyDeployment
}
foundImage, err := i.imagesStorage.FindByID(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for image with specified ID")
}
if foundImage == nil {
return false, nil
}
foundImage.SetModified(time.Now())
_, err = i.imagesStorage.Update(foundImage)
if err != nil {
return false, errors.Wrap(err, "Updating image matadata")
}
return true, nil
}
// DownloadLink returns a presigned GET link to download the image file.
// Returns an error if the image has not been uploaded.
func (i *ImagesModel) DownloadLink(imageID string, expire time.Duration) (*images.Link, error) {
found, err := i.imagesStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if !found {
return nil, nil
}
found, err = i.fileStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image file")
}
if !found {
return nil, nil
}
link, err := i.fileStorage.GetRequest(imageID, expire, ImageContentType)
if err != nil {
return nil, errors.Wrap(err, "Generating download link")
}
return link, nil
}
func getArtifactInfo(info metadata.Info) *images.ArtifactInfo {
return &images.ArtifactInfo{
Format: info.Format,
Version: uint(info.Version),
}
}
func getUpdateFiles(maxImageSize int64, uFiles map[string]parser.UpdateFile) ([]images.UpdateFile, error) {
var files []images.UpdateFile
for _, u := range uFiles {
if u.Size > maxImageSize {
return nil, errors.New("Image too large")
}
files = append(files, images.UpdateFile{
Name: u.Name,
Size: u.Size,
Signature: string(u.Signature),
Date: &u.Date,
Checksum: string(u.Checksum),
})
}
return files, nil
}
func getMetaFromArchive(
r *io.Reader, maxImageSize int64) (*images.SoftwareImageMetaArtifactConstructor, error) {
metaArtifact := images.NewSoftwareImageMetaArtifactConstructor()
aReader := areader.NewReader(*r)
defer aReader.Close()
data, err := aReader.Read()
if err != nil {
return nil, errors.Wrap(err, "reading artifact error")
}
metaArtifact.Info = getArtifactInfo(aReader.GetInfo())
metaArtifact.DeviceTypesCompatible = aReader.GetCompatibleDevices()
metaArtifact.ArtifactName = aReader.GetArtifactName()
for _, p := range data {
uFiles, err := getUpdateFiles(maxImageSize, p.GetUpdateFiles())
if err != nil |
metaArtifact.Updates = append(
metaArtifact.Updates,
images.Update{
TypeInfo: images.ArtifactUpdateTypeInfo{
Type: p.GetUpdateType().Type,
},
MetaData: p.GetMetadata(),
Files: uFiles,
})
}
return metaArtifact, nil
}
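// Editor's note (hypothetical usage, not in the original): getMetaFromArchive
// consumes the reader completely, which is why handleArtifact feeds it the tee'd
// upload stream. Exercised directly against a local artifact it would look roughly
// like this (file name and error handling are placeholders):
//
//	f, _ := os.Open("release-1.mender")
//	defer f.Close()
//	var r io.Reader = f
//	meta, err := getMetaFromArchive(&r, 10<<30)
//	if err != nil { /* handle parse error */ }
//	fmt.Println(meta.ArtifactName, meta.DeviceTypesCompatible)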
| {
return nil, errors.Wrap(err, "Cannot get update files:")
} | conditional_block |
images.go | // Copyright 2016 Mender Software AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"io"
"io/ioutil"
"time"
"github.com/mendersoftware/deployments/resources/images"
"github.com/mendersoftware/deployments/resources/images/controller"
"github.com/mendersoftware/mender-artifact/metadata"
"github.com/mendersoftware/mender-artifact/parser"
"github.com/mendersoftware/mender-artifact/reader"
"github.com/pkg/errors"
"github.com/satori/go.uuid"
)
const (
ImageContentType = "application/vnd.mender-artifact"
)
type ImagesModel struct {
fileStorage FileStorage
deployments ImageUsedIn
imagesStorage SoftwareImagesStorage
}
func NewImagesModel(
fileStorage FileStorage,
checker ImageUsedIn,
imagesStorage SoftwareImagesStorage,
) *ImagesModel {
return &ImagesModel{
fileStorage: fileStorage,
deployments: checker,
imagesStorage: imagesStorage,
}
}
// CreateImage parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID and nil on success.
func (i *ImagesModel) CreateImage(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
if metaConstructor == nil {
return "", controller.ErrModelMissingInputMetadata
}
if imageReader == nil {
return "", controller.ErrModelMissingInputArtifact
}
artifactID, err := i.handleArtifact(metaConstructor, imageReader)
// try to remove artifact file from file storage on error
if err != nil {
if cleanupErr := i.fileStorage.Delete(artifactID); cleanupErr != nil {
return "", errors.Wrap(err, cleanupErr.Error())
}
}
return artifactID, err
}
// handleArtifact parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID, artifact file ID and nil on success.
func (i *ImagesModel) handleArtifact(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
// limit just for safety
// max image size - 10G
const MaxImageSize = 1024 * 1024 * 1024 * 10
// create pipe
pR, pW := io.Pipe()
// limit reader to max image size
lr := io.LimitReader(imageReader, MaxImageSize)
tee := io.TeeReader(lr, pW)
artifactID := uuid.NewV4().String()
ch := make(chan error)
// create goroutine for artifact upload
//
// reading from the pipe (which is done in UploadArtifact method) is a blocking operation
// and cannot be done in the same goroutine as writing to the pipe
//
// uploading and parsing the artifact in the same goroutine would cause a deadlock!
go func() {
err := i.fileStorage.UploadArtifact(artifactID, pR, ImageContentType)
if err != nil {
pR.CloseWithError(err)
}
ch <- err
}()
// parse artifact
// artifact library reads all the data from the given reader
metaArtifactConstructor, err := getMetaFromArchive(&tee, MaxImageSize)
if err != nil {
pW.Close()
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", controller.ErrModelArtifactUploadFailed
}
return "", controller.ErrModelInvalidMetadata
}
// read the rest of the data,
// just in case the artifact library did not read all the data from the reader
_, err = io.Copy(ioutil.Discard, tee)
if err != nil {
pW.Close()
_ = <-ch
return "", err
}
// close the pipe
pW.Close()
// collect output from the goroutine
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", uploadResponseErr
}
// validate artifact metadata
if err = metaArtifactConstructor.Validate(); err != nil {
return "", controller.ErrModelInvalidMetadata
}
// check if artifact is unique
// artifact is considered to be unique if there is no artifact with the same name
// and supporting the same platform in the system
isArtifactUnique, err := i.imagesStorage.IsArtifactUnique(
metaArtifactConstructor.ArtifactName, metaArtifactConstructor.DeviceTypesCompatible)
if err != nil {
return "", errors.Wrap(err, "Fail to check if artifact is unique")
}
if !isArtifactUnique {
return "", controller.ErrModelArtifactNotUnique
}
image := images.NewSoftwareImage(artifactID, metaConstructor, metaArtifactConstructor)
// save image structure in the system
if err = i.imagesStorage.Insert(image); err != nil {
return "", errors.Wrap(err, "Fail to store the metadata")
}
return artifactID, nil
}
// GetImage allows fetching the image object with the specified id
// Nil if not found
func (i *ImagesModel) GetImage(id string) (*images.SoftwareImage, error) |
// DeleteImage removes the image metadata and the image file.
// Noop for non-existing images.
// Removing an image is only allowed if it is not scheduled or in progress for an update - in that case the image file is still needed.
// For already finished updates only the image file is no longer needed; the metadata is attached directly to the device deployment,
// so we still keep some information about images that have been used (but not the file).
func (i *ImagesModel) DeleteImage(imageID string) error {
found, err := i.GetImage(imageID)
if err != nil {
return errors.Wrap(err, "Getting image metadata")
}
if found == nil {
return controller.ErrImageMetaNotFound
}
inUse, err := i.deployments.ImageUsedInActiveDeployment(imageID)
if err != nil {
return errors.Wrap(err, "Checking if image is used in active deployment")
}
// Image is in use, not allowed to delete
if inUse {
return controller.ErrModelImageInActiveDeployment
}
// Delete image file (call to external service)
// Noop for not existing file
if err := i.fileStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image file")
}
// Delete metadata
if err := i.imagesStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image metadata")
}
return nil
}
// ListImages lists images according to the specified filters.
func (i *ImagesModel) ListImages(filters map[string]string) ([]*images.SoftwareImage, error) {
imageList, err := i.imagesStorage.FindAll()
if err != nil {
return nil, errors.Wrap(err, "Searching for image metadata")
}
if imageList == nil {
return make([]*images.SoftwareImage, 0), nil
}
return imageList, nil
}
// EditImage allows editing only if the image has not yet been used in any deployment.
func (i *ImagesModel) EditImage(imageID string, constructor *images.SoftwareImageMetaConstructor) (bool, error) {
if err := constructor.Validate(); err != nil {
return false, errors.Wrap(err, "Validating image metadata")
}
found, err := i.deployments.ImageUsedInDeployment(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for usage of the image among deployments")
}
if found {
return false, controller.ErrModelImageUsedInAnyDeployment
}
foundImage, err := i.imagesStorage.FindByID(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for image with specified ID")
}
if foundImage == nil {
return false, nil
}
foundImage.SetModified(time.Now())
_, err = i.imagesStorage.Update(foundImage)
if err != nil {
return false, errors.Wrap(err, "Updating image matadata")
}
return true, nil
}
// DownloadLink returns a presigned GET link to download the image file.
// Returns an error if the image has not been uploaded.
func (i *ImagesModel) DownloadLink(imageID string, expire time.Duration) (*images.Link, error) {
found, err := i.imagesStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if !found {
return nil, nil
}
found, err = i.fileStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image file")
}
if !found {
return nil, nil
}
link, err := i.fileStorage.GetRequest(imageID, expire, ImageContentType)
if err != nil {
return nil, errors.Wrap(err, "Generating download link")
}
return link, nil
}
func getArtifactInfo(info metadata.Info) *images.ArtifactInfo {
return &images.ArtifactInfo{
Format: info.Format,
Version: uint(info.Version),
}
}
func getUpdateFiles(maxImageSize int64, uFiles map[string]parser.UpdateFile) ([]images.UpdateFile, error) {
var files []images.UpdateFile
for _, u := range uFiles {
if u.Size > maxImageSize {
return nil, errors.New("Image too large")
}
files = append(files, images.UpdateFile{
Name: u.Name,
Size: u.Size,
Signature: string(u.Signature),
Date: &u.Date,
Checksum: string(u.Checksum),
})
}
return files, nil
}
func getMetaFromArchive(
r *io.Reader, maxImageSize int64) (*images.SoftwareImageMetaArtifactConstructor, error) {
metaArtifact := images.NewSoftwareImageMetaArtifactConstructor()
aReader := areader.NewReader(*r)
defer aReader.Close()
data, err := aReader.Read()
if err != nil {
return nil, errors.Wrap(err, "reading artifact error")
}
metaArtifact.Info = getArtifactInfo(aReader.GetInfo())
metaArtifact.DeviceTypesCompatible = aReader.GetCompatibleDevices()
metaArtifact.ArtifactName = aReader.GetArtifactName()
for _, p := range data {
uFiles, err := getUpdateFiles(maxImageSize, p.GetUpdateFiles())
if err != nil {
return nil, errors.Wrap(err, "Cannot get update files:")
}
metaArtifact.Updates = append(
metaArtifact.Updates,
images.Update{
TypeInfo: images.ArtifactUpdateTypeInfo{
Type: p.GetUpdateType().Type,
},
MetaData: p.GetMetadata(),
Files: uFiles,
})
}
return metaArtifact, nil
}
| {
image, err := i.imagesStorage.FindByID(id)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if image == nil {
return nil, nil
}
return image, nil
} | identifier_body |
images.go | // Copyright 2016 Mender Software AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"io"
"io/ioutil"
"time"
"github.com/mendersoftware/deployments/resources/images"
"github.com/mendersoftware/deployments/resources/images/controller"
"github.com/mendersoftware/mender-artifact/metadata"
"github.com/mendersoftware/mender-artifact/parser"
"github.com/mendersoftware/mender-artifact/reader"
"github.com/pkg/errors"
"github.com/satori/go.uuid"
)
const (
ImageContentType = "application/vnd.mender-artifact"
)
type ImagesModel struct {
fileStorage FileStorage
deployments ImageUsedIn
imagesStorage SoftwareImagesStorage
}
func NewImagesModel(
fileStorage FileStorage,
checker ImageUsedIn,
imagesStorage SoftwareImagesStorage,
) *ImagesModel {
return &ImagesModel{
fileStorage: fileStorage,
deployments: checker,
imagesStorage: imagesStorage,
}
}
// CreateImage parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID and nil on success.
func (i *ImagesModel) CreateImage(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
if metaConstructor == nil {
return "", controller.ErrModelMissingInputMetadata
}
if imageReader == nil {
return "", controller.ErrModelMissingInputArtifact
}
artifactID, err := i.handleArtifact(metaConstructor, imageReader)
// try to remove artifact file from file storage on error
if err != nil {
if cleanupErr := i.fileStorage.Delete(artifactID); cleanupErr != nil {
return "", errors.Wrap(err, cleanupErr.Error())
}
}
return artifactID, err
}
// handleArtifact parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID, artifact file ID and nil on success.
func (i *ImagesModel) handleArtifact(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
// limit just for safety
// max image size - 10G
const MaxImageSize = 1024 * 1024 * 1024 * 10
// create pipe
pR, pW := io.Pipe()
// limit reader to max image size
lr := io.LimitReader(imageReader, MaxImageSize)
tee := io.TeeReader(lr, pW)
artifactID := uuid.NewV4().String()
ch := make(chan error)
// create goroutine for artifact upload
//
// reading from the pipe (which is done in UploadArtifact method) is a blocking operation
// and cannot be done in the same goroutine as writing to the pipe
//
// uploading and parsing the artifact in the same goroutine would cause a deadlock!
go func() {
err := i.fileStorage.UploadArtifact(artifactID, pR, ImageContentType)
if err != nil {
pR.CloseWithError(err)
}
ch <- err
}()
// parse artifact
// artifact library reads all the data from the given reader
metaArtifactConstructor, err := getMetaFromArchive(&tee, MaxImageSize)
if err != nil {
pW.Close()
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", controller.ErrModelArtifactUploadFailed
}
return "", controller.ErrModelInvalidMetadata
}
// read the rest of the data,
// just in case the artifact library did not read all the data from the reader
_, err = io.Copy(ioutil.Discard, tee)
if err != nil {
pW.Close()
_ = <-ch
return "", err
}
// close the pipe
pW.Close()
// collect output from the goroutine
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", uploadResponseErr
}
// validate artifact metadata
if err = metaArtifactConstructor.Validate(); err != nil {
return "", controller.ErrModelInvalidMetadata
}
// check if artifact is unique
// artifact is considered to be unique if there is no artifact with the same name
// and supporting the same platform in the system
isArtifactUnique, err := i.imagesStorage.IsArtifactUnique(
metaArtifactConstructor.ArtifactName, metaArtifactConstructor.DeviceTypesCompatible)
if err != nil {
return "", errors.Wrap(err, "Fail to check if artifact is unique")
}
if !isArtifactUnique {
return "", controller.ErrModelArtifactNotUnique
}
image := images.NewSoftwareImage(artifactID, metaConstructor, metaArtifactConstructor)
// save image structure in the system
if err = i.imagesStorage.Insert(image); err != nil {
return "", errors.Wrap(err, "Fail to store the metadata")
}
return artifactID, nil
}
// GetImage allows fetching the image object with the specified id
// Nil if not found
func (i *ImagesModel) GetImage(id string) (*images.SoftwareImage, error) {
image, err := i.imagesStorage.FindByID(id)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if image == nil {
return nil, nil
}
return image, nil
}
// DeleteImage removes the image metadata and the image file.
// Noop for non-existing images.
// Removing an image is only allowed if it is not scheduled or in progress for an update - in that case the image file is still needed.
// For already finished updates only the image file is no longer needed; the metadata is attached directly to the device deployment,
// so we still keep some information about images that have been used (but not the file).
func (i *ImagesModel) DeleteImage(imageID string) error {
found, err := i.GetImage(imageID)
if err != nil {
return errors.Wrap(err, "Getting image metadata")
}
if found == nil {
return controller.ErrImageMetaNotFound
}
inUse, err := i.deployments.ImageUsedInActiveDeployment(imageID)
if err != nil {
return errors.Wrap(err, "Checking if image is used in active deployment")
}
// Image is in use, not allowed to delete
if inUse {
return controller.ErrModelImageInActiveDeployment
}
// Delete image file (call to external service)
// Noop for not existing file
if err := i.fileStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image file")
}
// Delete metadata
if err := i.imagesStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image metadata")
}
return nil
}
// ListImages lists images according to the specified filters.
func (i *ImagesModel) ListImages(filters map[string]string) ([]*images.SoftwareImage, error) {
imageList, err := i.imagesStorage.FindAll()
if err != nil {
return nil, errors.Wrap(err, "Searching for image metadata")
}
if imageList == nil {
return make([]*images.SoftwareImage, 0), nil
}
return imageList, nil
}
// EditImage allows editing only if the image has not yet been used in any deployment.
func (i *ImagesModel) EditImage(imageID string, constructor *images.SoftwareImageMetaConstructor) (bool, error) {
if err := constructor.Validate(); err != nil {
return false, errors.Wrap(err, "Validating image metadata")
}
found, err := i.deployments.ImageUsedInDeployment(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for usage of the image among deployments")
}
if found {
return false, controller.ErrModelImageUsedInAnyDeployment
}
foundImage, err := i.imagesStorage.FindByID(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for image with specified ID")
}
if foundImage == nil {
return false, nil
}
foundImage.SetModified(time.Now())
_, err = i.imagesStorage.Update(foundImage)
if err != nil {
return false, errors.Wrap(err, "Updating image matadata")
}
return true, nil
}
// DownloadLink returns a presigned GET link to download the image file.
// Returns an error if the image has not been uploaded.
func (i *ImagesModel) DownloadLink(imageID string, expire time.Duration) (*images.Link, error) {
found, err := i.imagesStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if !found {
return nil, nil
}
found, err = i.fileStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image file")
}
if !found {
return nil, nil
}
link, err := i.fileStorage.GetRequest(imageID, expire, ImageContentType)
if err != nil {
return nil, errors.Wrap(err, "Generating download link")
}
return link, nil
}
func getArtifactInfo(info metadata.Info) *images.ArtifactInfo {
return &images.ArtifactInfo{
Format: info.Format,
Version: uint(info.Version),
}
}
func getUpdateFiles(maxImageSize int64, uFiles map[string]parser.UpdateFile) ([]images.UpdateFile, error) {
var files []images.UpdateFile
for _, u := range uFiles {
if u.Size > maxImageSize {
return nil, errors.New("Image too large")
}
files = append(files, images.UpdateFile{
Name: u.Name,
Size: u.Size,
Signature: string(u.Signature),
Date: &u.Date,
Checksum: string(u.Checksum),
})
}
return files, nil
}
func getMetaFromArchive(
r *io.Reader, maxImageSize int64) (*images.SoftwareImageMetaArtifactConstructor, error) {
metaArtifact := images.NewSoftwareImageMetaArtifactConstructor()
aReader := areader.NewReader(*r)
defer aReader.Close()
data, err := aReader.Read()
if err != nil {
return nil, errors.Wrap(err, "reading artifact error")
}
metaArtifact.Info = getArtifactInfo(aReader.GetInfo())
metaArtifact.DeviceTypesCompatible = aReader.GetCompatibleDevices()
metaArtifact.ArtifactName = aReader.GetArtifactName()
for _, p := range data {
uFiles, err := getUpdateFiles(maxImageSize, p.GetUpdateFiles())
if err != nil {
return nil, errors.Wrap(err, "Cannot get update files:")
}
metaArtifact.Updates = append(
metaArtifact.Updates,
images.Update{
TypeInfo: images.ArtifactUpdateTypeInfo{
Type: p.GetUpdateType().Type,
},
MetaData: p.GetMetadata(),
Files: uFiles,
})
}
| return metaArtifact, nil
} | random_line_split |
|
images.go | // Copyright 2016 Mender Software AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"io"
"io/ioutil"
"time"
"github.com/mendersoftware/deployments/resources/images"
"github.com/mendersoftware/deployments/resources/images/controller"
"github.com/mendersoftware/mender-artifact/metadata"
"github.com/mendersoftware/mender-artifact/parser"
"github.com/mendersoftware/mender-artifact/reader"
"github.com/pkg/errors"
"github.com/satori/go.uuid"
)
const (
ImageContentType = "application/vnd.mender-artifact"
)
type ImagesModel struct {
fileStorage FileStorage
deployments ImageUsedIn
imagesStorage SoftwareImagesStorage
}
func NewImagesModel(
fileStorage FileStorage,
checker ImageUsedIn,
imagesStorage SoftwareImagesStorage,
) *ImagesModel {
return &ImagesModel{
fileStorage: fileStorage,
deployments: checker,
imagesStorage: imagesStorage,
}
}
// CreateImage parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID and nil on success.
func (i *ImagesModel) CreateImage(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
if metaConstructor == nil {
return "", controller.ErrModelMissingInputMetadata
}
if imageReader == nil {
return "", controller.ErrModelMissingInputArtifact
}
artifactID, err := i.handleArtifact(metaConstructor, imageReader)
// try to remove artifact file from file storage on error
if err != nil {
if cleanupErr := i.fileStorage.Delete(artifactID); cleanupErr != nil {
return "", errors.Wrap(err, cleanupErr.Error())
}
}
return artifactID, err
}
// handleArtifact parses artifact and uploads artifact file to the file storage - in parallel,
// and creates image structure in the system.
// Returns image ID, artifact file ID and nil on success.
func (i *ImagesModel) handleArtifact(
metaConstructor *images.SoftwareImageMetaConstructor,
imageReader io.Reader) (string, error) {
// limit just for safety
// max image size - 10G
const MaxImageSize = 1024 * 1024 * 1024 * 10
// create pipe
pR, pW := io.Pipe()
// limit reader to max image size
lr := io.LimitReader(imageReader, MaxImageSize)
tee := io.TeeReader(lr, pW)
artifactID := uuid.NewV4().String()
ch := make(chan error)
// create goroutine for artifact upload
//
// reading from the pipe (which is done in UploadArtifact method) is a blocking operation
// and cannot be done in the same goroutine as writing to the pipe
//
// uploading and parsing the artifact in the same goroutine would cause a deadlock!
go func() {
err := i.fileStorage.UploadArtifact(artifactID, pR, ImageContentType)
if err != nil {
pR.CloseWithError(err)
}
ch <- err
}()
// parse artifact
// artifact library reads all the data from the given reader
metaArtifactConstructor, err := getMetaFromArchive(&tee, MaxImageSize)
if err != nil {
pW.Close()
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", controller.ErrModelArtifactUploadFailed
}
return "", controller.ErrModelInvalidMetadata
}
// read the rest of the data,
// just in case the artifact library did not read all the data from the reader
_, err = io.Copy(ioutil.Discard, tee)
if err != nil {
pW.Close()
_ = <-ch
return "", err
}
// close the pipe
pW.Close()
// collect output from the goroutine
if uploadResponseErr := <-ch; uploadResponseErr != nil {
return "", uploadResponseErr
}
// validate artifact metadata
if err = metaArtifactConstructor.Validate(); err != nil {
return "", controller.ErrModelInvalidMetadata
}
// check if artifact is unique
// artifact is considered to be unique if there is no artifact with the same name
// and supporting the same platform in the system
isArtifactUnique, err := i.imagesStorage.IsArtifactUnique(
metaArtifactConstructor.ArtifactName, metaArtifactConstructor.DeviceTypesCompatible)
if err != nil {
return "", errors.Wrap(err, "Fail to check if artifact is unique")
}
if !isArtifactUnique {
return "", controller.ErrModelArtifactNotUnique
}
image := images.NewSoftwareImage(artifactID, metaConstructor, metaArtifactConstructor)
// save image structure in the system
if err = i.imagesStorage.Insert(image); err != nil {
return "", errors.Wrap(err, "Fail to store the metadata")
}
return artifactID, nil
}
// GetImage allows fetching the image object with the specified id
// Nil if not found
func (i *ImagesModel) GetImage(id string) (*images.SoftwareImage, error) {
image, err := i.imagesStorage.FindByID(id)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if image == nil {
return nil, nil
}
return image, nil
}
// DeleteImage removes the image metadata and the image file.
// Noop for non-existing images.
// Removing an image is only allowed if it is not scheduled or in progress for an update - in that case the image file is still needed.
// For already finished updates only the image file is no longer needed; the metadata is attached directly to the device deployment,
// so we still keep some information about images that have been used (but not the file).
func (i *ImagesModel) DeleteImage(imageID string) error {
found, err := i.GetImage(imageID)
if err != nil {
return errors.Wrap(err, "Getting image metadata")
}
if found == nil {
return controller.ErrImageMetaNotFound
}
inUse, err := i.deployments.ImageUsedInActiveDeployment(imageID)
if err != nil {
return errors.Wrap(err, "Checking if image is used in active deployment")
}
// Image is in use, not allowed to delete
if inUse {
return controller.ErrModelImageInActiveDeployment
}
// Delete image file (call to external service)
// Noop for not existing file
if err := i.fileStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image file")
}
// Delete metadata
if err := i.imagesStorage.Delete(imageID); err != nil {
return errors.Wrap(err, "Deleting image metadata")
}
return nil
}
// ListImages lists images according to the specified filters.
func (i *ImagesModel) ListImages(filters map[string]string) ([]*images.SoftwareImage, error) {
imageList, err := i.imagesStorage.FindAll()
if err != nil {
return nil, errors.Wrap(err, "Searching for image metadata")
}
if imageList == nil {
return make([]*images.SoftwareImage, 0), nil
}
return imageList, nil
}
// EditImage allows editing only if the image has not yet been used in any deployment.
func (i *ImagesModel) EditImage(imageID string, constructor *images.SoftwareImageMetaConstructor) (bool, error) {
if err := constructor.Validate(); err != nil {
return false, errors.Wrap(err, "Validating image metadata")
}
found, err := i.deployments.ImageUsedInDeployment(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for usage of the image among deployments")
}
if found {
return false, controller.ErrModelImageUsedInAnyDeployment
}
foundImage, err := i.imagesStorage.FindByID(imageID)
if err != nil {
return false, errors.Wrap(err, "Searching for image with specified ID")
}
if foundImage == nil {
return false, nil
}
foundImage.SetModified(time.Now())
_, err = i.imagesStorage.Update(foundImage)
if err != nil {
return false, errors.Wrap(err, "Updating image matadata")
}
return true, nil
}
// DownloadLink returns a presigned GET link to download the image file.
// Returns an error if the image has not been uploaded.
func (i *ImagesModel) | (imageID string, expire time.Duration) (*images.Link, error) {
found, err := i.imagesStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image with specified ID")
}
if !found {
return nil, nil
}
found, err = i.fileStorage.Exists(imageID)
if err != nil {
return nil, errors.Wrap(err, "Searching for image file")
}
if !found {
return nil, nil
}
link, err := i.fileStorage.GetRequest(imageID, expire, ImageContentType)
if err != nil {
return nil, errors.Wrap(err, "Generating download link")
}
return link, nil
}
func getArtifactInfo(info metadata.Info) *images.ArtifactInfo {
return &images.ArtifactInfo{
Format: info.Format,
Version: uint(info.Version),
}
}
func getUpdateFiles(maxImageSize int64, uFiles map[string]parser.UpdateFile) ([]images.UpdateFile, error) {
var files []images.UpdateFile
for _, u := range uFiles {
if u.Size > maxImageSize {
return nil, errors.New("Image too large")
}
files = append(files, images.UpdateFile{
Name: u.Name,
Size: u.Size,
Signature: string(u.Signature),
Date: &u.Date,
Checksum: string(u.Checksum),
})
}
return files, nil
}
func getMetaFromArchive(
r *io.Reader, maxImageSize int64) (*images.SoftwareImageMetaArtifactConstructor, error) {
metaArtifact := images.NewSoftwareImageMetaArtifactConstructor()
aReader := areader.NewReader(*r)
defer aReader.Close()
data, err := aReader.Read()
if err != nil {
return nil, errors.Wrap(err, "reading artifact error")
}
metaArtifact.Info = getArtifactInfo(aReader.GetInfo())
metaArtifact.DeviceTypesCompatible = aReader.GetCompatibleDevices()
metaArtifact.ArtifactName = aReader.GetArtifactName()
for _, p := range data {
uFiles, err := getUpdateFiles(maxImageSize, p.GetUpdateFiles())
if err != nil {
return nil, errors.Wrap(err, "Cannot get update files:")
}
metaArtifact.Updates = append(
metaArtifact.Updates,
images.Update{
TypeInfo: images.ArtifactUpdateTypeInfo{
Type: p.GetUpdateType().Type,
},
MetaData: p.GetMetadata(),
Files: uFiles,
})
}
return metaArtifact, nil
}
| DownloadLink | identifier_name |
file.go | // Copyright 2013 Google Inc. All Rights Reserved.
// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code originated in the github.com/golang/glog package.
// File I/O for logs.
package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// LogFileMaxSize is the maximum size of a log file in bytes.
var LogFileMaxSize int64 = 10 << 20 // 10MiB
// LogFilesCombinedMaxSize is the maximum total size in bytes for log
// files. Note that this is only checked when log files are created,
// so the total size of log files per severity might temporarily be up
// to LogFileMaxSize larger.
var LogFilesCombinedMaxSize = LogFileMaxSize * 10 // 100MiB
// DirName overrides (if non-empty) the choice of directory in
// which to write logs. See createLogDirs for the full list of
// possible destinations. Note that the default is to log to stderr
// independent of this setting. See --logtostderr.
type DirName struct {
syncutil.Mutex
name string
}
var _ flag.Value = &DirName{}
// Set implements the flag.Value interface.
func (l *DirName) Set(dir string) error {
if len(dir) > 0 && dir[0] == '~' {
return fmt.Errorf("log directory cannot start with '~': %s", dir)
}
if len(dir) > 0 {
absDir, err := filepath.Abs(dir)
if err != nil {
return err
}
dir = absDir
}
l.Lock()
defer l.Unlock()
l.name = dir
return nil
}
// Type implements the flag.Value interface.
func (l *DirName) Type() string {
return "string"
}
// String implements the flag.Value interface.
func (l *DirName) String() string {
l.Lock()
defer l.Unlock()
return l.name
}
func (l *DirName) get() (string, error) {
l.Lock()
defer l.Unlock()
if len(l.name) == 0 {
return "", errDirectoryNotSet
}
return l.name, nil
}
// IsSet returns true iff the directory name is set.
func (l *DirName) IsSet() bool {
l.Lock()
res := l.name != ""
l.Unlock()
return res
}
// DirSet returns true if the log directory has been changed from its default.
func DirSet() bool { return logging.logDir.IsSet() }
// logFileRE matches log files to avoid exposing non-log files accidentally
// and it splits the details of the filename into groups for easy parsing.
// The log file format is {process}.{host}.{username}.{timestamp}.{pid}.log
// cockroach.Brams-MacBook-Pro.bram.2015-06-09T16-10-48Z.30209.log
// All underscores in process, host and username are escaped to double
// underscores and all periods are escaped to an underscore.
// For compatibility with Windows filenames, all colons in the timestamp
// (RFC3339) are converted to underscores.
var logFileRE = regexp.MustCompile(`^(?:.*/)?([^/.]+)\.([^/\.]+)\.([^/\.]+)\.([^/\.]+)\.(\d+)\.log$`)
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// removePeriods removes all extraneous periods. This is required to ensure that
// the only periods in the filename are the ones added by logName so it can
// be easily parsed.
func removePeriods(s string) string {
return strings.Replace(s, ".", "", -1)
}
// logName returns a new log file name with start time t, and the name
// for the symlink.
func logName(prefix string, t time.Time) (name, link string) {
// Replace the ':'s in the time format with '_'s to allow for log files in
// Windows.
tFormatted := strings.Replace(t.Format(time.RFC3339), ":", "_", -1)
name = fmt.Sprintf("%s.%s.%s.%s.%06d.log",
removePeriods(prefix),
removePeriods(host),
removePeriods(userName),
tFormatted,
pid)
return name, removePeriods(prefix) + ".log"
}
var errMalformedName = errors.New("malformed log filename")
func parseLogFilename(filename string) (FileDetails, error) {
matches := logFileRE.FindStringSubmatch(filename)
if matches == nil || len(matches) != 6 {
return FileDetails{}, errMalformedName
}
// Replace the '_'s with ':'s to restore the correct time format.
fixTime := strings.Replace(matches[4], "_", ":", -1)
time, err := time.Parse(time.RFC3339, fixTime)
if err != nil {
return FileDetails{}, err
}
pid, err := strconv.ParseInt(matches[5], 10, 0)
if err != nil {
return FileDetails{}, err
}
return FileDetails{
Program: matches[1],
Host: matches[2],
UserName: matches[3],
Time: time.UnixNano(),
PID: pid,
}, nil
}
var errDirectoryNotSet = errors.New("log: log directory not set")
// create creates a new log file and returns the file and its
// filename. If the file is created successfully, create also attempts
// to update the symlink for that tag, ignoring errors.
func create(
logDir *DirName, prefix string, t time.Time, lastRotation int64,
) (f *os.File, updatedRotation int64, filename string, err error) {
dir, err := logDir.get()
if err != nil {
return nil, lastRotation, "", err
}
// Ensure that the timestamp of the new file name is greater than
// the timestamp of the previously generated file name.
unix := t.Unix()
if unix <= lastRotation {
unix = lastRotation + 1
}
updatedRotation = unix
t = timeutil.Unix(unix, 0)
// Generate the file name.
name, link := logName(prefix, t)
fname := filepath.Join(dir, name)
// Open the file os.O_APPEND|os.O_CREATE rather than use os.Create.
// Append is almost always more efficient than O_RDWR on most modern file systems.
f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664)
if err == nil {
symlink := filepath.Join(dir, link)
// Symlinks are best-effort.
if err := os.Remove(symlink); err != nil && !os.IsNotExist(err) {
fmt.Fprintf(OrigStderr, "log: failed to remove symlink %s: %s", symlink, err)
}
if err := os.Symlink(filepath.Base(fname), symlink); err != nil {
// On Windows, this will be the common case, as symlink creation
// requires special privileges.
// See: https://docs.microsoft.com/en-us/windows/device-security/security-policy-settings/create-symbolic-links
if runtime.GOOS != "windows" {
fmt.Fprintf(OrigStderr, "log: failed to create symlink %s: %s", symlink, err)
}
}
}
return f, updatedRotation, fname, errors.Wrapf(err, "log: cannot create log")
}
// ListLogFiles returns a slice of FileInfo structs for each log file
// on the local node, in any of the configured log directories.
func ListLogFiles() ([]FileInfo, error) {
return logging.listLogFiles()
}
func (l *loggingT) listLogFiles() ([]FileInfo, error) {
var results []FileInfo
dir, err := logging.logDir.get()
if err != nil {
// No log directory configured: simply indicate that there are no
// log files.
return nil, nil
}
infos, err := ioutil.ReadDir(dir)
if err != nil {
return results, err
}
// The file names have a fixed structure with fields delimited by
// periods. create() for new files removes the periods from the
// provided prefix; do the same here to filter out selected names
// below.
programPrefix := removePeriods(l.prefix)
for _, info := range infos {
if info.Mode().IsRegular() {
details, err := parseLogFilename(info.Name())
if err == nil && details.Program == programPrefix {
results = append(results, FileInfo{
Name: info.Name(),
SizeBytes: info.Size(),
ModTimeNanos: info.ModTime().UnixNano(),
Details: details,
})
}
}
}
return results, nil
}
// GetLogReader returns a reader for the specified filename. In
// restricted mode, the filename must be the base name of a file in
// this process's log directory (this is safe for cases when the
// filename comes from external sources, such as the admin UI via
// HTTP). In unrestricted mode any path is allowed, relative to the
// current directory, with the added feature that simple (base name)
// file names will be searched in this process's log directory if not
// found in the current directory.
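// An illustrative call in restricted mode (the file name below is hypothetical):
//   r, err := GetLogReader("cockroach.myhost.me.2017-01-02T03_04_05Z.001234.log", true /* restricted */)
//   if err != nil { /* handle error */ }
//   defer r.Close()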
func GetLogReader(filename string, restricted bool) (io.ReadCloser, error) {
dir, err := logging.logDir.get()
if err != nil {
return nil, err
}
switch restricted {
case true:
// Verify there are no path separators in a restricted-mode pathname.
if filepath.Base(filename) != filename {
return nil, errors.Errorf("pathnames must be basenames only: %s", filename)
}
filename = filepath.Join(dir, filename)
// Symlinks are not followed in restricted mode.
info, err := os.Lstat(filename)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s in the log directory", filename)
}
return nil, errors.Wrapf(err, "Lstat: %s", filename)
}
mode := info.Mode()
if mode&os.ModeSymlink != 0 {
return nil, errors.Errorf("symlinks are not allowed")
}
if !mode.IsRegular() {
return nil, errors.Errorf("not a regular file")
}
case false:
info, err := osStat(filename)
if err != nil {
if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
// The absolute filename didn't work, so try within the log
// directory if the filename isn't a path.
if filepath.IsAbs(filename) {
return nil, errors.Errorf("no such file %s", filename)
}
filenameAttempt := filepath.Join(dir, filename)
info, err = osStat(filenameAttempt)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s either in current directory or in %s", filename, dir)
}
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
filename = filenameAttempt
}
filename, err = filepath.EvalSymlinks(filename)
if err != nil {
return nil, err
}
if !info.Mode().IsRegular() {
return nil, errors.Errorf("not a regular file")
}
}
// Check that the file name is valid.
if _, err := parseLogFilename(filepath.Base(filename)); err != nil {
return nil, err
}
return os.Open(filename)
}
// TODO(bram): remove when Go1.9 is required.
//
// See https://github.com/golang/go/issues/19870.
func osStat(path string) (os.FileInfo, error) {
path, err := filepath.EvalSymlinks(path)
if err != nil {
return nil, err
}
return os.Lstat(path)
}
// sortableFileInfoSlice is required so we can sort FileInfos.
type sortableFileInfoSlice []FileInfo
func (a sortableFileInfoSlice) Len() int { return len(a) }
func (a sortableFileInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sortableFileInfoSlice) Less(i, j int) bool {
return a[i].Details.Time < a[j].Details.Time
}
// selectFiles selects all log files that have a timestamp before the
// endTime. It then sorts them in decreasing order, with the most
// recent as the first one.
func selectFiles(logFiles []FileInfo, endTimestamp int64) []FileInfo {
files := sortableFileInfoSlice{}
for _, logFile := range logFiles {
if logFile.Details.Time <= endTimestamp {
files = append(files, logFile)
}
}
// Sort the files in reverse order so we will fetch the newest first.
sort.Sort(sort.Reverse(files))
return files
}
// FetchEntriesFromFiles fetches all available log entries on disk
// that are between the 'startTimestamp' and 'endTimestamp'. It will
// stop reading new files if the number of entries exceeds
// 'maxEntries'. Log entries are further filtered by the regexp
// 'pattern' if provided. The log entries are returned in reverse
// chronological order.
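// An illustrative call; start and end are int64 timestamps compared against
// Entry.Time, and pattern may be nil to disable message filtering:
//   entries, err := FetchEntriesFromFiles(start, end, 1000, nil)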
func FetchEntriesFromFiles(
startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, error) {
logFiles, err := ListLogFiles()
if err != nil {
return nil, err
}
selectedFiles := selectFiles(logFiles, endTimestamp)
entries := []Entry{}
for _, file := range selectedFiles {
newEntries, entryBeforeStart, err := readAllEntriesFromFile(
file,
startTimestamp,
endTimestamp,
maxEntries-len(entries),
pattern)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
if len(entries) >= maxEntries {
break
}
if entryBeforeStart {
// Stop processing files that won't have any timestamps after
// startTime.
break
}
}
return entries, nil
}
// readAllEntriesFromFile reads in all log entries from a given file that are
// between the 'startTimestamp' and 'endTimestamp' and match the 'pattern' if it
// exists. It returns the entries in the reverse chronological order. It also
// returns a flag that denotes if any timestamp occurred before the
// 'startTimestamp' to inform the caller that no more log files need to be
// processed. If the number of entries returned exceeds 'maxEntries' then
// processing of new entries is stopped immediately.
func readAllEntriesFromFile(
file FileInfo, startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, bool, error) {
reader, err := GetLogReader(file.Name, true /* restricted */)
if reader == nil || err != nil {
return nil, false, err
}
defer reader.Close()
entries := []Entry{}
decoder := NewEntryDecoder(reader)
entryBeforeStart := false
for {
entry := Entry{}
if err := decoder.Decode(&entry); err != nil {
if err == io.EOF {
break
}
return nil, false, err
}
var match bool
if pattern == nil | else {
match = pattern.MatchString(entry.Message) ||
pattern.MatchString(entry.File)
}
if match && entry.Time >= startTimestamp && entry.Time <= endTimestamp {
entries = append([]Entry{entry}, entries...)
if len(entries) >= maxEntries {
break
}
}
if entry.Time < startTimestamp {
entryBeforeStart = true
}
}
return entries, entryBeforeStart, nil
}
| {
match = true
} | conditional_block |
file.go | // Copyright 2013 Google Inc. All Rights Reserved.
// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code originated in the github.com/golang/glog package.
// File I/O for logs.
package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// LogFileMaxSize is the maximum size of a log file in bytes.
var LogFileMaxSize int64 = 10 << 20 // 10MiB
// LogFilesCombinedMaxSize is the maximum total size in bytes for log
// files. Note that this is only checked when log files are created,
// so the total size of log files per severity might temporarily be up
// to LogFileMaxSize larger.
var LogFilesCombinedMaxSize = LogFileMaxSize * 10 // 100MiB
// DirName overrides (if non-empty) the choice of directory in
// which to write logs. See createLogDirs for the full list of
// possible destinations. Note that the default is to log to stderr
// independent of this setting. See --logtostderr.
type DirName struct {
syncutil.Mutex
name string
}
var _ flag.Value = &DirName{}
// Set implements the flag.Value interface.
func (l *DirName) Set(dir string) error {
if len(dir) > 0 && dir[0] == '~' {
return fmt.Errorf("log directory cannot start with '~': %s", dir)
}
if len(dir) > 0 {
absDir, err := filepath.Abs(dir)
if err != nil {
return err
}
dir = absDir
}
l.Lock()
defer l.Unlock()
l.name = dir
return nil
}
// Type implements the flag.Value interface.
func (l *DirName) Type() string {
return "string"
}
// String implements the flag.Value interface.
func (l *DirName) String() string {
l.Lock()
defer l.Unlock()
return l.name
}
func (l *DirName) get() (string, error) {
l.Lock()
defer l.Unlock()
if len(l.name) == 0 {
return "", errDirectoryNotSet
}
return l.name, nil
}
// IsSet returns true iff the directory name is set.
func (l *DirName) IsSet() bool {
l.Lock()
res := l.name != ""
l.Unlock()
return res
}
// DirSet returns true if the log directory has been changed from its default.
func DirSet() bool { return logging.logDir.IsSet() }
// logFileRE matches log files to avoid exposing non-log files accidentally
// and it splits the details of the filename into groups for easy parsing.
// The log file format is {process}.{host}.{username}.{timestamp}.{pid}.log
// cockroach.Brams-MacBook-Pro.bram.2015-06-09T16-10-48Z.30209.log
// All underscores in process, host and username are escaped to double
// underscores and all periods are escaped to an underscore.
// For compatibility with Windows filenames, all colons in the timestamp
// (RFC3339) are converted to underscores.
var logFileRE = regexp.MustCompile(`^(?:.*/)?([^/.]+)\.([^/\.]+)\.([^/\.]+)\.([^/\.]+)\.(\d+)\.log$`)
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// removePeriods removes all extraneous periods. This is required to ensure that
// the only periods in the filename are the ones added by logName so it can
// be easily parsed.
func removePeriods(s string) string {
return strings.Replace(s, ".", "", -1)
}
// logName returns a new log file name with start time t, and the name
// for the symlink.
func logName(prefix string, t time.Time) (name, link string) {
// Replace the ':'s in the time format with '_'s to allow for log files in
// Windows.
tFormatted := strings.Replace(t.Format(time.RFC3339), ":", "_", -1)
name = fmt.Sprintf("%s.%s.%s.%s.%06d.log",
removePeriods(prefix),
removePeriods(host),
removePeriods(userName),
tFormatted,
pid)
return name, removePeriods(prefix) + ".log"
}
var errMalformedName = errors.New("malformed log filename")
func parseLogFilename(filename string) (FileDetails, error) {
matches := logFileRE.FindStringSubmatch(filename)
if matches == nil || len(matches) != 6 {
return FileDetails{}, errMalformedName
}
// Replace the '_'s with ':'s to restore the correct time format.
fixTime := strings.Replace(matches[4], "_", ":", -1)
time, err := time.Parse(time.RFC3339, fixTime)
if err != nil {
return FileDetails{}, err
}
pid, err := strconv.ParseInt(matches[5], 10, 0)
if err != nil {
return FileDetails{}, err
}
return FileDetails{
Program: matches[1],
Host: matches[2],
UserName: matches[3],
Time: time.UnixNano(),
PID: pid,
}, nil
}
var errDirectoryNotSet = errors.New("log: log directory not set")
// create creates a new log file and returns the file and its
// filename. If the file is created successfully, create also attempts
// to update the symlink for that tag, ignoring errors.
func create(
logDir *DirName, prefix string, t time.Time, lastRotation int64,
) (f *os.File, updatedRotation int64, filename string, err error) {
dir, err := logDir.get()
if err != nil {
return nil, lastRotation, "", err
}
// Ensure that the timestamp of the new file name is greater than
// the timestamp of the previously generated file name.
unix := t.Unix()
if unix <= lastRotation {
unix = lastRotation + 1
}
updatedRotation = unix
t = timeutil.Unix(unix, 0)
// Generate the file name.
name, link := logName(prefix, t)
fname := filepath.Join(dir, name)
// Open the file os.O_APPEND|os.O_CREATE rather than use os.Create.
// Append is almost always more efficient than O_RDWR on most modern file systems.
f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664)
if err == nil {
symlink := filepath.Join(dir, link)
// Symlinks are best-effort.
if err := os.Remove(symlink); err != nil && !os.IsNotExist(err) {
fmt.Fprintf(OrigStderr, "log: failed to remove symlink %s: %s", symlink, err)
}
if err := os.Symlink(filepath.Base(fname), symlink); err != nil {
// On Windows, this will be the common case, as symlink creation
// requires special privileges.
// See: https://docs.microsoft.com/en-us/windows/device-security/security-policy-settings/create-symbolic-links
if runtime.GOOS != "windows" {
fmt.Fprintf(OrigStderr, "log: failed to create symlink %s: %s", symlink, err)
}
}
}
return f, updatedRotation, fname, errors.Wrapf(err, "log: cannot create log")
}
// ListLogFiles returns a slice of FileInfo structs for each log file
// on the local node, in any of the configured log directories.
func ListLogFiles() ([]FileInfo, error) |
func (l *loggingT) listLogFiles() ([]FileInfo, error) {
var results []FileInfo
dir, err := logging.logDir.get()
if err != nil {
// No log directory configured: simply indicate that there are no
// log files.
return nil, nil
}
infos, err := ioutil.ReadDir(dir)
if err != nil {
return results, err
}
// The file names have a fixed structure with fields delimited by
// periods. create() for new files removes the periods from the
// provided prefix; do the same here to filter out selected names
// below.
programPrefix := removePeriods(l.prefix)
for _, info := range infos {
if info.Mode().IsRegular() {
details, err := parseLogFilename(info.Name())
if err == nil && details.Program == programPrefix {
results = append(results, FileInfo{
Name: info.Name(),
SizeBytes: info.Size(),
ModTimeNanos: info.ModTime().UnixNano(),
Details: details,
})
}
}
}
return results, nil
}
// GetLogReader returns a reader for the specified filename. In
// restricted mode, the filename must be the base name of a file in
// this process's log directory (this is safe for cases when the
// filename comes from external sources, such as the admin UI via
// HTTP). In unrestricted mode any path is allowed, relative to the
// current directory, with the added feature that simple (base name)
// file names will be searched in this process's log directory if not
// found in the current directory.
func GetLogReader(filename string, restricted bool) (io.ReadCloser, error) {
dir, err := logging.logDir.get()
if err != nil {
return nil, err
}
switch restricted {
case true:
// Verify there are no path separators in a restricted-mode pathname.
if filepath.Base(filename) != filename {
return nil, errors.Errorf("pathnames must be basenames only: %s", filename)
}
filename = filepath.Join(dir, filename)
// Symlinks are not followed in restricted mode.
info, err := os.Lstat(filename)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s in the log directory", filename)
}
return nil, errors.Wrapf(err, "Lstat: %s", filename)
}
mode := info.Mode()
if mode&os.ModeSymlink != 0 {
return nil, errors.Errorf("symlinks are not allowed")
}
if !mode.IsRegular() {
return nil, errors.Errorf("not a regular file")
}
case false:
info, err := osStat(filename)
if err != nil {
if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
// The absolute filename didn't work, so try within the log
// directory if the filename isn't a path.
if filepath.IsAbs(filename) {
return nil, errors.Errorf("no such file %s", filename)
}
filenameAttempt := filepath.Join(dir, filename)
info, err = osStat(filenameAttempt)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s either in current directory or in %s", filename, dir)
}
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
filename = filenameAttempt
}
filename, err = filepath.EvalSymlinks(filename)
if err != nil {
return nil, err
}
if !info.Mode().IsRegular() {
return nil, errors.Errorf("not a regular file")
}
}
// Check that the file name is valid.
if _, err := parseLogFilename(filepath.Base(filename)); err != nil {
return nil, err
}
return os.Open(filename)
}
// TODO(bram): remove when Go1.9 is required.
//
// See https://github.com/golang/go/issues/19870.
func osStat(path string) (os.FileInfo, error) {
path, err := filepath.EvalSymlinks(path)
if err != nil {
return nil, err
}
return os.Lstat(path)
}
// sortableFileInfoSlice is required so we can sort FileInfos.
type sortableFileInfoSlice []FileInfo
func (a sortableFileInfoSlice) Len() int { return len(a) }
func (a sortableFileInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sortableFileInfoSlice) Less(i, j int) bool {
return a[i].Details.Time < a[j].Details.Time
}
// selectFiles selects all log files that have a timestamp before the
// endTime. It then sorts them in decreasing order, with the most
// recent as the first one.
func selectFiles(logFiles []FileInfo, endTimestamp int64) []FileInfo {
files := sortableFileInfoSlice{}
for _, logFile := range logFiles {
if logFile.Details.Time <= endTimestamp {
files = append(files, logFile)
}
}
// Sort the files in reverse order so we will fetch the newest first.
sort.Sort(sort.Reverse(files))
return files
}
// FetchEntriesFromFiles fetches all available log entries on disk
// that are between the 'startTimestamp' and 'endTimestamp'. It will
// stop reading new files if the number of entries exceeds
// 'maxEntries'. Log entries are further filtered by the regexp
// 'pattern' if provided. The log entries are returned in reverse
// chronological order.
func FetchEntriesFromFiles(
startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, error) {
logFiles, err := ListLogFiles()
if err != nil {
return nil, err
}
selectedFiles := selectFiles(logFiles, endTimestamp)
entries := []Entry{}
for _, file := range selectedFiles {
newEntries, entryBeforeStart, err := readAllEntriesFromFile(
file,
startTimestamp,
endTimestamp,
maxEntries-len(entries),
pattern)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
if len(entries) >= maxEntries {
break
}
if entryBeforeStart {
// Stop processing files that won't have any timestamps after
// startTime.
break
}
}
return entries, nil
}
// readAllEntriesFromFile reads in all log entries from a given file that are
// between the 'startTimestamp' and 'endTimestamp' and match the 'pattern' if it
// exists. It returns the entries in the reverse chronological order. It also
// returns a flag that denotes if any timestamp occurred before the
// 'startTimestamp' to inform the caller that no more log files need to be
// processed. If the number of entries returned exceeds 'maxEntries' then
// processing of new entries is stopped immediately.
func readAllEntriesFromFile(
file FileInfo, startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, bool, error) {
reader, err := GetLogReader(file.Name, true /* restricted */)
if reader == nil || err != nil {
return nil, false, err
}
defer reader.Close()
entries := []Entry{}
decoder := NewEntryDecoder(reader)
entryBeforeStart := false
for {
entry := Entry{}
if err := decoder.Decode(&entry); err != nil {
if err == io.EOF {
break
}
return nil, false, err
}
var match bool
if pattern == nil {
match = true
} else {
match = pattern.MatchString(entry.Message) ||
pattern.MatchString(entry.File)
}
if match && entry.Time >= startTimestamp && entry.Time <= endTimestamp {
entries = append([]Entry{entry}, entries...)
if len(entries) >= maxEntries {
break
}
}
if entry.Time < startTimestamp {
entryBeforeStart = true
}
}
return entries, entryBeforeStart, nil
}
| {
return logging.listLogFiles()
} | identifier_body |
file.go | // Copyright 2013 Google Inc. All Rights Reserved.
// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code originated in the github.com/golang/glog package.
// File I/O for logs.
package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// LogFileMaxSize is the maximum size of a log file in bytes.
var LogFileMaxSize int64 = 10 << 20 // 10MiB
// LogFilesCombinedMaxSize is the maximum total size in bytes for log
// files. Note that this is only checked when log files are created,
// so the total size of log files per severity might temporarily be up
// to LogFileMaxSize larger.
var LogFilesCombinedMaxSize = LogFileMaxSize * 10 // 100MiB
// DirName overrides (if non-empty) the choice of directory in
// which to write logs. See createLogDirs for the full list of
// possible destinations. Note that the default is to log to stderr
// independent of this setting. See --logtostderr.
type DirName struct {
syncutil.Mutex
name string
}
var _ flag.Value = &DirName{}
// Set implements the flag.Value interface.
func (l *DirName) Set(dir string) error {
if len(dir) > 0 && dir[0] == '~' {
return fmt.Errorf("log directory cannot start with '~': %s", dir)
}
if len(dir) > 0 {
absDir, err := filepath.Abs(dir)
if err != nil {
return err
}
dir = absDir
}
l.Lock()
defer l.Unlock()
l.name = dir
return nil
}
// Type implements the flag.Value interface.
func (l *DirName) Type() string {
return "string"
}
// String implements the flag.Value interface.
func (l *DirName) String() string {
l.Lock()
defer l.Unlock()
return l.name
}
func (l *DirName) get() (string, error) {
l.Lock()
defer l.Unlock()
if len(l.name) == 0 {
return "", errDirectoryNotSet
}
return l.name, nil
}
// IsSet returns true iff the directory name is set.
func (l *DirName) IsSet() bool {
l.Lock()
res := l.name != ""
l.Unlock()
return res
}
// DirSet returns true if the log directory has been changed from its default.
func DirSet() bool { return logging.logDir.IsSet() }
// logFileRE matches log files to avoid exposing non-log files accidentally
// and it splits the details of the filename into groups for easy parsing.
// The log file format is {process}.{host}.{username}.{timestamp}.{pid}.log
// cockroach.Brams-MacBook-Pro.bram.2015-06-09T16-10-48Z.30209.log
// All underscores in process, host and username are escaped to double
// underscores and all periods are escaped to an underscore.
// For compatibility with Windows filenames, all colons in the timestamp
// (RFC3339) are converted to underscores.
var logFileRE = regexp.MustCompile(`^(?:.*/)?([^/.]+)\.([^/\.]+)\.([^/\.]+)\.([^/\.]+)\.(\d+)\.log$`)
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// removePeriods removes all extraneous periods. This is required to ensure that
// the only periods in the filename are the ones added by logName so it can
// be easily parsed.
func removePeriods(s string) string {
return strings.Replace(s, ".", "", -1)
}
// logName returns a new log file name with start time t, and the name
// for the symlink.
func | (prefix string, t time.Time) (name, link string) {
// Replace the ':'s in the time format with '_'s to allow for log files in
// Windows.
tFormatted := strings.Replace(t.Format(time.RFC3339), ":", "_", -1)
name = fmt.Sprintf("%s.%s.%s.%s.%06d.log",
removePeriods(prefix),
removePeriods(host),
removePeriods(userName),
tFormatted,
pid)
return name, removePeriods(prefix) + ".log"
}
var errMalformedName = errors.New("malformed log filename")
func parseLogFilename(filename string) (FileDetails, error) {
matches := logFileRE.FindStringSubmatch(filename)
if matches == nil || len(matches) != 6 {
return FileDetails{}, errMalformedName
}
// Replace the '_'s with ':'s to restore the correct time format.
fixTime := strings.Replace(matches[4], "_", ":", -1)
time, err := time.Parse(time.RFC3339, fixTime)
if err != nil {
return FileDetails{}, err
}
pid, err := strconv.ParseInt(matches[5], 10, 0)
if err != nil {
return FileDetails{}, err
}
return FileDetails{
Program: matches[1],
Host: matches[2],
UserName: matches[3],
Time: time.UnixNano(),
PID: pid,
}, nil
}
var errDirectoryNotSet = errors.New("log: log directory not set")
// create creates a new log file and returns the file and its
// filename. If the file is created successfully, create also attempts
// to update the symlink for that tag, ignoring errors.
func create(
logDir *DirName, prefix string, t time.Time, lastRotation int64,
) (f *os.File, updatedRotation int64, filename string, err error) {
dir, err := logDir.get()
if err != nil {
return nil, lastRotation, "", err
}
// Ensure that the timestamp of the new file name is greater than
// the timestamp of the previously generated file name.
unix := t.Unix()
if unix <= lastRotation {
unix = lastRotation + 1
}
updatedRotation = unix
t = timeutil.Unix(unix, 0)
// Generate the file name.
name, link := logName(prefix, t)
fname := filepath.Join(dir, name)
// Open the file os.O_APPEND|os.O_CREATE rather than use os.Create.
// Append is almost always more efficient than O_RDWR on most modern file systems.
f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664)
if err == nil {
symlink := filepath.Join(dir, link)
// Symlinks are best-effort.
if err := os.Remove(symlink); err != nil && !os.IsNotExist(err) {
fmt.Fprintf(OrigStderr, "log: failed to remove symlink %s: %s", symlink, err)
}
if err := os.Symlink(filepath.Base(fname), symlink); err != nil {
// On Windows, this will be the common case, as symlink creation
// requires special privileges.
// See: https://docs.microsoft.com/en-us/windows/device-security/security-policy-settings/create-symbolic-links
if runtime.GOOS != "windows" {
fmt.Fprintf(OrigStderr, "log: failed to create symlink %s: %s", symlink, err)
}
}
}
return f, updatedRotation, fname, errors.Wrapf(err, "log: cannot create log")
}
// ListLogFiles returns a slice of FileInfo structs for each log file
// on the local node, in any of the configured log directories.
func ListLogFiles() ([]FileInfo, error) {
return logging.listLogFiles()
}
func (l *loggingT) listLogFiles() ([]FileInfo, error) {
var results []FileInfo
dir, err := logging.logDir.get()
if err != nil {
// No log directory configured: simply indicate that there are no
// log files.
return nil, nil
}
infos, err := ioutil.ReadDir(dir)
if err != nil {
return results, err
}
// The file names have a fixed structure with fields delimited by
// periods. create() for new files removes the periods from the
// provided prefix; do the same here to filter out selected names
// below.
programPrefix := removePeriods(l.prefix)
for _, info := range infos {
if info.Mode().IsRegular() {
details, err := parseLogFilename(info.Name())
if err == nil && details.Program == programPrefix {
results = append(results, FileInfo{
Name: info.Name(),
SizeBytes: info.Size(),
ModTimeNanos: info.ModTime().UnixNano(),
Details: details,
})
}
}
}
return results, nil
}
// GetLogReader returns a reader for the specified filename. In
// restricted mode, the filename must be the base name of a file in
// this process's log directory (this is safe for cases when the
// filename comes from external sources, such as the admin UI via
// HTTP). In unrestricted mode any path is allowed, relative to the
// current directory, with the added feature that simple (base name)
// file names will be searched in this process's log directory if not
// found in the current directory.
func GetLogReader(filename string, restricted bool) (io.ReadCloser, error) {
dir, err := logging.logDir.get()
if err != nil {
return nil, err
}
switch restricted {
case true:
// Verify there are no path separators in a restricted-mode pathname.
if filepath.Base(filename) != filename {
return nil, errors.Errorf("pathnames must be basenames only: %s", filename)
}
filename = filepath.Join(dir, filename)
// Symlinks are not followed in restricted mode.
info, err := os.Lstat(filename)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s in the log directory", filename)
}
return nil, errors.Wrapf(err, "Lstat: %s", filename)
}
mode := info.Mode()
if mode&os.ModeSymlink != 0 {
return nil, errors.Errorf("symlinks are not allowed")
}
if !mode.IsRegular() {
return nil, errors.Errorf("not a regular file")
}
case false:
info, err := osStat(filename)
if err != nil {
if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
// The absolute filename didn't work, so try within the log
// directory if the filename isn't a path.
if filepath.IsAbs(filename) {
return nil, errors.Errorf("no such file %s", filename)
}
filenameAttempt := filepath.Join(dir, filename)
info, err = osStat(filenameAttempt)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s either in current directory or in %s", filename, dir)
}
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
filename = filenameAttempt
}
filename, err = filepath.EvalSymlinks(filename)
if err != nil {
return nil, err
}
if !info.Mode().IsRegular() {
return nil, errors.Errorf("not a regular file")
}
}
// Check that the file name is valid.
if _, err := parseLogFilename(filepath.Base(filename)); err != nil {
return nil, err
}
return os.Open(filename)
}
// TODO(bram): remove when Go1.9 is required.
//
// See https://github.com/golang/go/issues/19870.
func osStat(path string) (os.FileInfo, error) {
path, err := filepath.EvalSymlinks(path)
if err != nil {
return nil, err
}
return os.Lstat(path)
}
// sortableFileInfoSlice is required so we can sort FileInfos.
type sortableFileInfoSlice []FileInfo
func (a sortableFileInfoSlice) Len() int { return len(a) }
func (a sortableFileInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sortableFileInfoSlice) Less(i, j int) bool {
return a[i].Details.Time < a[j].Details.Time
}
// selectFiles selects all log files that have a timestamp before the
// endTime. It then sorts them in decreasing order, with the most
// recent as the first one.
func selectFiles(logFiles []FileInfo, endTimestamp int64) []FileInfo {
files := sortableFileInfoSlice{}
for _, logFile := range logFiles {
if logFile.Details.Time <= endTimestamp {
files = append(files, logFile)
}
}
// Sort the files in reverse order so we will fetch the newest first.
sort.Sort(sort.Reverse(files))
return files
}
// FetchEntriesFromFiles fetches all available log entries on disk
// that are between the 'startTimestamp' and 'endTimestamp'. It will
// stop reading new files if the number of entries exceeds
// 'maxEntries'. Log entries are further filtered by the regexp
// 'pattern' if provided. The log entries are returned in reverse
// chronological order.
func FetchEntriesFromFiles(
startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, error) {
logFiles, err := ListLogFiles()
if err != nil {
return nil, err
}
selectedFiles := selectFiles(logFiles, endTimestamp)
entries := []Entry{}
for _, file := range selectedFiles {
newEntries, entryBeforeStart, err := readAllEntriesFromFile(
file,
startTimestamp,
endTimestamp,
maxEntries-len(entries),
pattern)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
if len(entries) >= maxEntries {
break
}
if entryBeforeStart {
// Stop processing files that won't have any timestamps after
// startTime.
break
}
}
return entries, nil
}
// readAllEntriesFromFile reads in all log entries from a given file that are
// between the 'startTimestamp' and 'endTimestamp' and match the 'pattern' if it
// exists. It returns the entries in the reverse chronological order. It also
// returns a flag that denotes if any timestamp occurred before the
// 'startTimestamp' to inform the caller that no more log files need to be
// processed. If the number of entries returned exceeds 'maxEntries' then
// processing of new entries is stopped immediately.
func readAllEntriesFromFile(
file FileInfo, startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, bool, error) {
reader, err := GetLogReader(file.Name, true /* restricted */)
if reader == nil || err != nil {
return nil, false, err
}
defer reader.Close()
entries := []Entry{}
decoder := NewEntryDecoder(reader)
entryBeforeStart := false
for {
entry := Entry{}
if err := decoder.Decode(&entry); err != nil {
if err == io.EOF {
break
}
return nil, false, err
}
var match bool
if pattern == nil {
match = true
} else {
match = pattern.MatchString(entry.Message) ||
pattern.MatchString(entry.File)
}
if match && entry.Time >= startTimestamp && entry.Time <= endTimestamp {
entries = append([]Entry{entry}, entries...)
if len(entries) >= maxEntries {
break
}
}
if entry.Time < startTimestamp {
entryBeforeStart = true
}
}
return entries, entryBeforeStart, nil
}
| logName | identifier_name |
file.go | // Copyright 2013 Google Inc. All Rights Reserved.
// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code originated in the github.com/golang/glog package.
// File I/O for logs.
package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
|
// LogFileMaxSize is the maximum size of a log file in bytes.
var LogFileMaxSize int64 = 10 << 20 // 10MiB
// LogFilesCombinedMaxSize is the maximum total size in bytes for log
// files. Note that this is only checked when log files are created,
// so the total size of log files per severity might temporarily be up
// to LogFileMaxSize larger.
var LogFilesCombinedMaxSize = LogFileMaxSize * 10 // 100MiB
// DirName overrides (if non-empty) the choice of directory in
// which to write logs. See createLogDirs for the full list of
// possible destinations. Note that the default is to log to stderr
// independent of this setting. See --logtostderr.
type DirName struct {
syncutil.Mutex
name string
}
var _ flag.Value = &DirName{}
// Set implements the flag.Value interface.
func (l *DirName) Set(dir string) error {
if len(dir) > 0 && dir[0] == '~' {
return fmt.Errorf("log directory cannot start with '~': %s", dir)
}
if len(dir) > 0 {
absDir, err := filepath.Abs(dir)
if err != nil {
return err
}
dir = absDir
}
l.Lock()
defer l.Unlock()
l.name = dir
return nil
}
// Type implements the flag.Value interface.
func (l *DirName) Type() string {
return "string"
}
// String implements the flag.Value interface.
func (l *DirName) String() string {
l.Lock()
defer l.Unlock()
return l.name
}
func (l *DirName) get() (string, error) {
l.Lock()
defer l.Unlock()
if len(l.name) == 0 {
return "", errDirectoryNotSet
}
return l.name, nil
}
// IsSet returns true iff the directory name is set.
func (l *DirName) IsSet() bool {
l.Lock()
res := l.name != ""
l.Unlock()
return res
}
// DirSet returns true if the log directory has been changed from its default.
func DirSet() bool { return logging.logDir.IsSet() }
// logFileRE matches log files to avoid exposing non-log files accidentally
// and it splits the details of the filename into groups for easy parsing.
// The log file format is {process}.{host}.{username}.{timestamp}.{pid}.log
// cockroach.Brams-MacBook-Pro.bram.2015-06-09T16-10-48Z.30209.log
// All underscores in process, host and username are escaped to double
// underscores and all periods are escaped to an underscore.
// For compatibility with Windows filenames, all colons in the timestamp
// (RFC3339) are converted to underscores.
var logFileRE = regexp.MustCompile(`^(?:.*/)?([^/.]+)\.([^/\.]+)\.([^/\.]+)\.([^/\.]+)\.(\d+)\.log$`)
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// removePeriods removes all extraneous periods. This is required to ensure that
// the only periods in the filename are the ones added by logName so it can
// be easily parsed.
func removePeriods(s string) string {
return strings.Replace(s, ".", "", -1)
}
// logName returns a new log file name with start time t, and the name
// for the symlink.
func logName(prefix string, t time.Time) (name, link string) {
// Replace the ':'s in the time format with '_'s to allow for log files in
// Windows.
tFormatted := strings.Replace(t.Format(time.RFC3339), ":", "_", -1)
name = fmt.Sprintf("%s.%s.%s.%s.%06d.log",
removePeriods(prefix),
removePeriods(host),
removePeriods(userName),
tFormatted,
pid)
return name, removePeriods(prefix) + ".log"
}
var errMalformedName = errors.New("malformed log filename")
func parseLogFilename(filename string) (FileDetails, error) {
matches := logFileRE.FindStringSubmatch(filename)
if matches == nil || len(matches) != 6 {
return FileDetails{}, errMalformedName
}
// Replace the '_'s with ':'s to restore the correct time format.
fixTime := strings.Replace(matches[4], "_", ":", -1)
time, err := time.Parse(time.RFC3339, fixTime)
if err != nil {
return FileDetails{}, err
}
pid, err := strconv.ParseInt(matches[5], 10, 0)
if err != nil {
return FileDetails{}, err
}
return FileDetails{
Program: matches[1],
Host: matches[2],
UserName: matches[3],
Time: time.UnixNano(),
PID: pid,
}, nil
}
var errDirectoryNotSet = errors.New("log: log directory not set")
// create creates a new log file and returns the file and its
// filename. If the file is created successfully, create also attempts
// to update the symlink for that tag, ignoring errors.
func create(
logDir *DirName, prefix string, t time.Time, lastRotation int64,
) (f *os.File, updatedRotation int64, filename string, err error) {
dir, err := logDir.get()
if err != nil {
return nil, lastRotation, "", err
}
// Ensure that the timestamp of the new file name is greater than
// the timestamp of the previously generated file name.
unix := t.Unix()
if unix <= lastRotation {
unix = lastRotation + 1
}
updatedRotation = unix
t = timeutil.Unix(unix, 0)
// Generate the file name.
name, link := logName(prefix, t)
fname := filepath.Join(dir, name)
// Open the file os.O_APPEND|os.O_CREATE rather than use os.Create.
// Append is almost always more efficient than O_RDWR on most modern file systems.
f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664)
if err == nil {
symlink := filepath.Join(dir, link)
// Symlinks are best-effort.
if err := os.Remove(symlink); err != nil && !os.IsNotExist(err) {
fmt.Fprintf(OrigStderr, "log: failed to remove symlink %s: %s", symlink, err)
}
if err := os.Symlink(filepath.Base(fname), symlink); err != nil {
// On Windows, this will be the common case, as symlink creation
// requires special privileges.
// See: https://docs.microsoft.com/en-us/windows/device-security/security-policy-settings/create-symbolic-links
if runtime.GOOS != "windows" {
fmt.Fprintf(OrigStderr, "log: failed to create symlink %s: %s", symlink, err)
}
}
}
return f, updatedRotation, fname, errors.Wrapf(err, "log: cannot create log")
}
// ListLogFiles returns a slice of FileInfo structs for each log file
// on the local node, in any of the configured log directories.
func ListLogFiles() ([]FileInfo, error) {
return logging.listLogFiles()
}
func (l *loggingT) listLogFiles() ([]FileInfo, error) {
var results []FileInfo
dir, err := logging.logDir.get()
if err != nil {
// No log directory configured: simply indicate that there are no
// log files.
return nil, nil
}
infos, err := ioutil.ReadDir(dir)
if err != nil {
return results, err
}
// The file names have a fixed structure with fields delimited by
// periods. create() for new files removes the periods from the
// provided prefix; do the same here to filter out selected names
// below.
programPrefix := removePeriods(l.prefix)
for _, info := range infos {
if info.Mode().IsRegular() {
details, err := parseLogFilename(info.Name())
if err == nil && details.Program == programPrefix {
results = append(results, FileInfo{
Name: info.Name(),
SizeBytes: info.Size(),
ModTimeNanos: info.ModTime().UnixNano(),
Details: details,
})
}
}
}
return results, nil
}
// GetLogReader returns a reader for the specified filename. In
// restricted mode, the filename must be the base name of a file in
// this process's log directory (this is safe for cases when the
// filename comes from external sources, such as the admin UI via
// HTTP). In unrestricted mode any path is allowed, relative to the
// current directory, with the added feature that simple (base name)
// file names will be searched in this process's log directory if not
// found in the current directory.
func GetLogReader(filename string, restricted bool) (io.ReadCloser, error) {
dir, err := logging.logDir.get()
if err != nil {
return nil, err
}
switch restricted {
case true:
// Verify there are no path separators in a restricted-mode pathname.
if filepath.Base(filename) != filename {
return nil, errors.Errorf("pathnames must be basenames only: %s", filename)
}
filename = filepath.Join(dir, filename)
// Symlinks are not followed in restricted mode.
info, err := os.Lstat(filename)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s in the log directory", filename)
}
return nil, errors.Wrapf(err, "Lstat: %s", filename)
}
mode := info.Mode()
if mode&os.ModeSymlink != 0 {
return nil, errors.Errorf("symlinks are not allowed")
}
if !mode.IsRegular() {
return nil, errors.Errorf("not a regular file")
}
case false:
info, err := osStat(filename)
if err != nil {
if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
// The absolute filename didn't work, so try within the log
// directory if the filename isn't a path.
if filepath.IsAbs(filename) {
return nil, errors.Errorf("no such file %s", filename)
}
filenameAttempt := filepath.Join(dir, filename)
info, err = osStat(filenameAttempt)
if err != nil {
if os.IsNotExist(err) {
return nil, errors.Errorf("no such file %s either in current directory or in %s", filename, dir)
}
return nil, errors.Wrapf(err, "Stat: %s", filename)
}
filename = filenameAttempt
}
filename, err = filepath.EvalSymlinks(filename)
if err != nil {
return nil, err
}
if !info.Mode().IsRegular() {
return nil, errors.Errorf("not a regular file")
}
}
// Check that the file name is valid.
if _, err := parseLogFilename(filepath.Base(filename)); err != nil {
return nil, err
}
return os.Open(filename)
}
// TODO(bram): remove when Go1.9 is required.
//
// See https://github.com/golang/go/issues/19870.
func osStat(path string) (os.FileInfo, error) {
path, err := filepath.EvalSymlinks(path)
if err != nil {
return nil, err
}
return os.Lstat(path)
}
// sortableFileInfoSlice is required so we can sort FileInfos.
type sortableFileInfoSlice []FileInfo
func (a sortableFileInfoSlice) Len() int { return len(a) }
func (a sortableFileInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sortableFileInfoSlice) Less(i, j int) bool {
return a[i].Details.Time < a[j].Details.Time
}
// selectFiles selects all log files that have a timestamp before the
// endTime. It then sorts them in decreasing order, with the most
// recent as the first one.
func selectFiles(logFiles []FileInfo, endTimestamp int64) []FileInfo {
files := sortableFileInfoSlice{}
for _, logFile := range logFiles {
if logFile.Details.Time <= endTimestamp {
files = append(files, logFile)
}
}
// Sort the files in reverse order so we will fetch the newest first.
sort.Sort(sort.Reverse(files))
return files
}
// FetchEntriesFromFiles fetches all available log entries on disk
// that are between the 'startTimestamp' and 'endTimestamp'. It will
// stop reading new files if the number of entries exceeds
// 'maxEntries'. Log entries are further filtered by the regexp
// 'pattern' if provided. The log entries are returned in reverse
// chronological order.
func FetchEntriesFromFiles(
startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, error) {
logFiles, err := ListLogFiles()
if err != nil {
return nil, err
}
selectedFiles := selectFiles(logFiles, endTimestamp)
entries := []Entry{}
for _, file := range selectedFiles {
newEntries, entryBeforeStart, err := readAllEntriesFromFile(
file,
startTimestamp,
endTimestamp,
maxEntries-len(entries),
pattern)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
if len(entries) >= maxEntries {
break
}
if entryBeforeStart {
// Stop processing files that won't have any timestamps after
// startTime.
break
}
}
return entries, nil
}
// readAllEntriesFromFile reads in all log entries from a given file that are
// between the 'startTimestamp' and 'endTimestamp' and match the 'pattern' if it
// exists. It returns the entries in the reverse chronological order. It also
// returns a flag that denotes if any timestamp occurred before the
// 'startTimestamp' to inform the caller that no more log files need to be
// processed. If the number of entries returned exceeds 'maxEntries' then
// processing of new entries is stopped immediately.
func readAllEntriesFromFile(
file FileInfo, startTimestamp, endTimestamp int64, maxEntries int, pattern *regexp.Regexp,
) ([]Entry, bool, error) {
reader, err := GetLogReader(file.Name, true /* restricted */)
if reader == nil || err != nil {
return nil, false, err
}
defer reader.Close()
entries := []Entry{}
decoder := NewEntryDecoder(reader)
entryBeforeStart := false
for {
entry := Entry{}
if err := decoder.Decode(&entry); err != nil {
if err == io.EOF {
break
}
return nil, false, err
}
var match bool
if pattern == nil {
match = true
} else {
match = pattern.MatchString(entry.Message) ||
pattern.MatchString(entry.File)
}
if match && entry.Time >= startTimestamp && entry.Time <= endTimestamp {
entries = append([]Entry{entry}, entries...)
if len(entries) >= maxEntries {
break
}
}
if entry.Time < startTimestamp {
entryBeforeStart = true
}
}
return entries, entryBeforeStart, nil
} | "github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
) | random_line_split |
guidestar.py | #!/usr/bin/env python3
#
# Copyright (C) 2018 Cambridge Astronomical Survey Unit
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Retrieval of WEAVE guide stars for the LIFU or MOS.
The authors of this module are:
- David Murphy ([email protected]), Cambridge Astronomical Survey Unit
(CASU, IoA).
Notes
-----
The dependencies of this module are:
- Python core
- numpy
- astropy
- astropy_healpix
Log:
- v0.1: Still work in progress.
"""
from astropy.io import fits as pyfits
import os
from numpy import unique, arange, random, where
from copy import deepcopy
import xml.dom.minidom
from xml.dom.minidom import Node
from math import radians,cos
import numpy
class GuideStar:
"""
Handle the retrieval of WEAVE guide stars for the LIFU or MOS.
This class provides a mechanism for querying and retrieving guide star
targets for the construction of WEAVE 'protofields'.
Parameters
----------
ra : float
The Right Ascension (decimal degrees) - of either the central spaxel or
central FOV.
dec : float
The Declination (decimal degrees) - of either the central spaxel or
central FOV.
pa : float
The position angle (degrees) of rotation (LIFU only).
mode : str
Either LIFU or mIFU.
nside : int, optional
Override the default HEALPix nside value. Will likely end in tears.
"""
import xml.dom.minidom
from xml.dom.minidom import Node
def __init__(self,ra,dec,pa,mode,nside=32):
self.xml_template_url = 'http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/BlankXMLTemplate.xml'
self.xml_template = 'BlankXMLTemplate.xml'
self.ra = ra
self.dec = dec
self.pa = pa
self.nside = nside
self.guides = None
self.plate_scale = 17.8 ## "/mm
self.guide_url = "http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/guides/"
self.guide_filename = "Guides_S4_<HEALPIX>.fits"
self.cache_dir = './cache/'
if not mode in ['LIFU','mIFU']:
print 'ERROR: must specify LIFU or mIFU mode'
raise SystemExit(0)
self.lifu = (mode == 'LIFU')
if (not self.lifu) and (self.pa != 0):
print 'ERROR: mIFU mode requires PA=0'
raise SystemExit(0)
if not os.path.isdir(self.cache_dir):
cmd = 'mkdir -p %s'%(self.cache_dir)
os.system(cmd)
if not os.path.isfile(self.xml_template):
self.wget(self.xml_template_url)
def get_guide(self,annular_fail=True,as_xml=True,print_xml=False):
"""
Master function to return a guide star once the object is instantiated.
Parameters
----------
annular_fail : bool, optional
If there is no guidestar in GC FOV, search an annulus and define
the PA required to get a guidestar. Return most centralised
candidate.
as_xml : bool, optional
Returns the result as an XML <target> element that can be added to
a <field> element.
print_xml : bool, optional
Prints the XML results if as_xml=True.
Returns
-------
guide : astropy.Table or xml.dom.minidom.Element
Row from the Guide star catalogue. If as_xml is True, XML <target>
element that can be inserted into a field XML.
"""
self.set_geometry()
self.retrieve_guidecats()
guides = self.select_target()
if (type(guides) == type(None)) and (annular_fail == True):
print 'No guide(s) found at fixed position - performing annular search'
guides = self.annular_search()
if type(guides) == type(None):
print 'No guide star(s) found...'
return None
if as_xml:
if self.lifu:
return self.to_xml(guides)
else:
xmls = [self.to_xml(guide) for guide in guides]
if print_xml:
for x in xmls:
print x.toxml()
return xmls
else:
return guides
def wget(self,url,outname=None):
import os
import time
cmd = 'wget -q -t 1 -T 5 %s'%(url)
if outname != None:
print 'Downloading URL %s to %s'%(url,outname)
cmd += ' -O %s'%(outname)
os.system(cmd)
return
def annular_search(self):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.set_geometry(healpix=False)
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
radius = self.ra_max-self.ra
#get the healpix IDs covering an annulus centred on self.ra,dec
in_annulus = []
self.healpix_indices = []
print 'Populating annulus and determining HEALPix coverage...'
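# Rejection-sample random points around the field centre until 500 fall inside the
# annulus, recording the HEALPix pixel of each accepted point.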
while(len(in_annulus)) < 500:
rnd_ra = self.ra+(2*(random.random()-0.5)*radius)
rnd_dec = self.dec+(2*(random.random()-0.5)*radius)
rnd_dist = (((rnd_ra-self.ra)**2)+((rnd_dec-self.dec)**2))**0.5
if rnd_dist > r_min:
if rnd_dist < r_max:
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(rnd_ra,rnd_dec,unit='deg')))
in_annulus.append([rnd_ra,rnd_dec])
#print len(in_annulus)
print '....done'
self.healpix_indices = unique(self.healpix_indices)
print self.healpix_indices
self.retrieve_guidecats()
target = self.select_target(annular=True)
return target
def set_geometry(self,healpix=True):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
from numpy import unique
if self.lifu:
self.g_dx = 3.75
self.g_dy = 4.0
#testing - let's make it bigger
# self.g_dx = 17.0
# self.g_dy = 20.0
#needs to be about 20x larger in area
# self.g_dx = 16.8
# self.g_dy = 17.9
self.ra_max = self.ra + ((27.7+(0.5*self.g_dx))/60.0)
self.ra_min = self.ra + ((27.7-(0.5*self.g_dx))/60.0)
self.dec_max = self.dec + ((0.5*self.g_dy)/60.0)
self.dec_min = self.dec - ((0.5*self.g_dy)/60.0)
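# Reference point (taken to be the guide-camera 'GC' centre) offset by 27.7 arcmin in RA from the field centre.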
self.ra_gc0 = self.ra + (27.7/60.0)
self.dec_gc0 = self.dec
else:
self.ra_min = self.ra - 1.0
self.ra_max = self.ra + 1.0
self.dec_min = self.dec - 1.0
self.dec_max = self.dec + 1.0
if healpix:
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.healpix_indices = []
ra = [self.ra_min,self.ra_min,self.ra_max,self.ra_max]
dec = [self.dec_min,self.dec_max,self.dec_min,self.dec_max]
dx = self.ra_max - self.ra_min
dy = self.dec_max - self.dec_min
for i in arange(500):
ra.append(self.ra_min+(random.random()*dx))
dec.append(self.dec_min+(random.random()*dy))
for r,d in zip(ra,dec):
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(r,d,unit='deg')))
self.healpix_indices = unique(self.healpix_indices)
return
def retrieve_guidecats(self,clobber=False):
from astropy.table import Table
import astropy.io.fits as pyfits
if len(self.healpix_indices) == 0:
self.set_geometry()
self.guide_files = []
for hp in self.healpix_indices:
fn = self.guide_filename.replace('<HEALPIX>',str(hp))
url = self.guide_url+fn
fn = self.cache_dir+fn
if os.path.isfile(fn):
if clobber == False:
print 'Using existing file %s'%(fn)
self.guide_files.append(fn)
continue
print url
self.wget(url,outname=fn)
self.guide_files.append(fn)
#self.guide_files = ['/scratch/Guides_S4_cname.fits']
tabs = []
for cat in self.guide_files:
fits = pyfits.open(cat)
tabs.append(Table(fits[1].data))
if len(tabs) == 1:
self.guides = tabs[0]
else:
t0 = tabs[0]
for t in tabs[1:]:
for g in t:
t0.add_row(g)
self.guides = t0
return
def select_target(self,annular=False):
import numpy
from operator import indexOf
from astropy.table import Table
self.ra_g = self.guides['GAIA_RA']
self.dec_g = self.guides['GAIA_DEC']
if (not self.lifu):
annular = True
#fig = plt.figure(); sp = plt.subplot(aspect='equal'); plt.plot(self.ra_g,self.dec_g,'ko'); cir = plt.Circle((gs.ra,gs.dec),radius=1.0,lw=2.0,color='red'); sp.add_patch(cir); plt.show()
if annular == False:
filter1 = (self.ra_g > self.ra_min) & (self.ra_g < self.ra_max)
filter2 = (self.dec_g > self.dec_min) & (self.dec_g < self.dec_max)
filter = filter1 & filter2
if (True in filter) == False:
print 'No guide stars within the GC FOV!!'
return None
self.guides_filter = self.guides[where(filter)[0]]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
self.dist = [((abs(r-self.ra_gc0)**2)+(abs(d-self.dec_gc0)**2))**0.5 for r,d in zip(self.ra_g,self.dec_g)]
else:
if self.lifu:
#in annulus, want closest to the central radius of the annulus
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
else:
r_min = 0.95
r_max = 1.0
self.radii = numpy.array([(((_ra-self.ra)**2)+((_dec-self.dec)**2))**0.5 for _ra,_dec in zip(self.ra_g,self.dec_g)])
filter = (self.radii > r_min) & (self.radii < r_max)
self.guides_filter = self.guides[filter]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
radii = self.radii[filter]
if self.lifu:
r0 = r_min + (0.5*(r_max-r_min))
self.dist = [abs(d-r0) for d in radii]
else:
self.dist = radii
minval = min(self.dist)
g_index = indexOf(self.dist,minval)
guide_sel = self.guides_filter[g_index]
if (annular == False):
if (guide_sel['GAIA_RA'] > self.ra_min) and (guide_sel['GAIA_RA'] < self.ra_max) and (guide_sel['GAIA_DEC'] > self.dec_min) and (guide_sel['GAIA_DEC'] < self.dec_max):
print 'Guidestar candidate is %f arcmin from GC centre'%(minval*60.0)
print self.guides[g_index]
self.guide_sel = self.guides[g_index]
else:
print 'Closest candidate still lies outside of the GC FOV!!'
self.guide_sel = self.guides[g_index]
return None
else:
#do a quick report
from copy import deepcopy
dist_sort = deepcopy(self.dist)
dist_sort.sort()
if self.lifu:
print 'Annular search summary (selected closest to centre of a rotated guidecam):'
else:
print 'Annular search summary:'
print "#\t Dist (') CNAME\t\t RA\t Dec\t angle\t Gaia_G mag"
i = 0
self.guides_filter['dist'] = self.dist
angles = []
for d in dist_sort:
i = i + 1
sel = ''
if (i == 1) and (self.lifu):
sel = ' <-----'
index = indexOf(self.dist,d)
guide_candidate = self.guides_filter[index]
ra_trans = self.ra - guide_candidate['GAIA_RA']
dec_trans = self.dec - guide_candidate['GAIA_DEC']
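# Angle of the field centre as seen from the candidate (measured from the +RA axis),
# wrapped below to degrees in [0, 360).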
ang = numpy.arctan2(dec_trans,ra_trans)
ang = (ang*180.0) / numpy.pi
if ang < 0:
ang += 360.0
angles.append(ang)
print '#%d\t %1.2f\t %s\t %1.4f %1.4f %1.3f\t %1.3f%s'%(i,d*60.0,guide_candidate['CNAME'],guide_candidate['GAIA_RA'],guide_candidate['GAIA_DEC'],ang,guide_candidate['GAIA_MAG_GG'],sel)
self.guides_filter['ANGLE'] = angles
self.guides_filter.sort('dist')
if self.lifu:
return guide_sel
return self.guides_filter
def ingest_xml(self,dom):
self.dom = dom
self.root = dom.childNodes[0]
self.programme = self.root.childNodes[3]
self.observation = self.root.childNodes[5]
self.configure = dom.getElementsByTagName('configure')[0]
self.field = dom.getElementsByTagName('field')[0]
self.base_target = self.field.getElementsByTagName('target')[0]
self.offset = self.observation.getElementsByTagName('offsets')[0]
self.targets_base = self.field.getElementsByTagName('target')
def new_xml(self):
#init the new XML
try:
dom = xml.dom.minidom.parse(self.xml_template)
except xml.parsers.expat.ExpatError:
print("File {0} would not parse".format(self.xml_template))
raise SystemExit(0)
self.ingest_xml(dom)
def | (self,guide):
self.new_xml()
xml_target = self.targets_base[0].cloneNode(True)
guide_ra = guide['GAIA_RA']
guide_dec = guide['GAIA_DEC']
dx = (self.ra - guide_ra)*self.plate_scale
dy = (self.dec - guide_dec)*self.plate_scale
xml_target.setAttribute('targx',str(dx))
xml_target.setAttribute('targy',str(dy))
# print 'WARNING - overriding targx, targy for now!'
#manual override for the moment, position of targx,y
# guide_targ.setAttribute('targx',"-110.0")
# guide_targ.setAttribute('targy',"-500.55")
#xml_target.setAttribute('fibreid',"9999")
#xml_target.setAttribute('configid',"9999")
xml_target.setAttribute('fibreid',"")
xml_target.setAttribute('configid',"")
xml_target.setAttribute('cname',str(guide['CNAME']))
xml_target.setAttribute('targid',str(guide['TARGID']))
xml_target.setAttribute('targra',str(guide['GAIA_RA']))
xml_target.setAttribute('targdec',str(guide['GAIA_DEC']))
xml_target.setAttribute('targpmra',str(guide['GAIA_PMRA']))
xml_target.setAttribute('targpmdec',str(guide['GAIA_PMDEC']))
xml_target.setAttribute('targprio',str(guide['TARGPRIO']))
xml_target.setAttribute('targuse',str(guide['TARGUSE']))
xml_target.setAttribute('targsrvy',str(guide['TARGSRVY']))
xml_target.setAttribute('targname',str(guide['TARGNAME']))
xml_target.setAttribute('targprog',str(guide['TARGPROG']))
xml_target.setAttribute('targclass',str(guide['TARGCLASS']))
xml_target.setAttribute('targcat',str(guide['TARGCAT']))
xml_target.setAttribute('targepoch',str(guide['GAIA_EPOCH']))
xml_target.setAttribute('targparal',str(guide['GAIA_PARAL']))
xml_target.setAttribute('targprio',"10")
# xml_target.setAttribute('ifu_spaxel',"")
#xml_photom = self.targets_base[0].getElementsByTagName('photometry')[0]
xml_photom = xml_target.getElementsByTagName('photometry')[0]
bands = ['g','r','i']
for b in bands:
xml_photom.setAttribute('mag_%s'%(b),"")
xml_photom.setAttribute('emag_%s'%(b),"")
xml_photom.setAttribute('mag_gg',str(guide['GAIA_MAG_GG']))
xml_photom.setAttribute('emag_gg',str(guide['GAIA_EMAG_GG']))
xml_photom.setAttribute('mag_bp',str(guide['GAIA_MAG_BP']))
xml_photom.setAttribute('emag_bp',str(guide['GAIA_EMAG_BP']))
xml_photom.setAttribute('mag_rp',str(guide['GAIA_MAG_RP']))
xml_photom.setAttribute('emag_rp',str(guide['GAIA_EMAG_RP']))
# xml_target.appendChild(xml_photom)
#now do photometry
return xml_target
if __name__ =='__main__':
if 0:
import ifu
ra = 178.835488822
dec = 58.2835493041
pa = 0.0
gs = ifu.guidestar(ra,dec,pa)
gs.set_geometry()
guide = gs.get_guide(annular_fail=True,as_xml=True)
#gs.retrieve_guidecats()
#gs.select_target()
if 1:
gs = GuideStar(316.369609537,-4.71060356792,0,'mIFU')
guides = gs.get_guide()
import numpy
index = numpy.arange(len(guides))
numpy.random.shuffle(index)
for i in index[:8]:
g = guides[i]
if 1:
print gs.guides_filter['ANGLE'][i]
print g.toxml()
if 0:
gs = GuideStar(316.369609537,-4.71060356792,0,'LIFU')
guide = gs.get_guide()
print
print guide.toxml()
# gs.set_geometry()
# gs.retrieve_guidecats()
# gs.select_target(annular=True)
print 'Fin'
| to_xml | identifier_name |
guidestar.py | #!/usr/bin/env python3
#
# Copyright (C) 2018 Cambridge Astronomical Survey Unit
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Retrieval of WEAVE guide stars for the LIFU or MOS.
The authors of this module are:
- David Murphy ([email protected]), Cambridge Astronomical Survey Unit
(CASU, IoA).
Notes
-----
The dependencies of this module are:
- Python core
- numpy
- astropy
- astropy_healpix
Log:
- v0.1: Still work in progress.
"""
from astropy.io import fits as pyfits
import os
from numpy import unique, arange, random, where
from copy import deepcopy
import xml.dom.minidom
from xml.dom.minidom import Node
from math import radians,cos
import numpy
class GuideStar:
"""
Handle the retrieval of WEAVE guide stars for the LIFU or MOS.
This class provides a mechanism for querying and retrieving guide star
targets for the construction of WEAVE 'protofields'.
Parameters
----------
ra : float
The Right Ascension (decimal degrees) - of either the central spaxel or
central FOV.
dec : float
The Declination (decimal degrees) - of either the central spaxel or
central FOV.
pa : float
The position angle (degrees) of rotation (LIFU only).
mode : str
Either LIFU or mIFU.
nside : int, optional
Override the default HEALPix nside value. Will likely end in tears.
"""
import xml.dom.minidom
from xml.dom.minidom import Node
def __init__(self,ra,dec,pa,mode,nside=32):
self.xml_template_url = 'http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/BlankXMLTemplate.xml'
self.xml_template = 'BlankXMLTemplate.xml'
self.ra = ra
self.dec = dec
self.pa = pa
self.nside = nside
self.guides = None
self.plate_scale = 17.8 ## "/mm
self.guide_url = "http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/guides/"
self.guide_filename = "Guides_S4_<HEALPIX>.fits"
self.cache_dir = './cache/'
if not mode in ['LIFU','mIFU']:
print 'ERROR: must specify LIFU or mIFU mode'
raise SystemExit(0)
self.lifu = (mode == 'LIFU')
if (not self.lifu) and (self.pa != 0):
print 'ERROR: mIFU mode requires PA=0'
raise SystemExit(0)
if not os.path.isdir(self.cache_dir):
cmd = 'mkdir -p %s'%(self.cache_dir)
os.system(cmd)
if not os.path.isfile(self.xml_template):
self.wget(self.xml_template_url)
def get_guide(self,annular_fail=True,as_xml=True,print_xml=False):
"""
Master function to return a guide star once the object is instantiated.
Parameters
----------
annular_fail : bool, optional
If there is no guidestar in GC FOV, search an annulus and define
the PA required to get a guidestar. Return most centralised
candidate.
as_xml : bool, optional
Returns the result as an XML <target> element that can be added to
a <field> element.
print_xml : bool, optional
Prints the XML results if as_xml=True.
Returns
-------
guide : astropy.Table or xml.dom.minidom.Element
Row from the Guide star catalogue. If as_xml is True, XML <target>
element that can be inserted into a field XML.
"""
self.set_geometry()
self.retrieve_guidecats()
guides = self.select_target()
if (type(guides) == type(None)) and (annular_fail == True):
print 'No guide(s) found at fixed position - performing annular search'
guides = self.annular_search()
if type(guides) == type(None):
print 'No guide star(s) found...'
return None
if as_xml:
if self.lifu:
return self.to_xml(guides)
else:
xmls = [self.to_xml(guide) for guide in guides]
if print_xml:
for x in xmls:
print x.toxml()
return xmls
else:
return guides
def wget(self,url,outname=None):
import os
import time
cmd = 'wget -q -t 1 -T 5 %s'%(url)
if outname != None:
print 'Downloading URL %s to %s'%(url,outname)
cmd += ' -O %s'%(outname)
os.system(cmd)
return
def annular_search(self):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.set_geometry(healpix=False)
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
radius = self.ra_max-self.ra
#get the healpix IDs covering an annulus centred on self.ra,dec
in_annulus = []
self.healpix_indices = []
print 'Populating annulus and determining HEALPix coverage...'
while(len(in_annulus)) < 500:
rnd_ra = self.ra+(2*(random.random()-0.5)*radius)
rnd_dec = self.dec+(2*(random.random()-0.5)*radius)
rnd_dist = (((rnd_ra-self.ra)**2)+((rnd_dec-self.dec)**2))**0.5
if rnd_dist > r_min:
if rnd_dist < r_max:
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(rnd_ra,rnd_dec,unit='deg')))
in_annulus.append([rnd_ra,rnd_dec])
#print len(in_annulus)
print '....done'
self.healpix_indices = unique(self.healpix_indices)
print self.healpix_indices
self.retrieve_guidecats()
target = self.select_target(annular=True)
return target
def set_geometry(self,healpix=True):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
from numpy import unique
if self.lifu:
self.g_dx = 3.75
self.g_dy = 4.0
#testing - let's make it bigger
# self.g_dx = 17.0
# self.g_dy = 20.0
#needs to be about 20x larger in area
# self.g_dx = 16.8
# self.g_dy = 17.9
self.ra_max = self.ra + ((27.7+(0.5*self.g_dx))/60.0)
self.ra_min = self.ra + ((27.7-(0.5*self.g_dx))/60.0)
self.dec_max = self.dec + ((0.5*self.g_dy)/60.0)
self.dec_min = self.dec - ((0.5*self.g_dy)/60.0)
self.ra_gc0 = self.ra + (27.7/60.0)
self.dec_gc0 = self.dec
else:
self.ra_min = self.ra - 1.0
self.ra_max = self.ra + 1.0
self.dec_min = self.dec - 1.0
self.dec_max = self.dec + 1.0
if healpix:
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.healpix_indices = []
ra = [self.ra_min,self.ra_min,self.ra_max,self.ra_max]
dec = [self.dec_min,self.dec_max,self.dec_min,self.dec_max]
dx = self.ra_max - self.ra_min
dy = self.dec_max - self.dec_min
for i in arange(500):
ra.append(self.ra_min+(random.random()*dx))
dec.append(self.dec_min+(random.random()*dy))
for r,d in zip(ra,dec):
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(r,d,unit='deg')))
self.healpix_indices = unique(self.healpix_indices)
return
def retrieve_guidecats(self,clobber=False):
from astropy.table import Table
import astropy.io.fits as pyfits
if len(self.healpix_indices) == 0:
self.set_geometry()
self.guide_files = []
for hp in self.healpix_indices:
fn = self.guide_filename.replace('<HEALPIX>',str(hp))
url = self.guide_url+fn
fn = self.cache_dir+fn
if os.path.isfile(fn):
if clobber == False:
print 'Using existing file %s'%(fn)
self.guide_files.append(fn)
continue
print url
self.wget(url,outname=fn)
self.guide_files.append(fn)
#self.guide_files = ['/scratch/Guides_S4_cname.fits']
tabs = []
for cat in self.guide_files:
fits = pyfits.open(cat)
tabs.append(Table(fits[1].data))
if len(tabs) == 1:
|
else:
t0 = tabs[0]
for t in tabs[1:]:
for g in t:
t0.add_row(g)
self.guides = t0
return
def select_target(self,annular=False):
import numpy
from operator import indexOf
from astropy.table import Table
self.ra_g = self.guides['GAIA_RA']
self.dec_g = self.guides['GAIA_DEC']
if (not self.lifu):
annular = True
#fig = plt.figure(); sp = plt.subplot(aspect='equal'); plt.plot(self.ra_g,self.dec_g,'ko'); cir = plt.Circle((gs.ra,gs.dec),radius=1.0,lw=2.0,color='red'); sp.add_patch(cir); plt.show()
if annular == False:
filter1 = (self.ra_g > self.ra_min) & (self.ra_g < self.ra_max)
filter2 = (self.dec_g > self.dec_min) & (self.dec_g < self.dec_max)
filter = filter1 & filter2
if (True in filter) == False:
print 'No guide stars within the GC FOV!!'
return None
self.guides_filter = self.guides[where(filter)[0]]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
self.dist = [((abs(r-self.ra_gc0)**2)+(abs(d-self.dec_gc0)**2))**0.5 for r,d in zip(self.ra_g,self.dec_g)]
else:
if self.lifu:
#in annulus, want closest to the central radius of the annulus
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
else:
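# mIFU: keep candidates in a thin annulus 0.95-1.0 degrees from the field centre.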
r_min = 0.95
r_max = 1.0
self.radii = numpy.array([(((_ra-self.ra)**2)+((_dec-self.dec)**2))**0.5 for _ra,_dec in zip(self.ra_g,self.dec_g)])
filter = (self.radii > r_min) & (self.radii < r_max)
self.guides_filter = self.guides[filter]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
radii = self.radii[filter]
if self.lifu:
r0 = r_min + (0.5*(r_max-r_min))
self.dist = [abs(d-r0) for d in radii]
else:
self.dist = radii
minval = min(self.dist)
g_index = indexOf(self.dist,minval)
guide_sel = self.guides_filter[g_index]
if (annular == False):
if (guide_sel['GAIA_RA'] > self.ra_min) and (guide_sel['GAIA_RA'] < self.ra_max) and (guide_sel['GAIA_DEC'] > self.dec_min) and (guide_sel['GAIA_DEC'] < self.dec_max):
print 'Guidestar candidate is %f arcmin from GC centre'%(minval*60.0)
print self.guides[g_index]
self.guide_sel = self.guides[g_index]
else:
print 'Closest candidate still lies outside of the GC FOV!!'
self.guide_sel = self.guides[g_index]
return None
else:
#do a quick report
from copy import deepcopy
dist_sort = deepcopy(self.dist)
dist_sort.sort()
if self.lifu:
print 'Annular search summary (selected closest to centre of a rotated guidecam):'
else:
print 'Annular search summary:'
print "#\t Dist (') CNAME\t\t RA\t Dec\t angle\t Gaia_G mag"
i = 0
self.guides_filter['dist'] = self.dist
angles = []
for d in dist_sort:
i = i + 1
sel = ''
if (i == 1) and (self.lifu):
sel = ' <-----'
index = indexOf(self.dist,d)
guide_candidate = self.guides_filter[index]
ra_trans = self.ra - guide_candidate['GAIA_RA']
dec_trans = self.dec - guide_candidate['GAIA_DEC']
ang = numpy.arctan2(dec_trans,ra_trans)
ang = (ang*180.0) / numpy.pi
if ang < 0:
ang += 360.0
angles.append(ang)
print '#%d\t %1.2f\t %s\t %1.4f %1.4f %1.3f\t %1.3f%s'%(i,d*60.0,guide_candidate['CNAME'],guide_candidate['GAIA_RA'],guide_candidate['GAIA_DEC'],ang,guide_candidate['GAIA_MAG_GG'],sel)
self.guides_filter['ANGLE'] = angles
self.guides_filter.sort('dist')
if self.lifu:
return guide_sel
return self.guides_filter
def ingest_xml(self,dom):
self.dom = dom
self.root = dom.childNodes[0]
self.programme = self.root.childNodes[3]
self.observation = self.root.childNodes[5]
self.configure = dom.getElementsByTagName('configure')[0]
self.field = dom.getElementsByTagName('field')[0]
self.base_target = self.field.getElementsByTagName('target')[0]
self.offset = self.observation.getElementsByTagName('offsets')[0]
self.targets_base = self.field.getElementsByTagName('target')
def new_xml(self):
#init the new XML
try:
dom = xml.dom.minidom.parse(self.xml_template)
except xml.parsers.expat.ExpatError:
print("File {0} would not parse".format(self.xml_template))
raise SystemExit(0)
self.ingest_xml(dom)
def to_xml(self,guide):
self.new_xml()
xml_target = self.targets_base[0].cloneNode(True)
guide_ra = guide['GAIA_RA']
guide_dec = guide['GAIA_DEC']
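# Offset of the guide star from the field centre, scaled by the plate scale, used for the targx/targy attributes.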
dx = (self.ra - guide_ra)*self.plate_scale
dy = (self.dec - guide_dec)*self.plate_scale
xml_target.setAttribute('targx',str(dx))
xml_target.setAttribute('targy',str(dy))
# print 'WARNING - overriding targx, targy for now!'
#manual override for the moment, position of targx,y
# guide_targ.setAttribute('targx',"-110.0")
# guide_targ.setAttribute('targy',"-500.55")
#xml_target.setAttribute('fibreid',"9999")
#xml_target.setAttribute('configid',"9999")
xml_target.setAttribute('fibreid',"")
xml_target.setAttribute('configid',"")
xml_target.setAttribute('cname',str(guide['CNAME']))
xml_target.setAttribute('targid',str(guide['TARGID']))
xml_target.setAttribute('targra',str(guide['GAIA_RA']))
xml_target.setAttribute('targdec',str(guide['GAIA_DEC']))
xml_target.setAttribute('targpmra',str(guide['GAIA_PMRA']))
xml_target.setAttribute('targpmdec',str(guide['GAIA_PMDEC']))
xml_target.setAttribute('targprio',str(guide['TARGPRIO']))
xml_target.setAttribute('targuse',str(guide['TARGUSE']))
xml_target.setAttribute('targsrvy',str(guide['TARGSRVY']))
xml_target.setAttribute('targname',str(guide['TARGNAME']))
xml_target.setAttribute('targprog',str(guide['TARGPROG']))
xml_target.setAttribute('targclass',str(guide['TARGCLASS']))
xml_target.setAttribute('targcat',str(guide['TARGCAT']))
xml_target.setAttribute('targepoch',str(guide['GAIA_EPOCH']))
xml_target.setAttribute('targparal',str(guide['GAIA_PARAL']))
xml_target.setAttribute('targprio',"10")
# xml_target.setAttribute('ifu_spaxel',"")
#xml_photom = self.targets_base[0].getElementsByTagName('photometry')[0]
xml_photom = xml_target.getElementsByTagName('photometry')[0]
bands = ['g','r','i']
for b in bands:
xml_photom.setAttribute('mag_%s'%(b),"")
xml_photom.setAttribute('emag_%s'%(b),"")
xml_photom.setAttribute('mag_gg',str(guide['GAIA_MAG_GG']))
xml_photom.setAttribute('emag_gg',str(guide['GAIA_EMAG_GG']))
xml_photom.setAttribute('mag_bp',str(guide['GAIA_MAG_BP']))
xml_photom.setAttribute('emag_bp',str(guide['GAIA_EMAG_BP']))
xml_photom.setAttribute('mag_rp',str(guide['GAIA_MAG_RP']))
xml_photom.setAttribute('emag_rp',str(guide['GAIA_EMAG_RP']))
# xml_target.appendChild(xml_photom)
#now do photometry
return xml_target
if __name__ =='__main__':
if 0:
import ifu
ra = 178.835488822
dec = 58.2835493041
pa = 0.0
gs = ifu.guidestar(ra,dec,pa)
gs.set_geometry()
guide = gs.get_guide(annular_fail=True,as_xml=True)
#gs.retrieve_guidecats()
#gs.select_target()
if 1:
gs = GuideStar(316.369609537,-4.71060356792,0,'mIFU')
guides = gs.get_guide()
import numpy
index = numpy.arange(len(guides))
numpy.random.shuffle(index)
for i in index[:8]:
g = guides[i]
if 1:
print gs.guides_filter['ANGLE'][i]
print g.toxml()
if 0:
gs = GuideStar(316.369609537,-4.71060356792,0,'LIFU')
guide = gs.get_guide()
print
print guide.toxml()
# gs.set_geometry()
# gs.retrieve_guidecats()
# gs.select_target(annular=True)
print 'Fin'
| self.guides = tabs[0] | conditional_block |
guidestar.py | #!/usr/bin/env python3
#
# Copyright (C) 2018 Cambridge Astronomical Survey Unit
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Retrieval of WEAVE guide stars for the LIFU or MOS.
The authors of this module are:
- David Murphy ([email protected]), Cambridge Astronomical Survey Unit
(CASU, IoA).
Notes
-----
The dependencies of this module are:
- Python core
- numpy
- astropy
- astropy_healpix
Log:
- v0.1: Still work in progress.
"""
from astropy.io import fits as pyfits
import os
from numpy import unique, arange, random, where
from copy import deepcopy
import xml.dom.minidom
from xml.dom.minidom import Node
from math import radians,cos
import numpy
class GuideStar:
"""
Handle the retrieval of WEAVE guide stars for the LIFU or MOS.
This class provides a mechanism for querying and retrieving guide star
targets for the construction of WEAVE 'protofields'.
Parameters
----------
ra : float
The Right Ascension (decimal degrees) - of either the central spaxel or
central FOV.
dec : float
The Declination (decimal degrees) - of either the central spaxel or
central FOV.
pa : float
The position angle (degrees) of rotation (LIFU only).
mode : str
Either LIFU or mIFU.
nside : int, optional
Override the default HEALPix nside value. Will likely end in tears.
"""
import xml.dom.minidom
from xml.dom.minidom import Node
def __init__(self,ra,dec,pa,mode,nside=32):
self.xml_template_url = 'http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/BlankXMLTemplate.xml'
self.xml_template = 'BlankXMLTemplate.xml'
self.ra = ra
self.dec = dec
self.pa = pa
self.nside = nside
self.guides = None
self.plate_scale = 17.8 ## "/mm
self.guide_url = "http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/guides/"
self.guide_filename = "Guides_S4_<HEALPIX>.fits"
self.cache_dir = './cache/'
if not mode in ['LIFU','mIFU']:
print 'ERROR: must specify LIFU or mIFU mode'
raise SystemExit(0)
self.lifu = (mode == 'LIFU')
if (not self.lifu) and (self.pa != 0):
print 'ERROR: mIFU mode requires PA=0'
raise SystemExit(0)
if not os.path.isdir(self.cache_dir):
cmd = 'mkdir -p %s'%(self.cache_dir)
os.system(cmd)
if not os.path.isfile(self.xml_template):
self.wget(self.xml_template_url)
def get_guide(self,annular_fail=True,as_xml=True,print_xml=False):
"""
Master function to return a guide star once the object is instantiated.
Parameters
----------
annular_fail : bool, optional
If there is no guidestar in GC FOV, search an annulus and define
the PA required to get a guidestar. Return most centralised
candidate.
as_xml : bool, optional
Returns the result as an XML <target> element that can be added to
a <field> element.
print_xml : bool, optional
Prints the XML results if as_xml=True.
Returns
-------
guide : astropy.Table or xml.dom.minidom.Element
Row from the Guide star catalogue. If as_xml is True, XML <target>
element that can be inserted into a field XML.
"""
self.set_geometry()
self.retrieve_guidecats()
guides = self.select_target()
if (type(guides) == type(None)) and (annular_fail == True):
print 'No guide(s) found at fixed position - performing annular search'
guides = self.annular_search()
if type(guides) == type(None):
print 'No guide star(s) found...'
return None
if as_xml:
if self.lifu:
return self.to_xml(guides)
else:
xmls = [self.to_xml(guide) for guide in guides]
if print_xml:
for x in xmls:
print x.toxml()
return xmls
else:
return guides
def wget(self,url,outname=None):
import os
import time
cmd = 'wget -q -t 1 -T 5 %s'%(url)
if outname != None:
print 'Downloading URL %s to %s'%(url,outname)
cmd += ' -O %s'%(outname)
os.system(cmd)
return
def annular_search(self):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.set_geometry(healpix=False)
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
radius = self.ra_max-self.ra
#get the healpix IDs covering an annulus centred on self.ra,dec
in_annulus = []
self.healpix_indices = []
print 'Populating annulus and determining HEALPix coverage...'
while(len(in_annulus)) < 500:
rnd_ra = self.ra+(2*(random.random()-0.5)*radius)
rnd_dec = self.dec+(2*(random.random()-0.5)*radius)
rnd_dist = (((rnd_ra-self.ra)**2)+((rnd_dec-self.dec)**2))**0.5
if rnd_dist > r_min:
if rnd_dist < r_max:
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(rnd_ra,rnd_dec,unit='deg')))
in_annulus.append([rnd_ra,rnd_dec])
#print len(in_annulus)
print '....done'
self.healpix_indices = unique(self.healpix_indices)
print self.healpix_indices
self.retrieve_guidecats()
target = self.select_target(annular=True)
return target
def set_geometry(self,healpix=True):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
from numpy import unique
if self.lifu:
self.g_dx = 3.75
self.g_dy = 4.0
#testing - let's make it bigger
# self.g_dx = 17.0
# self.g_dy = 20.0
#needs to be about 20x larger in area
# self.g_dx = 16.8
# self.g_dy = 17.9
self.ra_max = self.ra + ((27.7+(0.5*self.g_dx))/60.0)
self.ra_min = self.ra + ((27.7-(0.5*self.g_dx))/60.0)
self.dec_max = self.dec + ((0.5*self.g_dy)/60.0)
self.dec_min = self.dec - ((0.5*self.g_dy)/60.0)
self.ra_gc0 = self.ra + (27.7/60.0)
self.dec_gc0 = self.dec
else:
self.ra_min = self.ra - 1.0
self.ra_max = self.ra + 1.0
self.dec_min = self.dec - 1.0
self.dec_max = self.dec + 1.0
if healpix:
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.healpix_indices = []
ra = [self.ra_min,self.ra_min,self.ra_max,self.ra_max]
dec = [self.dec_min,self.dec_max,self.dec_min,self.dec_max]
dx = self.ra_max - self.ra_min
dy = self.dec_max - self.dec_min
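# Scatter 500 random points across the bounding box (plus its corners) to find the
# HEALPix pixels covering the region, then deduplicate.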
for i in arange(500):
ra.append(self.ra_min+(random.random()*dx))
dec.append(self.dec_min+(random.random()*dy))
for r,d in zip(ra,dec):
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(r,d,unit='deg')))
self.healpix_indices = unique(self.healpix_indices)
return
def retrieve_guidecats(self,clobber=False):
from astropy.table import Table
import astropy.io.fits as pyfits
if len(self.healpix_indices) == 0:
self.set_geometry()
self.guide_files = []
for hp in self.healpix_indices:
fn = self.guide_filename.replace('<HEALPIX>',str(hp))
url = self.guide_url+fn
fn = self.cache_dir+fn
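# Reuse a previously downloaded catalogue from the cache unless clobber is set.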
if os.path.isfile(fn):
if clobber == False:
print 'Using existing file %s'%(fn)
self.guide_files.append(fn)
continue
print url
self.wget(url,outname=fn)
self.guide_files.append(fn)
#self.guide_files = ['/scratch/Guides_S4_cname.fits']
tabs = []
for cat in self.guide_files:
fits = pyfits.open(cat)
tabs.append(Table(fits[1].data))
if len(tabs) == 1:
self.guides = tabs[0]
else:
t0 = tabs[0]
for t in tabs[1:]:
for g in t:
t0.add_row(g)
self.guides = t0
return
def select_target(self,annular=False):
import numpy
from operator import indexOf
from astropy.table import Table
self.ra_g = self.guides['GAIA_RA']
self.dec_g = self.guides['GAIA_DEC']
if (not self.lifu):
annular = True
#fig = plt.figure(); sp = plt.subplot(aspect='equal'); plt.plot(self.ra_g,self.dec_g,'ko'); cir = plt.Circle((gs.ra,gs.dec),radius=1.0,lw=2.0,color='red'); sp.add_patch(cir); plt.show()
if annular == False:
filter1 = (self.ra_g > self.ra_min) & (self.ra_g < self.ra_max)
filter2 = (self.dec_g > self.dec_min) & (self.dec_g < self.dec_max)
filter = filter1 & filter2
if (True in filter) == False:
print 'No guide stars within the GC FOV!!'
return None
self.guides_filter = self.guides[where(filter)[0]]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
self.dist = [((abs(r-self.ra_gc0)**2)+(abs(d-self.dec_gc0)**2))**0.5 for r,d in zip(self.ra_g,self.dec_g)]
else:
if self.lifu:
#in annulus, want closest to the central radius of the annulus
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
else:
r_min = 0.95
r_max = 1.0
self.radii = numpy.array([(((_ra-self.ra)**2)+((_dec-self.dec)**2))**0.5 for _ra,_dec in zip(self.ra_g,self.dec_g)])
filter = (self.radii > r_min) & (self.radii < r_max)
self.guides_filter = self.guides[filter]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
radii = self.radii[filter]
if self.lifu:
r0 = r_min + (0.5*(r_max-r_min))
self.dist = [abs(d-r0) for d in radii]
else:
self.dist = radii
minval = min(self.dist)
g_index = indexOf(self.dist,minval)
guide_sel = self.guides_filter[g_index]
if (annular == False):
if (guide_sel['GAIA_RA'] > self.ra_min) and (guide_sel['GAIA_RA'] < self.ra_max) and (guide_sel['GAIA_DEC'] > self.dec_min) and (guide_sel['GAIA_DEC'] < self.dec_max):
print 'Guidestar candidate is %f arcmin from GC centre'%(minval*60.0)
print self.guides[g_index]
self.guide_sel = self.guides[g_index]
else:
print 'Closest candidate still lies outside of the GC FOV!!'
self.guide_sel = self.guides[g_index]
return None
else:
#do a quick report
from copy import deepcopy
dist_sort = deepcopy(self.dist)
dist_sort.sort()
if self.lifu:
print 'Annular search summary (selected closest to centre of a rotated guidecam):'
else:
print 'Annular search summary:'
print "#\t Dist (') CNAME\t\t RA\t Dec\t angle\t Gaia_G mag"
i = 0
self.guides_filter['dist'] = self.dist
angles = []
for d in dist_sort:
i = i + 1
sel = ''
if (i == 1) and (self.lifu):
sel = ' <-----'
index = indexOf(self.dist,d)
guide_candidate = self.guides_filter[index]
ra_trans = self.ra - guide_candidate['GAIA_RA']
dec_trans = self.dec - guide_candidate['GAIA_DEC']
ang = numpy.arctan2(dec_trans,ra_trans)
ang = (ang*180.0) / numpy.pi
if ang < 0:
ang += 360.0
angles.append(ang)
print '#%d\t %1.2f\t %s\t %1.4f %1.4f %1.3f\t %1.3f%s'%(i,d*60.0,guide_candidate['CNAME'],guide_candidate['GAIA_RA'],guide_candidate['GAIA_DEC'],ang,guide_candidate['GAIA_MAG_GG'],sel)
self.guides_filter['ANGLE'] = angles
self.guides_filter.sort('dist')
if self.lifu:
return guide_sel
return self.guides_filter
def ingest_xml(self,dom):
self.dom = dom
self.root = dom.childNodes[0]
self.programme = self.root.childNodes[3]
self.observation = self.root.childNodes[5]
self.configure = dom.getElementsByTagName('configure')[0]
self.field = dom.getElementsByTagName('field')[0]
self.base_target = self.field.getElementsByTagName('target')[0]
self.offset = self.observation.getElementsByTagName('offsets')[0]
self.targets_base = self.field.getElementsByTagName('target')
def new_xml(self):
#init the new XML
try:
dom = xml.dom.minidom.parse(self.xml_template)
except xml.parsers.expat.ExpatError:
print("File {0} would not parse".format(self.xml_template))
raise SystemExit(0)
self.ingest_xml(dom)
def to_xml(self,guide):
|
if __name__ =='__main__':
if 0:
import ifu
ra = 178.835488822
dec = 58.2835493041
pa = 0.0
gs = ifu.guidestar(ra,dec,pa)
gs.set_geometry()
guide = gs.get_guide(annular_fail=True,as_xml=True)
#gs.retrieve_guidecats()
#gs.select_target()
if 1:
gs = GuideStar(316.369609537,-4.71060356792,0,'mIFU')
guides = gs.get_guide()
import numpy
index = numpy.arange(len(guides))
numpy.random.shuffle(index)
for i in index[:8]:
g = guides[i]
if 1:
print gs.guides_filter['ANGLE'][i]
print g.toxml()
if 0:
gs = GuideStar(316.369609537,-4.71060356792,0,'LIFU')
guide = gs.get_guide()
print
print guide.toxml()
# gs.set_geometry()
# gs.retrieve_guidecats()
# gs.select_target(annular=True)
print 'Fin'
| self.new_xml()
xml_target = self.targets_base[0].cloneNode(True)
guide_ra = guide['GAIA_RA']
guide_dec = guide['GAIA_DEC']
dx = (self.ra - guide_ra)*self.plate_scale
dy = (self.dec - guide_dec)*self.plate_scale
xml_target.setAttribute('targx',str(dx))
xml_target.setAttribute('targy',str(dy))
# print 'WARNING - overriding targx, targy for now!'
#manual override for the moment, position of targx,y
# guide_targ.setAttribute('targx',"-110.0")
# guide_targ.setAttribute('targy',"-500.55")
#xml_target.setAttribute('fibreid',"9999")
#xml_target.setAttribute('configid',"9999")
xml_target.setAttribute('fibreid',"")
xml_target.setAttribute('configid',"")
xml_target.setAttribute('cname',str(guide['CNAME']))
xml_target.setAttribute('targid',str(guide['TARGID']))
xml_target.setAttribute('targra',str(guide['GAIA_RA']))
xml_target.setAttribute('targdec',str(guide['GAIA_DEC']))
xml_target.setAttribute('targpmra',str(guide['GAIA_PMRA']))
xml_target.setAttribute('targpmdec',str(guide['GAIA_PMDEC']))
xml_target.setAttribute('targprio',str(guide['TARGPRIO']))
xml_target.setAttribute('targuse',str(guide['TARGUSE']))
xml_target.setAttribute('targsrvy',str(guide['TARGSRVY']))
xml_target.setAttribute('targname',str(guide['TARGNAME']))
xml_target.setAttribute('targprog',str(guide['TARGPROG']))
xml_target.setAttribute('targclass',str(guide['TARGCLASS']))
xml_target.setAttribute('targcat',str(guide['TARGCAT']))
xml_target.setAttribute('targepoch',str(guide['GAIA_EPOCH']))
xml_target.setAttribute('targparal',str(guide['GAIA_PARAL']))
xml_target.setAttribute('targprio',"10")
# xml_target.setAttribute('ifu_spaxel',"")
#xml_photom = self.targets_base[0].getElementsByTagName('photometry')[0]
xml_photom = xml_target.getElementsByTagName('photometry')[0]
bands = ['g','r','i']
for b in bands:
xml_photom.setAttribute('mag_%s'%(b),"")
xml_photom.setAttribute('emag_%s'%(b),"")
xml_photom.setAttribute('mag_gg',str(guide['GAIA_MAG_GG']))
xml_photom.setAttribute('emag_gg',str(guide['GAIA_EMAG_GG']))
xml_photom.setAttribute('mag_bp',str(guide['GAIA_MAG_BP']))
xml_photom.setAttribute('emag_bp',str(guide['GAIA_EMAG_BP']))
xml_photom.setAttribute('mag_rp',str(guide['GAIA_MAG_RP']))
xml_photom.setAttribute('emag_rp',str(guide['GAIA_EMAG_RP']))
# xml_target.appendChild(xml_photom)
#now do photometry
return xml_target | identifier_body |
guidestar.py | #!/usr/bin/env python3
#
# Copyright (C) 2018 Cambridge Astronomical Survey Unit
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Retrieval of WEAVE guide stars for the LIFU or MOS.
The authors of this module are:
- David Murphy ([email protected]), Cambridge Astronomical Survey Unit
(CASU, IoA).
Notes
-----
The dependencies of this module are:
- Python core
- numpy
- astropy
- astropy_healpix
Log:
- v0.1: Still work in progress.
"""
from astropy.io import fits as pyfits
import os
from numpy import unique, arange, random, where
from copy import deepcopy
import xml.dom.minidom
from xml.dom.minidom import Node
from math import radians,cos
import numpy
class GuideStar:
"""
Handle the retrieval of WEAVE guide stars for the LIFU or MOS.
This class provides a mechanism for querying and retrieving guide star
targets for the construction of WEAVE 'protofields'.
Parameters
----------
ra : float
The Right Ascension (decimal degrees) - of either the central spaxel or
central FOV.
dec : float
The Declination (decimal degrees) - of either the central spaxel or
central FOV.
pa : float
The position angle (degrees) of rotation (LIFU only).
mode : str
Either LIFU or mIFU.
nside : int, optional
Override the default HEALPix nside value. Will likely end in tears.
"""
import xml.dom.minidom
from xml.dom.minidom import Node
def __init__(self,ra,dec,pa,mode,nside=32):
self.xml_template_url = 'http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/BlankXMLTemplate.xml'
self.xml_template = 'BlankXMLTemplate.xml'
self.ra = ra
self.dec = dec
self.pa = pa
self.nside = nside
self.guides = None
self.plate_scale = 17.8 ## "/mm
self.guide_url = "http://casu.ast.cam.ac.uk/~dmurphy/opr3/swg/resources/guides/"
self.guide_filename = "Guides_S4_<HEALPIX>.fits"
self.cache_dir = './cache/'
if not mode in ['LIFU','mIFU']:
print 'ERROR: must specify LIFU or mIFU mode'
raise SystemExit(0)
self.lifu = (mode == 'LIFU')
if (not self.lifu) and (self.pa != 0):
print 'ERROR: mIFU mode requires PA=0'
raise SystemExit(0)
if not os.path.isdir(self.cache_dir):
cmd = 'mkdir -p %s'%(self.cache_dir)
os.system(cmd)
if not os.path.isfile(self.xml_template):
self.wget(self.xml_template_url)
def get_guide(self,annular_fail=True,as_xml=True,print_xml=False):
"""
Master function to return a guide star once the object is instantiated.
Parameters
----------
annular_fail : bool, optional
If there is no guidestar in GC FOV, search an annulus and define
the PA required to get a guidestar. Return most centralised
candidate.
as_xml : bool, optional
Returns the result as an XML <target> element that can be added to
a <field> element.
print_xml : bool, optional
Prints the XML results if as_xml=True.
Returns
-------
guide : astropy.Table or xml.dom.minidom.Element
Row from the Guide star catalogue. If as_xml is True, XML <target>
element that can be inserted into a field XML.
"""
self.set_geometry()
self.retrieve_guidecats()
guides = self.select_target()
if (type(guides) == type(None)) and (annular_fail == True):
print 'No guide(s) found at fixed position - performing annular search'
guides = self.annular_search()
if type(guides) == type(None):
print 'No guide star(s) found...'
return None
if as_xml:
if self.lifu:
return self.to_xml(guides)
else:
xmls = [self.to_xml(guide) for guide in guides]
if print_xml:
for x in xmls:
print x.toxml()
return xmls
else:
return guides
def wget(self,url,outname=None):
import os
import time
cmd = 'wget -q -t 1 -T 5 %s'%(url)
if outname != None:
print 'Downloading URL %s to %s'%(url,outname)
cmd += ' -O %s'%(outname)
os.system(cmd)
return
def annular_search(self):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.set_geometry(healpix=False)
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
radius = self.ra_max-self.ra
#get the healpix IDs covering an annulus centred on self.ra,dec
in_annulus = []
self.healpix_indices = []
print 'Populating annulus and determining HEALPix coverage...'
while(len(in_annulus)) < 500: | if rnd_dist > r_min:
if rnd_dist < r_max:
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(rnd_ra,rnd_dec,unit='deg')))
in_annulus.append([rnd_ra,rnd_dec])
#print len(in_annulus)
print '....done'
self.healpix_indices = unique(self.healpix_indices)
print self.healpix_indices
self.retrieve_guidecats()
target = self.select_target(annular=True)
return target
def set_geometry(self,healpix=True):
from astropy_healpix import HEALPix
from astropy.coordinates import SkyCoord
import astropy.coordinates as cc
from numpy import unique
if self.lifu:
self.g_dx = 3.75
self.g_dy = 4.0
#testing - let's make it bigger
# self.g_dx = 17.0
# self.g_dy = 20.0
#needs to be about 20x larger in area
# self.g_dx = 16.8
# self.g_dy = 17.9
self.ra_max = self.ra + ((27.7+(0.5*self.g_dx))/60.0)
self.ra_min = self.ra + ((27.7-(0.5*self.g_dx))/60.0)
self.dec_max = self.dec + ((0.5*self.g_dy)/60.0)
self.dec_min = self.dec - ((0.5*self.g_dy)/60.0)
self.ra_gc0 = self.ra + (27.7/60.0)
self.dec_gc0 = self.dec
else:
self.ra_min = self.ra - 1.0
self.ra_max = self.ra + 1.0
self.dec_min = self.dec - 1.0
self.dec_max = self.dec + 1.0
if healpix:
hp = HEALPix(nside=self.nside, order='nested', frame=cc.ICRS())
self.healpix_indices = []
ra = [self.ra_min,self.ra_min,self.ra_max,self.ra_max]
dec = [self.dec_min,self.dec_max,self.dec_min,self.dec_max]
dx = self.ra_max - self.ra_min
dy = self.dec_max - self.dec_min
for i in arange(500):
ra.append(self.ra_min+(random.random()*dx))
dec.append(self.dec_min+(random.random()*dy))
for r,d in zip(ra,dec):
self.healpix_indices.append(hp.skycoord_to_healpix(SkyCoord(r,d,unit='deg')))
self.healpix_indices = unique(self.healpix_indices)
return
def retrieve_guidecats(self,clobber=False):
from astropy.table import Table
import astropy.io.fits as pyfits
if len(self.healpix_indices) == 0:
self.set_geometry()
self.guide_files = []
for hp in self.healpix_indices:
fn = self.guide_filename.replace('<HEALPIX>',str(hp))
url = self.guide_url+fn
fn = self.cache_dir+fn
if os.path.isfile(fn):
if clobber == False:
print 'Using existing file %s'%(fn)
self.guide_files.append(fn)
continue
print url
self.wget(url,outname=fn)
self.guide_files.append(fn)
#self.guide_files = ['/scratch/Guides_S4_cname.fits']
tabs = []
for cat in self.guide_files:
fits = pyfits.open(cat)
tabs.append(Table(fits[1].data))
if len(tabs) == 1:
self.guides = tabs[0]
else:
t0 = tabs[0]
for t in tabs[1:]:
for g in t:
t0.add_row(g)
self.guides = t0
return
def select_target(self,annular=False):
import numpy
from operator import indexOf
from astropy.table import Table
self.ra_g = self.guides['GAIA_RA']
self.dec_g = self.guides['GAIA_DEC']
if (not self.lifu):
annular = True
#fig = plt.figure(); sp = plt.subplot(aspect='equal'); plt.plot(self.ra_g,self.dec_g,'ko'); cir = plt.Circle((gs.ra,gs.dec),radius=1.0,lw=2.0,color='red'); sp.add_patch(cir); plt.show()
if annular == False:
filter1 = (self.ra_g > self.ra_min) & (self.ra_g < self.ra_max)
filter2 = (self.dec_g > self.dec_min) & (self.dec_g < self.dec_max)
filter = filter1 & filter2
if (True in filter) == False:
print 'No guide stars within the GC FOV!!'
return None
self.guides_filter = self.guides[where(filter)[0]]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
self.dist = [((abs(r-self.ra_gc0)**2)+(abs(d-self.dec_gc0)**2))**0.5 for r,d in zip(self.ra_g,self.dec_g)]
else:
if self.lifu:
#in annulus, want closest to the central radius of the annulus
r_min = self.ra_min-self.ra
r_max = self.ra_max-self.ra
else:
r_min = 0.95
r_max = 1.0
self.radii = numpy.array([(((_ra-self.ra)**2)+((_dec-self.dec)**2))**0.5 for _ra,_dec in zip(self.ra_g,self.dec_g)])
filter = (self.radii > r_min) & (self.radii < r_max)
self.guides_filter = self.guides[filter]
self.ra_g = self.guides_filter['GAIA_RA']
self.dec_g = self.guides_filter['GAIA_DEC']
radii = self.radii[filter]
if self.lifu:
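# LIFU: rank candidates by how close they sit to the mid-radius of the annulus.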
r0 = r_min + (0.5*(r_max-r_min))
self.dist = [abs(d-r0) for d in radii]
else:
self.dist = radii
minval = min(self.dist)
g_index = indexOf(self.dist,minval)
guide_sel = self.guides_filter[g_index]
if (annular == False):
if (guide_sel['GAIA_RA'] > self.ra_min) and (guide_sel['GAIA_RA'] < self.ra_max) and (guide_sel['GAIA_DEC'] > self.dec_min) and (guide_sel['GAIA_DEC'] < self.dec_max):
print 'Guidestar candidate is %f arcmin from GC centre'%(minval*60.0)
print self.guides[g_index]
self.guide_sel = self.guides[g_index]
else:
print 'Closest candidate still lies outside of the GC FOV!!'
self.guide_sel = self.guides[g_index]
return None
else:
#do a quick report
from copy import deepcopy
dist_sort = deepcopy(self.dist)
dist_sort.sort()
if self.lifu:
print 'Annular search summary (selected closest to centre of a rotated guidecam):'
else:
print 'Annular search summary:'
print "#\t Dist (') CNAME\t\t RA\t Dec\t angle\t Gaia_G mag"
i = 0
self.guides_filter['dist'] = self.dist
angles = []
for d in dist_sort:
i = i + 1
sel = ''
if (i == 1) and (self.lifu):
sel = ' <-----'
index = indexOf(self.dist,d)
guide_candidate = self.guides_filter[index]
ra_trans = self.ra - guide_candidate['GAIA_RA']
dec_trans = self.dec - guide_candidate['GAIA_DEC']
ang = numpy.arctan2(dec_trans,ra_trans)
ang = (ang*180.0) / numpy.pi
if ang < 0:
ang += 360.0
angles.append(ang)
print '#%d\t %1.2f\t %s\t %1.4f %1.4f %1.3f\t %1.3f%s'%(i,d*60.0,guide_candidate['CNAME'],guide_candidate['GAIA_RA'],guide_candidate['GAIA_DEC'],ang,guide_candidate['GAIA_MAG_GG'],sel)
self.guides_filter['ANGLE'] = angles
self.guides_filter.sort('dist')
if self.lifu:
return guide_sel
return self.guides_filter
def ingest_xml(self,dom):
self.dom = dom
self.root = dom.childNodes[0]
self.programme = self.root.childNodes[3]
self.observation = self.root.childNodes[5]
self.configure = dom.getElementsByTagName('configure')[0]
self.field = dom.getElementsByTagName('field')[0]
self.base_target = self.field.getElementsByTagName('target')[0]
self.offset = self.observation.getElementsByTagName('offsets')[0]
self.targets_base = self.field.getElementsByTagName('target')
def new_xml(self):
#init the new XML
try:
dom = xml.dom.minidom.parse(self.xml_template)
except xml.parsers.expat.ExpatError:
print("File {0} would not parse".format(self.xml_template))
raise SystemExit(0)
self.ingest_xml(dom)
def to_xml(self,guide):
self.new_xml()
xml_target = self.targets_base[0].cloneNode(True)
guide_ra = guide['GAIA_RA']
guide_dec = guide['GAIA_DEC']
dx = (self.ra - guide_ra)*self.plate_scale
dy = (self.dec - guide_dec)*self.plate_scale
xml_target.setAttribute('targx',str(dx))
xml_target.setAttribute('targy',str(dy))
# print 'WARNING - overriding targx, targy for now!'
#manual override for the moment, position of targx,y
# guide_targ.setAttribute('targx',"-110.0")
# guide_targ.setAttribute('targy',"-500.55")
#xml_target.setAttribute('fibreid',"9999")
#xml_target.setAttribute('configid',"9999")
xml_target.setAttribute('fibreid',"")
xml_target.setAttribute('configid',"")
xml_target.setAttribute('cname',str(guide['CNAME']))
xml_target.setAttribute('targid',str(guide['TARGID']))
xml_target.setAttribute('targra',str(guide['GAIA_RA']))
xml_target.setAttribute('targdec',str(guide['GAIA_DEC']))
xml_target.setAttribute('targpmra',str(guide['GAIA_PMRA']))
xml_target.setAttribute('targpmdec',str(guide['GAIA_PMDEC']))
xml_target.setAttribute('targprio',str(guide['TARGPRIO']))
xml_target.setAttribute('targuse',str(guide['TARGUSE']))
xml_target.setAttribute('targsrvy',str(guide['TARGSRVY']))
xml_target.setAttribute('targname',str(guide['TARGNAME']))
xml_target.setAttribute('targprog',str(guide['TARGPROG']))
xml_target.setAttribute('targclass',str(guide['TARGCLASS']))
xml_target.setAttribute('targcat',str(guide['TARGCAT']))
xml_target.setAttribute('targepoch',str(guide['GAIA_EPOCH']))
xml_target.setAttribute('targparal',str(guide['GAIA_PARAL']))
xml_target.setAttribute('targprio',"10")
# xml_target.setAttribute('ifu_spaxel',"")
#xml_photom = self.targets_base[0].getElementsByTagName('photometry')[0]
xml_photom = xml_target.getElementsByTagName('photometry')[0]
bands = ['g','r','i']
for b in bands:
xml_photom.setAttribute('mag_%s'%(b),"")
xml_photom.setAttribute('emag_%s'%(b),"")
xml_photom.setAttribute('mag_gg',str(guide['GAIA_MAG_GG']))
xml_photom.setAttribute('emag_gg',str(guide['GAIA_EMAG_GG']))
xml_photom.setAttribute('mag_bp',str(guide['GAIA_MAG_BP']))
xml_photom.setAttribute('emag_bp',str(guide['GAIA_EMAG_BP']))
xml_photom.setAttribute('mag_rp',str(guide['GAIA_MAG_RP']))
xml_photom.setAttribute('emag_rp',str(guide['GAIA_EMAG_RP']))
# xml_target.appendChild(xml_photom)
#now do photometry
return xml_target
if __name__ =='__main__':
if 0:
import ifu
ra = 178.835488822
dec = 58.2835493041
pa = 0.0
gs = ifu.guidestar(ra,dec,pa)
gs.set_geometry()
guide = gs.get_guide(annular_fail=True,as_xml=True)
#gs.retrieve_guidecats()
#gs.select_target()
if 1:
gs = GuideStar(316.369609537,-4.71060356792,0,'mIFU')
guides = gs.get_guide()
import numpy
index = numpy.arange(len(guides))
numpy.random.shuffle(index)
for i in index[:8]:
g = guides[i]
if 1:
print gs.guides_filter['ANGLE'][i]
print g.toxml()
if 0:
gs = GuideStar(316.369609537,-4.71060356792,0,'LIFU')
guide = gs.get_guide()
print
print guide.toxml()
# gs.set_geometry()
# gs.retrieve_guidecats()
# gs.select_target(annular=True)
print 'Fin' | rnd_ra = self.ra+(2*(random.random()-0.5)*radius)
rnd_dec = self.dec+(2*(random.random()-0.5)*radius)
rnd_dist = (((rnd_ra-self.ra)**2)+((rnd_dec-self.dec)**2))**0.5 | random_line_split |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
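// RAII guard: marks a region of memory read-write for the lifetime of the lock
// and restores the original page protection when dropped.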
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) } != 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr != 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct | {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> {
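        // Walk this module's import table and swap the IAT entry for `funcName`
        // imported from `targetModule`, returning the previous pointer so the
        // caller can restore it later.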
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic } != IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature } != IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine } != IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
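        // The import descriptor table is terminated by an all-zero entry,
        // so stop once OriginalFirstThunk reads as 0.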
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
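                // FirstThunk points at the IAT entries patched by the loader;
                // OriginalFirstThunk keeps the name/ordinal data used to match funcName.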
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 {
break;
}
                    if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG) != 0 {
                        // Import by ordinal has no name entry to match; bump the index
                        // before continuing so the loop does not spin on the same thunk.
                        j += 1;
                        continue;
                    }
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
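                        // Lift the page protection around the IAT slot just long enough
                        // to atomically swap in the replacement function pointer.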
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
}
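// Hooks CreatePipe in this test binary's own IAT, verifies the stub is called,
// then swaps the original pointer back in.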
#[test]
fn test_intercept() {
let target = Module::self_target();
let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} | Module | identifier_name |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) } != 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr != 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct Module {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> |
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
}
#[test]
fn test_intercept() {
let target = Module::self_target();
let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} | {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic } != IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature } != IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine } != IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 {
break;
}
                    if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG) != 0 {
                        // Import by ordinal has no name entry to match; bump the index
                        // before continuing so the loop does not spin on the same thunk.
                        j += 1;
                        continue;
                    }
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
} | identifier_body |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) } != 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr != 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct Module {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic } != IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature } != IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine } != IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 |
                    if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG) != 0 {
                        // Import by ordinal has no name entry to match; bump the index
                        // before continuing so the loop does not spin on the same thunk.
                        j += 1;
                        continue;
                    }
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
}
#[test]
fn test_intercept() {
let target = Module::self_target();
let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} | {
break;
} | conditional_block |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) } != 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr != 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct Module {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic } != IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature } != IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine } != IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 {
break;
}
                    if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG) != 0 {
                        // Import by ordinal has no name entry to match; bump the index
                        // before continuing so the loop does not spin on the same thunk.
                        j += 1;
                        continue;
                    }
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
} | let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} |
#[test]
fn test_intercept() {
let target = Module::self_target();
| random_line_split |
amap.go | package amap
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/go-resty/resty"
)
// PoiResult is the envelope returned by the AMAP place/polygon search API.
type PoiResult struct {
Count string `json:"count"`
Info string `json:"info"`
Infocode string `json:"infocode"`
Pois []Poi `json:"pois"`
Status string `json:"status"`
Suggestion struct {
Cities []interface{} `json:"cities"`
Keywords []interface{} `json:"keywords"`
} `json:"suggestion"`
}
// Poi is a single point-of-interest record from an AMAP search response.
type Poi struct {
Adcode string `json:"adcode"`
Address string `json:"address"`
Adname string `json:"adname"`
Alias string `json:"alias"`
BizExt struct {
Cost string `json:"cost"`
Rating string `json:"rating"`
} `json:"biz_ext"`
BizType string `json:"biz_type"`
BusinessArea string `json:"business_area"`
Children []interface{} `json:"children"`
Citycode string `json:"citycode"`
Cityname string `json:"cityname"`
DiscountNum string `json:"discount_num"`
Distance string `json:"distance"`
Email string `json:"email"`
EntrLocation string `json:"entr_location"`
Event []interface{} `json:"event"`
ExitLocation []interface{} `json:"exit_location"`
Gridcode string `json:"gridcode"`
GroupbuyNum string `json:"groupbuy_num"`
ID string `json:"id"`
Importance []interface{} `json:"importance"`
IndoorData struct {
Cmsid []interface{} `json:"cmsid"`
Cpid []interface{} `json:"cpid"`
Floor []interface{} `json:"floor"`
Truefloor []interface{} `json:"truefloor"`
} `json:"indoor_data"`
IndoorMap string `json:"indoor_map"`
Location string `json:"location"`
Match string `json:"match"`
Name string `json:"name"`
NaviPoiid string `json:"navi_poiid"`
Pcode string `json:"pcode"`
Photos []struct {
Title []interface{} `json:"title"`
URL string `json:"url"`
} `json:"photos"`
Pname string `json:"pname"`
Poiweight []interface{} `json:"poiweight"`
Postcode []interface{} `json:"postcode"`
Recommend string `json:"recommend"`
Shopid []interface{} `json:"shopid"`
Shopinfo string `json:"shopinfo"`
Tag []interface{} `json:"tag"`
Tel string `json:"tel"`
Timestamp []interface{} `json:"timestamp"`
Type string `json:"type"`
Typecode string `json:"typecode"`
Website []interface{} `json:"website"`
}
func (p Poi) String() string {
return fmt.Sprintln(spaceD(p.ID), spaceD(p.Name), spaceD(p.Type), spaceD(p.Typecode), spaceD(p.Address), spaceD(p.Cityname), spaceD(p.Adname), spaceD(p.Location), spaceD(p.Alias))
}
func spaceD(s string) string {
return strings.Join(strings.Fields(s), "")
}
// Point Point
type Point struct {
Lng float64
Lat float64
}
// Rectangle is an axis-aligned search box defined by its top-left and bottom-right corners.
type Rectangle struct {
PointLT Point
PointRB Point
}
func (r Rectangle) check() bool {
return r.PointLT.Lng < r.PointRB.Lng && r.PointLT.Lat > r.PointRB.Lat
}
func (r Rectangle) polygon() string {
return fmt.Sprintf("%f,%f|%f,%f", r.PointLT.Lng, r.PointLT.Lat, r.PointRB.Lng, r.PointRB.Lat)
}
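// quadtree splits the rectangle into four equal quadrants.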
func (r Rectangle) quadtree() []Rectangle {
halflng, halflat := math.Abs(r.PointRB.Lng-r.PointLT.Lng)/2, math.Abs(r.PointLT.Lat-r.PointRB.Lat)/2
return []Rectangle{
{r.PointLT, Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}},
{Point{round(r.PointLT.Lng + halflng), r.PointLT.Lat}, Point{r.PointRB.Lng, round(r.PointLT.Lat - halflat)}},
{Point{r.PointLT.Lng, round(r.PointLT.Lat - halflat)}, Point{round(r.PointLT.Lng + halflng), r.PointRB.Lat}},
{Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}, r.PointRB}}
}
type minRec struct {
Rec Rectangle
Types string
Count int
Err error
}
type minRecPage struct {
Rec Rectangle
Types string
Page string
}
func round(f float64) float64 {
n10 := math.Pow10(6)
return math.Trunc(f*n10) / n10
}
var gaoDePolygonURL = "https://restapi.amap.com/v3/place/polygon"
var gaoDeDetailURL = "https://www.amap.com/detail/get/detail"
var key = "aaa8abdaf05433e3702eae99964cc8c6"
// var key = "935c7385f239000f98ade53bbbc002e7"
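// cutRec recursively subdivides the rectangle until each cell holds at most 800 POIs
// (or the count request fails), collecting the resulting leaf cells.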
func cutRec(rec Rectangle, types string) (recCutresult []minRec) {
count, err := recCount(rec, types)
if err != nil {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count <= 800 && count > 0 {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count > 800 {
// fmt.Println("cuting:", rec, types, count, err)
rec4s := rec.quadtree()
for _, rec4 := range rec4s {
recCutresult = append(recCutresult, cutRec(rec4, types)...)
}
}
return
}
func recCount(rec Rectangle, types string) (count int, err error) {
para := map[string]string{
"types": types,
"offset": "1",
"polygon": rec.polygon(),
}
poiResult1, err := recRequest(para)
if err != nil {
return
}
count, err = strconv.Atoi(poiResult1.Count)
if err != nil {
return
}
return
}
func minRecPagePois(minRecPage minRecPage) (pois []Poi, err error) {
para := map[string]string{
"types": minRecPage.Types,
"offset": "20",
"polygon": minRecPage.Rec.polygon(),
"page": minRecPage.Page,
}
result, err := recRequest(para)
if err != nil |
pois = result.Pois
return
}
func minRecPagesPois(minRecPages []minRecPage) (pois []Poi) {
for _, minRecPage := range minRecPages {
pagePois, err := minRecPagePois(minRecPage)
if err == nil {
pois = append(pois, pagePois...)
} else {
fmt.Println(minRecPages, err)
}
}
return
}
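// minRecPages expands a counted cell into one request per result page (20 POIs per page).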
func minRecPages(mRec minRec) (minRecPages []minRecPage) {
for page := int(math.Ceil(float64(mRec.Count) / 20)); page > 0; page-- {
minRecPages = append(minRecPages, minRecPage{mRec.Rec, mRec.Types, strconv.Itoa(page)})
}
return
}
func minRecsPages(mRecs []minRec) (mrp []minRecPage) {
for _, mRec := range mRecs {
mrp = append(mrp, minRecPages(mRec)...)
}
return
}
func recTypePages(rec Rectangle, types string) (mrp []minRecPage) {
cutrec := cutRec(rec, types)
mrp = minRecsPages(cutrec)
return
}
// RecTypePois returns every POI of the given types inside the rectangle by
// subdividing it and paging through each sub-cell.
func RecTypePois(rec Rectangle, types string) (pois []Poi) {
pages := recTypePages(rec, types)
pois = minRecPagesPois(pages)
return
}
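// recRequest performs the polygon search with retries and rejects responses
// whose status or infocode signals an error.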
func recRequest(para map[string]string) (result PoiResult, err error) {
para["key"] = key
resp, err := resty.
SetTimeout(10 * time.Second).
SetRetryCount(5).
SetRetryWaitTime(10 * time.Second).
SetRetryMaxWaitTime(65 * time.Second).
R().
SetQueryParams(para).
Get(gaoDePolygonURL)
if err != nil {
return
}
	err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" || result.Infocode != "10000" {
err = fmt.Errorf(result.Status, result.Infocode, result.Info)
return
}
return
}
// Detail is the response returned by the amap.com detail endpoint.
type Detail struct {
Status string `json:"status"`
Data struct {
Base struct {
PoiTag string `json:"poi_tag"`
Code string `json:"code"`
ImportanceVipFlag int `json:"importance_vip_flag"`
CityAdcode string `json:"city_adcode"`
Telephone string `json:"telephone"`
NewType string `json:"new_type"`
CityName string `json:"city_name"`
NewKeytype string `json:"new_keytype"`
Checked string `json:"checked"`
Title string `json:"title"`
CreFlag int `json:"cre_flag"`
StdTTag0V string `json:"std_t_tag_0_v"`
NaviGeometry string `json:"navi_geometry"`
Classify string `json:"classify"`
Business string `json:"business"`
ShopInfo struct {
Claim int `json:"claim"`
} `json:"shop_info"`
PoiTagHasTTag int `json:"poi_tag_has_t_tag"`
Pixelx string `json:"pixelx"`
Pixely string `json:"pixely"`
Geodata struct {
Aoi []struct {
Name string `json:"name"`
Mainpoi string `json:"mainpoi"`
Area float64 `json:"area"`
} `json:"aoi"`
} `json:"geodata"`
Poiid string `json:"poiid"`
Distance int `json:"distance"`
Name string `json:"name"`
StdVTag0V string `json:"std_v_tag_0_v"`
EndPoiExtension string `json:"end_poi_extension"`
Y string `json:"y"`
X string `json:"x"`
Address string `json:"address"`
Bcs string `json:"bcs"`
Tag string `json:"tag"`
} `json:"base"`
Spec struct {
MiningShape struct {
Aoiid string `json:"aoiid"`
Center string `json:"center"`
Level int `json:"level"`
SpType string `json:"sp_type"`
Area string `json:"area"`
Shape string `json:"shape"`
Type int `json:"type"`
} `json:"mining_shape"`
SpPic []interface{} `json:"sp_pic"`
} `json:"spec"`
Residential struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"residential"`
Deep struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"deep"`
Rti struct {
ReviewEntrance int `json:"review_entrance"`
ReviewSummary string `json:"review_summary"`
ReviewCount int `json:"review_count"`
HasDiscountFlag int `json:"has_discount_flag"`
ReviewLabels []interface{} `json:"review_labels"`
} `json:"rti"`
Review struct {
Comment []struct {
AosTagScore float64 `json:"aos_tag_score"`
Recommend string `json:"recommend"`
HighQuality int `json:"high_quality"`
Labels []interface{} `json:"labels"`
ReviewID string `json:"review_id"`
AuthorProfileurl string `json:"author_profileurl"`
ReviewWeburl string `json:"review_weburl"`
ReviewWapurl string `json:"review_wapurl"`
Review string `json:"review"`
Author string `json:"author"`
GoldNum int `json:"gold_num"`
QualityFlag int `json:"quality_flag"`
GoldType string `json:"gold_type"`
Score int `json:"score"`
LikeNum string `json:"like_num"`
ReviewAppurl struct {
IosAppurl string `json:"ios_appurl"`
AndroidAppurl string `json:"android_appurl"`
} `json:"review_appurl"`
Time string `json:"time"`
SrcName string `json:"src_name"`
SrcType string `json:"src_type"`
AuthorID int `json:"author_id"`
} `json:"comment"`
} `json:"review"`
SrcInfo []interface{} `json:"src_info"`
ShareURL string `json:"share_url"`
} `json:"data"`
}
func requestDetail(id string) (result Detail, err error) {
resp, err := resty.
R().
SetQueryParams(map[string]string{"id": id}).
Get(gaoDeDetailURL)
if err != nil {
return
}
	err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" {
err = fmt.Errorf(id, result.Status)
return
}
return
}
func requestDetails(ids []string) (result []Detail) {
for _, id := range ids {
r, err1 := requestDetail(id)
if err1 == nil {
result = append(result, r)
}
}
return
}
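// printResult fetches one detail record, backing off for 10s on rate-limited or
// unknown statuses; the channel receive frees a worker slot.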
func printResult(id string, ch chan string) {
r, err := requestDetail(id)
if err == nil {
fmt.Println(id, r.Data.Spec.MiningShape.Shape, "type:"+strconv.Itoa(r.Data.Spec.MiningShape.Type), "sptype:"+r.Data.Spec.MiningShape.SpType)
} else if r.Status == "6" {
fmt.Println(id, "err:toofast")
time.Sleep(10 * time.Second)
} else if r.Status == "8" {
fmt.Println(id, "err:notfounddetail")
} else {
fmt.Println(id, "err"+r.Status)
time.Sleep(10 * time.Second)
}
<-ch
}
| {
return
} | conditional_block |
amap.go | package amap
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/go-resty/resty"
)
// PoiResult PoiResult
type PoiResult struct {
Count string `json:"count"`
Info string `json:"info"`
Infocode string `json:"infocode"`
Pois []Poi `json:"pois"`
Status string `json:"status"`
Suggestion struct {
Cities []interface{} `json:"cities"`
Keywords []interface{} `json:"keywords"`
} `json:"suggestion"`
}
// Poi Poi
type Poi struct {
Adcode string `json:"adcode"`
Address string `json:"address"`
Adname string `json:"adname"`
Alias string `json:"alias"`
BizExt struct {
Cost string `json:"cost"`
Rating string `json:"rating"`
} `json:"biz_ext"`
BizType string `json:"biz_type"`
BusinessArea string `json:"business_area"`
Children []interface{} `json:"children"`
Citycode string `json:"citycode"`
Cityname string `json:"cityname"`
DiscountNum string `json:"discount_num"`
Distance string `json:"distance"`
Email string `json:"email"`
EntrLocation string `json:"entr_location"`
Event []interface{} `json:"event"`
ExitLocation []interface{} `json:"exit_location"`
Gridcode string `json:"gridcode"`
GroupbuyNum string `json:"groupbuy_num"`
ID string `json:"id"`
Importance []interface{} `json:"importance"`
IndoorData struct {
Cmsid []interface{} `json:"cmsid"`
Cpid []interface{} `json:"cpid"`
Floor []interface{} `json:"floor"`
Truefloor []interface{} `json:"truefloor"`
} `json:"indoor_data"`
IndoorMap string `json:"indoor_map"`
Location string `json:"location"`
Match string `json:"match"`
Name string `json:"name"`
NaviPoiid string `json:"navi_poiid"`
Pcode string `json:"pcode"`
Photos []struct {
Title []interface{} `json:"title"`
URL string `json:"url"`
} `json:"photos"`
Pname string `json:"pname"`
Poiweight []interface{} `json:"poiweight"`
Postcode []interface{} `json:"postcode"`
Recommend string `json:"recommend"`
Shopid []interface{} `json:"shopid"`
Shopinfo string `json:"shopinfo"`
Tag []interface{} `json:"tag"`
Tel string `json:"tel"`
Timestamp []interface{} `json:"timestamp"`
Type string `json:"type"`
Typecode string `json:"typecode"`
Website []interface{} `json:"website"`
}
func (p Poi) String() string {
return fmt.Sprintln(spaceD(p.ID), spaceD(p.Name), spaceD(p.Type), spaceD(p.Typecode), spaceD(p.Address), spaceD(p.Cityname), spaceD(p.Adname), spaceD(p.Location), spaceD(p.Alias))
}
func spaceD(s string) string {
return strings.Join(strings.Fields(s), "")
}
// Point Point
type Point struct {
Lng float64
Lat float64
}
// Rectangle Rectangle
type Rectangle struct {
PointLT Point
PointRB Point
}
func (r Rectangle) check() bool {
return r.PointLT.Lng < r.PointRB.Lng && r.PointLT.Lat > r.PointRB.Lat
}
func (r Rectangle) polygon() string {
return fmt.Sprintf("%f,%f|%f,%f", r.PointLT.Lng, r.PointLT.Lat, r.PointRB.Lng, r.PointRB.Lat)
}
func (r Rectangle) quadtree() []Rectangle {
halflng, halflat := math.Abs(r.PointRB.Lng-r.PointLT.Lng)/2, math.Abs(r.PointLT.Lat-r.PointRB.Lat)/2
return []Rectangle{
{r.PointLT, Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}},
{Point{round(r.PointLT.Lng + halflng), r.PointLT.Lat}, Point{r.PointRB.Lng, round(r.PointLT.Lat - halflat)}},
{Point{r.PointLT.Lng, round(r.PointLT.Lat - halflat)}, Point{round(r.PointLT.Lng + halflng), r.PointRB.Lat}},
{Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}, r.PointRB}}
}
type minRec struct {
Rec Rectangle
Types string
Count int
Err error
}
type minRecPage struct {
Rec Rectangle
Types string
Page string
}
func round(f float64) float64 {
n10 := math.Pow10(6)
return math.Trunc(f*n10) / n10
}
var gaoDePolygonURL = "https://restapi.amap.com/v3/place/polygon"
var gaoDeDetailURL = "https://www.amap.com/detail/get/detail"
var key = "aaa8abdaf05433e3702eae99964cc8c6"
// var key = "935c7385f239000f98ade53bbbc002e7"
func cutRec(rec Rectangle, types string) (recCutresult []minRec) {
count, err := recCount(rec, types)
if err != nil {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count <= 800 && count > 0 {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count > 800 {
// fmt.Println("cuting:", rec, types, count, err)
rec4s := rec.quadtree()
for _, rec4 := range rec4s {
recCutresult = append(recCutresult, cutRec(rec4, types)...)
}
}
return
}
func recCount(rec Rectangle, types string) (count int, err error) {
para := map[string]string{
"types": types,
"offset": "1",
"polygon": rec.polygon(),
}
poiResult1, err := recRequest(para)
if err != nil {
return
}
count, err = strconv.Atoi(poiResult1.Count)
if err != nil {
return
}
return
}
func minRecPagePois(minRecPage minRecPage) (pois []Poi, err error) {
para := map[string]string{
"types": minRecPage.Types,
"offset": "20",
"polygon": minRecPage.Rec.polygon(),
"page": minRecPage.Page,
}
result, err := recRequest(para)
if err != nil {
return
}
pois = result.Pois
return
}
func minRecPagesPois(minRecPages []minRecPage) (pois []Poi) {
for _, minRecPage := range minRecPages {
pagePois, err := minRecPagePois(minRecPage)
if err == nil {
pois = append(pois, pagePois...)
} else {
fmt.Println(minRecPages, err)
}
}
return
}
func minRecPages(mRec minRec) (minRecPages []minRecPage) {
for page := int(math.Ceil(float64(mRec.Count) / 20)); page > 0; page-- {
minRecPages = append(minRecPages, minRecPage{mRec.Rec, mRec.Types, strconv.Itoa(page)})
}
return
}
func minRecsPages(mRecs []minRec) (mrp []minRecPage) {
for _, mRec := range mRecs {
mrp = append(mrp, minRecPages(mRec)...)
}
return
}
func recTypePages(rec Rectangle, types string) (mrp []minRecPage) {
cutrec := cutRec(rec, types)
mrp = minRecsPages(cutrec)
return
}
// RecTypePois RecTypePois
func RecTypePois(rec Rectangle, types string) (pois []Poi) |
func recRequest(para map[string]string) (result PoiResult, err error) {
para["key"] = key
resp, err := resty.
SetTimeout(10 * time.Second).
SetRetryCount(5).
SetRetryWaitTime(10 * time.Second).
SetRetryMaxWaitTime(65 * time.Second).
R().
SetQueryParams(para).
Get(gaoDePolygonURL)
if err != nil {
return
}
	err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" || result.Infocode != "10000" {
err = fmt.Errorf(result.Status, result.Infocode, result.Info)
return
}
return
}
// Detail Detail
type Detail struct {
Status string `json:"status"`
Data struct {
Base struct {
PoiTag string `json:"poi_tag"`
Code string `json:"code"`
ImportanceVipFlag int `json:"importance_vip_flag"`
CityAdcode string `json:"city_adcode"`
Telephone string `json:"telephone"`
NewType string `json:"new_type"`
CityName string `json:"city_name"`
NewKeytype string `json:"new_keytype"`
Checked string `json:"checked"`
Title string `json:"title"`
CreFlag int `json:"cre_flag"`
StdTTag0V string `json:"std_t_tag_0_v"`
NaviGeometry string `json:"navi_geometry"`
Classify string `json:"classify"`
Business string `json:"business"`
ShopInfo struct {
Claim int `json:"claim"`
} `json:"shop_info"`
PoiTagHasTTag int `json:"poi_tag_has_t_tag"`
Pixelx string `json:"pixelx"`
Pixely string `json:"pixely"`
Geodata struct {
Aoi []struct {
Name string `json:"name"`
Mainpoi string `json:"mainpoi"`
Area float64 `json:"area"`
} `json:"aoi"`
} `json:"geodata"`
Poiid string `json:"poiid"`
Distance int `json:"distance"`
Name string `json:"name"`
StdVTag0V string `json:"std_v_tag_0_v"`
EndPoiExtension string `json:"end_poi_extension"`
Y string `json:"y"`
X string `json:"x"`
Address string `json:"address"`
Bcs string `json:"bcs"`
Tag string `json:"tag"`
} `json:"base"`
Spec struct {
MiningShape struct {
Aoiid string `json:"aoiid"`
Center string `json:"center"`
Level int `json:"level"`
SpType string `json:"sp_type"`
Area string `json:"area"`
Shape string `json:"shape"`
Type int `json:"type"`
} `json:"mining_shape"`
SpPic []interface{} `json:"sp_pic"`
} `json:"spec"`
Residential struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"residential"`
Deep struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"deep"`
Rti struct {
ReviewEntrance int `json:"review_entrance"`
ReviewSummary string `json:"review_summary"`
ReviewCount int `json:"review_count"`
HasDiscountFlag int `json:"has_discount_flag"`
ReviewLabels []interface{} `json:"review_labels"`
} `json:"rti"`
Review struct {
Comment []struct {
AosTagScore float64 `json:"aos_tag_score"`
Recommend string `json:"recommend"`
HighQuality int `json:"high_quality"`
Labels []interface{} `json:"labels"`
ReviewID string `json:"review_id"`
AuthorProfileurl string `json:"author_profileurl"`
ReviewWeburl string `json:"review_weburl"`
ReviewWapurl string `json:"review_wapurl"`
Review string `json:"review"`
Author string `json:"author"`
GoldNum int `json:"gold_num"`
QualityFlag int `json:"quality_flag"`
GoldType string `json:"gold_type"`
Score int `json:"score"`
LikeNum string `json:"like_num"`
ReviewAppurl struct {
IosAppurl string `json:"ios_appurl"`
AndroidAppurl string `json:"android_appurl"`
} `json:"review_appurl"`
Time string `json:"time"`
SrcName string `json:"src_name"`
SrcType string `json:"src_type"`
AuthorID int `json:"author_id"`
} `json:"comment"`
} `json:"review"`
SrcInfo []interface{} `json:"src_info"`
ShareURL string `json:"share_url"`
} `json:"data"`
}
func requestDetail(id string) (result Detail, err error) {
resp, err := resty.
R().
SetQueryParams(map[string]string{"id": id}).
Get(gaoDeDetailURL)
if err != nil {
return
}
	err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" {
err = fmt.Errorf(id, result.Status)
return
}
return
}
func requestDetails(ids []string) (result []Detail) {
for _, id := range ids {
r, err1 := requestDetail(id)
if err1 == nil {
result = append(result, r)
}
}
return
}
func printResult(id string, ch chan string) {
r, err := requestDetail(id)
if err == nil {
fmt.Println(id, r.Data.Spec.MiningShape.Shape, "type:"+strconv.Itoa(r.Data.Spec.MiningShape.Type), "sptype:"+r.Data.Spec.MiningShape.SpType)
} else if r.Status == "6" {
fmt.Println(id, "err:toofast")
time.Sleep(10 * time.Second)
} else if r.Status == "8" {
fmt.Println(id, "err:notfounddetail")
} else {
fmt.Println(id, "err"+r.Status)
time.Sleep(10 * time.Second)
}
<-ch
}
| {
pages := recTypePages(rec, types)
pois = minRecPagesPois(pages)
return
} | identifier_body |
amap.go | package amap
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/go-resty/resty"
)
// PoiResult PoiResult
type PoiResult struct {
Count string `json:"count"`
Info string `json:"info"`
Infocode string `json:"infocode"`
Pois []Poi `json:"pois"`
Status string `json:"status"`
Suggestion struct {
Cities []interface{} `json:"cities"`
Keywords []interface{} `json:"keywords"`
} `json:"suggestion"`
}
// Poi Poi
type Poi struct {
Adcode string `json:"adcode"`
Address string `json:"address"`
Adname string `json:"adname"`
Alias string `json:"alias"`
BizExt struct {
Cost string `json:"cost"`
Rating string `json:"rating"`
} `json:"biz_ext"`
BizType string `json:"biz_type"`
BusinessArea string `json:"business_area"`
Children []interface{} `json:"children"`
Citycode string `json:"citycode"`
Cityname string `json:"cityname"`
DiscountNum string `json:"discount_num"`
Distance string `json:"distance"`
Email string `json:"email"`
EntrLocation string `json:"entr_location"`
Event []interface{} `json:"event"`
ExitLocation []interface{} `json:"exit_location"`
Gridcode string `json:"gridcode"`
GroupbuyNum string `json:"groupbuy_num"`
ID string `json:"id"`
Importance []interface{} `json:"importance"`
IndoorData struct {
Cmsid []interface{} `json:"cmsid"`
Cpid []interface{} `json:"cpid"`
Floor []interface{} `json:"floor"`
Truefloor []interface{} `json:"truefloor"`
} `json:"indoor_data"`
IndoorMap string `json:"indoor_map"`
Location string `json:"location"`
Match string `json:"match"`
Name string `json:"name"`
NaviPoiid string `json:"navi_poiid"`
Pcode string `json:"pcode"`
Photos []struct {
Title []interface{} `json:"title"`
URL string `json:"url"`
} `json:"photos"`
Pname string `json:"pname"`
Poiweight []interface{} `json:"poiweight"`
Postcode []interface{} `json:"postcode"`
Recommend string `json:"recommend"`
Shopid []interface{} `json:"shopid"`
Shopinfo string `json:"shopinfo"`
Tag []interface{} `json:"tag"`
Tel string `json:"tel"`
Timestamp []interface{} `json:"timestamp"`
Type string `json:"type"`
Typecode string `json:"typecode"`
Website []interface{} `json:"website"`
}
func (p Poi) String() string {
return fmt.Sprintln(spaceD(p.ID), spaceD(p.Name), spaceD(p.Type), spaceD(p.Typecode), spaceD(p.Address), spaceD(p.Cityname), spaceD(p.Adname), spaceD(p.Location), spaceD(p.Alias))
}
func spaceD(s string) string {
return strings.Join(strings.Fields(s), "")
}
// Point Point
type Point struct {
Lng float64
Lat float64
}
// Rectangle Rectangle
type Rectangle struct {
PointLT Point
PointRB Point
}
func (r Rectangle) check() bool {
return r.PointLT.Lng < r.PointRB.Lng && r.PointLT.Lat > r.PointRB.Lat
}
func (r Rectangle) polygon() string {
return fmt.Sprintf("%f,%f|%f,%f", r.PointLT.Lng, r.PointLT.Lat, r.PointRB.Lng, r.PointRB.Lat)
}
func (r Rectangle) quadtree() []Rectangle {
halflng, halflat := math.Abs(r.PointRB.Lng-r.PointLT.Lng)/2, math.Abs(r.PointLT.Lat-r.PointRB.Lat)/2
return []Rectangle{
{r.PointLT, Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}},
{Point{round(r.PointLT.Lng + halflng), r.PointLT.Lat}, Point{r.PointRB.Lng, round(r.PointLT.Lat - halflat)}},
{Point{r.PointLT.Lng, round(r.PointLT.Lat - halflat)}, Point{round(r.PointLT.Lng + halflng), r.PointRB.Lat}},
{Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}, r.PointRB}}
}
type minRec struct {
Rec Rectangle
Types string
Count int
Err error
}
type minRecPage struct {
Rec Rectangle
Types string
Page string
}
func round(f float64) float64 {
n10 := math.Pow10(6)
return math.Trunc(f*n10) / n10
}
var gaoDePolygonURL = "https://restapi.amap.com/v3/place/polygon"
var gaoDeDetailURL = "https://www.amap.com/detail/get/detail"
var key = "aaa8abdaf05433e3702eae99964cc8c6"
// var key = "935c7385f239000f98ade53bbbc002e7"
func cutRec(rec Rectangle, types string) (recCutresult []minRec) {
count, err := recCount(rec, types)
if err != nil {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count <= 800 && count > 0 {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count > 800 {
// fmt.Println("cuting:", rec, types, count, err)
rec4s := rec.quadtree()
for _, rec4 := range rec4s {
recCutresult = append(recCutresult, cutRec(rec4, types)...)
}
}
return
}
func recCount(rec Rectangle, types string) (count int, err error) {
para := map[string]string{
"types": types,
"offset": "1",
"polygon": rec.polygon(),
}
poiResult1, err := recRequest(para)
if err != nil {
return
}
count, err = strconv.Atoi(poiResult1.Count)
if err != nil {
return
}
return
}
func minRecPagePois(minRecPage minRecPage) (pois []Poi, err error) {
para := map[string]string{
"types": minRecPage.Types,
"offset": "20",
"polygon": minRecPage.Rec.polygon(),
"page": minRecPage.Page,
}
result, err := recRequest(para)
if err != nil {
return
}
pois = result.Pois
return
}
func minRecPagesPois(minRecPages []minRecPage) (pois []Poi) {
for _, minRecPage := range minRecPages {
pagePois, err := minRecPagePois(minRecPage)
if err == nil {
pois = append(pois, pagePois...)
} else {
fmt.Println(minRecPages, err)
}
}
return
}
func minRecPages(mRec minRec) (minRecPages []minRecPage) {
for page := int(math.Ceil(float64(mRec.Count) / 20)); page > 0; page-- {
minRecPages = append(minRecPages, minRecPage{mRec.Rec, mRec.Types, strconv.Itoa(page)})
}
return
}
func minRecsPages(mRecs []minRec) (mrp []minRecPage) {
for _, mRec := range mRecs {
mrp = append(mrp, minRecPages(mRec)...)
}
return
}
func recTypePages(rec Rectangle, types string) (mrp []minRecPage) {
cutrec := cutRec(rec, types)
mrp = minRecsPages(cutrec)
return
}
// RecTypePois RecTypePois
func RecTypePois(rec Rectangle, types string) (pois []Poi) {
pages := recTypePages(rec, types)
pois = minRecPagesPois(pages)
return
}
func recRequest(para map[string]string) (result PoiResult, err error) {
para["key"] = key
resp, err := resty.
SetTimeout(10 * time.Second).
SetRetryCount(5).
SetRetryWaitTime(10 * time.Second).
SetRetryMaxWaitTime(65 * time.Second).
R().
SetQueryParams(para).
Get(gaoDePolygonURL)
if err != nil {
return
}
	err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" || result.Infocode != "10000" {
err = fmt.Errorf(result.Status, result.Infocode, result.Info)
return
}
return
}
// Detail Detail
type Detail struct {
Status string `json:"status"`
Data struct {
Base struct {
PoiTag string `json:"poi_tag"`
Code string `json:"code"`
ImportanceVipFlag int `json:"importance_vip_flag"`
CityAdcode string `json:"city_adcode"`
Telephone string `json:"telephone"`
NewType string `json:"new_type"`
CityName string `json:"city_name"`
NewKeytype string `json:"new_keytype"`
Checked string `json:"checked"`
Title string `json:"title"`
CreFlag int `json:"cre_flag"`
StdTTag0V string `json:"std_t_tag_0_v"`
NaviGeometry string `json:"navi_geometry"`
Classify string `json:"classify"`
Business string `json:"business"`
ShopInfo struct {
Claim int `json:"claim"`
} `json:"shop_info"`
PoiTagHasTTag int `json:"poi_tag_has_t_tag"`
Pixelx string `json:"pixelx"`
Pixely string `json:"pixely"`
Geodata struct {
Aoi []struct {
Name string `json:"name"`
Mainpoi string `json:"mainpoi"`
Area float64 `json:"area"`
} `json:"aoi"`
} `json:"geodata"`
Poiid string `json:"poiid"`
Distance int `json:"distance"`
Name string `json:"name"`
StdVTag0V string `json:"std_v_tag_0_v"`
EndPoiExtension string `json:"end_poi_extension"`
Y string `json:"y"`
X string `json:"x"`
Address string `json:"address"`
Bcs string `json:"bcs"`
Tag string `json:"tag"`
} `json:"base"`
Spec struct {
MiningShape struct {
Aoiid string `json:"aoiid"`
Center string `json:"center"`
Level int `json:"level"`
SpType string `json:"sp_type"`
Area string `json:"area"`
Shape string `json:"shape"`
Type int `json:"type"`
} `json:"mining_shape"`
SpPic []interface{} `json:"sp_pic"`
} `json:"spec"`
Residential struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"residential"`
Deep struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"deep"`
Rti struct {
ReviewEntrance int `json:"review_entrance"`
ReviewSummary string `json:"review_summary"`
ReviewCount int `json:"review_count"`
HasDiscountFlag int `json:"has_discount_flag"`
ReviewLabels []interface{} `json:"review_labels"`
} `json:"rti"`
Review struct {
Comment []struct { | AosTagScore float64 `json:"aos_tag_score"`
Recommend string `json:"recommend"`
HighQuality int `json:"high_quality"`
Labels []interface{} `json:"labels"`
ReviewID string `json:"review_id"`
AuthorProfileurl string `json:"author_profileurl"`
ReviewWeburl string `json:"review_weburl"`
ReviewWapurl string `json:"review_wapurl"`
Review string `json:"review"`
Author string `json:"author"`
GoldNum int `json:"gold_num"`
QualityFlag int `json:"quality_flag"`
GoldType string `json:"gold_type"`
Score int `json:"score"`
LikeNum string `json:"like_num"`
ReviewAppurl struct {
IosAppurl string `json:"ios_appurl"`
AndroidAppurl string `json:"android_appurl"`
} `json:"review_appurl"`
Time string `json:"time"`
SrcName string `json:"src_name"`
SrcType string `json:"src_type"`
AuthorID int `json:"author_id"`
} `json:"comment"`
} `json:"review"`
SrcInfo []interface{} `json:"src_info"`
ShareURL string `json:"share_url"`
} `json:"data"`
}
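// requestDetail fetches the detail record for a single POI id and returns an
// error when the response status is not "1".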
func requestDetail(id string) (result Detail, err error) {
resp, err := resty.
R().
SetQueryParams(map[string]string{"id": id}).
Get(gaoDeDetailURL)
if err != nil {
return
}
	err = json.Unmarshal(resp.Body(), &result)
	if err != nil {
		return
	}
	if result.Status != "1" {
		err = fmt.Errorf("detail request for %s failed: status=%s", id, result.Status)
return
}
return
}
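// requestDetails fetches the detail record for every id, skipping ids whose
// request fails.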
func requestDetails(ids []string) (result []Detail) {
	for _, id := range ids {
		r, err := requestDetail(id)
		if err == nil {
			result = append(result, r)
		}
	}
	return
}
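// printResult fetches the detail for one id, prints its mined shape
// information, backs off when the API reports requests are too fast, and
// finally releases a slot on the concurrency-limiting channel.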
func printResult(id string, ch chan string) {
	r, err := requestDetail(id)
	if err == nil {
		fmt.Println(id, r.Data.Spec.MiningShape.Shape, "type:"+strconv.Itoa(r.Data.Spec.MiningShape.Type), "sptype:"+r.Data.Spec.MiningShape.SpType)
	} else if r.Status == "6" {
		// Status "6": requests are arriving too fast, back off before continuing.
		fmt.Println(id, "err:toofast")
		time.Sleep(10 * time.Second)
	} else if r.Status == "8" {
		// Status "8": no detail record exists for this id.
		fmt.Println(id, "err:notfounddetail")
	} else {
		fmt.Println(id, "err:"+r.Status)
		time.Sleep(10 * time.Second)
	}
	// Release one slot on the concurrency-limiting channel.
	<-ch
}
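// printDetails is an illustrative sketch, not part of the original file: it
// shows one way printResult could be driven concurrently, using a buffered
// channel as a simple limiter. The limit of 5 in-flight requests is an
// assumption, and a real driver would also wait for the last goroutines to
// finish (for example with a sync.WaitGroup).
func printDetails(ids []string) {
	ch := make(chan string, 5) // at most 5 concurrent detail requests (assumed limit)
	for _, id := range ids {
		ch <- id               // blocks while the limiter is full
		go printResult(id, ch) // printResult releases the slot with <-ch when done
	}
}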