file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
feature_extract.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import librosa
import numpy as np
import pysptk
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import get_window
from scipy.signal import lfilter
from sprocket.speech.feature_extractor import FeatureExtractor
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_txt
from wavenet_vocoder.utils import write_hdf5
EPS = 1e-10
def low_cut_filter(x, fs, cutoff=70):
"""APPLY LOW CUT FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low cut filter.
Return:
ndarray: Low cut filtered waveform sequence.
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def low_pass_filter(x, fs, cutoff=70, padding=True):
"""APPLY LOW PASS FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low pass filter.
Returns:
ndarray: Low pass filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low pass filter
numtaps = 255
fil = firwin(numtaps, norm_cutoff)
x_pad = np.pad(x, (numtaps, numtaps), 'edge')
lpf_x = lfilter(fil, 1, x_pad)
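# trim the edge padding and shift by numtaps // 2 samples to compensate for the FIR filter delay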
lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]
return lpf_x
def convert_to_continuos_f0(f0):
"""CONVERT F0 TO CONTINUOUS F0.
Args:
f0 (ndarray): original f0 sequence with the shape (T,).
Returns:
ndarray: U/V binary flag sequence with the shape (T,).
ndarray: Continuous f0 with the shape (T,).
"""
# get uv information as binary
uv = np.float32(f0 != 0)
# get start and end of f0
if (f0 == 0).all():
logging.warning("all of the f0 values are 0.")
return uv, f0
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
|
# perform linear interpolation
f = interp1d(nz_frames, f0[nz_frames])
cont_f0 = f(np.arange(0, f0.shape[0]))
return uv, cont_f0
def stft_mcep(x, fftl=512, shiftl=256, dim=25, alpha=0.41, window="hamming", is_padding=False):
"""EXTRACT STFT-BASED MEL-CEPSTRUM.
Args:
x (ndarray): Numpy double array with the size (T,).
fftl (int): FFT length in point (default=512).
shiftl (int): Shift length in point (default=256).
dim (int): Dimension of mel-cepstrum (default=25).
alpha (float): All pass filter coefficient (default=0.41).
window (str): Analysis window type (default="hamming").
is_padding (bool): Whether to pad the end of signal (default=False).
Returns:
ndarray: Mel-cepstrum with the size (N, dim + 1).
"""
# perform padding
if is_padding:
n_pad = fftl - (len(x) - fftl) % shiftl
x = np.pad(x, (0, n_pad), 'reflect')
# get number of frames
n_frame = (len(x) - fftl) // shiftl + 1
# get window function
win = get_window(window, fftl)
# calculate spectrogram
mcep = [pysptk.mcep(x[shiftl * i: shiftl * i + fftl] * win,
dim, alpha, eps=EPS, etype=1)
for i in range(n_frame)]
return np.stack(mcep)
def world_feature_extract(wav_list, args):
"""EXTRACT WORLD FEATURE VECTOR."""
# define feature extractor
feature_extractor = FeatureExtractor(
analyzer="world",
fs=args.fs,
shiftms=args.shiftms,
minf0=args.minf0,
maxf0=args.maxf0,
fftl=args.fftl)
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
f0, _, _ = feature_extractor.analyze(x)
uv, cont_f0 = convert_to_continuos_f0(f0)
cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
codeap = feature_extractor.codeap()
mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)
# concatenate
cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
uv = np.expand_dims(uv, axis=-1)
feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/world", feats)
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melspectrogram_extract(wav_list, args):
"""EXTRACT MEL SPECTROGRAM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
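# scale int16-range samples to [-1.0, 1.0) before computing the mel spectrogram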
x_norm = x / (np.iinfo(np.int16).max + 1)
shiftl = int(args.shiftms * fs * 0.001)
mspc = librosa.feature.melspectrogram(
x_norm, fs,
n_fft=args.fftl,
hop_length=shiftl,
n_mels=args.mspc_dim,
fmin=args.fmin if args.fmin is not None else 0,
fmax=args.fmax if args.fmax is not None else fs // 2,
power=1.0)
mspc = np.log10(np.maximum(EPS, mspc.T))
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/melspc", np.float32(mspc))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melcepstrum_extract(wav_list, args):
"""EXTRACT MEL CEPSTRUM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
shiftl = int(args.shiftms * fs * 0.001)
mcep = stft_mcep(x, args.fftl, shiftl, args.mcep_dim, args.mcep_alpha)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/mcep", np.float32(mcep))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def main():
"""RUN FEATURE EXTRACTION IN PARALLEL."""
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--hdf5dir", default=None,
help="directory to save hdf5")
parser.add_argument(
"--wavdir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--fs", default=16000,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=5,
type=float, help="Frame shift in msec")
parser.add_argument(
"--feature_type", default="world", choices=["world", "melspc", "mcep"],
type=str, help="feature type")
parser.add_argument(
"--mspc_dim", default=80,
type=int, help="Dimension of mel spectrogram")
parser.add_argument(
"--minf0", default=40,
type=int, help="minimum f0 for world analysis")
parser.add_argument(
"--maxf0", default=400,
type=int, help="maximum f0 for world analysis")
parser.add_argument(
"--fmin", default=None, nargs="?",
type=int, help="minimum frequency for melspc")
parser.add_argument(
"--fmax", default=None, nargs="?",
type=int, help="maximum frequency for melspc")
parser.add_argument(
"--mcep_dim", default=24,
type=int, help="Dimension of mel cepstrum")
parser.add_argument(
"--mcep_alpha", default=0.41,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--fftl", default=1024,
type=int, help="FFT length")
parser.add_argument(
"--highpass_cutoff", default=70,
type=int, help="Cut off frequency in lowpass filter")
parser.add_argument(
"--save_wav", default=True,
type=strtobool, help="Whether to save filtered wav file")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
logging.info("number of utterances = %d" % len(file_list))
# check directory existence
if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:
os.makedirs(args.wavdir)
if not os.path.exists(args.hdf5dir):
os.makedirs(args.hdf5dir)
# divide list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
if args.feature_type == "world":
target_fn = world_feature_extract
elif args.feature_type == "melspc":
target_fn = melspectrogram_extract
else:
target_fn = melcepstrum_extract
for f in file_lists:
p = mp.Process(target=target_fn, args=(f, args,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main() | # get non-zero frame index
nz_frames = np.where(f0 != 0)[0] | random_line_split |
feature_extract.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import librosa
import numpy as np
import pysptk
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import get_window
from scipy.signal import lfilter
from sprocket.speech.feature_extractor import FeatureExtractor
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_txt
from wavenet_vocoder.utils import write_hdf5
EPS = 1e-10
def low_cut_filter(x, fs, cutoff=70):
"""APPLY LOW CUT FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low cut filter.
Return:
ndarray: Low cut filtered waveform sequence.
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def low_pass_filter(x, fs, cutoff=70, padding=True):
"""APPLY LOW PASS FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low pass filter.
Returns:
ndarray: Low pass filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low pass filter
numtaps = 255
fil = firwin(numtaps, norm_cutoff)
x_pad = np.pad(x, (numtaps, numtaps), 'edge')
lpf_x = lfilter(fil, 1, x_pad)
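# trim the edge padding and shift by numtaps // 2 samples to compensate for the FIR filter delay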
lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]
return lpf_x
def convert_to_continuos_f0(f0):
"""CONVERT F0 TO CONTINUOUS F0.
Args:
f0 (ndarray): original f0 sequence with the shape (T,).
Returns:
ndarray: U/V binary flag sequence with the shape (T,).
ndarray: Continuous f0 with the shape (T,).
"""
# get uv information as binary
uv = np.float32(f0 != 0)
# get start and end of f0
if (f0 == 0).all():
logging.warning("all of the f0 values are 0.")
return uv, f0
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nz_frames = np.where(f0 != 0)[0]
# perform linear interpolation
f = interp1d(nz_frames, f0[nz_frames])
cont_f0 = f(np.arange(0, f0.shape[0]))
return uv, cont_f0
def | (x, fftl=512, shiftl=256, dim=25, alpha=0.41, window="hamming", is_padding=False):
"""EXTRACT STFT-BASED MEL-CEPSTRUM.
Args:
x (ndarray): Numpy double array with the size (T,).
fftl (int): FFT length in point (default=512).
shiftl (int): Shift length in point (default=256).
dim (int): Dimension of mel-cepstrum (default=25).
alpha (float): All pass filter coefficient (default=0.41).
window (str): Analysis window type (default="hamming").
is_padding (bool): Whether to pad the end of signal (default=False).
Returns:
ndarray: Mel-cepstrum with the size (N, dim + 1).
"""
# perform padding
if is_padding:
n_pad = fftl - (len(x) - fftl) % shiftl
x = np.pad(x, (0, n_pad), 'reflect')
# get number of frames
n_frame = (len(x) - fftl) // shiftl + 1
# get window function
win = get_window(window, fftl)
# calculate spectrogram
mcep = [pysptk.mcep(x[shiftl * i: shiftl * i + fftl] * win,
dim, alpha, eps=EPS, etype=1)
for i in range(n_frame)]
return np.stack(mcep)
def world_feature_extract(wav_list, args):
"""EXTRACT WORLD FEATURE VECTOR."""
# define feature extractor
feature_extractor = FeatureExtractor(
analyzer="world",
fs=args.fs,
shiftms=args.shiftms,
minf0=args.minf0,
maxf0=args.maxf0,
fftl=args.fftl)
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
f0, _, _ = feature_extractor.analyze(x)
uv, cont_f0 = convert_to_continuos_f0(f0)
cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
codeap = feature_extractor.codeap()
mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)
# concatenate
cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
uv = np.expand_dims(uv, axis=-1)
feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/world", feats)
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melspectrogram_extract(wav_list, args):
"""EXTRACT MEL SPECTROGRAM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
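# scale int16-range samples to [-1.0, 1.0) before computing the mel spectrogram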
x_norm = x / (np.iinfo(np.int16).max + 1)
shiftl = int(args.shiftms * fs * 0.001)
mspc = librosa.feature.melspectrogram(
x_norm, fs,
n_fft=args.fftl,
hop_length=shiftl,
n_mels=args.mspc_dim,
fmin=args.fmin if args.fmin is not None else 0,
fmax=args.fmax if args.fmax is not None else fs // 2,
power=1.0)
mspc = np.log10(np.maximum(EPS, mspc.T))
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/melspc", np.float32(mspc))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melcepstrum_extract(wav_list, args):
"""EXTRACT MEL CEPSTRUM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
shiftl = int(args.shiftms * fs * 0.001)
mcep = stft_mcep(x, args.fftl, shiftl, args.mcep_dim, args.mcep_alpha)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/mcep", np.float32(mcep))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def main():
"""RUN FEATURE EXTRACTION IN PARALLEL."""
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--hdf5dir", default=None,
help="directory to save hdf5")
parser.add_argument(
"--wavdir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--fs", default=16000,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=5,
type=float, help="Frame shift in msec")
parser.add_argument(
"--feature_type", default="world", choices=["world", "melspc", "mcep"],
type=str, help="feature type")
parser.add_argument(
"--mspc_dim", default=80,
type=int, help="Dimension of mel spectrogram")
parser.add_argument(
"--minf0", default=40,
type=int, help="minimum f0 for world analysis")
parser.add_argument(
"--maxf0", default=400,
type=int, help="maximum f0 for world analysis")
parser.add_argument(
"--fmin", default=None, nargs="?",
type=int, help="minimum frequency for melspc")
parser.add_argument(
"--fmax", default=None, nargs="?",
type=int, help="maximum frequency for melspc")
parser.add_argument(
"--mcep_dim", default=24,
type=int, help="Dimension of mel cepstrum")
parser.add_argument(
"--mcep_alpha", default=0.41,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--fftl", default=1024,
type=int, help="FFT length")
parser.add_argument(
"--highpass_cutoff", default=70,
type=int, help="Cut off frequency in lowpass filter")
parser.add_argument(
"--save_wav", default=True,
type=strtobool, help="Whether to save filtered wav file")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
logging.info("number of utterances = %d" % len(file_list))
# check directory existence
if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:
os.makedirs(args.wavdir)
if not os.path.exists(args.hdf5dir):
os.makedirs(args.hdf5dir)
# divide list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
if args.feature_type == "world":
target_fn = world_feature_extract
elif args.feature_type == "melspc":
target_fn = melspectrogram_extract
else:
target_fn = melcepstrum_extract
for f in file_lists:
p = mp.Process(target=target_fn, args=(f, args,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main()
| stft_mcep | identifier_name |
feature_extract.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import librosa
import numpy as np
import pysptk
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import get_window
from scipy.signal import lfilter
from sprocket.speech.feature_extractor import FeatureExtractor
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_txt
from wavenet_vocoder.utils import write_hdf5
EPS = 1e-10
def low_cut_filter(x, fs, cutoff=70):
"""APPLY LOW CUT FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low cut filter.
Return:
ndarray: Low cut filtered waveform sequence.
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def low_pass_filter(x, fs, cutoff=70, padding=True):
"""APPLY LOW PASS FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low pass filter.
Returns:
ndarray: Low pass filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low pass filter
numtaps = 255
fil = firwin(numtaps, norm_cutoff)
x_pad = np.pad(x, (numtaps, numtaps), 'edge')
lpf_x = lfilter(fil, 1, x_pad)
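# trim the edge padding and shift by numtaps // 2 samples to compensate for the FIR filter delay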
lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]
return lpf_x
def convert_to_continuos_f0(f0):
"""CONVERT F0 TO CONTINUOUS F0.
Args:
f0 (ndarray): original f0 sequence with the shape (T,).
Returns:
ndarray: U/V binary flag sequence with the shape (T,).
ndarray: Continuous f0 with the shape (T,).
"""
# get uv information as binary
uv = np.float32(f0 != 0)
# get start and end of f0
if (f0 == 0).all():
logging.warning("all of the f0 values are 0.")
return uv, f0
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nz_frames = np.where(f0 != 0)[0]
# perform linear interpolation
f = interp1d(nz_frames, f0[nz_frames])
cont_f0 = f(np.arange(0, f0.shape[0]))
return uv, cont_f0
def stft_mcep(x, fftl=512, shiftl=256, dim=25, alpha=0.41, window="hamming", is_padding=False):
"""EXTRACT STFT-BASED MEL-CEPSTRUM.
Args:
x (ndarray): Numpy double array with the size (T,).
fftl (int): FFT length in point (default=512).
shiftl (int): Shift length in point (default=256).
dim (int): Dimension of mel-cepstrum (default=25).
alpha (float): All pass filter coefficient (default=0.41).
window (str): Analysis window type (default="hamming").
is_padding (bool): Whether to pad the end of signal (default=False).
Returns:
ndarray: Mel-cepstrum with the size (N, dim + 1).
"""
# perform padding
if is_padding:
n_pad = fftl - (len(x) - fftl) % shiftl
x = np.pad(x, (0, n_pad), 'reflect')
# get number of frames
n_frame = (len(x) - fftl) // shiftl + 1
# get window function
win = get_window(window, fftl)
# calculate spectrogram
mcep = [pysptk.mcep(x[shiftl * i: shiftl * i + fftl] * win,
dim, alpha, eps=EPS, etype=1)
for i in range(n_frame)]
return np.stack(mcep)
def world_feature_extract(wav_list, args):
"""EXTRACT WORLD FEATURE VECTOR."""
# define feature extractor
feature_extractor = FeatureExtractor(
analyzer="world",
fs=args.fs,
shiftms=args.shiftms,
minf0=args.minf0,
maxf0=args.maxf0,
fftl=args.fftl)
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
f0, _, _ = feature_extractor.analyze(x)
uv, cont_f0 = convert_to_continuos_f0(f0)
cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
codeap = feature_extractor.codeap()
mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)
# concatenate
cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
uv = np.expand_dims(uv, axis=-1)
feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/world", feats)
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melspectrogram_extract(wav_list, args):
|
def melcepstrum_extract(wav_list, args):
"""EXTRACT MEL CEPSTRUM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
shiftl = int(args.shiftms * fs * 0.001)
mcep = stft_mcep(x, args.fftl, shiftl, args.mcep_dim, args.mcep_alpha)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/mcep", np.float32(mcep))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def main():
"""RUN FEATURE EXTRACTION IN PARALLEL."""
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--hdf5dir", default=None,
help="directory to save hdf5")
parser.add_argument(
"--wavdir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--fs", default=16000,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=5,
type=float, help="Frame shift in msec")
parser.add_argument(
"--feature_type", default="world", choices=["world", "melspc", "mcep"],
type=str, help="feature type")
parser.add_argument(
"--mspc_dim", default=80,
type=int, help="Dimension of mel spectrogram")
parser.add_argument(
"--minf0", default=40,
type=int, help="minimum f0 for world analysis")
parser.add_argument(
"--maxf0", default=400,
type=int, help="maximum f0 for world analysis")
parser.add_argument(
"--fmin", default=None, nargs="?",
type=int, help="minimum frequency for melspc")
parser.add_argument(
"--fmax", default=None, nargs="?",
type=int, help="maximum frequency for melspc")
parser.add_argument(
"--mcep_dim", default=24,
type=int, help="Dimension of mel cepstrum")
parser.add_argument(
"--mcep_alpha", default=0.41,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--fftl", default=1024,
type=int, help="FFT length")
parser.add_argument(
"--highpass_cutoff", default=70,
type=int, help="Cut off frequency in lowpass filter")
parser.add_argument(
"--save_wav", default=True,
type=strtobool, help="Whether to save filtered wav file")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
logging.info("number of utterances = %d" % len(file_list))
# check directory existence
if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:
os.makedirs(args.wavdir)
if not os.path.exists(args.hdf5dir):
os.makedirs(args.hdf5dir)
# divide list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
if args.feature_type == "world":
target_fn = world_feature_extract
elif args.feature_type == "melspc":
target_fn = melspectrogram_extract
else:
target_fn = melcepstrum_extract
for f in file_lists:
p = mp.Process(target=target_fn, args=(f, args,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main()
| """EXTRACT MEL SPECTROGRAM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
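# scale int16-range samples to [-1.0, 1.0) before computing the mel spectrogram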
x_norm = x / (np.iinfo(np.int16).max + 1)
shiftl = int(args.shiftms * fs * 0.001)
mspc = librosa.feature.melspectrogram(
x_norm, fs,
n_fft=args.fftl,
hop_length=shiftl,
n_mels=args.mspc_dim,
fmin=args.fmin if args.fmin is not None else 0,
fmax=args.fmax if args.fmax is not None else fs // 2,
power=1.0)
mspc = np.log10(np.maximum(EPS, mspc.T))
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/melspc", np.float32(mspc))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x)) | identifier_body |
feature_extract.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import librosa
import numpy as np
import pysptk
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import get_window
from scipy.signal import lfilter
from sprocket.speech.feature_extractor import FeatureExtractor
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_txt
from wavenet_vocoder.utils import write_hdf5
EPS = 1e-10
def low_cut_filter(x, fs, cutoff=70):
"""APPLY LOW CUT FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low cut filter.
Return:
ndarray: Low cut filtered waveform sequence.
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
def low_pass_filter(x, fs, cutoff=70, padding=True):
"""APPLY LOW PASS FILTER.
Args:
x (ndarray): Waveform sequence.
fs (int): Sampling frequency.
cutoff (float): Cutoff frequency of low pass filter.
Returns:
ndarray: Low pass filtered waveform sequence
"""
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low pass filter
numtaps = 255
fil = firwin(numtaps, norm_cutoff)
x_pad = np.pad(x, (numtaps, numtaps), 'edge')
lpf_x = lfilter(fil, 1, x_pad)
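# trim the edge padding and shift by numtaps // 2 samples to compensate for the FIR filter delay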
lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]
return lpf_x
def convert_to_continuos_f0(f0):
"""CONVERT F0 TO CONTINUOUS F0.
Args:
f0 (ndarray): original f0 sequence with the shape (T,).
Returns:
ndarray: U/V binary flag sequence with the shape (T,).
ndarray: Continuous f0 with the shape (T,).
"""
# get uv information as binary
uv = np.float32(f0 != 0)
# get start and end of f0
if (f0 == 0).all():
logging.warning("all of the f0 values are 0.")
return uv, f0
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nz_frames = np.where(f0 != 0)[0]
# perform linear interpolation
f = interp1d(nz_frames, f0[nz_frames])
cont_f0 = f(np.arange(0, f0.shape[0]))
return uv, cont_f0
def stft_mcep(x, fftl=512, shiftl=256, dim=25, alpha=0.41, window="hamming", is_padding=False):
"""EXTRACT STFT-BASED MEL-CEPSTRUM.
Args:
x (ndarray): Numpy double array with the size (T,).
fftl (int): FFT length in point (default=512).
shiftl (int): Shift length in point (default=256).
dim (int): Dimension of mel-cepstrum (default=25).
alpha (float): All pass filter coefficient (default=0.41).
window (str): Analysis window type (default="hamming").
is_padding (bool): Whether to pad the end of signal (default=False).
Returns:
ndarray: Mel-cepstrum with the size (N, dim + 1).
"""
# perform padding
if is_padding:
|
# get number of frames
n_frame = (len(x) - fftl) // shiftl + 1
# get window function
win = get_window(window, fftl)
# calculate spectrogram
mcep = [pysptk.mcep(x[shiftl * i: shiftl * i + fftl] * win,
dim, alpha, eps=EPS, etype=1)
for i in range(n_frame)]
return np.stack(mcep)
def world_feature_extract(wav_list, args):
"""EXTRACT WORLD FEATURE VECTOR."""
# define feature extractor
feature_extractor = FeatureExtractor(
analyzer="world",
fs=args.fs,
shiftms=args.shiftms,
minf0=args.minf0,
maxf0=args.maxf0,
fftl=args.fftl)
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
f0, _, _ = feature_extractor.analyze(x)
uv, cont_f0 = convert_to_continuos_f0(f0)
cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
codeap = feature_extractor.codeap()
mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)
# concatenate
cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
uv = np.expand_dims(uv, axis=-1)
feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/world", feats)
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melspectrogram_extract(wav_list, args):
"""EXTRACT MEL SPECTROGRAM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
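# scale int16-range samples to [-1.0, 1.0) before computing the mel spectrogram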
x_norm = x / (np.iinfo(np.int16).max + 1)
shiftl = int(args.shiftms * fs * 0.001)
mspc = librosa.feature.melspectrogram(
x_norm, fs,
n_fft=args.fftl,
hop_length=shiftl,
n_mels=args.mspc_dim,
fmin=args.fmin if args.fmin is not None else 0,
fmax=args.fmax if args.fmax is not None else fs // 2,
power=1.0)
mspc = np.log10(np.maximum(EPS, mspc.T))
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/melspc", np.float32(mspc))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def melcepstrum_extract(wav_list, args):
"""EXTRACT MEL CEPSTRUM."""
# define feature extractor
for i, wav_name in enumerate(wav_list):
logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
if x.dtype != np.int16:
logging.warning("wav file format is not 16 bit PCM.")
x = np.array(x, dtype=np.float64)
if args.highpass_cutoff != 0:
x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
# check sampling frequency
if not fs == args.fs:
logging.error("sampling frequency is not matched.")
sys.exit(1)
# extract features
shiftl = int(args.shiftms * fs * 0.001)
mcep = stft_mcep(x, args.fftl, shiftl, args.mcep_dim, args.mcep_alpha)
# save to hdf5
hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
write_hdf5(hdf5name, "/mcep", np.float32(mcep))
# overwrite wav file
if args.highpass_cutoff != 0 and args.save_wav:
wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
def main():
"""RUN FEATURE EXTRACTION IN PARALLEL."""
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--hdf5dir", default=None,
help="directory to save hdf5")
parser.add_argument(
"--wavdir", default=None,
help="directory to save of preprocessed wav file")
parser.add_argument(
"--fs", default=16000,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=5,
type=float, help="Frame shift in msec")
parser.add_argument(
"--feature_type", default="world", choices=["world", "melspc", "mcep"],
type=str, help="feature type")
parser.add_argument(
"--mspc_dim", default=80,
type=int, help="Dimension of mel spectrogram")
parser.add_argument(
"--minf0", default=40,
type=int, help="minimum f0 for world analysis")
parser.add_argument(
"--maxf0", default=400,
type=int, help="maximum f0 for world analysis")
parser.add_argument(
"--fmin", default=None, nargs="?",
type=int, help="minimum frequency for melspc")
parser.add_argument(
"--fmax", default=None, nargs="?",
type=int, help="maximum frequency for melspc")
parser.add_argument(
"--mcep_dim", default=24,
type=int, help="Dimension of mel cepstrum")
parser.add_argument(
"--mcep_alpha", default=0.41,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--fftl", default=1024,
type=int, help="FFT length")
parser.add_argument(
"--highpass_cutoff", default=70,
type=int, help="Cut off frequency in lowpass filter")
parser.add_argument(
"--save_wav", default=True,
type=strtobool, help="Whether to save filtered wav file")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
logging.info("number of utterances = %d" % len(file_list))
# check directory existence
if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:
os.makedirs(args.wavdir)
if not os.path.exists(args.hdf5dir):
os.makedirs(args.hdf5dir)
# divide list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
if args.feature_type == "world":
target_fn = world_feature_extract
elif args.feature_type == "melspc":
target_fn = melspectrogram_extract
else:
target_fn = melcepstrum_extract
for f in file_lists:
p = mp.Process(target=target_fn, args=(f, args,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main()
| n_pad = fftl - (len(x) - fftl) % shiftl
x = np.pad(x, (0, n_pad), 'reflect') | conditional_block |
main.py | import hmac
import sqlite3
import urllib2
import time
import datetime
import webapp2
import jinja2
import os
import re
import random
import hashlib
from google.appengine.ext import db
#base template start
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
cook=''
class BaseHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
# params['user'] = self.user
return render_str(template, **params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_cookie(self, name, val):
hashed = make_secure_val(val)
cooked = name +'=' + hashed + ';Path=/'
self.response.headers.add_header('Set-Cookie',str(cooked))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
if cookie_val and check_secure_val(cookie_val):
return cookie_val
def login(self, user):
self.set_cookie('user_id', str(user.key().id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
if uid != None:
uid = check_secure_val(uid)
if uid != None:
self.username = User.by_id(int(uid)).name
else:
self.username = None
else:
self.username = None
#base template end
secret='hiiis'
letters='abcdefghijklmnopqrstuvwxyz'
#hashing and salt stuff
def make_salt(length = 5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
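# signed cookie format: "<value>|<hmac(secret, value)>"; check_secure_val() recomputes the HMAC to verify it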
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
#hashing stuff ends
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
curr_balance = db.IntegerProperty(required=True)
@classmethod
def by_id(cls, uid):
return User.get_by_id(uid)
@classmethod
def by_name(cls, name):
u = User.all().filter('name =', name).get()
return u
@classmethod
def register(cls, name, pw, email = None):
pw_hash = make_pw_hash(name, pw)
return User(name = name,
pw_hash = pw_hash,
email = email,
curr_balance = 0)
@classmethod
def login(cls, name, pw):
u = cls.by_name(name)
# print(u)
if u and valid_pw(name, pw, u.pw_hash):
return u
# account details and related stuff
#
conn = sqlite3.connect('account.db')
# conn.execute('''CREATE TABLE stocks
# (
# uname TEXT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL )
# ''')
# conn.execute('''
# CREATE TABLE transactionRequests
# (
# uname TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL,
# status TEXT,
# PRIMARY KEY(uname, time_stamp))
# ''')
t_now = datetime.datetime.now()
c = conn.cursor()
# inst = "INSERT INTO stocks VALUES ('test','TEST',10,100,?)"
# conn.execute("INSERT INTO stocks VALUES ('test','TEST',10,100,?)", (t_now,))
conn.commit()
c.execute('SELECT * FROM stocks')
print c.fetchall()
#conn.commit()
#functions for basic sign-up
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
|
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class SignUp(BaseHandler):
def get(self):
if self.username != None:
self.redirect('/welcome')
# if valid_username(self.username):
# self.render('welcome.html', username = self.username)
else:
self.render('signup-form.html')
def post(self):
uname = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
# self.username = uname
# self.password = password
# self.email = email
have_error = False
params = dict(username = uname,
email = email)
if not valid_username(uname):
params['error_username'] = "That's not a valid username."
have_error = True
if not valid_password(password):
params['error_password'] = "That wasn't a valid password."
have_error = True
elif password != verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
if not valid_email(email):
params['error_email'] = "That's not a valid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cook('name', uname)
u = User.by_name(uname)
if u:
msg = 'That user already exists.'
self.render('signup-form.html', error_username = msg)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cookie('name',uname)
u = User.register(uname, password, email)
u.put()
self.login(u)
self.redirect('/welcome')
# self.login(u)
# self.redirect('/blog')
class History(BaseHandler):
def get(self):
if self.username != None:
params = {}
sname = self.request.get('sname')
c.execute("SELECT * from transactionRequests where uname=? and stk_symbl=?",(self.username, sname))
params['stk_arr'] = c.fetchall()
params['sname'] = sname
params['username'] = self.username
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
print(params)
self.render('history.html', **params)
else:
self.redirect('/login')
class Welcome(BaseHandler):
def get(self):
if self.username != None:
params = {}
params['username'] = self.username;
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
params['stk_arr'] = dat
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
self.render('welcome.html', **params)
else:
self.redirect('/login')
class Login(BaseHandler):
def get(self):
if self.username != None:
self.redirect('/welcome')
self.render('login-form.html')
def post(self):
uname=self.request.get('username')
passwd=self.request.get('password')
u=User.login(uname, passwd)
if u:
self.login(u)
self.redirect('/welcome')
else:
msg = 'Invalid login'
self.render('login-form.html', error = msg)
class LogOut(BaseHandler):
def get(self):
self.logout()
self.redirect('/login')
class SStock(BaseHandler):
def get(self):
sname = self.request.get('sname')
params ={}
params['stock_name']=sname
u = User.by_name(self.username)
params['current_balance'] = u.curr_balance
params['username'] = self.username
# c.execute("SELECT * FROM stocks where uname=?",(self.username,))
# dat = c.fetchall()
# self.render('stock_buy.html',stk_arr=dat)
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
global dat
dat = c.fetchone()
params['stk_arr']=dat
print(dat)
if dat:
params['sell_opt'] = True
self.render('stock_page.html',**params)
def post(self):
global dat
req = self.request.get('req')
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = self.request.get('stk_valu')
u = User.by_name(self.username)
print(req)
if(req == 'buy'):
print(req)
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(u.curr_balance > tot_cost):
u.curr_balance = int(u.curr_balance - tot_cost)
u.put()
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
tmp = c.fetchone()
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--SUCCESS"))
if tmp:
print('in if')
tmp2 = int(tmp[2])
tmp3 = float(tmp[3])
avg = ((tmp2 * tmp3) + int(stk_qty) * float(stk_price)) / (tmp2 + int(stk_qty))
print(tmp2 + int(stk_qty))
print("UPDATE stocks SET stk_qty=? and stk_price=? where stk_symbl=? and uname=?", ((tmp2 + int(stk_qty)), avg, sname, self.username, ))
conn.execute("UPDATE stocks SET stk_qty=?, stk_price=? where uname=? and stk_symbl=?", ((tmp2 + int(stk_qty)), avg, self.username, sname))
conn.commit()
else:
print('hello world')
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
print('helo transaction done')
# self.write('transaction complete')
# self.render('success.html')
# time.sleep(5)
self.redirect('/welcome')
else:
# self.render('regret.html')
# time.sleep(5)
# self.redirect('/stock_info?sname='+ sname)
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--FAILED"))
conn.commit()
self.redirect('/regret')
# else
if (req == 'sell'):
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(int(stk_qty) > dat[2]):
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
print('naaah boy')
elif(int(stk_qty) == int(dat[2])):
print('equal')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--SUCCESS"))
conn.execute("DELETE FROM stocks WHERE stk_symbl=?",(sname,))
u.curr_balance = int(u.curr_balance + tot_cost)
u.put()
else:
print('not hi')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--SUCCESS"))
tmp = int(dat[2]) - int(stk_qty)
u.curr_balance = int(u.curr_balance + tot_cost)
conn.execute("UPDATE stocks SET stk_qty=? where stk_symbl=?",(tmp,sname))
u.put()
conn.commit()
self.redirect('/welcome')
class BuyS(BaseHandler):
def get(self):
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
self.render('stock_buy.html',stk_arr=dat)
def post(self):
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = 100
t_now = datetime.datetime.now()
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
self.write('success')
class Regret(BaseHandler):
def get(self):
self.render('regret.html');
app = webapp2.WSGIApplication([
('/sign_up', SignUp),
('/login', Login),
('/logout', LogOut),
('/welcome', Welcome),
('/history', History),
('/stock_info', SStock),
('/buy_stk', BuyS),
('/regret', Regret)
], debug=True)
| return password and PASS_RE.match(password) | identifier_body |
main.py | import hmac
import sqlite3
import urllib2
import time
import datetime
import webapp2
import jinja2
import os
import re
import random
import hashlib
from google.appengine.ext import db
#base template start
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
cook=''
class BaseHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
# params['user'] = self.user
return render_str(template, **params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_cookie(self, name, val):
hashed = make_secure_val(val)
cooked = name +'=' + hashed + ';Path=/'
self.response.headers.add_header('Set-Cookie',str(cooked))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
if cookie_val and check_secure_val(cookie_val):
return cookie_val
def login(self, user):
self.set_cookie('user_id', str(user.key().id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
if uid != None:
uid = check_secure_val(uid)
if uid != None:
self.username = User.by_id(int(uid)).name
else:
self.username = None
else:
self.username = None
#base template end
secret='hiiis'
letters='abcdefghijklmnopqrstuvwxyz'
#hashing and salt stuff
def make_salt(length = 5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
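# signed cookie format: "<value>|<hmac(secret, value)>"; check_secure_val() recomputes the HMAC to verify it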
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
#hashing stuff ends
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
curr_balance = db.IntegerProperty(required=True)
@classmethod
def by_id(cls, uid):
return User.get_by_id(uid)
@classmethod
def by_name(cls, name):
u = User.all().filter('name =', name).get()
return u
@classmethod
def register(cls, name, pw, email = None):
pw_hash = make_pw_hash(name, pw)
return User(name = name,
pw_hash = pw_hash,
email = email,
curr_balance = 0)
@classmethod
def login(cls, name, pw):
u = cls.by_name(name)
# print(u)
if u and valid_pw(name, pw, u.pw_hash):
return u
# account details and related stuff
#
conn = sqlite3.connect('account.db')
# conn.execute('''CREATE TABLE stocks
# (
# uname TEXT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL )
# ''')
# conn.execute('''
# CREATE TABLE transactionRequests
# (
# uname TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL,
# status TEXT,
# PRIMARY KEY(uname, time_stamp))
# ''')
t_now = datetime.datetime.now()
c = conn.cursor()
# inst = "INSERT INTO stocks VALUES ('test','TEST',10,100,?)"
# conn.execute("INSERT INTO stocks VALUES ('test','TEST',10,100,?)", (t_now,))
conn.commit()
c.execute('SELECT * FROM stocks')
print c.fetchall()
#conn.commit()
#functions for basic sign-up
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class SignUp(BaseHandler):
def get(self):
if self.username != None:
self.redirect('/welcome')
# if valid_username(self.username):
# self.render('welcome.html', username = self.username)
else:
self.render('signup-form.html')
def post(self):
uname = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
# self.username = uname
# self.password = password
# self.email = email
have_error = False
params = dict(username = uname,
email = email)
if not valid_username(uname):
params['error_username'] = "That's not a valid username."
have_error = True
if not valid_password(password):
params['error_password'] = "That wasn't a valid password."
have_error = True | if not valid_email(email):
params['error_email'] = "That's not a valid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cook('name', uname)
u = User.by_name(uname)
if u:
msg = 'That user already exists.'
self.render('signup-form.html', error_username = msg)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cookie('name',uname)
u = User.register(uname, password, email)
u.put()
self.login(u)
self.redirect('/welcome')
# self.login(u)
# self.redirect('/blog')
class History(BaseHandler):
def get(self):
if self.username != None:
params = {}
sname = self.request.get('sname')
c.execute("SELECT * from transactionRequests where uname=? and stk_symbl=?",(self.username, sname))
params['stk_arr'] = c.fetchall()
params['sname'] = sname
params['username'] = self.username
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
print(params)
self.render('history.html', **params)
else:
self.redirect('/login')
class Welcome(BaseHandler):
def get(self):
if self.username != None:
params = {}
params['username'] = self.username;
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
params['stk_arr'] = dat
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
self.render('welcome.html', **params)
else:
self.redirect('/login')
class Login(BaseHandler):
def get(self):
if self.username != None:
return self.redirect('/welcome')
self.render('login-form.html')
def post(self):
uname=self.request.get('username')
passwd=self.request.get('password')
u=User.login(uname, passwd)
if u:
self.login(u)
self.redirect('/welcome')
else:
msg = 'Invalid login'
self.render('login-form.html', error = msg)
class LogOut(BaseHandler):
def get(self):
self.logout()
self.redirect('/login')
class SStock(BaseHandler):
def get(self):
sname = self.request.get('sname')
params ={}
params['stock_name']=sname
u = User.by_name(self.username)
params['current_balance'] = u.curr_balance
params['username'] = self.username
# c.execute("SELECT * FROM stocks where uname=?",(self.username,))
# dat = c.fetchall()
# self.render('stock_buy.html',stk_arr=dat)
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
global dat
dat = c.fetchone()
params['stk_arr']=dat
print(dat)
if dat:
params['sell_opt'] = True
self.render('stock_page.html',**params)
def post(self):
global dat
req = self.request.get('req')
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = self.request.get('stk_valu')
u = User.by_name(self.username)
print(req)
if(req == 'buy'):
print(req)
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(u.curr_balance > tot_cost):
u.curr_balance = int(u.curr_balance - tot_cost)
u.put()
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
tmp = c.fetchone()
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--SUCCESS"))
if tmp:
print('in if')
tmp2 = int(tmp[2])
tmp3 = float(tmp[3])
avg = ((tmp2 * tmp3) + int(stk_qty) * float(stk_price)) / (tmp2 + int(stk_qty))
print(tmp2 + int(stk_qty))
print("UPDATE stocks SET stk_qty=? and stk_price=? where stk_symbl=? and uname=?", ((tmp2 + int(stk_qty)), avg, sname, self.username, ))
conn.execute("UPDATE stocks SET stk_qty=?, stk_price=? where uname=? and stk_symbl=?", ((tmp2 + int(stk_qty)), avg, self.username, sname))
conn.commit()
else:
print('hello world')
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
print('helo transaction done')
# self.write('transaction complete')
# self.render('success.html')
# time.sleep(5)
self.redirect('/welcome')
else:
# self.render('regret.html')
# time.sleep(5)
# self.redirect('/stock_info?sname='+ sname)
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--FAILED"))
conn.commit()
self.redirect('/regret')
# else
if (req == 'sell'):
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(int(stk_qty) > dat[2]):
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
print('naaah boy')
if(int(stk_qty) == int(dat[2])):
print('equal')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
conn.execute("DELETE FROM stocks WHERE stk_symbl=?",(sname,))
u.curr_balance = int(u.curr_balance + tot_cost)
u.put()
else:
print('not hi')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--SUCCESS"))
tmp = int(dat[2]) - int(stk_qty)
u.curr_balance = int(u.curr_balance + tot_cost)
conn.execute("UPDATE stocks SET stk_qty=? where stk_symbl=?",(tmp,sname))
u.put()
conn.commit()
self.redirect('/welcome')
class BuyS(BaseHandler):
def get(self):
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
self.render('stock_buy.html',stk_arr=dat)
def post(self):
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = 100
t_now = datetime.datetime.now()
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
self.write('success')
class Regret(BaseHandler):
def get(self):
self.render('regret.html');
app = webapp2.WSGIApplication([
('/sign_up', SignUp),
('/login', Login),
('/logout', LogOut),
('/welcome', Welcome),
('/history', History),
('/stock_info', SStock),
('/buy_stk', BuyS),
('/regret', Regret)
], debug=True) | elif password != verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
| random_line_split |
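The session handling in main.py above rests on the pair make_secure_val / check_secure_val: the cookie stores a value next to an HMAC of that value, and the cookie is trusted only if the signature recomputes. A minimal standalone sketch of the same round trip, assuming Python 3, an explicit SHA-256 digest and a placeholder secret (the app itself is Python 2 and uses hmac's default digest), so this is an illustration rather than the application's code:

import hmac
import hashlib

SECRET = b"change-me"  # placeholder, not the app's real secret

def make_secure_val(val):
    # cookie payload is "value|hmac(secret, value)"
    sig = hmac.new(SECRET, val.encode("utf-8"), hashlib.sha256).hexdigest()
    return "%s|%s" % (val, sig)

def check_secure_val(secure_val):
    # recompute the signature from the value part and compare in constant time
    val = secure_val.split("|")[0]
    if hmac.compare_digest(secure_val, make_secure_val(val)):
        return val

if __name__ == "__main__":
    cookie = make_secure_val("42")                 # e.g. a user id
    assert check_secure_val(cookie) == "42"        # genuine cookie accepted
    assert check_secure_val("42|forged") is None   # tampered cookie rejected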
main.py | import hmac
import sqlite3
import urllib2
import time
import datetime
import webapp2
import jinja2
import os
import re
import random
import hashlib
from google.appengine.ext import db
#base template start
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
cook=''
class BaseHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
# params['user'] = self.user
return render_str(template, **params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_cookie(self, name, val):
hashed = make_secure_val(val)
cooked = name +'=' + hashed + ';Path=/'
self.response.headers.add_header('Set-Cookie',str(cooked))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
if cookie_val and check_secure_val(cookie_val):
return cookie_val
def login(self, user):
self.set_cookie('user_id', str(user.key().id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
if uid != None:
uid = check_secure_val(uid)
if uid != None:
self.username = User.by_id(int(uid)).name
else:
self.username = None
else:
self.username = None
#base template end
secret='hiiis'
letters='abcdefghijklmnopqrstuvwxyz'
#hashing and salt stuff
def make_salt(length = 5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
#hashing stuff ends
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
curr_balance = db.IntegerProperty(required=True)
@classmethod
def by_id(cls, uid):
return User.get_by_id(uid)
@classmethod
def by_name(cls, name):
u = User.all().filter('name =', name).get()
return u
@classmethod
def register(cls, name, pw, email = None):
pw_hash = make_pw_hash(name, pw)
return User(name = name,
pw_hash = pw_hash,
email = email,
curr_balance = 0)
@classmethod
def login(cls, name, pw):
u = cls.by_name(name)
# print(u)
if u and valid_pw(name, pw, u.pw_hash):
return u
# account details and related stuff
#
conn = sqlite3.connect('account.db')
# conn.execute('''CREATE TABLE stocks
# (
# uname TEXT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL )
# ''')
# conn.execute('''
# CREATE TABLE transactionRequests
# (
# uname TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL,
# status TEXT,
# PRIMARY KEY(uname, time_stamp))
# ''')
t_now = datetime.datetime.now()
c = conn.cursor()
# inst = "INSERT INTO stocks VALUES ('test','TEST',10,100,?)"
# conn.execute("INSERT INTO stocks VALUES ('test','TEST',10,100,?)", (t_now,))
conn.commit()
c.execute('SELECT * FROM stocks')
print c.fetchall()
#conn.commit()
#functions for basic sign-up
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class SignUp(BaseHandler):
def get(self):
if self.username != None:
self.redirect('/welcome')
# if valid_username(self.username):
# self.render('welcome.html', username = self.username)
else:
self.render('signup-form.html')
def post(self):
uname = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
# self.username = uname
# self.password = password
# self.email = email
have_error = False
params = dict(username = uname,
email = email)
if not valid_username(uname):
params['error_username'] = "That's not a valid username."
have_error = True
if not valid_password(password):
params['error_password'] = "That wasn't a valid password."
have_error = True
elif password != verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
if not valid_email(email):
params['error_email'] = "That's not a valid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cook('name', uname)
u = User.by_name(uname)
if u:
msg = 'That user already exists.'
self.render('signup-form.html', error_username = msg)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cookie('name',uname)
u = User.register(uname, password, email)
u.put()
self.login(u)
self.redirect('/welcome')
# self.login(u)
# self.redirect('/blog')
class History(BaseHandler):
def get(self):
if self.username != None:
params = {}
sname = self.request.get('sname')
c.execute("SELECT * from transactionRequests where uname=? and stk_symbl=?",(self.username, sname))
params['stk_arr'] = c.fetchall()
params['sname'] = sname
params['username'] = self.username
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
print(params)
self.render('history.html', **params)
else:
self.redirect('/login')
class Welcome(BaseHandler):
def get(self):
if self.username != None:
params = {}
params['username'] = self.username;
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
params['stk_arr'] = dat
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
self.render('welcome.html', **params)
else:
self.redirect('/login')
class Login(BaseHandler):
def get(self):
if self.username != None:
return self.redirect('/welcome')
self.render('login-form.html')
def post(self):
uname=self.request.get('username')
passwd=self.request.get('password')
u=User.login(uname, passwd)
if u:
self.login(u)
self.redirect('/welcome')
else:
msg = 'Invalid login'
self.render('login-form.html', error = msg)
class LogOut(BaseHandler):
def get(self):
self.logout()
self.redirect('/login')
class SStock(BaseHandler):
def get(self):
sname = self.request.get('sname')
params ={}
params['stock_name']=sname
u = User.by_name(self.username)
params['current_balance'] = u.curr_balance
params['username'] = self.username
# c.execute("SELECT * FROM stocks where uname=?",(self.username,))
# dat = c.fetchall()
# self.render('stock_buy.html',stk_arr=dat)
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
global dat
dat = c.fetchone()
params['stk_arr']=dat
print(dat)
if dat:
params['sell_opt'] = True
self.render('stock_page.html',**params)
def post(self):
global dat
req = self.request.get('req')
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = self.request.get('stk_valu')
u = User.by_name(self.username)
print(req)
if(req == 'buy'):
print(req)
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(u.curr_balance > tot_cost):
u.curr_balance = int(u.curr_balance - tot_cost)
u.put()
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
tmp = c.fetchone()
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--SUCCESS"))
if tmp:
print('in if')
tmp2 = int(tmp[2])
tmp3 = float(tmp[3])
avg = ((tmp2 * tmp3) + int(stk_qty) * float(stk_price)) / (tmp2 + int(stk_qty))
print(tmp2 + int(stk_qty))
print("UPDATE stocks SET stk_qty=? and stk_price=? where stk_symbl=? and uname=?", ((tmp2 + int(stk_qty)), avg, sname, self.username, ))
conn.execute("UPDATE stocks SET stk_qty=?, stk_price=? where uname=? and stk_symbl=?", ((tmp2 + int(stk_qty)), avg, self.username, sname))
conn.commit()
else:
print('hello world')
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
print('helo transaction done')
# self.write('transaction complete')
# self.render('success.html')
# time.sleep(5)
self.redirect('/welcome')
else:
# self.render('regret.html')
# time.sleep(5)
# self.redirect('/stock_info?sname='+ sname)
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--FAILED"))
conn.commit()
self.redirect('/regret')
# else
if (req == 'sell'):
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(int(stk_qty) > dat[2]):
|
if(int(stk_qty) == int(dat[2])):
print('equal')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
conn.execute("DELETE FROM stocks WHERE stk_symbl=?",(sname,))
u.curr_balance = int(u.curr_balance + tot_cost)
u.put()
else:
print('not hi')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--SUCCESS"))
tmp = int(dat[2]) - int(stk_qty)
u.curr_balance = int(u.curr_balance + tot_cost)
conn.execute("UPDATE stocks SET stk_qty=? where stk_symbl=?",(tmp,sname))
u.put()
conn.commit()
self.redirect('/welcome')
class BuyS(BaseHandler):
def get(self):
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
self.render('stock_buy.html',stk_arr=dat)
def post(self):
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = 100
t_now = datetime.datetime.now()
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
self.write('success')
class Regret(BaseHandler):
def get(self):
self.render('regret.html');
app = webapp2.WSGIApplication([
('/sign_up', SignUp),
('/login', Login),
('/logout', LogOut),
('/welcome', Welcome),
('/history', History),
('/stock_info', SStock),
('/buy_stk', BuyS),
('/regret', Regret)
], debug=True)
| conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
print('naaah boy') | conditional_block |
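In the buy branch above, when the user already holds the symbol, the handler recomputes the holding's price as a quantity-weighted average of the old position and the new purchase (the avg = ((tmp2 * tmp3) + ...) line). A standalone sketch of that calculation, with illustrative names rather than the handler's temporaries:

def average_cost(held_qty, held_price, buy_qty, buy_price):
    """New average price after buying buy_qty more shares at buy_price."""
    total_cost = held_qty * held_price + buy_qty * buy_price
    return total_cost / (held_qty + buy_qty)

if __name__ == "__main__":
    # 10 shares held at 100.0, buy 10 more at 120.0 -> new average is 110.0
    print(average_cost(10, 100.0, 10, 120.0))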
main.py | import hmac
import sqlite3
import urllib2
import time
import datetime
import webapp2
import jinja2
import os
import re
import random
import hashlib
from google.appengine.ext import db
#base template start
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
cook=''
class BaseHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
# params['user'] = self.user
return render_str(template, **params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_cookie(self, name, val):
hashed = make_secure_val(val)
cooked = name +'=' + hashed + ';Path=/'
self.response.headers.add_header('Set-Cookie',str(cooked))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
if cookie_val and check_secure_val(cookie_val):
return cookie_val
def login(self, user):
self.set_cookie('user_id', str(user.key().id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
if uid != None:
uid = check_secure_val(uid)
if uid != None:
self.username = User.by_id(int(uid)).name
else:
self.username = None
else:
self.username = None
#base template end
secret='hiiis'
letters='abcdefghijklmnopqrstuvwxyz'
#hashing and salt stuff
def make_salt(length = 5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
#hashing stuff ends
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
curr_balance = db.IntegerProperty(required=True)
@classmethod
def by_id(cls, uid):
return User.get_by_id(uid)
@classmethod
def by_name(cls, name):
u = User.all().filter('name =', name).get()
return u
@classmethod
def register(cls, name, pw, email = None):
pw_hash = make_pw_hash(name, pw)
return User(name = name,
pw_hash = pw_hash,
email = email,
curr_balance = 0)
@classmethod
def login(cls, name, pw):
u = cls.by_name(name)
# print(u)
if u and valid_pw(name, pw, u.pw_hash):
return u
# account details and related stuff
#
conn = sqlite3.connect('account.db')
# conn.execute('''CREATE TABLE stocks
# (
# uname TEXT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL )
# ''')
# conn.execute('''
# CREATE TABLE transactionRequests
# (
# uname TEXT NOT NULL,
# stk_qty INT NOT NULL,
# stk_symbl TEXT NOT NULL,
# stk_price INT NOT NULL,
# time_stamp TIMESTAMP NOT NULL,
# status TEXT,
# PRIMARY KEY(uname, time_stamp))
# ''')
t_now = datetime.datetime.now()
c = conn.cursor()
# inst = "INSERT INTO stocks VALUES ('test','TEST',10,100,?)"
# conn.execute("INSERT INTO stocks VALUES ('test','TEST',10,100,?)", (t_now,))
conn.commit()
c.execute('SELECT * FROM stocks')
print c.fetchall()
#conn.commit()
#functions for basic sign-up
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class SignUp(BaseHandler):
def get(self):
if self.username != None:
self.redirect('/welcome')
# if valid_username(self.username):
# self.render('welcome.html', username = self.username)
else:
self.render('signup-form.html')
def post(self):
uname = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
# self.username = uname
# self.password = password
# self.email = email
have_error = False
params = dict(username = uname,
email = email)
if not valid_username(uname):
params['error_username'] = "That's not a valid username."
have_error = True
if not valid_password(password):
params['error_password'] = "That wasn't a valid password."
have_error = True
elif password != verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
if not valid_email(email):
params['error_email'] = "That's not a valid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cook('name', uname)
u = User.by_name(uname)
if u:
msg = 'That user already exists.'
self.render('signup-form.html', error_username = msg)
else:
# cooked = 'name=' + uname + ';Path=/'
# self.response.headers.add_header('Set-Cookie',str(cooked))
# self.set_cookie('name',uname)
u = User.register(uname, password, email)
u.put()
self.login(u)
self.redirect('/welcome')
# self.login(u)
# self.redirect('/blog')
class History(BaseHandler):
def get(self):
if self.username != None:
params = {}
sname = self.request.get('sname')
c.execute("SELECT * from transactionRequests where uname=? and stk_symbl=?",(self.username, sname))
params['stk_arr'] = c.fetchall()
params['sname'] = sname
params['username'] = self.username
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
print(params)
self.render('history.html', **params)
else:
self.redirect('/login')
class Welcome(BaseHandler):
def get(self):
if self.username != None:
params = {}
params['username'] = self.username;
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
params['stk_arr'] = dat
u = User.by_name(self.username)
print(u.curr_balance)
params['current_balance'] = u.curr_balance
self.render('welcome.html', **params)
else:
self.redirect('/login')
class Login(BaseHandler):
def get(self):
if self.username != None:
return self.redirect('/welcome')
self.render('login-form.html')
def | (self):
uname=self.request.get('username')
passwd=self.request.get('password')
u=User.login(uname, passwd)
if u:
self.login(u)
self.redirect('/welcome')
else:
msg = 'Invalid login'
self.render('login-form.html', error = msg)
class LogOut(BaseHandler):
def get(self):
self.logout()
self.redirect('/login')
class SStock(BaseHandler):
def get(self):
sname = self.request.get('sname')
params ={}
params['stock_name']=sname
u = User.by_name(self.username)
params['current_balance'] = u.curr_balance
params['username'] = self.username
# c.execute("SELECT * FROM stocks where uname=?",(self.username,))
# dat = c.fetchall()
# self.render('stock_buy.html',stk_arr=dat)
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
global dat
dat = c.fetchone()
params['stk_arr']=dat
print(dat)
if dat:
params['sell_opt'] = True
self.render('stock_page.html',**params)
def post(self):
global dat
req = self.request.get('req')
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = self.request.get('stk_valu')
u = User.by_name(self.username)
print(req)
if(req == 'buy'):
print(req)
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(u.curr_balance > tot_cost):
u.curr_balance = int(u.curr_balance - tot_cost)
u.put()
c.execute("SELECT * FROM stocks where uname=? and stk_symbl=?",(self.username,sname))
tmp = c.fetchone()
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--SUCCESS"))
if tmp:
print('in if')
tmp2 = int(tmp[2])
tmp3 = float(tmp[3])
avg = ((tmp2 * tmp3) + int(stk_qty) * float(stk_price)) / (tmp2 + int(stk_qty))
print(tmp2 + int(stk_qty))
print("UPDATE stocks SET stk_qty=? and stk_price=? where stk_symbl=? and uname=?", ((tmp2 + int(stk_qty)), avg, sname, self.username, ))
conn.execute("UPDATE stocks SET stk_qty=?, stk_price=? where uname=? and stk_symbl=?", ((tmp2 + int(stk_qty)), avg, self.username, sname))
conn.commit()
else:
print('hello world')
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
print('helo transaction done')
# self.write('transaction complete')
# self.render('success.html')
# time.sleep(5)
self.redirect('/welcome')
else:
# self.render('regret.html')
# time.sleep(5)
# self.redirect('/stock_info?sname='+ sname)
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"BUY--FAILED"))
conn.commit()
self.redirect('/regret')
# else
if (req == 'sell'):
t_now = datetime.datetime.now()
tot_cost = int(stk_qty) * float(stk_price)
if(int(stk_qty) > dat[2]):
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
print('naaah boy')
if(int(stk_qty) == int(dat[2])):
print('equal')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--FAILED"))
conn.execute("DELETE FROM stocks WHERE stk_symbl=?",(sname,))
u.curr_balance = int(u.curr_balance + tot_cost)
u.put()
else:
print('not hi')
conn.execute("INSERT INTO transactionRequests VALUES (?,?,?,?,?,?)", (self.username,stk_qty,sname,stk_price,t_now,"SELL--SUCCESS"))
tmp = int(dat[2]) - int(stk_qty)
u.curr_balance = int(u.curr_balance + tot_cost)
conn.execute("UPDATE stocks SET stk_qty=? where stk_symbl=?",(tmp,sname))
u.put()
conn.commit()
self.redirect('/welcome')
class BuyS(BaseHandler):
def get(self):
c.execute("SELECT * FROM stocks where uname=?",(self.username,))
dat = c.fetchall()
self.render('stock_buy.html',stk_arr=dat)
def post(self):
sname = self.request.get('sname')
stk_qty = self.request.get('qty')
stk_price = 100
t_now = datetime.datetime.now()
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)", (self.username,sname,stk_qty,stk_price,t_now))
conn.commit()
self.write('success')
class Regret(BaseHandler):
def get(self):
self.render('regret.html');
app = webapp2.WSGIApplication([
('/sign_up', SignUp),
('/login', Login),
('/logout', LogOut),
('/welcome', Welcome),
('/history', History),
('/stock_info', SStock),
('/buy_stk', BuyS),
('/regret', Regret)
], debug=True)
| post | identifier_name |
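All of the handlers above talk to SQLite through parameterized queries: the SQL text keeps ? placeholders and the user-supplied values travel in a separate tuple. A self-contained sketch of that pattern against an in-memory database (table and column names mirror the app's stocks table; the data is made up):

import datetime
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("""CREATE TABLE stocks
                (uname TEXT, stk_symbl TEXT, stk_qty INT,
                 stk_price REAL, time_stamp TIMESTAMP)""")
now = datetime.datetime.now()
# placeholders keep values out of the SQL text, so quoting is never an issue
conn.execute("INSERT INTO stocks VALUES (?,?,?,?,?)",
             ("test", "TEST", 10, 100.0, now))
conn.commit()
cur = conn.execute("SELECT stk_symbl, stk_qty FROM stocks WHERE uname=?",
                   ("test",))
print(cur.fetchall())   # -> [('TEST', 10)]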
exception.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(non_snake_case)]
use std::fmt::Debug;
use std::fmt::Display;
use std::fmt::Formatter;
use std::net::AddrParseError;
use std::string::FromUtf8Error;
use std::sync::Arc;
use backtrace::Backtrace;
use thiserror::Error;
use tonic::Code;
use tonic::Status;
pub static ABORT_SESSION: u16 = 42;
pub static ABORT_QUERY: u16 = 43;
#[derive(Clone)]
pub enum ErrorCodeBacktrace {
Serialized(Arc<String>),
Origin(Arc<Backtrace>),
}
impl ToString for ErrorCodeBacktrace {
fn to_string(&self) -> String {
match self {
ErrorCodeBacktrace::Serialized(backtrace) => Arc::as_ref(backtrace).clone(),
ErrorCodeBacktrace::Origin(backtrace) => {
format!("{:?}", backtrace)
}
}
}
}
#[derive(Error)]
pub struct ErrorCode {
code: u16,
display_text: String,
// cause is only used to contain an `anyhow::Error`.
// TODO: remove `cause` when we completely get rid of `anyhow::Error`.
cause: Option<Box<dyn std::error::Error + Sync + Send>>,
backtrace: Option<ErrorCodeBacktrace>,
}
impl ErrorCode {
pub fn code(&self) -> u16 {
self.code
}
pub fn message(&self) -> String {
self.cause
.as_ref()
.map(|cause| format!("{}\n{:?}", self.display_text, cause))
.unwrap_or_else(|| self.display_text.clone())
}
pub fn add_message(self, msg: impl AsRef<str>) -> Self {
Self {
code: self.code(),
display_text: format!("{}\n{}", msg.as_ref(), self.display_text),
cause: self.cause,
backtrace: self.backtrace,
}
}
pub fn add_message_back(self, msg: impl AsRef<str>) -> Self {
Self {
code: self.code(),
display_text: format!("{}{}", self.display_text, msg.as_ref()),
cause: self.cause,
backtrace: self.backtrace,
}
}
pub fn backtrace(&self) -> Option<ErrorCodeBacktrace> {
self.backtrace.clone()
}
pub fn backtrace_str(&self) -> String {
match self.backtrace.as_ref() {
None => "".to_string(),
Some(backtrace) => backtrace.to_string(),
}
}
}
macro_rules! build_exceptions {
($($body:ident($code:expr)),*$(,)*) => {
impl ErrorCode {
$(
pub fn $body(display_text: impl Into<String>) -> ErrorCode {
ErrorCode {
code: $code,
display_text: display_text.into(),
cause: None,
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
paste::item! {
pub fn [< $body:snake _ code >] () -> u16{
$code
}
pub fn [< $body Code >] () -> u16{
$code
}
}
)*
}
}
}
build_exceptions! {
Ok(0),
UnknownTypeOfQuery(1),
UnImplement(2),
UnknownDatabase(3),
UnknownSetting(4),
SyntaxException(5),
BadArguments(6),
IllegalDataType(7),
UnknownFunction(8),
IllegalFunctionState(9),
BadDataValueType(10),
UnknownPlan(11),
IllegalPipelineState(12),
BadTransformType(13),
IllegalTransformConnectionState(14),
LogicalError(15),
EmptyData(16),
DataStructMissMatch(17),
BadDataArrayLength(18),
UnknownContextID(19),
UnknownVariable(20),
UnknownTableFunction(21),
BadOption(22),
CannotReadFile(23),
ParquetError(24),
UnknownTable(25),
IllegalAggregateExp(26),
UnknownAggregateFunction(27),
NumberArgumentsNotMatch(28),
NotFoundStream(29),
EmptyDataFromServer(30),
NotFoundLocalNode(31),
PlanScheduleError(32),
BadPlanInputs(33),
DuplicateClusterNode(34),
NotFoundClusterNode(35),
BadAddressFormat(36),
DnsParseError(37),
CannotConnectNode(38),
DuplicateGetStream(39),
Timeout(40),
TooManyUserConnections(41),
AbortedSession(ABORT_SESSION),
AbortedQuery(ABORT_QUERY),
NotFoundSession(44),
CannotListenerPort(45),
BadBytes(46),
InitPrometheusFailure(47),
ScalarSubqueryBadRows(48),
Overflow(49),
InvalidMetaBinaryFormat(50),
AuthenticateFailure(51),
TLSConfigurationFailure(52),
UnknownSession(53),
UnexpectedError(54),
DateTimeParseError(55),
BadPredicateRows(56),
SHA1CheckFailed(57),
// uncategorized
UnexpectedResponseType(600),
UnknownException(1000),
TokioError(1001),
}
// Store errors
build_exceptions! {
FileMetaNotFound(2001),
FileDamaged(2002),
// dfs node errors
UnknownNode(2101),
// meta service errors
// meta service does not work.
MetaServiceError(2201),
// meta service is shut down.
MetaServiceShutdown(2202),
// meta service is unavailable for now.
MetaServiceUnavailable(2203),
// config errors
InvalidConfig(2301),
// meta store errors
MetaStoreDamaged(2401),
MetaStoreAlreadyExists(2402),
MetaStoreNotFound(2403),
ConcurrentSnapshotInstall(2404),
IllegalSnapshot(2405),
UnknownTableId(2406),
TableVersionMissMatch(2407),
// KVSrv server error
KVSrvError(2501),
// FS error
IllegalFileName(2601),
// Store server error
DatabendStoreError(2701),
// TODO
// We may need to separate front-end errors from API errors (and system errors?)
// That may depend on which components are using these error codes, and for what purposes;
// let's figure it out later.
// user-api error codes
UnknownUser(3000),
UserAlreadyExists(3001),
IllegalUserInfoFormat(3002),
// meta-api error codes
DatabaseAlreadyExists(4001),
TableAlreadyExists(4003),
IllegalMetaOperationArgument(4004),
IllegalSchema(4005),
IllegalMetaState(4006),
MetaNodeInternalError(4007),
TruncateTableFailedError(4008),
CommitTableError(4009),
// namespace error.
NamespaceUnknownNode(4058),
NamespaceNodeAlreadyExists(4059),
NamespaceIllegalNodeFormat(4050),
// storage-api error codes
IllegalScanPlan(5000),
ReadFileError(5001),
BrokenChannel(5002),
// kv-api error codes
UnknownKey(6000),
// DAL error
DALTransportError(7000),
UnknownStorageSchemeName(7001),
SecretKeyNotSet(7002),
// datasource error
DuplicatedTableEngineProvider(8000),
UnknownDatabaseEngine(8001),
UnknownTableEngine(8002),
DuplicatedDatabaseEngineProvider(8003),
}
// General errors
build_exceptions! {
// A task that already started and can not start twice.
AlreadyStarted(7101),
// A task that already stopped and can not stop twice.
AlreadyStopped(7102),
// Trying to cast to an invalid type
InvalidCast(7201),
}
pub type Result<T> = std::result::Result<T, ErrorCode>;
impl Debug for ErrorCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Code: {}, displayText = {}.",
self.code(),
self.message(),
)?;
match self.backtrace.as_ref() {
None => Ok(()), // no backtrace
Some(backtrace) => {
// TODO: Custom stack frame format for print
match backtrace {
ErrorCodeBacktrace::Origin(backtrace) => write!(f, "\n\n{:?}", backtrace),
ErrorCodeBacktrace::Serialized(backtrace) => write!(f, "\n\n{:?}", backtrace),
}
}
}
}
}
impl Display for ErrorCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Code: {}, displayText = {}.",
self.code(),
self.message(),
)
}
}
#[derive(Error)]
enum OtherErrors {
AnyHow { error: anyhow::Error },
}
impl Display for OtherErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
OtherErrors::AnyHow { error } => write!(f, "{}", error),
}
}
}
impl Debug for OtherErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
OtherErrors::AnyHow { error } => write!(f, "{:?}", error),
}
}
}
impl From<anyhow::Error> for ErrorCode {
fn from(error: anyhow::Error) -> Self {
ErrorCode {
code: 1002,
display_text: format!("{}, source: {:?}", error, error.source()),
cause: Some(Box::new(OtherErrors::AnyHow { error })),
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
}
impl From<std::num::ParseIntError> for ErrorCode {
fn from(error: std::num::ParseIntError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::num::ParseFloatError> for ErrorCode {
fn from(error: std::num::ParseFloatError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<common_arrow::arrow::error::ArrowError> for ErrorCode {
fn from(error: common_arrow::arrow::error::ArrowError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<serde_json::Error> for ErrorCode {
fn from(error: serde_json::Error) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<sqlparser::parser::ParserError> for ErrorCode {
fn from(error: sqlparser::parser::ParserError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::io::Error> for ErrorCode {
fn from(error: std::io::Error) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::net::AddrParseError> for ErrorCode {
fn from(error: AddrParseError) -> Self {
ErrorCode::BadAddressFormat(format!("Bad address format, cause: {}", error))
}
}
impl From<FromUtf8Error> for ErrorCode {
fn from(error: FromUtf8Error) -> Self {
ErrorCode::BadBytes(format!(
"Bad bytes, cannot parse bytes with UTF8, cause: {}",
error
))
}
}
impl From<prost::EncodeError> for ErrorCode {
fn from(error: prost::EncodeError) -> Self {
ErrorCode::BadBytes(format!(
"Bad bytes, cannot parse bytes with prost, cause: {}", |
impl ErrorCode {
pub fn from_std_error<T: std::error::Error>(error: T) -> Self {
ErrorCode {
code: 1002,
display_text: format!("{}", error),
cause: None,
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
pub fn create(
code: u16,
display_text: String,
backtrace: Option<ErrorCodeBacktrace>,
) -> ErrorCode {
ErrorCode {
code,
display_text,
cause: None,
backtrace,
}
}
}
/// Provides the `map_err_to_code` method for `Result`.
///
/// ```
/// use common_exception::ToErrorCode;
/// use common_exception::ErrorCode;
///
/// let x: std::result::Result<(), std::fmt::Error> = Err(std::fmt::Error {});
/// let y: common_exception::Result<()> =
/// x.map_err_to_code(ErrorCode::UnknownException, || 123);
///
/// assert_eq!(
/// "Code: 1000, displayText = 123, cause: an error occurred when formatting an argument.",
/// format!("{}", y.unwrap_err())
/// );
/// ```
pub trait ToErrorCode<T, E, CtxFn>
where E: Display + Send + Sync + 'static
{
/// Wrap the error value with ErrorCode. It is lazily evaluated:
/// only when an error does occur.
///
/// `err_code_fn` is one of the ErrorCode builder function such as `ErrorCode::Ok`.
/// `context_fn` builds display_text for the ErrorCode.
fn map_err_to_code<ErrFn, D>(self, err_code_fn: ErrFn, context_fn: CtxFn) -> Result<T>
where
ErrFn: FnOnce(String) -> ErrorCode,
D: Display,
CtxFn: FnOnce() -> D;
}
impl<T, E, CtxFn> ToErrorCode<T, E, CtxFn> for std::result::Result<T, E>
where E: Display + Send + Sync + 'static
{
fn map_err_to_code<ErrFn, D>(self, make_exception: ErrFn, context_fn: CtxFn) -> Result<T>
where
ErrFn: FnOnce(String) -> ErrorCode,
D: Display,
CtxFn: FnOnce() -> D,
{
self.map_err(|error| {
let err_text = format!("{}, cause: {}", context_fn(), error);
make_exception(err_text)
})
}
}
// === ser/de to/from tonic::Status ===
#[derive(serde::Serialize, serde::Deserialize)]
struct SerializedError {
code: u16,
message: String,
backtrace: String,
}
impl From<&Status> for ErrorCode {
fn from(status: &Status) -> Self {
match status.code() {
tonic::Code::Unknown => {
let details = status.details();
if details.is_empty() {
return ErrorCode::UnknownException(status.message());
}
match serde_json::from_slice::<SerializedError>(details) {
Err(error) => ErrorCode::from(error),
Ok(serialized_error) => match serialized_error.backtrace.len() {
0 => {
ErrorCode::create(serialized_error.code, serialized_error.message, None)
}
_ => ErrorCode::create(
serialized_error.code,
serialized_error.message,
Some(ErrorCodeBacktrace::Serialized(Arc::new(
serialized_error.backtrace,
))),
),
},
}
}
_ => ErrorCode::UnImplement(status.to_string()),
}
}
}
impl From<Status> for ErrorCode {
fn from(status: Status) -> Self {
(&status).into()
}
}
impl From<ErrorCode> for Status {
fn from(err: ErrorCode) -> Self {
let rst_json = serde_json::to_vec::<SerializedError>(&SerializedError {
code: err.code(),
message: err.message(),
backtrace: {
let mut str = err.backtrace_str();
str.truncate(2 * 1024);
str
},
});
match rst_json {
Ok(serialized_error_json) => {
// Code::Internal will be used by h2, if something goes wrong internally.
// To distinguish from that, we use Code::Unknown here
Status::with_details(Code::Unknown, err.message(), serialized_error_json.into())
}
Err(error) => Status::unknown(error.to_string()),
}
}
}
impl Clone for ErrorCode {
fn clone(&self) -> Self {
ErrorCode::create(self.code(), self.message(), self.backtrace())
}
} | error
))
}
} | random_line_split |
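exception.rs above couples a numeric error code with display text and generates one constructor per error name through the build_exceptions! macro. A compile-on-its-own miniature of that pattern (not the databend crate itself: no backtraces, causes or tonic interop, and the names here are illustrative):

#![allow(non_snake_case, dead_code)]
use std::fmt;

#[derive(Debug, Clone)]
struct ErrorCode {
    code: u16,
    display_text: String,
}

macro_rules! build_exceptions {
    ($($body:ident($code:expr)),* $(,)*) => {
        impl ErrorCode {
            $(
                fn $body(display_text: impl Into<String>) -> ErrorCode {
                    ErrorCode { code: $code, display_text: display_text.into() }
                }
            )*
        }
    };
}

build_exceptions! { Ok(0), UnknownDatabase(3), BadArguments(6) }

impl fmt::Display for ErrorCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Code: {}, displayText = {}.", self.code, self.display_text)
    }
}

type Result<T> = std::result::Result<T, ErrorCode>;

fn lookup(db: &str) -> Result<()> {
    Err(ErrorCode::UnknownDatabase(format!("database {} not found", db)))
}

fn main() {
    // prints: Code: 3, displayText = database foo not found.
    println!("{}", lookup("foo").unwrap_err());
}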
exception.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(non_snake_case)]
use std::fmt::Debug;
use std::fmt::Display;
use std::fmt::Formatter;
use std::net::AddrParseError;
use std::string::FromUtf8Error;
use std::sync::Arc;
use backtrace::Backtrace;
use thiserror::Error;
use tonic::Code;
use tonic::Status;
pub static ABORT_SESSION: u16 = 42;
pub static ABORT_QUERY: u16 = 43;
#[derive(Clone)]
pub enum ErrorCodeBacktrace {
Serialized(Arc<String>),
Origin(Arc<Backtrace>),
}
impl ToString for ErrorCodeBacktrace {
fn to_string(&self) -> String {
match self {
ErrorCodeBacktrace::Serialized(backtrace) => Arc::as_ref(backtrace).clone(),
ErrorCodeBacktrace::Origin(backtrace) => {
format!("{:?}", backtrace)
}
}
}
}
#[derive(Error)]
pub struct ErrorCode {
code: u16,
display_text: String,
// cause is only used to contain an `anyhow::Error`.
// TODO: remove `cause` when we completely get rid of `anyhow::Error`.
cause: Option<Box<dyn std::error::Error + Sync + Send>>,
backtrace: Option<ErrorCodeBacktrace>,
}
impl ErrorCode {
pub fn code(&self) -> u16 {
self.code
}
pub fn message(&self) -> String {
self.cause
.as_ref()
.map(|cause| format!("{}\n{:?}", self.display_text, cause))
.unwrap_or_else(|| self.display_text.clone())
}
pub fn add_message(self, msg: impl AsRef<str>) -> Self {
Self {
code: self.code(),
display_text: format!("{}\n{}", msg.as_ref(), self.display_text),
cause: self.cause,
backtrace: self.backtrace,
}
}
pub fn add_message_back(self, msg: impl AsRef<str>) -> Self {
Self {
code: self.code(),
display_text: format!("{}{}", self.display_text, msg.as_ref()),
cause: self.cause,
backtrace: self.backtrace,
}
}
pub fn backtrace(&self) -> Option<ErrorCodeBacktrace> {
self.backtrace.clone()
}
pub fn backtrace_str(&self) -> String {
match self.backtrace.as_ref() {
None => "".to_string(),
Some(backtrace) => backtrace.to_string(),
}
}
}
macro_rules! build_exceptions {
($($body:ident($code:expr)),*$(,)*) => {
impl ErrorCode {
$(
pub fn $body(display_text: impl Into<String>) -> ErrorCode {
ErrorCode {
code: $code,
display_text: display_text.into(),
cause: None,
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
paste::item! {
pub fn [< $body:snake _ code >] () -> u16{
$code
}
pub fn [< $body Code >] () -> u16{
$code
}
}
)*
}
}
}
build_exceptions! {
Ok(0),
UnknownTypeOfQuery(1),
UnImplement(2),
UnknownDatabase(3),
UnknownSetting(4),
SyntaxException(5),
BadArguments(6),
IllegalDataType(7),
UnknownFunction(8),
IllegalFunctionState(9),
BadDataValueType(10),
UnknownPlan(11),
IllegalPipelineState(12),
BadTransformType(13),
IllegalTransformConnectionState(14),
LogicalError(15),
EmptyData(16),
DataStructMissMatch(17),
BadDataArrayLength(18),
UnknownContextID(19),
UnknownVariable(20),
UnknownTableFunction(21),
BadOption(22),
CannotReadFile(23),
ParquetError(24),
UnknownTable(25),
IllegalAggregateExp(26),
UnknownAggregateFunction(27),
NumberArgumentsNotMatch(28),
NotFoundStream(29),
EmptyDataFromServer(30),
NotFoundLocalNode(31),
PlanScheduleError(32),
BadPlanInputs(33),
DuplicateClusterNode(34),
NotFoundClusterNode(35),
BadAddressFormat(36),
DnsParseError(37),
CannotConnectNode(38),
DuplicateGetStream(39),
Timeout(40),
TooManyUserConnections(41),
AbortedSession(ABORT_SESSION),
AbortedQuery(ABORT_QUERY),
NotFoundSession(44),
CannotListenerPort(45),
BadBytes(46),
InitPrometheusFailure(47),
ScalarSubqueryBadRows(48),
Overflow(49),
InvalidMetaBinaryFormat(50),
AuthenticateFailure(51),
TLSConfigurationFailure(52),
UnknownSession(53),
UnexpectedError(54),
DateTimeParseError(55),
BadPredicateRows(56),
SHA1CheckFailed(57),
// uncategorized
UnexpectedResponseType(600),
UnknownException(1000),
TokioError(1001),
}
// Store errors
build_exceptions! {
FileMetaNotFound(2001),
FileDamaged(2002),
// dfs node errors
UnknownNode(2101),
// meta service errors
// meta service does not work.
MetaServiceError(2201),
// meta service is shut down.
MetaServiceShutdown(2202),
// meta service is unavailable for now.
MetaServiceUnavailable(2203),
// config errors
InvalidConfig(2301),
// meta store errors
MetaStoreDamaged(2401),
MetaStoreAlreadyExists(2402),
MetaStoreNotFound(2403),
ConcurrentSnapshotInstall(2404),
IllegalSnapshot(2405),
UnknownTableId(2406),
TableVersionMissMatch(2407),
// KVSrv server error
KVSrvError(2501),
// FS error
IllegalFileName(2601),
// Store server error
DatabendStoreError(2701),
// TODO
// We may need to separate front-end errors from API errors (and system errors?)
// That may depend on which components are using these error codes, and for what purposes;
// let's figure it out later.
// user-api error codes
UnknownUser(3000),
UserAlreadyExists(3001),
IllegalUserInfoFormat(3002),
// meta-api error codes
DatabaseAlreadyExists(4001),
TableAlreadyExists(4003),
IllegalMetaOperationArgument(4004),
IllegalSchema(4005),
IllegalMetaState(4006),
MetaNodeInternalError(4007),
TruncateTableFailedError(4008),
CommitTableError(4009),
// namespace error.
NamespaceUnknownNode(4058),
NamespaceNodeAlreadyExists(4059),
NamespaceIllegalNodeFormat(4050),
// storage-api error codes
IllegalScanPlan(5000),
ReadFileError(5001),
BrokenChannel(5002),
// kv-api error codes
UnknownKey(6000),
// DAL error
DALTransportError(7000),
UnknownStorageSchemeName(7001),
SecretKeyNotSet(7002),
// datasource error
DuplicatedTableEngineProvider(8000),
UnknownDatabaseEngine(8001),
UnknownTableEngine(8002),
DuplicatedDatabaseEngineProvider(8003),
}
// General errors
build_exceptions! {
// A task that already started and can not start twice.
AlreadyStarted(7101),
// A task that already stopped and can not stop twice.
AlreadyStopped(7102),
// Trying to cast to an invalid type
InvalidCast(7201),
}
pub type Result<T> = std::result::Result<T, ErrorCode>;
impl Debug for ErrorCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Code: {}, displayText = {}.",
self.code(),
self.message(),
)?;
match self.backtrace.as_ref() {
None => Ok(()), // no backtrace
Some(backtrace) => {
// TODO: Custom stack frame format for print
match backtrace {
ErrorCodeBacktrace::Origin(backtrace) => write!(f, "\n\n{:?}", backtrace),
ErrorCodeBacktrace::Serialized(backtrace) => write!(f, "\n\n{:?}", backtrace),
}
}
}
}
}
impl Display for ErrorCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Code: {}, displayText = {}.",
self.code(),
self.message(),
)
}
}
#[derive(Error)]
enum OtherErrors {
AnyHow { error: anyhow::Error },
}
impl Display for OtherErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
OtherErrors::AnyHow { error } => write!(f, "{}", error),
}
}
}
impl Debug for OtherErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
OtherErrors::AnyHow { error } => write!(f, "{:?}", error),
}
}
}
impl From<anyhow::Error> for ErrorCode {
fn from(error: anyhow::Error) -> Self {
ErrorCode {
code: 1002,
display_text: format!("{}, source: {:?}", error, error.source()),
cause: Some(Box::new(OtherErrors::AnyHow { error })),
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
}
impl From<std::num::ParseIntError> for ErrorCode {
fn | (error: std::num::ParseIntError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::num::ParseFloatError> for ErrorCode {
fn from(error: std::num::ParseFloatError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<common_arrow::arrow::error::ArrowError> for ErrorCode {
fn from(error: common_arrow::arrow::error::ArrowError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<serde_json::Error> for ErrorCode {
fn from(error: serde_json::Error) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<sqlparser::parser::ParserError> for ErrorCode {
fn from(error: sqlparser::parser::ParserError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::io::Error> for ErrorCode {
fn from(error: std::io::Error) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::net::AddrParseError> for ErrorCode {
fn from(error: AddrParseError) -> Self {
ErrorCode::BadAddressFormat(format!("Bad address format, cause: {}", error))
}
}
impl From<FromUtf8Error> for ErrorCode {
fn from(error: FromUtf8Error) -> Self {
ErrorCode::BadBytes(format!(
"Bad bytes, cannot parse bytes with UTF8, cause: {}",
error
))
}
}
impl From<prost::EncodeError> for ErrorCode {
fn from(error: prost::EncodeError) -> Self {
ErrorCode::BadBytes(format!(
"Bad bytes, cannot parse bytes with prost, cause: {}",
error
))
}
}
impl ErrorCode {
pub fn from_std_error<T: std::error::Error>(error: T) -> Self {
ErrorCode {
code: 1002,
display_text: format!("{}", error),
cause: None,
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
pub fn create(
code: u16,
display_text: String,
backtrace: Option<ErrorCodeBacktrace>,
) -> ErrorCode {
ErrorCode {
code,
display_text,
cause: None,
backtrace,
}
}
}
/// Provides the `map_err_to_code` method for `Result`.
///
/// ```
/// use common_exception::ToErrorCode;
/// use common_exception::ErrorCode;
///
/// let x: std::result::Result<(), std::fmt::Error> = Err(std::fmt::Error {});
/// let y: common_exception::Result<()> =
/// x.map_err_to_code(ErrorCode::UnknownException, || 123);
///
/// assert_eq!(
/// "Code: 1000, displayText = 123, cause: an error occurred when formatting an argument.",
/// format!("{}", y.unwrap_err())
/// );
/// ```
pub trait ToErrorCode<T, E, CtxFn>
where E: Display + Send + Sync + 'static
{
/// Wrap the error value with ErrorCode. It is lazily evaluated:
/// only when an error does occur.
///
/// `err_code_fn` is one of the ErrorCode builder function such as `ErrorCode::Ok`.
/// `context_fn` builds display_text for the ErrorCode.
fn map_err_to_code<ErrFn, D>(self, err_code_fn: ErrFn, context_fn: CtxFn) -> Result<T>
where
ErrFn: FnOnce(String) -> ErrorCode,
D: Display,
CtxFn: FnOnce() -> D;
}
impl<T, E, CtxFn> ToErrorCode<T, E, CtxFn> for std::result::Result<T, E>
where E: Display + Send + Sync + 'static
{
fn map_err_to_code<ErrFn, D>(self, make_exception: ErrFn, context_fn: CtxFn) -> Result<T>
where
ErrFn: FnOnce(String) -> ErrorCode,
D: Display,
CtxFn: FnOnce() -> D,
{
self.map_err(|error| {
let err_text = format!("{}, cause: {}", context_fn(), error);
make_exception(err_text)
})
}
}
// === ser/de to/from tonic::Status ===
#[derive(serde::Serialize, serde::Deserialize)]
struct SerializedError {
code: u16,
message: String,
backtrace: String,
}
impl From<&Status> for ErrorCode {
fn from(status: &Status) -> Self {
match status.code() {
tonic::Code::Unknown => {
let details = status.details();
if details.is_empty() {
return ErrorCode::UnknownException(status.message());
}
match serde_json::from_slice::<SerializedError>(details) {
Err(error) => ErrorCode::from(error),
Ok(serialized_error) => match serialized_error.backtrace.len() {
0 => {
ErrorCode::create(serialized_error.code, serialized_error.message, None)
}
_ => ErrorCode::create(
serialized_error.code,
serialized_error.message,
Some(ErrorCodeBacktrace::Serialized(Arc::new(
serialized_error.backtrace,
))),
),
},
}
}
_ => ErrorCode::UnImplement(status.to_string()),
}
}
}
impl From<Status> for ErrorCode {
fn from(status: Status) -> Self {
(&status).into()
}
}
impl From<ErrorCode> for Status {
fn from(err: ErrorCode) -> Self {
let rst_json = serde_json::to_vec::<SerializedError>(&SerializedError {
code: err.code(),
message: err.message(),
backtrace: {
let mut str = err.backtrace_str();
str.truncate(2 * 1024);
str
},
});
match rst_json {
Ok(serialized_error_json) => {
// Code::Internal will be used by h2, if something goes wrong internally.
// To distinguish from that, we use Code::Unknown here
Status::with_details(Code::Unknown, err.message(), serialized_error_json.into())
}
Err(error) => Status::unknown(error.to_string()),
}
}
}
impl Clone for ErrorCode {
fn clone(&self) -> Self {
ErrorCode::create(self.code(), self.message(), self.backtrace())
}
}
| from | identifier_name |
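The ToErrorCode trait above exists so that context for an error is only built when the error actually happens: map_err_to_code takes a closure and calls it on the failure path alone. A standalone sketch of that shape using plain Strings for the error type, so it compiles without the crate (trait and function names here are illustrative, not the crate's API):

use std::fmt::Display;

trait WithContext<T> {
    fn with_context<D: Display, F: FnOnce() -> D>(self, context: F) -> Result<T, String>;
}

impl<T, E: Display> WithContext<T> for Result<T, E> {
    fn with_context<D: Display, F: FnOnce() -> D>(self, context: F) -> Result<T, String> {
        // the closure only runs on the error path; the success path pays nothing
        self.map_err(|e| format!("{}, cause: {}", context(), e))
    }
}

fn main() {
    let parsed: Result<i32, _> = "not a number".parse::<i32>();
    let with_ctx = parsed.with_context(|| "reading config value 'port'");
    // prints something like: reading config value 'port', cause: invalid digit found in string
    println!("{}", with_ctx.unwrap_err());
}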
exception.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(non_snake_case)]
use std::fmt::Debug;
use std::fmt::Display;
use std::fmt::Formatter;
use std::net::AddrParseError;
use std::string::FromUtf8Error;
use std::sync::Arc;
use backtrace::Backtrace;
use thiserror::Error;
use tonic::Code;
use tonic::Status;
pub static ABORT_SESSION: u16 = 42;
pub static ABORT_QUERY: u16 = 43;
#[derive(Clone)]
pub enum ErrorCodeBacktrace {
Serialized(Arc<String>),
Origin(Arc<Backtrace>),
}
impl ToString for ErrorCodeBacktrace {
fn to_string(&self) -> String {
match self {
ErrorCodeBacktrace::Serialized(backtrace) => Arc::as_ref(backtrace).clone(),
ErrorCodeBacktrace::Origin(backtrace) => {
format!("{:?}", backtrace)
}
}
}
}
#[derive(Error)]
pub struct ErrorCode {
code: u16,
display_text: String,
// cause is only used to contain an `anyhow::Error`.
// TODO: remove `cause` when we completely get rid of `anyhow::Error`.
cause: Option<Box<dyn std::error::Error + Sync + Send>>,
backtrace: Option<ErrorCodeBacktrace>,
}
impl ErrorCode {
pub fn code(&self) -> u16 {
self.code
}
pub fn message(&self) -> String {
self.cause
.as_ref()
.map(|cause| format!("{}\n{:?}", self.display_text, cause))
.unwrap_or_else(|| self.display_text.clone())
}
pub fn add_message(self, msg: impl AsRef<str>) -> Self {
Self {
code: self.code(),
display_text: format!("{}\n{}", msg.as_ref(), self.display_text),
cause: self.cause,
backtrace: self.backtrace,
}
}
pub fn add_message_back(self, msg: impl AsRef<str>) -> Self {
Self {
code: self.code(),
display_text: format!("{}{}", self.display_text, msg.as_ref()),
cause: self.cause,
backtrace: self.backtrace,
}
}
pub fn backtrace(&self) -> Option<ErrorCodeBacktrace> {
self.backtrace.clone()
}
pub fn backtrace_str(&self) -> String {
match self.backtrace.as_ref() {
None => "".to_string(),
Some(backtrace) => backtrace.to_string(),
}
}
}
macro_rules! build_exceptions {
($($body:ident($code:expr)),*$(,)*) => {
impl ErrorCode {
$(
pub fn $body(display_text: impl Into<String>) -> ErrorCode {
ErrorCode {
code: $code,
display_text: display_text.into(),
cause: None,
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
paste::item! {
pub fn [< $body:snake _ code >] () -> u16{
$code
}
pub fn [< $body Code >] () -> u16{
$code
}
}
)*
}
}
}
build_exceptions! {
Ok(0),
UnknownTypeOfQuery(1),
UnImplement(2),
UnknownDatabase(3),
UnknownSetting(4),
SyntaxException(5),
BadArguments(6),
IllegalDataType(7),
UnknownFunction(8),
IllegalFunctionState(9),
BadDataValueType(10),
UnknownPlan(11),
IllegalPipelineState(12),
BadTransformType(13),
IllegalTransformConnectionState(14),
LogicalError(15),
EmptyData(16),
DataStructMissMatch(17),
BadDataArrayLength(18),
UnknownContextID(19),
UnknownVariable(20),
UnknownTableFunction(21),
BadOption(22),
CannotReadFile(23),
ParquetError(24),
UnknownTable(25),
IllegalAggregateExp(26),
UnknownAggregateFunction(27),
NumberArgumentsNotMatch(28),
NotFoundStream(29),
EmptyDataFromServer(30),
NotFoundLocalNode(31),
PlanScheduleError(32),
BadPlanInputs(33),
DuplicateClusterNode(34),
NotFoundClusterNode(35),
BadAddressFormat(36),
DnsParseError(37),
CannotConnectNode(38),
DuplicateGetStream(39),
Timeout(40),
TooManyUserConnections(41),
AbortedSession(ABORT_SESSION),
AbortedQuery(ABORT_QUERY),
NotFoundSession(44),
CannotListenerPort(45),
BadBytes(46),
InitPrometheusFailure(47),
ScalarSubqueryBadRows(48),
Overflow(49),
InvalidMetaBinaryFormat(50),
AuthenticateFailure(51),
TLSConfigurationFailure(52),
UnknownSession(53),
UnexpectedError(54),
DateTimeParseError(55),
BadPredicateRows(56),
SHA1CheckFailed(57),
// uncategorized
UnexpectedResponseType(600),
UnknownException(1000),
TokioError(1001),
}
// Store errors
build_exceptions! {
FileMetaNotFound(2001),
FileDamaged(2002),
// dfs node errors
UnknownNode(2101),
// meta service errors
// meta service does not work.
MetaServiceError(2201),
// meta service is shut down.
MetaServiceShutdown(2202),
// meta service is unavailable for now.
MetaServiceUnavailable(2203),
// config errors
InvalidConfig(2301),
// meta store errors
MetaStoreDamaged(2401),
MetaStoreAlreadyExists(2402),
MetaStoreNotFound(2403),
ConcurrentSnapshotInstall(2404),
IllegalSnapshot(2405),
UnknownTableId(2406),
TableVersionMissMatch(2407),
// KVSrv server error
KVSrvError(2501),
// FS error
IllegalFileName(2601),
// Store server error
DatabendStoreError(2701),
// TODO
// We may need to separate front-end errors from API errors (and system errors?)
// That may depend on which components are using these error codes, and for what purposes;
// let's figure it out later.
// user-api error codes
UnknownUser(3000),
UserAlreadyExists(3001),
IllegalUserInfoFormat(3002),
// meta-api error codes
DatabaseAlreadyExists(4001),
TableAlreadyExists(4003),
IllegalMetaOperationArgument(4004),
IllegalSchema(4005),
IllegalMetaState(4006),
MetaNodeInternalError(4007),
TruncateTableFailedError(4008),
CommitTableError(4009),
// namespace error.
NamespaceUnknownNode(4058),
NamespaceNodeAlreadyExists(4059),
NamespaceIllegalNodeFormat(4050),
// storage-api error codes
IllegalScanPlan(5000),
ReadFileError(5001),
BrokenChannel(5002),
// kv-api error codes
UnknownKey(6000),
// DAL error
DALTransportError(7000),
UnknownStorageSchemeName(7001),
SecretKeyNotSet(7002),
// datasource error
DuplicatedTableEngineProvider(8000),
UnknownDatabaseEngine(8001),
UnknownTableEngine(8002),
DuplicatedDatabaseEngineProvider(8003),
}
// General errors
build_exceptions! {
// A task that has already started and can not be started twice.
AlreadyStarted(7101),
// A task that has already stopped and can not be stopped twice.
AlreadyStopped(7102),
// Trying to cast to an invalid type
InvalidCast(7201),
}
pub type Result<T> = std::result::Result<T, ErrorCode>;
impl Debug for ErrorCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Code: {}, displayText = {}.",
self.code(),
self.message(),
)?;
match self.backtrace.as_ref() {
None => Ok(()), // no backtrace
Some(backtrace) => {
// TODO: Custom stack frame format for print
match backtrace {
ErrorCodeBacktrace::Origin(backtrace) => write!(f, "\n\n{:?}", backtrace),
ErrorCodeBacktrace::Serialized(backtrace) => write!(f, "\n\n{:?}", backtrace),
}
}
}
}
}
impl Display for ErrorCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Code: {}, displayText = {}.",
self.code(),
self.message(),
)
}
}
#[derive(Error)]
enum OtherErrors {
AnyHow { error: anyhow::Error },
}
impl Display for OtherErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
OtherErrors::AnyHow { error } => write!(f, "{}", error),
}
}
}
impl Debug for OtherErrors {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
OtherErrors::AnyHow { error } => write!(f, "{:?}", error),
}
}
}
impl From<anyhow::Error> for ErrorCode {
fn from(error: anyhow::Error) -> Self {
ErrorCode {
code: 1002,
display_text: format!("{}, source: {:?}", error, error.source()),
cause: Some(Box::new(OtherErrors::AnyHow { error })),
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
}
impl From<std::num::ParseIntError> for ErrorCode {
fn from(error: std::num::ParseIntError) -> Self |
}
impl From<std::num::ParseFloatError> for ErrorCode {
fn from(error: std::num::ParseFloatError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<common_arrow::arrow::error::ArrowError> for ErrorCode {
fn from(error: common_arrow::arrow::error::ArrowError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<serde_json::Error> for ErrorCode {
fn from(error: serde_json::Error) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<sqlparser::parser::ParserError> for ErrorCode {
fn from(error: sqlparser::parser::ParserError) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::io::Error> for ErrorCode {
fn from(error: std::io::Error) -> Self {
ErrorCode::from_std_error(error)
}
}
impl From<std::net::AddrParseError> for ErrorCode {
fn from(error: AddrParseError) -> Self {
ErrorCode::BadAddressFormat(format!("Bad address format, cause: {}", error))
}
}
impl From<FromUtf8Error> for ErrorCode {
fn from(error: FromUtf8Error) -> Self {
ErrorCode::BadBytes(format!(
"Bad bytes, cannot parse bytes with UTF8, cause: {}",
error
))
}
}
impl From<prost::EncodeError> for ErrorCode {
fn from(error: prost::EncodeError) -> Self {
ErrorCode::BadBytes(format!(
"Bad bytes, cannot parse bytes with prost, cause: {}",
error
))
}
}
impl ErrorCode {
pub fn from_std_error<T: std::error::Error>(error: T) -> Self {
ErrorCode {
code: 1002,
display_text: format!("{}", error),
cause: None,
backtrace: Some(ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new()))),
}
}
pub fn create(
code: u16,
display_text: String,
backtrace: Option<ErrorCodeBacktrace>,
) -> ErrorCode {
ErrorCode {
code,
display_text,
cause: None,
backtrace,
}
}
}
/// Provides the `map_err_to_code` method for `Result`.
///
/// ```
/// use common_exception::ToErrorCode;
/// use common_exception::ErrorCode;
///
/// let x: std::result::Result<(), std::fmt::Error> = Err(std::fmt::Error {});
/// let y: common_exception::Result<()> =
/// x.map_err_to_code(ErrorCode::UnknownException, || 123);
///
/// assert_eq!(
/// "Code: 1000, displayText = 123, cause: an error occurred when formatting an argument.",
/// format!("{}", y.unwrap_err())
/// );
/// ```
pub trait ToErrorCode<T, E, CtxFn>
where E: Display + Send + Sync + 'static
{
/// Wrap the error value with ErrorCode. It is lazily evaluated:
/// only when an error does occur.
///
/// `err_code_fn` is one of the ErrorCode builder functions, such as `ErrorCode::Ok`.
/// `context_fn` builds display_text for the ErrorCode.
fn map_err_to_code<ErrFn, D>(self, err_code_fn: ErrFn, context_fn: CtxFn) -> Result<T>
where
ErrFn: FnOnce(String) -> ErrorCode,
D: Display,
CtxFn: FnOnce() -> D;
}
impl<T, E, CtxFn> ToErrorCode<T, E, CtxFn> for std::result::Result<T, E>
where E: Display + Send + Sync + 'static
{
fn map_err_to_code<ErrFn, D>(self, make_exception: ErrFn, context_fn: CtxFn) -> Result<T>
where
ErrFn: FnOnce(String) -> ErrorCode,
D: Display,
CtxFn: FnOnce() -> D,
{
self.map_err(|error| {
let err_text = format!("{}, cause: {}", context_fn(), error);
make_exception(err_text)
})
}
}
// === ser/de to/from tonic::Status ===
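// JSON payload carried in the `details` field of a tonic::Status, so that the full
// ErrorCode (code, message and a truncated backtrace) survives a gRPC round trip.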
#[derive(serde::Serialize, serde::Deserialize)]
struct SerializedError {
code: u16,
message: String,
backtrace: String,
}
impl From<&Status> for ErrorCode {
fn from(status: &Status) -> Self {
match status.code() {
tonic::Code::Unknown => {
let details = status.details();
if details.is_empty() {
return ErrorCode::UnknownException(status.message());
}
match serde_json::from_slice::<SerializedError>(details) {
Err(error) => ErrorCode::from(error),
Ok(serialized_error) => match serialized_error.backtrace.len() {
0 => {
ErrorCode::create(serialized_error.code, serialized_error.message, None)
}
_ => ErrorCode::create(
serialized_error.code,
serialized_error.message,
Some(ErrorCodeBacktrace::Serialized(Arc::new(
serialized_error.backtrace,
))),
),
},
}
}
_ => ErrorCode::UnImplement(status.to_string()),
}
}
}
impl From<Status> for ErrorCode {
fn from(status: Status) -> Self {
(&status).into()
}
}
impl From<ErrorCode> for Status {
fn from(err: ErrorCode) -> Self {
let rst_json = serde_json::to_vec::<SerializedError>(&SerializedError {
code: err.code(),
message: err.message(),
backtrace: {
let mut str = err.backtrace_str();
str.truncate(2 * 1024);
str
},
});
match rst_json {
Ok(serialized_error_json) => {
// Code::Internal will be used by h2, if something goes wrong internally.
// To distinguish from that, we use Code::Unknown here
Status::with_details(Code::Unknown, err.message(), serialized_error_json.into())
}
Err(error) => Status::unknown(error.to_string()),
}
}
}
impl Clone for ErrorCode {
fn clone(&self) -> Self {
ErrorCode::create(self.code(), self.message(), self.backtrace())
}
}
| {
ErrorCode::from_std_error(error)
} | identifier_body |
mailbox.rs | // use byteorder::{ByteOrder, NativeEndian};
use core::{
convert::TryInto,
fmt, mem, ops, slice,
sync::atomic::{fence, Ordering},
};
use field_offset::offset_of;
use super::mmu::align_up;
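// Mailbox MMIO layout: MAIL_BASE is the mailbox offset from the peripheral base.
// 0x3f00_0000 matches the Raspberry Pi 2/3 peripheral base; the commented-out values
// appear to be the Pi 1 physical base and the VideoCore bus address.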
const MAIL_BASE: usize = 0xB880;
const MAIL_FULL: u32 = 0x8000_0000;
const MAIL_EMPTY: u32 = 0x4000_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x2000_0000;
const MAPPED_REGISTERS_BASE: usize = 0x3f00_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x7E00_0000;
#[derive(Copy, Clone, Debug)]
struct MailboxRegisterOffsets {
read: u8,
peek: u8,
sender: u8,
status: u8,
config: u8,
write: u8,
}
const MAILBOX_OFFFSETS: MailboxRegisterOffsets = MailboxRegisterOffsets {
read: 0x00,
peek: 0x10,
sender: 0x14,
status: 0x18,
config: 0x1c,
write: 0x20,
};
// MailboxRegisterOffsets {
// read: 0x20,
// peek: 0x30,
// sender: 0x34,
// status: 0x38,
// config: 0x3c,
// write: 0x40,
// },
// ];
#[inline]
unsafe fn read_reg(base: usize, offset: u8) -> u32 {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *const u32).read_volatile()
}
#[inline]
unsafe fn | (base: usize, offset: u8, value: u32) {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *mut u32).write_volatile(value)
}
unsafe fn read_mailbox(channel: u8) -> u32 {
// 1. Read the status register until the empty flag is not set.
// 2. Read data from the read register.
// 3. If the lower four bits do not match the desired channel number, repeat
// from step 1.
// 4. The upper 28 bits are the returned data.
// Wait for the mailbox to be non-empty
// Execute a memory barrier
// Read MAIL0_STATUS
// Goto step 1 if MAIL_EMPTY bit is set
// Execute a memory barrier
// Read from MAIL0_READ
// Check the channel (lowest 4 bits) of the read value for the correct channel
// If the channel is not the one we wish to read from (i.e: 1), go to step 1
// Return the data (i.e: the read value >> 4)
// println!("Reading mailbox (want channel {})", channel);
let mut limit = 10;
loop {
let mut empty_limit = 10;
loop {
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status) & MAIL_EMPTY == 0 {
break;
}
if empty_limit == 0 {
panic!(
"Gave up waiting for mail when reading from mailbox (channel {})",
channel
);
}
empty_limit -= 1;
}
fence(Ordering::SeqCst);
let data: u32 = read_reg(MAIL_BASE, MAILBOX_OFFFSETS.read);
let read_channel = (data & 0x0F) as u8;
let data = data >> 4;
// println!(
// "Got data from mailbox: {:#8x} (from channel {})",
// data, read_channel
// );
if read_channel != channel {
// println!("Wrong channel, trying again...");
if limit == 0 {
panic!(
"Got trampled too many times when reading from mailbox (channel {})",
channel
);
}
limit -= 1;
continue;
}
return data;
}
}
unsafe fn write_mailbox(channel: u8, data: u32) {
// 1. Read the status register until the full flag is not set.
// 2. Write the data (shifted into the upper 28 bits) combined with the
// channel (in the lower four bits) to the write register.
// println!("Writing {:#8x} to mailbox channel {}", data, channel);
let mut limit = 10;
loop {
// Wait for space
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status + 0x20) & MAIL_FULL == 0 {
break;
}
if limit == 0 {
panic!(
"Gave up waiting for space to write to mailbox (channel {}, data: 0x{:08x})",
channel, data
);
}
limit -= 1;
}
write_reg(MAIL_BASE, MAILBOX_OFFFSETS.write, data | (channel as u32));
fence(Ordering::SeqCst);
// println!("Finished writing to mailbox");
}
pub trait PropertyTagList: Sized {
fn prepare(self) -> PropertyMessageWrapper<Self> {
PropertyMessageWrapper::new(self)
}
}
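// Intended to implement PropertyTagList for tuples of PropertyMessage values so one
// buffer can carry several tags; the invocations below are commented out, so only the
// blanket impl for a single PropertyMessage is active.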
macro_rules! impl_ptl {
( $( $t:ident ),+ ) => {
impl< $($t),+ > PropertyTagList for ( $(PropertyMessage< $t >, )+ )
where $(
$t: Sized,
)+ {}
};
}
impl<T: Sized> PropertyTagList for PropertyMessage<T> {}
// impl_ptl!(T1);
// impl_ptl!(T1, T2);
// impl_ptl!(T1, T2, T3);
// impl_ptl!(T1, T2, T3, T4);
// impl_ptl!(T1, T2, T3, T4, T5);
// impl_ptl!(T1, T2, T3, T4, T5, T6);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
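// Buffer layout expected by the firmware property interface: total size in bytes, a
// request/response code (0 on request, 0x8000_0000 on success), then the concatenated
// tags. The 16-byte alignment matters because only the upper 28 bits of the buffer
// address fit through the mailbox (the low 4 bits carry the channel).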
#[repr(C, align(16))]
#[derive(Debug)]
pub struct PropertyMessageWrapper<TL: PropertyTagList> {
buffer_size: u32,
code: u32,
tags: TL,
// extra: [u32; 10],
}
impl<TL: PropertyTagList> PropertyMessageWrapper<TL> {
#[inline]
fn new(tags: TL) -> Self {
// let extra_offset = offset_of!(Self => extra).get_byte_offset();
// assert!(extra_offset % 4 == 0);
let buffer_size = core::mem::size_of::<Self>();
PropertyMessageWrapper {
buffer_size: buffer_size
.try_into()
.expect("Property message list size in bytes is too big to fit in a u32"),
code: 0x0000_0000,
tags,
// extra: [0; 10],
}
}
fn as_quads(&self) -> &[u32] {
let size_bytes = mem::size_of::<Self>();
debug_assert_eq!(size_bytes % 4, 0);
let u32_size: usize = size_bytes / 4;
unsafe { slice::from_raw_parts((self as *const Self) as *const u32, u32_size) }
}
pub fn send<'a>(&'a mut self) -> Option<&'a TL>
where
TL: fmt::Debug,
{
// println!("Property message before sending over mailbox: {:#x?}", self);
// println!(
// "Property message quads before sending over mailbox: {:#x?}",
// self.as_quads()
// );
const CHANNEL: u8 = Channel::PropertyTagsSend as u8;
println!("sending message {:x?}", self);
unsafe {
let ptr = self as *const Self;
let addr = ptr as usize;
write_mailbox(CHANNEL, addr.try_into().ok()?);
let resp_addr = read_mailbox(CHANNEL);
}
// let resp_ptr = resp_addr as *const u32;
// println!("Got response from mailbox: {:#?}", &*resp_ptr);
// let resp_code: u32 = *resp_ptr.offset(1);
// println!(
// "Property message after response {:#8x}: {:#x?}",
// resp_addr, self
// );
// {
// let message_quads = self.as_quads();
// println!("Property message words: {:#x?}", message_quads);
// }
if self.code != 0x8000_0000 {
return None;
}
// let msg_ptr = resp_ptr.offset(2);
// let value_buffer_size_ptr = msg_ptr.offset(1);
// let value_buffer_size = (*value_buffer_size_ptr) as usize;
// let value_buffer_ptr = msg_ptr.offset(3) as *const T;
// assert_eq!(value_buffer_size, mem::size_of::<T>());
// let value_ref = &*(value_buffer_ptr as *const T);
// Some(value_ref)
println!("received message: {:#x?}", self);
Some(&self.tags)
}
}
impl<TL: PropertyTagList> ops::Deref for PropertyMessageWrapper<TL> {
type Target = TL;
fn deref(&self) -> &TL {
&self.tags
}
}
impl<TL: PropertyTagList> ops::DerefMut for PropertyMessageWrapper<TL> {
fn deref_mut(&mut self) -> &mut TL {
&mut self.tags
}
}
#[repr(C, align(4))]
#[derive(Debug)]
pub struct PropertyMessage<T> {
tag: u32,
buffer_size: u32,
code: u32,
buffer: T,
}
impl<T> ops::Deref for PropertyMessage<T> {
type Target = T;
fn deref(&self) -> &T {
&self.buffer
}
}
impl<T> ops::DerefMut for PropertyMessage<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.buffer
}
}
impl<T: Sized> From<(u32, T)> for PropertyMessage<T> {
fn from((tag, buffer): (u32, T)) -> PropertyMessage<T> {
PropertyMessage::new(tag, buffer)
}
}
impl<T> PropertyMessage<T> {
pub fn new(tag: u32, buffer: T) -> Self {
let buffer_size = align_up(mem::size_of::<T>(), 2)
.try_into()
.expect("Property message size is too big to fit in a u32");
let msg = PropertyMessage {
tag,
buffer_size,
code: 0,
buffer,
};
let tag_offset = offset_of!(Self => tag);
assert_eq!(tag_offset.get_byte_offset(), 0);
let size_offset = offset_of!(Self => buffer_size);
assert_eq!(size_offset.get_byte_offset(), 4);
let code_offset = offset_of!(Self => code);
assert_eq!(code_offset.get_byte_offset(), 8);
let buffer_offset = offset_of!(Self => buffer);
assert_eq!(buffer_offset.get_byte_offset(), 12);
msg
}
pub fn value(&self) -> &T {
&self.buffer
}
}
// impl<T: fmt::Debug> PropertyMessage<T> {
// pub fn new(tag: u32, buffer: T) -> Self {
// PropertyMessage {
// }
// }
// }
pub fn send_raw_message<T: fmt::Debug>(channel: Channel, msg: &mut T) -> Result<u32, ()> {
let resp: u32;
let msg_ptr = msg as *mut T;
let msg_addr_usize = msg_ptr as usize;
let msg_addr_u32 = msg_addr_usize.try_into().map_err(|_| ())?;
unsafe {
write_mailbox(channel as u8, msg_addr_u32);
resp = read_mailbox(channel as u8);
}
// println!(
// "Got response {:#8x} after raw message send: {:#x?}",
// resp, msg
// );
Ok(resp)
}
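// Mailbox channels understood by the VideoCore firmware; PropertyTagsSend (8) is the
// ARM-to-VC property channel used by PropertyMessageWrapper::send above.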
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum Channel {
Power = 0,
Framebuffer = 1,
VirtualUART = 2,
VCHIQ = 3,
LEDs = 4,
Buttons = 5,
TouchScreen = 6,
Unknown7 = 7,
PropertyTagsSend = 8,
PropertyTagsReceive = 9,
}
| write_reg | identifier_name |
mailbox.rs | // use byteorder::{ByteOrder, NativeEndian};
use core::{
convert::TryInto,
fmt, mem, ops, slice,
sync::atomic::{fence, Ordering},
};
use field_offset::offset_of;
use super::mmu::align_up;
const MAIL_BASE: usize = 0xB880;
const MAIL_FULL: u32 = 0x8000_0000;
const MAIL_EMPTY: u32 = 0x4000_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x2000_0000;
const MAPPED_REGISTERS_BASE: usize = 0x3f00_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x7E00_0000;
#[derive(Copy, Clone, Debug)]
struct MailboxRegisterOffsets {
read: u8,
peek: u8,
sender: u8,
status: u8,
config: u8,
write: u8,
}
const MAILBOX_OFFFSETS: MailboxRegisterOffsets = MailboxRegisterOffsets {
read: 0x00,
peek: 0x10,
sender: 0x14,
status: 0x18,
config: 0x1c,
write: 0x20,
};
// MailboxRegisterOffsets {
// read: 0x20,
// peek: 0x30,
// sender: 0x34,
// status: 0x38,
// config: 0x3c,
// write: 0x40,
// },
// ];
#[inline]
unsafe fn read_reg(base: usize, offset: u8) -> u32 {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *const u32).read_volatile()
}
#[inline]
unsafe fn write_reg(base: usize, offset: u8, value: u32) {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *mut u32).write_volatile(value)
}
unsafe fn read_mailbox(channel: u8) -> u32 {
// 1. Read the status register until the empty flag is not set.
// 2. Read data from the read register.
// 3. If the lower four bits do not match the desired channel number, repeat
// from step 1.
// 4. The upper 28 bits are the returned data.
// Wait for the mailbox to be non-empty
// Execute a memory barrier
// Read MAIL0_STATUS
// Goto step 1 if MAIL_EMPTY bit is set
// Execute a memory barrier
// Read from MAIL0_READ
// Check the channel (lowest 4 bits) of the read value for the correct channel
// If the channel is not the one we wish to read from (i.e: 1), go to step 1
// Return the data (i.e: the read value >> 4)
// println!("Reading mailbox (want channel {})", channel);
let mut limit = 10;
loop {
let mut empty_limit = 10;
loop {
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status) & MAIL_EMPTY == 0 {
break;
}
if empty_limit == 0 {
panic!(
"Gave up waiting for mail when reading from mailbox (channel {})",
channel
);
}
empty_limit -= 1;
}
fence(Ordering::SeqCst);
let data: u32 = read_reg(MAIL_BASE, MAILBOX_OFFFSETS.read);
let read_channel = (data & 0x0F) as u8;
let data = data >> 4;
// println!(
// "Got data from mailbox: {:#8x} (from channel {})",
// data, read_channel
// );
if read_channel != channel {
// println!("Wrong channel, trying again...");
if limit == 0 {
panic!(
"Got trampled too many times when reading from mailbox (channel {})",
channel
);
}
limit -= 1;
continue;
}
return data;
}
}
unsafe fn write_mailbox(channel: u8, data: u32) {
// 1. Read the status register until the full flag is not set.
// 2. Write the data (shifted into the upper 28 bits) combined with the
// channel (in the lower four bits) to the write register.
// println!("Writing {:#8x} to mailbox channel {}", data, channel);
let mut limit = 10;
loop {
// Wait for space
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status + 0x20) & MAIL_FULL == 0 {
break;
}
if limit == 0 |
limit -= 1;
}
write_reg(MAIL_BASE, MAILBOX_OFFFSETS.write, data | (channel as u32));
fence(Ordering::SeqCst);
// println!("Finished writing to mailbox");
}
pub trait PropertyTagList: Sized {
fn prepare(self) -> PropertyMessageWrapper<Self> {
PropertyMessageWrapper::new(self)
}
}
macro_rules! impl_ptl {
( $( $t:ident ),+ ) => {
impl< $($t),+ > PropertyTagList for ( $(PropertyMessage< $t >, )+ )
where $(
$t: Sized,
)+ {}
};
}
impl<T: Sized> PropertyTagList for PropertyMessage<T> {}
// impl_ptl!(T1);
// impl_ptl!(T1, T2);
// impl_ptl!(T1, T2, T3);
// impl_ptl!(T1, T2, T3, T4);
// impl_ptl!(T1, T2, T3, T4, T5);
// impl_ptl!(T1, T2, T3, T4, T5, T6);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
#[repr(C, align(16))]
#[derive(Debug)]
pub struct PropertyMessageWrapper<TL: PropertyTagList> {
buffer_size: u32,
code: u32,
tags: TL,
// extra: [u32; 10],
}
impl<TL: PropertyTagList> PropertyMessageWrapper<TL> {
#[inline]
fn new(tags: TL) -> Self {
// let extra_offset = offset_of!(Self => extra).get_byte_offset();
// assert!(extra_offset % 4 == 0);
let buffer_size = core::mem::size_of::<Self>();
PropertyMessageWrapper {
buffer_size: buffer_size
.try_into()
.expect("Property message list size in bytes is too big to fit in a u32"),
code: 0x0000_0000,
tags,
// extra: [0; 10],
}
}
fn as_quads(&self) -> &[u32] {
let size_bytes = mem::size_of::<Self>();
debug_assert_eq!(size_bytes % 4, 0);
let u32_size: usize = size_bytes / 4;
unsafe { slice::from_raw_parts((self as *const Self) as *const u32, u32_size) }
}
pub fn send<'a>(&'a mut self) -> Option<&'a TL>
where
TL: fmt::Debug,
{
// println!("Property message before sending over mailbox: {:#x?}", self);
// println!(
// "Property message quads before sending over mailbox: {:#x?}",
// self.as_quads()
// );
const CHANNEL: u8 = Channel::PropertyTagsSend as u8;
println!("sending message {:x?}", self);
unsafe {
let ptr = self as *const Self;
let addr = ptr as usize;
write_mailbox(CHANNEL, addr.try_into().ok()?);
let resp_addr = read_mailbox(CHANNEL);
}
// let resp_ptr = resp_addr as *const u32;
// println!("Got response from mailbox: {:#?}", &*resp_ptr);
// let resp_code: u32 = *resp_ptr.offset(1);
// println!(
// "Property message after response {:#8x}: {:#x?}",
// resp_addr, self
// );
// {
// let message_quads = self.as_quads();
// println!("Property message words: {:#x?}", message_quads);
// }
if self.code != 0x8000_0000 {
return None;
}
// let msg_ptr = resp_ptr.offset(2);
// let value_buffer_size_ptr = msg_ptr.offset(1);
// let value_buffer_size = (*value_buffer_size_ptr) as usize;
// let value_buffer_ptr = msg_ptr.offset(3) as *const T;
// assert_eq!(value_buffer_size, mem::size_of::<T>());
// let value_ref = &*(value_buffer_ptr as *const T);
// Some(value_ref)
println!("received message: {:#x?}", self);
Some(&self.tags)
}
}
impl<TL: PropertyTagList> ops::Deref for PropertyMessageWrapper<TL> {
type Target = TL;
fn deref(&self) -> &TL {
&self.tags
}
}
impl<TL: PropertyTagList> ops::DerefMut for PropertyMessageWrapper<TL> {
fn deref_mut(&mut self) -> &mut TL {
&mut self.tags
}
}
#[repr(C, align(4))]
#[derive(Debug)]
pub struct PropertyMessage<T> {
tag: u32,
buffer_size: u32,
code: u32,
buffer: T,
}
impl<T> ops::Deref for PropertyMessage<T> {
type Target = T;
fn deref(&self) -> &T {
&self.buffer
}
}
impl<T> ops::DerefMut for PropertyMessage<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.buffer
}
}
impl<T: Sized> From<(u32, T)> for PropertyMessage<T> {
fn from((tag, buffer): (u32, T)) -> PropertyMessage<T> {
PropertyMessage::new(tag, buffer)
}
}
impl<T> PropertyMessage<T> {
pub fn new(tag: u32, buffer: T) -> Self {
let buffer_size = align_up(mem::size_of::<T>(), 2)
.try_into()
.expect("Property message size is too big to fit in a u32");
let msg = PropertyMessage {
tag,
buffer_size,
code: 0,
buffer,
};
let tag_offset = offset_of!(Self => tag);
assert_eq!(tag_offset.get_byte_offset(), 0);
let size_offset = offset_of!(Self => buffer_size);
assert_eq!(size_offset.get_byte_offset(), 4);
let code_offset = offset_of!(Self => code);
assert_eq!(code_offset.get_byte_offset(), 8);
let buffer_offset = offset_of!(Self => buffer);
assert_eq!(buffer_offset.get_byte_offset(), 12);
msg
}
pub fn value(&self) -> &T {
&self.buffer
}
}
// impl<T: fmt::Debug> PropertyMessage<T> {
// pub fn new(tag: u32, buffer: T) -> Self {
// PropertyMessage {
// }
// }
// }
pub fn send_raw_message<T: fmt::Debug>(channel: Channel, msg: &mut T) -> Result<u32, ()> {
let resp: u32;
let msg_ptr = msg as *mut T;
let msg_addr_usize = msg_ptr as usize;
let msg_addr_u32 = msg_addr_usize.try_into().map_err(|_| ())?;
unsafe {
write_mailbox(channel as u8, msg_addr_u32);
resp = read_mailbox(channel as u8);
}
// println!(
// "Got response {:#8x} after raw message send: {:#x?}",
// resp, msg
// );
Ok(resp)
}
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum Channel {
Power = 0,
Framebuffer = 1,
VirtualUART = 2,
VCHIQ = 3,
LEDs = 4,
Buttons = 5,
TouchScreen = 6,
Unknown7 = 7,
PropertyTagsSend = 8,
PropertyTagsReceive = 9,
}
| {
panic!(
"Gave up waiting for space to write to mailbox (channel {}, data: 0x{:08x})",
channel, data
);
} | conditional_block |
mailbox.rs | // use byteorder::{ByteOrder, NativeEndian};
use core::{
convert::TryInto,
fmt, mem, ops, slice,
sync::atomic::{fence, Ordering},
};
use field_offset::offset_of;
use super::mmu::align_up;
const MAIL_BASE: usize = 0xB880;
const MAIL_FULL: u32 = 0x8000_0000;
const MAIL_EMPTY: u32 = 0x4000_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x2000_0000;
const MAPPED_REGISTERS_BASE: usize = 0x3f00_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x7E00_0000;
#[derive(Copy, Clone, Debug)]
struct MailboxRegisterOffsets {
read: u8,
peek: u8,
sender: u8,
status: u8,
config: u8,
write: u8,
}
const MAILBOX_OFFFSETS: MailboxRegisterOffsets = MailboxRegisterOffsets {
read: 0x00,
peek: 0x10,
sender: 0x14,
status: 0x18,
config: 0x1c,
write: 0x20,
};
// MailboxRegisterOffsets {
// read: 0x20,
// peek: 0x30,
// sender: 0x34,
// status: 0x38,
// config: 0x3c,
// write: 0x40,
// },
// ];
#[inline]
unsafe fn read_reg(base: usize, offset: u8) -> u32 {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *const u32).read_volatile()
}
#[inline]
unsafe fn write_reg(base: usize, offset: u8, value: u32) {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *mut u32).write_volatile(value)
}
unsafe fn read_mailbox(channel: u8) -> u32 {
// 1. Read the status register until the empty flag is not set.
// 2. Read data from the read register.
// 3. If the lower four bits do not match the desired channel number, repeat
// from step 1.
// 4. The upper 28 bits are the returned data.
// Wait for the mailbox to be non-empty
// Execute a memory barrier
// Read MAIL0_STATUS
// Goto step 1 if MAIL_EMPTY bit is set
// Execute a memory barrier
// Read from MAIL0_READ
// Check the channel (lowest 4 bits) of the read value for the correct channel
// If the channel is not the one we wish to read from (i.e: 1), go to step 1
// Return the data (i.e: the read value >> 4)
// println!("Reading mailbox (want channel {})", channel);
let mut limit = 10;
loop {
let mut empty_limit = 10;
loop {
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status) & MAIL_EMPTY == 0 {
break;
}
if empty_limit == 0 {
panic!(
"Gave up waiting for mail when reading from mailbox (channel {})",
channel
);
}
empty_limit -= 1;
}
fence(Ordering::SeqCst);
let data: u32 = read_reg(MAIL_BASE, MAILBOX_OFFFSETS.read);
let read_channel = (data & 0x0F) as u8;
let data = data >> 4;
// println!(
// "Got data from mailbox: {:#8x} (from channel {})",
// data, read_channel
// );
if read_channel != channel {
// println!("Wrong channel, trying again...");
if limit == 0 {
panic!(
"Got trampled too many times when reading from mailbox (channel {})",
channel
);
}
limit -= 1;
continue;
}
return data;
}
}
unsafe fn write_mailbox(channel: u8, data: u32) {
// 1. Read the status register until the full flag is not set.
// 2. Write the data (shifted into the upper 28 bits) combined with the
// channel (in the lower four bits) to the write register.
// println!("Writing {:#8x} to mailbox channel {}", data, channel);
let mut limit = 10;
loop {
// Wait for space
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status + 0x20) & MAIL_FULL == 0 {
break;
}
if limit == 0 {
panic!(
"Gave up waiting for space to write to mailbox (channel {}, data: 0x{:08x})",
channel, data
);
}
limit -= 1;
}
write_reg(MAIL_BASE, MAILBOX_OFFFSETS.write, data | (channel as u32));
fence(Ordering::SeqCst);
// println!("Finished writing to mailbox");
}
pub trait PropertyTagList: Sized {
fn prepare(self) -> PropertyMessageWrapper<Self> {
PropertyMessageWrapper::new(self)
}
}
macro_rules! impl_ptl {
( $( $t:ident ),+ ) => {
impl< $($t),+ > PropertyTagList for ( $(PropertyMessage< $t >, )+ )
where $(
$t: Sized,
)+ {}
};
}
impl<T: Sized> PropertyTagList for PropertyMessage<T> {}
// impl_ptl!(T1);
// impl_ptl!(T1, T2);
// impl_ptl!(T1, T2, T3);
// impl_ptl!(T1, T2, T3, T4);
// impl_ptl!(T1, T2, T3, T4, T5);
// impl_ptl!(T1, T2, T3, T4, T5, T6);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
#[repr(C, align(16))]
#[derive(Debug)]
pub struct PropertyMessageWrapper<TL: PropertyTagList> {
buffer_size: u32,
code: u32,
tags: TL,
// extra: [u32; 10],
}
impl<TL: PropertyTagList> PropertyMessageWrapper<TL> {
#[inline]
fn new(tags: TL) -> Self {
// let extra_offset = offset_of!(Self => extra).get_byte_offset();
// assert!(extra_offset % 4 == 0);
let buffer_size = core::mem::size_of::<Self>();
PropertyMessageWrapper {
buffer_size: buffer_size
.try_into()
.expect("Property message list size in bytes is too big to fit in a u32"),
code: 0x0000_0000,
tags,
// extra: [0; 10],
}
}
fn as_quads(&self) -> &[u32] {
let size_bytes = mem::size_of::<Self>();
debug_assert_eq!(size_bytes % 4, 0);
let u32_size: usize = size_bytes / 4;
unsafe { slice::from_raw_parts((self as *const Self) as *const u32, u32_size) }
}
pub fn send<'a>(&'a mut self) -> Option<&'a TL>
where
TL: fmt::Debug,
{
// println!("Property message before sending over mailbox: {:#x?}", self);
// println!(
// "Property message quads before sending over mailbox: {:#x?}",
// self.as_quads()
// );
const CHANNEL: u8 = Channel::PropertyTagsSend as u8;
println!("sending message {:x?}", self);
unsafe {
let ptr = self as *const Self;
let addr = ptr as usize;
write_mailbox(CHANNEL, addr.try_into().ok()?);
let resp_addr = read_mailbox(CHANNEL);
}
// let resp_ptr = resp_addr as *const u32;
// println!("Got response from mailbox: {:#?}", &*resp_ptr);
// let resp_code: u32 = *resp_ptr.offset(1);
// println!(
// "Property message after response {:#8x}: {:#x?}",
// resp_addr, self
// );
// {
// let message_quads = self.as_quads();
// println!("Property message words: {:#x?}", message_quads);
// }
if self.code != 0x8000_0000 {
return None;
}
// let msg_ptr = resp_ptr.offset(2);
// let value_buffer_size_ptr = msg_ptr.offset(1);
// let value_buffer_size = (*value_buffer_size_ptr) as usize;
// let value_buffer_ptr = msg_ptr.offset(3) as *const T;
// assert_eq!(value_buffer_size, mem::size_of::<T>());
// let value_ref = &*(value_buffer_ptr as *const T);
// Some(value_ref)
println!("received message: {:#x?}", self);
Some(&self.tags)
}
}
impl<TL: PropertyTagList> ops::Deref for PropertyMessageWrapper<TL> {
type Target = TL;
fn deref(&self) -> &TL {
&self.tags
}
}
impl<TL: PropertyTagList> ops::DerefMut for PropertyMessageWrapper<TL> {
fn deref_mut(&mut self) -> &mut TL {
&mut self.tags
}
}
#[repr(C, align(4))]
#[derive(Debug)]
pub struct PropertyMessage<T> {
tag: u32,
buffer_size: u32,
code: u32,
buffer: T,
}
impl<T> ops::Deref for PropertyMessage<T> {
type Target = T;
fn deref(&self) -> &T {
&self.buffer
}
}
impl<T> ops::DerefMut for PropertyMessage<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.buffer
}
}
impl<T: Sized> From<(u32, T)> for PropertyMessage<T> {
fn from((tag, buffer): (u32, T)) -> PropertyMessage<T> {
PropertyMessage::new(tag, buffer)
}
}
impl<T> PropertyMessage<T> {
pub fn new(tag: u32, buffer: T) -> Self {
let buffer_size = align_up(mem::size_of::<T>(), 2)
.try_into()
.expect("Property message size is too big to fit in a u32");
let msg = PropertyMessage {
tag,
buffer_size,
code: 0,
buffer,
};
let tag_offset = offset_of!(Self => tag);
assert_eq!(tag_offset.get_byte_offset(), 0);
let size_offset = offset_of!(Self => buffer_size);
assert_eq!(size_offset.get_byte_offset(), 4);
let code_offset = offset_of!(Self => code);
assert_eq!(code_offset.get_byte_offset(), 8);
let buffer_offset = offset_of!(Self => buffer);
assert_eq!(buffer_offset.get_byte_offset(), 12);
msg
}
pub fn value(&self) -> &T {
&self.buffer
}
}
// impl<T: fmt::Debug> PropertyMessage<T> {
// pub fn new(tag: u32, buffer: T) -> Self {
// PropertyMessage {
// }
// }
// }
pub fn send_raw_message<T: fmt::Debug>(channel: Channel, msg: &mut T) -> Result<u32, ()> |
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum Channel {
Power = 0,
Framebuffer = 1,
VirtualUART = 2,
VCHIQ = 3,
LEDs = 4,
Buttons = 5,
TouchScreen = 6,
Unknown7 = 7,
PropertyTagsSend = 8,
PropertyTagsReceive = 9,
}
| {
let resp: u32;
let msg_ptr = msg as *mut T;
let msg_addr_usize = msg_ptr as usize;
let msg_addr_u32 = msg_addr_usize.try_into().map_err(|_| ())?;
unsafe {
write_mailbox(channel as u8, msg_addr_u32);
resp = read_mailbox(channel as u8);
}
// println!(
// "Got response {:#8x} after raw message send: {:#x?}",
// resp, msg
// );
Ok(resp)
} | identifier_body |
mailbox.rs | // use byteorder::{ByteOrder, NativeEndian};
use core::{
convert::TryInto,
fmt, mem, ops, slice,
sync::atomic::{fence, Ordering},
};
use field_offset::offset_of;
use super::mmu::align_up;
const MAIL_BASE: usize = 0xB880;
const MAIL_FULL: u32 = 0x8000_0000;
const MAIL_EMPTY: u32 = 0x4000_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x2000_0000;
const MAPPED_REGISTERS_BASE: usize = 0x3f00_0000;
// const MAPPED_REGISTERS_BASE: usize = 0x7E00_0000;
#[derive(Copy, Clone, Debug)]
struct MailboxRegisterOffsets {
read: u8,
peek: u8,
sender: u8,
status: u8,
config: u8,
write: u8,
}
const MAILBOX_OFFFSETS: MailboxRegisterOffsets = MailboxRegisterOffsets {
read: 0x00,
peek: 0x10,
sender: 0x14,
status: 0x18,
config: 0x1c,
write: 0x20,
};
// MailboxRegisterOffsets {
// read: 0x20,
// peek: 0x30,
// sender: 0x34,
// status: 0x38,
// config: 0x3c,
// write: 0x40,
// },
// ];
#[inline]
unsafe fn read_reg(base: usize, offset: u8) -> u32 {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *const u32).read_volatile()
}
#[inline]
unsafe fn write_reg(base: usize, offset: u8, value: u32) {
((MAPPED_REGISTERS_BASE + base + offset as usize) as *mut u32).write_volatile(value)
}
unsafe fn read_mailbox(channel: u8) -> u32 {
// 1. Read the status register until the empty flag is not set.
// 2. Read data from the read register.
// 3. If the lower four bits do not match the desired channel number, repeat
// from step 1.
// 4. The upper 28 bits are the returned data.
// Wait for the mailbox to be non-empty
// Execute a memory barrier
// Read MAIL0_STATUS
// Goto step 1 if MAIL_EMPTY bit is set | // Check the channel (lowest 4 bits) of the read value for the correct channel
// If the channel is not the one we wish to read from (i.e: 1), go to step 1
// Return the data (i.e: the read value >> 4)
// println!("Reading mailbox (want channel {})", channel);
let mut limit = 10;
loop {
let mut empty_limit = 10;
loop {
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status) & MAIL_EMPTY == 0 {
break;
}
if empty_limit == 0 {
panic!(
"Gave up waiting for mail when reading from mailbox (channel {})",
channel
);
}
empty_limit -= 1;
}
fence(Ordering::SeqCst);
let data: u32 = read_reg(MAIL_BASE, MAILBOX_OFFFSETS.read);
let read_channel = (data & 0x0F) as u8;
let data = data >> 4;
// println!(
// "Got data from mailbox: {:#8x} (from channel {})",
// data, read_channel
// );
if read_channel != channel {
// println!("Wrong channel, trying again...");
if limit == 0 {
panic!(
"Got trampled too many times when reading from mailbox (channel {})",
channel
);
}
limit -= 1;
continue;
}
return data;
}
}
unsafe fn write_mailbox(channel: u8, data: u32) {
// 1. Read the status register until the full flag is not set.
// 2. Write the data (shifted into the upper 28 bits) combined with the
// channel (in the lower four bits) to the write register.
// println!("Writing {:#8x} to mailbox channel {}", data, channel);
let mut limit = 10;
loop {
// Wait for space
fence(Ordering::SeqCst);
if read_reg(MAIL_BASE, MAILBOX_OFFFSETS.status + 0x20) & MAIL_FULL == 0 {
break;
}
if limit == 0 {
panic!(
"Gave up waiting for space to write to mailbox (channel {}, data: 0x{:08x})",
channel, data
);
}
limit -= 1;
}
write_reg(MAIL_BASE, MAILBOX_OFFFSETS.write, data | (channel as u32));
fence(Ordering::SeqCst);
// println!("Finished writing to mailbox");
}
pub trait PropertyTagList: Sized {
fn prepare(self) -> PropertyMessageWrapper<Self> {
PropertyMessageWrapper::new(self)
}
}
macro_rules! impl_ptl {
( $( $t:ident ),+ ) => {
impl< $($t),+ > PropertyTagList for ( $(PropertyMessage< $t >, )+ )
where $(
$t: Sized,
)+ {}
};
}
impl<T: Sized> PropertyTagList for PropertyMessage<T> {}
// impl_ptl!(T1);
// impl_ptl!(T1, T2);
// impl_ptl!(T1, T2, T3);
// impl_ptl!(T1, T2, T3, T4);
// impl_ptl!(T1, T2, T3, T4, T5);
// impl_ptl!(T1, T2, T3, T4, T5, T6);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
// impl_ptl!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
#[repr(C, align(16))]
#[derive(Debug)]
pub struct PropertyMessageWrapper<TL: PropertyTagList> {
buffer_size: u32,
code: u32,
tags: TL,
// extra: [u32; 10],
}
impl<TL: PropertyTagList> PropertyMessageWrapper<TL> {
#[inline]
fn new(tags: TL) -> Self {
// let extra_offset = offset_of!(Self => extra).get_byte_offset();
// assert!(extra_offset % 4 == 0);
let buffer_size = core::mem::size_of::<Self>();
PropertyMessageWrapper {
buffer_size: buffer_size
.try_into()
.expect("Property message list size in bytes is too big to fit in a u32"),
code: 0x0000_0000,
tags,
// extra: [0; 10],
}
}
fn as_quads(&self) -> &[u32] {
let size_bytes = mem::size_of::<Self>();
debug_assert_eq!(size_bytes % 4, 0);
let u32_size: usize = size_bytes / 4;
unsafe { slice::from_raw_parts((self as *const Self) as *const u32, u32_size) }
}
pub fn send<'a>(&'a mut self) -> Option<&'a TL>
where
TL: fmt::Debug,
{
// println!("Property message before sending over mailbox: {:#x?}", self);
// println!(
// "Property message quads before sending over mailbox: {:#x?}",
// self.as_quads()
// );
const CHANNEL: u8 = Channel::PropertyTagsSend as u8;
println!("sending message {:x?}", self);
unsafe {
let ptr = self as *const Self;
let addr = ptr as usize;
write_mailbox(CHANNEL, addr.try_into().ok()?);
let resp_addr = read_mailbox(CHANNEL);
}
// let resp_ptr = resp_addr as *const u32;
// println!("Got response from mailbox: {:#?}", &*resp_ptr);
// let resp_code: u32 = *resp_ptr.offset(1);
// println!(
// "Property message after response {:#8x}: {:#x?}",
// resp_addr, self
// );
// {
// let message_quads = self.as_quads();
// println!("Property message words: {:#x?}", message_quads);
// }
if self.code != 0x8000_0000 {
return None;
}
// let msg_ptr = resp_ptr.offset(2);
// let value_buffer_size_ptr = msg_ptr.offset(1);
// let value_buffer_size = (*value_buffer_size_ptr) as usize;
// let value_buffer_ptr = msg_ptr.offset(3) as *const T;
// assert_eq!(value_buffer_size, mem::size_of::<T>());
// let value_ref = &*(value_buffer_ptr as *const T);
// Some(value_ref)
println!("received message: {:#x?}", self);
Some(&self.tags)
}
}
impl<TL: PropertyTagList> ops::Deref for PropertyMessageWrapper<TL> {
type Target = TL;
fn deref(&self) -> &TL {
&self.tags
}
}
impl<TL: PropertyTagList> ops::DerefMut for PropertyMessageWrapper<TL> {
fn deref_mut(&mut self) -> &mut TL {
&mut self.tags
}
}
#[repr(C, align(4))]
#[derive(Debug)]
pub struct PropertyMessage<T> {
tag: u32,
buffer_size: u32,
code: u32,
buffer: T,
}
impl<T> ops::Deref for PropertyMessage<T> {
type Target = T;
fn deref(&self) -> &T {
&self.buffer
}
}
impl<T> ops::DerefMut for PropertyMessage<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.buffer
}
}
impl<T: Sized> From<(u32, T)> for PropertyMessage<T> {
fn from((tag, buffer): (u32, T)) -> PropertyMessage<T> {
PropertyMessage::new(tag, buffer)
}
}
impl<T> PropertyMessage<T> {
pub fn new(tag: u32, buffer: T) -> Self {
let buffer_size = align_up(mem::size_of::<T>(), 2)
.try_into()
.expect("Property message size is too big to fit in a u32");
let msg = PropertyMessage {
tag,
buffer_size,
code: 0,
buffer,
};
let tag_offset = offset_of!(Self => tag);
assert_eq!(tag_offset.get_byte_offset(), 0);
let size_offset = offset_of!(Self => buffer_size);
assert_eq!(size_offset.get_byte_offset(), 4);
let code_offset = offset_of!(Self => code);
assert_eq!(code_offset.get_byte_offset(), 8);
let buffer_offset = offset_of!(Self => buffer);
assert_eq!(buffer_offset.get_byte_offset(), 12);
msg
}
pub fn value(&self) -> &T {
&self.buffer
}
}
// impl<T: fmt::Debug> PropertyMessage<T> {
// pub fn new(tag: u32, buffer: T) -> Self {
// PropertyMessage {
// }
// }
// }
pub fn send_raw_message<T: fmt::Debug>(channel: Channel, msg: &mut T) -> Result<u32, ()> {
let resp: u32;
let msg_ptr = msg as *mut T;
let msg_addr_usize = msg_ptr as usize;
let msg_addr_u32 = msg_addr_usize.try_into().map_err(|_| ())?;
unsafe {
write_mailbox(channel as u8, msg_addr_u32);
resp = read_mailbox(channel as u8);
}
// println!(
// "Got response {:#8x} after raw message send: {:#x?}",
// resp, msg
// );
Ok(resp)
}
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum Channel {
Power = 0,
Framebuffer = 1,
VirtualUART = 2,
VCHIQ = 3,
LEDs = 4,
Buttons = 5,
TouchScreen = 6,
Unknown7 = 7,
PropertyTagsSend = 8,
PropertyTagsReceive = 9,
} | // Execute a memory barrier
// Read from MAIL0_READ | random_line_split |
internal.rs | extern crate proc_macro;
use ink_lang_ir::Callable;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::{
format_ident,
quote,
};
use std::{
collections::HashMap,
convert::TryFrom,
};
use syn::{
ext::IdentExt,
parenthesized,
parse::{
Parse,
ParseStream,
},
ItemImpl,
};
use crate::{
metadata::Metadata,
trait_definition::{
EXTERNAL_METHOD_SUFFIX,
EXTERNAL_TRAIT_SUFFIX,
WRAPPER_TRAIT_SUFFIX,
},
};
pub(crate) const BRUSH_PREFIX: &'static str = "__brush";
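// Lightweight stand-ins for syn's Meta/MetaList that keep the nested tokens unparsed
// (TokenStream2) and, via parse_meta_path below, accept keywords as path segments.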
pub(crate) struct MetaList {
pub path: syn::Path,
pub _paren_token: syn::token::Paren,
pub nested: syn::punctuated::Punctuated<TokenStream2, syn::Token![,]>,
}
// Like Path::parse_mod_style but accepts keywords in the path.
fn parse_meta_path(input: ParseStream) -> syn::Result<syn::Path> {
Ok(syn::Path {
leading_colon: input.parse()?,
segments: {
let mut segments = syn::punctuated::Punctuated::new();
while input.peek(syn::Ident::peek_any) {
let ident = syn::Ident::parse_any(input)?;
segments.push_value(syn::PathSegment::from(ident));
if !input.peek(syn::Token![::]) {
break
}
let punct = input.parse()?;
segments.push_punct(punct);
}
if segments.is_empty() {
return Err(input.error("expected path"))
} else if segments.trailing_punct() {
return Err(input.error("expected path segment"))
}
segments
},
})
}
fn parse_meta_list_after_path(path: syn::Path, input: ParseStream) -> syn::Result<MetaList> {
let content;
Ok(MetaList {
path,
_paren_token: parenthesized!(content in input),
nested: content.parse_terminated(TokenStream2::parse)?,
})
}
fn parse_meta_after_path(path: syn::Path, input: ParseStream) -> syn::Result<NestedMeta> {
if input.peek(syn::token::Paren) {
parse_meta_list_after_path(path, input).map(NestedMeta::List)
} else {
Ok(NestedMeta::Path(path))
}
}
impl Parse for MetaList {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_list_after_path(path, input)
}
}
pub(crate) enum NestedMeta {
Path(syn::Path),
List(MetaList),
}
impl Parse for NestedMeta {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_after_path(path, input)
}
}
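// The comma-separated arguments of an attribute macro invocation, parsed as a list of
// NestedMeta items.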
pub(crate) struct AttributeArgs(Vec<NestedMeta>);
impl Parse for AttributeArgs {
fn parse(input: ParseStream) -> syn::Result<Self> {
let mut attrs = Vec::new();
while input.peek(syn::Ident::peek_any) {
attrs.push(input.parse()?);
if input.is_empty() {
break
}
let _: syn::token::Comma = input.parse()?;
}
Ok(AttributeArgs { 0: attrs })
}
}
impl std::ops::Deref for AttributeArgs {
type Target = Vec<NestedMeta>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for AttributeArgs {
fn deref_mut(&mut self) -> &mut Vec<NestedMeta> {
&mut self.0
}
}
pub(crate) struct Attributes(Vec<syn::Attribute>);
impl Parse for Attributes {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(Self(syn::Attribute::parse_outer(input)?))
}
}
impl Attributes {
pub(crate) fn attr(&self) -> &Vec<syn::Attribute> {
&self.0
}
}
// Returns "ink-as-dependency" and not("ink-as-dependency") impls
pub(crate) fn impl_external_trait(
mut impl_item: syn::ItemImpl,
trait_ident: &syn::Ident,
metadata: &Metadata,
) -> (Vec<syn::Item>, Vec<syn::Item>) {
let impl_ink_attrs = extract_attr(&mut impl_item.attrs, "ink");
let mut ink_methods: HashMap<String, syn::TraitItemMethod> = HashMap::new();
metadata
.external_traits
.get(&trait_ident.to_string())
.methods()
.iter()
.for_each(|method| {
if is_attr(&method.attrs, "ink") {
let mut empty_method = method.clone();
empty_method.default = Some(
syn::parse2(quote! {
{
unimplemented!()
}
})
.unwrap(),
);
let mut attrs = empty_method.attrs.clone();
empty_method.attrs = extract_attr(&mut attrs, "doc");
empty_method.attrs.append(&mut extract_attr(&mut attrs, "ink"));
ink_methods.insert(method.sig.ident.to_string(), empty_method);
}
});
// Move ink! attrs from internal trait to external
impl_item.items.iter_mut().for_each(|mut item| {
if let syn::ImplItem::Method(method) = &mut item {
let method_key = method.sig.ident.to_string();
if ink_methods.contains_key(&method_key) {
// Internal attrs override the external ones, so the user must include the full declaration with ink(message), etc.
ink_methods.get_mut(&method_key).unwrap().attrs = extract_attr(&mut method.attrs, "doc");
ink_methods
.get_mut(&method_key)
.unwrap()
.attrs
.append(&mut extract_attr(&mut method.attrs, "ink"));
}
}
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let self_ty = impl_item.self_ty.clone().as_ref().clone();
let draft_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
// Evaluate selector and metadata_name for each method based on rules in ink!
let ink_impl = ::ink_lang_ir::ItemImpl::try_from(draft_impl).unwrap();
ink_impl.iter_messages().for_each(|message| {
let method = ink_methods.get_mut(&message.ident().to_string()).unwrap();
if message.user_provided_selector().is_none() {
let selector_u32 = u32::from_be_bytes(message.composed_selector().as_bytes().clone()) as usize;
let selector = format!("{:#010x}", selector_u32);
method.attrs.push(new_attribute(quote! {#[ink(selector = #selector)]}));
}
if message.metadata_name() == message.ident().to_string() {
let selector = format!("{}", message.metadata_name());
method
.attrs
.push(new_attribute(quote! {#[ink(metadata_name = #selector)]}));
}
let original_name = message.ident();
let inputs_params = message.inputs().map(|pat_type| &pat_type.pat);
method.default = Some(
syn::parse2(quote! {
{
#trait_ident::#original_name(self #(, #inputs_params )* )
}
})
.unwrap(),
);
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let wrapper_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, WRAPPER_TRAIT_SUFFIX);
// We only want to use this implementation in the ink-as-dependency case for the wrapper.
// It provides methods with the same names as in the initial trait.
let wrapper_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #wrapper_trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
let trait_name = ink_impl
.trait_path()
.map(|path| path.segments.last().unwrap().ident.to_string());
let mut metadata_name_attr = quote! {};
if trait_name == ink_impl.trait_metadata_name() |
let external_ink_methods_iter = ink_methods.iter_mut().map(|(_, value)| {
value.sig.ident = format_ident!("{}_{}{}", BRUSH_PREFIX, value.sig.ident, EXTERNAL_METHOD_SUFFIX);
value
});
let external_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, EXTERNAL_TRAIT_SUFFIX);
// This is the implementation of the "external" trait (the trait where every method is marked with ink!).
// That trait has a different name with the external suffix, and all of its methods have the external signature.
// The ABI generated by this impl section is nevertheless the same as the ABI generated by the original trait.
let external_impl: ItemImpl = syn::parse2(quote! {
#metadata_name_attr
#(#impl_ink_attrs)*
impl #external_trait_ident for #self_ty {
#(#external_ink_methods_iter)*
}
})
.unwrap();
// The internal implementation must be disabled during "ink-as-dependency"
let internal_impl = impl_item;
(
vec![syn::Item::from(wrapper_impl)],
vec![syn::Item::from(internal_impl), syn::Item::from(external_impl)],
)
}
#[inline]
pub(crate) fn is_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> bool {
if let None = attrs
.iter()
.find(|attr| attr.path.segments.last().expect("No segments in path").ident == ident)
{
false
} else {
true
}
}
#[inline]
#[allow(dead_code)]
pub(crate) fn get_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Option<syn::Attribute> {
for attr in attrs.iter() {
if is_attr(&vec![attr.clone()], ident) {
return Some(attr.clone())
}
}
None
}
#[inline]
pub(crate) fn remove_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs
.clone()
.into_iter()
.filter_map(|attr| {
if is_attr(&vec![attr.clone()], ident) {
None
} else {
Some(attr)
}
})
.collect()
}
#[inline]
pub(crate) fn extract_attr(attrs: &mut Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs.drain_filter(|attr| is_attr(&vec![attr.clone()], ident)).collect()
}
#[inline]
pub(crate) fn new_attribute(attr_stream: TokenStream2) -> syn::Attribute {
syn::parse2::<Attributes>(attr_stream).unwrap().attr()[0].clone()
}
/// Computes the BLAKE-2b 256-bit hash for the given input and stores it in output.
#[inline]
pub fn blake2b_256(input: &[u8], output: &mut [u8]) {
use ::blake2::digest::{
Update as _,
VariableOutput as _,
};
let mut blake2 = blake2::VarBlake2b::new_keyed(&[], 32);
blake2.update(input);
blake2.finalize_variable(|result| output.copy_from_slice(result));
}
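/// Convenience wrapper over `blake2b_256` for string input, returning the digest as a
/// fixed 32-byte array.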
#[inline]
pub(crate) fn blake2b_256_str(input: String) -> [u8; 32] {
let mut output: [u8; 32] = [0; 32];
blake2b_256(&input.into_bytes(), &mut output);
output
}
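/// Turns a stringified token stream such as `"some_name"` (including the quotes) into
/// `some_name` by dropping its first and last characters.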
#[inline]
pub(crate) fn sanitize_to_str(input: TokenStream) -> String {
let mut str = input.to_string();
// Remove the surrounding quotes from the string
str.drain(1..str.len() - 1).collect()
}
| {
let name = format!("{}", trait_name.unwrap());
metadata_name_attr = quote! { #[ink(metadata_name = #name)] }
} | conditional_block |
internal.rs | extern crate proc_macro;
use ink_lang_ir::Callable;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::{
format_ident,
quote,
};
use std::{
collections::HashMap,
convert::TryFrom,
};
use syn::{
ext::IdentExt,
parenthesized, | Parse,
ParseStream,
},
ItemImpl,
};
use crate::{
metadata::Metadata,
trait_definition::{
EXTERNAL_METHOD_SUFFIX,
EXTERNAL_TRAIT_SUFFIX,
WRAPPER_TRAIT_SUFFIX,
},
};
pub(crate) const BRUSH_PREFIX: &'static str = "__brush";
pub(crate) struct MetaList {
pub path: syn::Path,
pub _paren_token: syn::token::Paren,
pub nested: syn::punctuated::Punctuated<TokenStream2, syn::Token![,]>,
}
// Like Path::parse_mod_style but accepts keywords in the path.
fn parse_meta_path(input: ParseStream) -> syn::Result<syn::Path> {
Ok(syn::Path {
leading_colon: input.parse()?,
segments: {
let mut segments = syn::punctuated::Punctuated::new();
while input.peek(syn::Ident::peek_any) {
let ident = syn::Ident::parse_any(input)?;
segments.push_value(syn::PathSegment::from(ident));
if !input.peek(syn::Token![::]) {
break
}
let punct = input.parse()?;
segments.push_punct(punct);
}
if segments.is_empty() {
return Err(input.error("expected path"))
} else if segments.trailing_punct() {
return Err(input.error("expected path segment"))
}
segments
},
})
}
fn parse_meta_list_after_path(path: syn::Path, input: ParseStream) -> syn::Result<MetaList> {
let content;
Ok(MetaList {
path,
_paren_token: parenthesized!(content in input),
nested: content.parse_terminated(TokenStream2::parse)?,
})
}
fn parse_meta_after_path(path: syn::Path, input: ParseStream) -> syn::Result<NestedMeta> {
if input.peek(syn::token::Paren) {
parse_meta_list_after_path(path, input).map(NestedMeta::List)
} else {
Ok(NestedMeta::Path(path))
}
}
impl Parse for MetaList {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_list_after_path(path, input)
}
}
pub(crate) enum NestedMeta {
Path(syn::Path),
List(MetaList),
}
impl Parse for NestedMeta {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_after_path(path, input)
}
}
pub(crate) struct AttributeArgs(Vec<NestedMeta>);
impl Parse for AttributeArgs {
fn parse(input: ParseStream) -> syn::Result<Self> {
let mut attrs = Vec::new();
while input.peek(syn::Ident::peek_any) {
attrs.push(input.parse()?);
if input.is_empty() {
break
}
let _: syn::token::Comma = input.parse()?;
}
Ok(AttributeArgs { 0: attrs })
}
}
impl std::ops::Deref for AttributeArgs {
type Target = Vec<NestedMeta>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for AttributeArgs {
fn deref_mut(&mut self) -> &mut Vec<NestedMeta> {
&mut self.0
}
}
pub(crate) struct Attributes(Vec<syn::Attribute>);
impl Parse for Attributes {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(Self(syn::Attribute::parse_outer(input)?))
}
}
impl Attributes {
pub(crate) fn attr(&self) -> &Vec<syn::Attribute> {
&self.0
}
}
// Returns "ink-as-dependency" and not("ink-as-dependency") impls
pub(crate) fn impl_external_trait(
mut impl_item: syn::ItemImpl,
trait_ident: &syn::Ident,
metadata: &Metadata,
) -> (Vec<syn::Item>, Vec<syn::Item>) {
let impl_ink_attrs = extract_attr(&mut impl_item.attrs, "ink");
let mut ink_methods: HashMap<String, syn::TraitItemMethod> = HashMap::new();
metadata
.external_traits
.get(&trait_ident.to_string())
.methods()
.iter()
.for_each(|method| {
if is_attr(&method.attrs, "ink") {
let mut empty_method = method.clone();
empty_method.default = Some(
syn::parse2(quote! {
{
unimplemented!()
}
})
.unwrap(),
);
let mut attrs = empty_method.attrs.clone();
empty_method.attrs = extract_attr(&mut attrs, "doc");
empty_method.attrs.append(&mut extract_attr(&mut attrs, "ink"));
ink_methods.insert(method.sig.ident.to_string(), empty_method);
}
});
// Move ink! attrs from internal trait to external
impl_item.items.iter_mut().for_each(|mut item| {
if let syn::ImplItem::Method(method) = &mut item {
let method_key = method.sig.ident.to_string();
if ink_methods.contains_key(&method_key) {
// Internal attrs override the external ones, so the user must include the full declaration with ink(message), etc.
ink_methods.get_mut(&method_key).unwrap().attrs = extract_attr(&mut method.attrs, "doc");
ink_methods
.get_mut(&method_key)
.unwrap()
.attrs
.append(&mut extract_attr(&mut method.attrs, "ink"));
}
}
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let self_ty = impl_item.self_ty.clone().as_ref().clone();
let draft_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
// Evaluate selector and metadata_name for each method based on rules in ink!
let ink_impl = ::ink_lang_ir::ItemImpl::try_from(draft_impl).unwrap();
ink_impl.iter_messages().for_each(|message| {
let method = ink_methods.get_mut(&message.ident().to_string()).unwrap();
if message.user_provided_selector().is_none() {
let selector_u32 = u32::from_be_bytes(message.composed_selector().as_bytes().clone()) as usize;
let selector = format!("{:#010x}", selector_u32);
method.attrs.push(new_attribute(quote! {#[ink(selector = #selector)]}));
}
if message.metadata_name() == message.ident().to_string() {
let selector = format!("{}", message.metadata_name());
method
.attrs
.push(new_attribute(quote! {#[ink(metadata_name = #selector)]}));
}
let original_name = message.ident();
let inputs_params = message.inputs().map(|pat_type| &pat_type.pat);
method.default = Some(
syn::parse2(quote! {
{
#trait_ident::#original_name(self #(, #inputs_params )* )
}
})
.unwrap(),
);
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let wrapper_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, WRAPPER_TRAIT_SUFFIX);
// We only want to use this implementation in the "ink-as-dependency" case for the wrapper.
// It provides methods with the same names as in the initial trait.
let wrapper_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #wrapper_trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
let trait_name = ink_impl
.trait_path()
.map(|path| path.segments.last().unwrap().ident.to_string());
let mut metadata_name_attr = quote! {};
if trait_name == ink_impl.trait_metadata_name() {
let name = format!("{}", trait_name.unwrap());
metadata_name_attr = quote! { #[ink(metadata_name = #name)] }
}
let external_ink_methods_iter = ink_methods.iter_mut().map(|(_, value)| {
value.sig.ident = format_ident!("{}_{}{}", BRUSH_PREFIX, value.sig.ident, EXTERNAL_METHOD_SUFFIX);
value
});
let external_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, EXTERNAL_TRAIT_SUFFIX);
// This is the implementation of the "external" trait (the trait where every method is marked with ink!).
// That trait has a different name with the external suffix, and all methods have the external signature.
// But the ABI generated by this impl section is the same as the ABI generated by the original trait.
let external_impl: ItemImpl = syn::parse2(quote! {
#metadata_name_attr
#(#impl_ink_attrs)*
impl #external_trait_ident for #self_ty {
#(#external_ink_methods_iter)*
}
})
.unwrap();
// The internal implementation must be disabled during "ink-as-dependency"
let internal_impl = impl_item;
(
vec![syn::Item::from(wrapper_impl)],
vec![syn::Item::from(internal_impl), syn::Item::from(external_impl)],
)
}
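// Illustrative sketch (added, not from the original sources) of the selector formatting
// used above: the composed 4-byte selector is read big-endian and rendered as a
// zero-padded, `0x`-prefixed hex literal for the generated `#[ink(selector = ...)]`.
// The byte values below are hypothetical.
#[cfg(test)]
mod selector_format_sketch {
#[test]
fn formats_selector_as_zero_padded_hex() {
let bytes: [u8; 4] = [0x07, 0x57, 0x3f, 0x2c];
let selector_u32 = u32::from_be_bytes(bytes) as usize;
assert_eq!(format!("{:#010x}", selector_u32), "0x07573f2c");
}
}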
#[inline]
pub(crate) fn is_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> bool {
if let None = attrs
.iter()
.find(|attr| attr.path.segments.last().expect("No segments in path").ident == ident)
{
false
} else {
true
}
}
#[inline]
#[allow(dead_code)]
pub(crate) fn get_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Option<syn::Attribute> {
for attr in attrs.iter() {
if is_attr(&vec![attr.clone()], ident) {
return Some(attr.clone())
}
}
None
}
#[inline]
pub(crate) fn remove_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs
.clone()
.into_iter()
.filter_map(|attr| {
if is_attr(&vec![attr.clone()], ident) {
None
} else {
Some(attr)
}
})
.collect()
}
#[inline]
pub(crate) fn extract_attr(attrs: &mut Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs.drain_filter(|attr| is_attr(&vec![attr.clone()], ident)).collect()
}
#[inline]
pub(crate) fn new_attribute(attr_stream: TokenStream2) -> syn::Attribute {
syn::parse2::<Attributes>(attr_stream).unwrap().attr()[0].clone()
}
/// Computes the BLAKE-2b 256-bit hash for the given input and stores it in output.
#[inline]
pub fn blake2b_256(input: &[u8], output: &mut [u8]) {
use ::blake2::digest::{
Update as _,
VariableOutput as _,
};
let mut blake2 = blake2::VarBlake2b::new_keyed(&[], 32);
blake2.update(input);
blake2.finalize_variable(|result| output.copy_from_slice(result));
}
#[inline]
pub(crate) fn blake2b_256_str(input: String) -> [u8; 32] {
let mut output: [u8; 32] = [0; 32];
blake2b_256(&input.into_bytes(), &mut output);
output
}
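// Illustrative sketch (added): hashing a hypothetical trait label with the helpers above.
// Only the digest length is asserted, since the concrete bytes depend on the pinned
// version of the `blake2` crate.
#[cfg(test)]
mod blake2b_sketch {
use super::*;
#[test]
fn hashes_a_label_to_32_bytes() {
let digest = blake2b_256_str(String::from("MyTrait::my_message"));
assert_eq!(digest.len(), 32);
}
}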
#[inline]
pub(crate) fn sanitize_to_str(input: TokenStream) -> String {
let mut str = input.to_string();
// Remove quotes from the string
str.drain(1..str.len() - 1).collect()
} | parse::{ | random_line_split |
internal.rs | extern crate proc_macro;
use ink_lang_ir::Callable;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::{
format_ident,
quote,
};
use std::{
collections::HashMap,
convert::TryFrom,
};
use syn::{
ext::IdentExt,
parenthesized,
parse::{
Parse,
ParseStream,
},
ItemImpl,
};
use crate::{
metadata::Metadata,
trait_definition::{
EXTERNAL_METHOD_SUFFIX,
EXTERNAL_TRAIT_SUFFIX,
WRAPPER_TRAIT_SUFFIX,
},
};
pub(crate) const BRUSH_PREFIX: &'static str = "__brush";
pub(crate) struct MetaList {
pub path: syn::Path,
pub _paren_token: syn::token::Paren,
pub nested: syn::punctuated::Punctuated<TokenStream2, syn::Token![,]>,
}
// Like Path::parse_mod_style but accepts keywords in the path.
fn parse_meta_path(input: ParseStream) -> syn::Result<syn::Path> {
Ok(syn::Path {
leading_colon: input.parse()?,
segments: {
let mut segments = syn::punctuated::Punctuated::new();
while input.peek(syn::Ident::peek_any) {
let ident = syn::Ident::parse_any(input)?;
segments.push_value(syn::PathSegment::from(ident));
if !input.peek(syn::Token![::]) {
break
}
let punct = input.parse()?;
segments.push_punct(punct);
}
if segments.is_empty() {
return Err(input.error("expected path"))
} else if segments.trailing_punct() {
return Err(input.error("expected path segment"))
}
segments
},
})
}
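// Illustrative sketch (added): unlike `syn::Path::parse_mod_style`, the helper above
// accepts keyword segments such as `crate`, because it goes through `Ident::parse_any`.
// The path below is a hypothetical example.
#[cfg(test)]
mod parse_meta_path_sketch {
use super::*;
use quote::quote;
use syn::parse::Parser;
#[test]
fn accepts_keyword_segments() {
let path = parse_meta_path.parse2(quote! { crate::storage }).expect("path should parse");
assert_eq!(path.segments.len(), 2);
}
}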
fn parse_meta_list_after_path(path: syn::Path, input: ParseStream) -> syn::Result<MetaList> {
let content;
Ok(MetaList {
path,
_paren_token: parenthesized!(content in input),
nested: content.parse_terminated(TokenStream2::parse)?,
})
}
fn parse_meta_after_path(path: syn::Path, input: ParseStream) -> syn::Result<NestedMeta> {
if input.peek(syn::token::Paren) {
parse_meta_list_after_path(path, input).map(NestedMeta::List)
} else {
Ok(NestedMeta::Path(path))
}
}
impl Parse for MetaList {
fn | (input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_list_after_path(path, input)
}
}
pub(crate) enum NestedMeta {
Path(syn::Path),
List(MetaList),
}
impl Parse for NestedMeta {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_after_path(path, input)
}
}
pub(crate) struct AttributeArgs(Vec<NestedMeta>);
impl Parse for AttributeArgs {
fn parse(input: ParseStream) -> syn::Result<Self> {
let mut attrs = Vec::new();
while input.peek(syn::Ident::peek_any) {
attrs.push(input.parse()?);
if input.is_empty() {
break
}
let _: syn::token::Comma = input.parse()?;
}
Ok(AttributeArgs { 0: attrs })
}
}
impl std::ops::Deref for AttributeArgs {
type Target = Vec<NestedMeta>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for AttributeArgs {
fn deref_mut(&mut self) -> &mut Vec<NestedMeta> {
&mut self.0
}
}
pub(crate) struct Attributes(Vec<syn::Attribute>);
impl Parse for Attributes {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(Self(syn::Attribute::parse_outer(input)?))
}
}
impl Attributes {
pub(crate) fn attr(&self) -> &Vec<syn::Attribute> {
&self.0
}
}
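// Illustrative sketch (added): attribute arguments such as `payable, selector(_)` arrive
// as one token stream and are split on commas into bare paths and parenthesized lists.
// The argument names below are hypothetical.
#[cfg(test)]
mod attribute_args_sketch {
use super::*;
use quote::quote;
#[test]
fn splits_args_on_commas() {
let args: AttributeArgs = syn::parse2(quote! { payable, selector(_) }).unwrap();
assert_eq!(args.len(), 2);
match args.first() {
Some(NestedMeta::Path(path)) => assert!(path.is_ident("payable")),
_ => panic!("expected a bare path"),
}
}
}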
// Returns "ink-as-dependency" and not("ink-as-dependency") impls
pub(crate) fn impl_external_trait(
mut impl_item: syn::ItemImpl,
trait_ident: &syn::Ident,
metadata: &Metadata,
) -> (Vec<syn::Item>, Vec<syn::Item>) {
let impl_ink_attrs = extract_attr(&mut impl_item.attrs, "ink");
let mut ink_methods: HashMap<String, syn::TraitItemMethod> = HashMap::new();
metadata
.external_traits
.get(&trait_ident.to_string())
.methods()
.iter()
.for_each(|method| {
if is_attr(&method.attrs, "ink") {
let mut empty_method = method.clone();
empty_method.default = Some(
syn::parse2(quote! {
{
unimplemented!()
}
})
.unwrap(),
);
let mut attrs = empty_method.attrs.clone();
empty_method.attrs = extract_attr(&mut attrs, "doc");
empty_method.attrs.append(&mut extract_attr(&mut attrs, "ink"));
ink_methods.insert(method.sig.ident.to_string(), empty_method);
}
});
// Move ink! attrs from internal trait to external
impl_item.items.iter_mut().for_each(|mut item| {
if let syn::ImplItem::Method(method) = &mut item {
let method_key = method.sig.ident.to_string();
if ink_methods.contains_key(&method_key) {
// Internal attrs will override external ones, so the user must include the full declaration with ink(message), etc.
ink_methods.get_mut(&method_key).unwrap().attrs = extract_attr(&mut method.attrs, "doc");
ink_methods
.get_mut(&method_key)
.unwrap()
.attrs
.append(&mut extract_attr(&mut method.attrs, "ink"));
}
}
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let self_ty = impl_item.self_ty.clone().as_ref().clone();
let draft_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
// Evaluate selector and metadata_name for each method based on rules in ink!
let ink_impl = ::ink_lang_ir::ItemImpl::try_from(draft_impl).unwrap();
ink_impl.iter_messages().for_each(|message| {
let method = ink_methods.get_mut(&message.ident().to_string()).unwrap();
if message.user_provided_selector().is_none() {
let selector_u32 = u32::from_be_bytes(message.composed_selector().as_bytes().clone()) as usize;
let selector = format!("{:#010x}", selector_u32);
method.attrs.push(new_attribute(quote! {#[ink(selector = #selector)]}));
}
if message.metadata_name() == message.ident().to_string() {
let selector = format!("{}", message.metadata_name());
method
.attrs
.push(new_attribute(quote! {#[ink(metadata_name = #selector)]}));
}
let original_name = message.ident();
let inputs_params = message.inputs().map(|pat_type| &pat_type.pat);
method.default = Some(
syn::parse2(quote! {
{
#trait_ident::#original_name(self #(, #inputs_params )* )
}
})
.unwrap(),
);
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let wrapper_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, WRAPPER_TRAIT_SUFFIX);
// We only want to use this implementation in the "ink-as-dependency" case for the wrapper.
// It provides methods with the same names as in the initial trait.
let wrapper_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #wrapper_trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
let trait_name = ink_impl
.trait_path()
.map(|path| path.segments.last().unwrap().ident.to_string());
let mut metadata_name_attr = quote! {};
if trait_name == ink_impl.trait_metadata_name() {
let name = format!("{}", trait_name.unwrap());
metadata_name_attr = quote! { #[ink(metadata_name = #name)] }
}
let external_ink_methods_iter = ink_methods.iter_mut().map(|(_, value)| {
value.sig.ident = format_ident!("{}_{}{}", BRUSH_PREFIX, value.sig.ident, EXTERNAL_METHOD_SUFFIX);
value
});
let external_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, EXTERNAL_TRAIT_SUFFIX);
// This is the implementation of the "external" trait (the trait where every method is marked with ink!).
// That trait has a different name with the external suffix, and all methods have the external signature.
// But the ABI generated by this impl section is the same as the ABI generated by the original trait.
let external_impl: ItemImpl = syn::parse2(quote! {
#metadata_name_attr
#(#impl_ink_attrs)*
impl #external_trait_ident for #self_ty {
#(#external_ink_methods_iter)*
}
})
.unwrap();
// The internal implementation must be disabled during "ink-as-dependency"
let internal_impl = impl_item;
(
vec![syn::Item::from(wrapper_impl)],
vec![syn::Item::from(internal_impl), syn::Item::from(external_impl)],
)
}
#[inline]
pub(crate) fn is_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> bool {
if let None = attrs
.iter()
.find(|attr| attr.path.segments.last().expect("No segments in path").ident == ident)
{
false
} else {
true
}
}
#[inline]
#[allow(dead_code)]
pub(crate) fn get_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Option<syn::Attribute> {
for attr in attrs.iter() {
if is_attr(&vec![attr.clone()], ident) {
return Some(attr.clone())
}
}
None
}
#[inline]
pub(crate) fn remove_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs
.clone()
.into_iter()
.filter_map(|attr| {
if is_attr(&vec![attr.clone()], ident) {
None
} else {
Some(attr)
}
})
.collect()
}
#[inline]
pub(crate) fn extract_attr(attrs: &mut Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs.drain_filter(|attr| is_attr(&vec![attr.clone()], ident)).collect()
}
#[inline]
pub(crate) fn new_attribute(attr_stream: TokenStream2) -> syn::Attribute {
syn::parse2::<Attributes>(attr_stream).unwrap().attr()[0].clone()
}
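// Illustrative sketch (added): the helpers above identify an attribute by the *last*
// segment of its path, so `#[ink(message)]` is recognised as "ink" and can be filtered
// out with `remove_attr`. The attributes below are hypothetical.
#[cfg(test)]
mod attr_helpers_sketch {
use super::*;
use quote::quote;
#[test]
fn recognises_and_removes_ink_attrs() {
let attrs = vec![
new_attribute(quote! { #[ink(message)] }),
new_attribute(quote! { #[doc = "docs"] }),
];
assert!(is_attr(&attrs, "ink"));
let without_ink = remove_attr(&attrs, "ink");
assert_eq!(without_ink.len(), 1);
assert!(!is_attr(&without_ink, "ink"));
}
}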
/// Computes the BLAKE-2b 256-bit hash for the given input and stores it in output.
#[inline]
pub fn blake2b_256(input: &[u8], output: &mut [u8]) {
use ::blake2::digest::{
Update as _,
VariableOutput as _,
};
let mut blake2 = blake2::VarBlake2b::new_keyed(&[], 32);
blake2.update(input);
blake2.finalize_variable(|result| output.copy_from_slice(result));
}
#[inline]
pub(crate) fn blake2b_256_str(input: String) -> [u8; 32] {
let mut output: [u8; 32] = [0; 32];
blake2b_256(&input.into_bytes(), &mut output);
output
}
#[inline]
pub(crate) fn sanitize_to_str(input: TokenStream) -> String {
let mut str = input.to_string();
// Remove quotes from the string
str.drain(1..str.len() - 1).collect()
}
| parse | identifier_name |
internal.rs | extern crate proc_macro;
use ink_lang_ir::Callable;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::{
format_ident,
quote,
};
use std::{
collections::HashMap,
convert::TryFrom,
};
use syn::{
ext::IdentExt,
parenthesized,
parse::{
Parse,
ParseStream,
},
ItemImpl,
};
use crate::{
metadata::Metadata,
trait_definition::{
EXTERNAL_METHOD_SUFFIX,
EXTERNAL_TRAIT_SUFFIX,
WRAPPER_TRAIT_SUFFIX,
},
};
pub(crate) const BRUSH_PREFIX: &'static str = "__brush";
pub(crate) struct MetaList {
pub path: syn::Path,
pub _paren_token: syn::token::Paren,
pub nested: syn::punctuated::Punctuated<TokenStream2, syn::Token![,]>,
}
// Like Path::parse_mod_style but accepts keywords in the path.
fn parse_meta_path(input: ParseStream) -> syn::Result<syn::Path> |
fn parse_meta_list_after_path(path: syn::Path, input: ParseStream) -> syn::Result<MetaList> {
let content;
Ok(MetaList {
path,
_paren_token: parenthesized!(content in input),
nested: content.parse_terminated(TokenStream2::parse)?,
})
}
fn parse_meta_after_path(path: syn::Path, input: ParseStream) -> syn::Result<NestedMeta> {
if input.peek(syn::token::Paren) {
parse_meta_list_after_path(path, input).map(NestedMeta::List)
} else {
Ok(NestedMeta::Path(path))
}
}
impl Parse for MetaList {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_list_after_path(path, input)
}
}
pub(crate) enum NestedMeta {
Path(syn::Path),
List(MetaList),
}
impl Parse for NestedMeta {
fn parse(input: ParseStream) -> syn::Result<Self> {
let path = input.call(parse_meta_path)?;
parse_meta_after_path(path, input)
}
}
pub(crate) struct AttributeArgs(Vec<NestedMeta>);
impl Parse for AttributeArgs {
fn parse(input: ParseStream) -> syn::Result<Self> {
let mut attrs = Vec::new();
while input.peek(syn::Ident::peek_any) {
attrs.push(input.parse()?);
if input.is_empty() {
break
}
let _: syn::token::Comma = input.parse()?;
}
Ok(AttributeArgs { 0: attrs })
}
}
impl std::ops::Deref for AttributeArgs {
type Target = Vec<NestedMeta>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for AttributeArgs {
fn deref_mut(&mut self) -> &mut Vec<NestedMeta> {
&mut self.0
}
}
pub(crate) struct Attributes(Vec<syn::Attribute>);
impl Parse for Attributes {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(Self(syn::Attribute::parse_outer(input)?))
}
}
impl Attributes {
pub(crate) fn attr(&self) -> &Vec<syn::Attribute> {
&self.0
}
}
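// Illustrative sketch (added): `Attributes` simply wraps `Attribute::parse_outer`, so a
// token stream holding several outer attributes parses into one flat list.
#[cfg(test)]
mod attributes_sketch {
use super::*;
use quote::quote;
#[test]
fn collects_outer_attributes() {
let parsed: Attributes = syn::parse2(quote! { #[inline] #[doc = "docs"] }).unwrap();
assert_eq!(parsed.attr().len(), 2);
}
}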
// Returns "ink-as-dependency" and not("ink-as-dependency") impls
pub(crate) fn impl_external_trait(
mut impl_item: syn::ItemImpl,
trait_ident: &syn::Ident,
metadata: &Metadata,
) -> (Vec<syn::Item>, Vec<syn::Item>) {
let impl_ink_attrs = extract_attr(&mut impl_item.attrs, "ink");
let mut ink_methods: HashMap<String, syn::TraitItemMethod> = HashMap::new();
metadata
.external_traits
.get(&trait_ident.to_string())
.methods()
.iter()
.for_each(|method| {
if is_attr(&method.attrs, "ink") {
let mut empty_method = method.clone();
empty_method.default = Some(
syn::parse2(quote! {
{
unimplemented!()
}
})
.unwrap(),
);
let mut attrs = empty_method.attrs.clone();
empty_method.attrs = extract_attr(&mut attrs, "doc");
empty_method.attrs.append(&mut extract_attr(&mut attrs, "ink"));
ink_methods.insert(method.sig.ident.to_string(), empty_method);
}
});
// Move ink! attrs from internal trait to external
impl_item.items.iter_mut().for_each(|mut item| {
if let syn::ImplItem::Method(method) = &mut item {
let method_key = method.sig.ident.to_string();
if ink_methods.contains_key(&method_key) {
// Internal attrs will override external ones, so the user must include the full declaration with ink(message), etc.
ink_methods.get_mut(&method_key).unwrap().attrs = extract_attr(&mut method.attrs, "doc");
ink_methods
.get_mut(&method_key)
.unwrap()
.attrs
.append(&mut extract_attr(&mut method.attrs, "ink"));
}
}
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let self_ty = impl_item.self_ty.clone().as_ref().clone();
let draft_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
// Evaluate selector and metadata_name for each method based on rules in ink!
let ink_impl = ::ink_lang_ir::ItemImpl::try_from(draft_impl).unwrap();
ink_impl.iter_messages().for_each(|message| {
let method = ink_methods.get_mut(&message.ident().to_string()).unwrap();
if message.user_provided_selector().is_none() {
let selector_u32 = u32::from_be_bytes(message.composed_selector().as_bytes().clone()) as usize;
let selector = format!("{:#010x}", selector_u32);
method.attrs.push(new_attribute(quote! {#[ink(selector = #selector)]}));
}
if message.metadata_name() == message.ident().to_string() {
let selector = format!("{}", message.metadata_name());
method
.attrs
.push(new_attribute(quote! {#[ink(metadata_name = #selector)]}));
}
let original_name = message.ident();
let inputs_params = message.inputs().map(|pat_type| &pat_type.pat);
method.default = Some(
syn::parse2(quote! {
{
#trait_ident::#original_name(self #(, #inputs_params )* )
}
})
.unwrap(),
);
});
let ink_methods_iter = ink_methods.iter().map(|(_, value)| value);
let wrapper_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, WRAPPER_TRAIT_SUFFIX);
// We only want to use this implementation in the "ink-as-dependency" case for the wrapper.
// It provides methods with the same names as in the initial trait.
let wrapper_impl: ItemImpl = syn::parse2(quote! {
#(#impl_ink_attrs)*
impl #wrapper_trait_ident for #self_ty {
#(#ink_methods_iter)*
}
})
.unwrap();
let trait_name = ink_impl
.trait_path()
.map(|path| path.segments.last().unwrap().ident.to_string());
let mut metadata_name_attr = quote! {};
if trait_name == ink_impl.trait_metadata_name() {
let name = format!("{}", trait_name.unwrap());
metadata_name_attr = quote! { #[ink(metadata_name = #name)] }
}
let external_ink_methods_iter = ink_methods.iter_mut().map(|(_, value)| {
value.sig.ident = format_ident!("{}_{}{}", BRUSH_PREFIX, value.sig.ident, EXTERNAL_METHOD_SUFFIX);
value
});
let external_trait_ident = format_ident!("{}_{}{}", BRUSH_PREFIX, trait_ident, EXTERNAL_TRAIT_SUFFIX);
// This is the implementation of the "external" trait (the trait where every method is marked with ink!).
// That trait has a different name with the external suffix, and all methods have the external signature.
// But the ABI generated by this impl section is the same as the ABI generated by the original trait.
let external_impl: ItemImpl = syn::parse2(quote! {
#metadata_name_attr
#(#impl_ink_attrs)*
impl #external_trait_ident for #self_ty {
#(#external_ink_methods_iter)*
}
})
.unwrap();
// The internal implementation must be disabled during "ink-as-dependency"
let internal_impl = impl_item;
(
vec![syn::Item::from(wrapper_impl)],
vec![syn::Item::from(internal_impl), syn::Item::from(external_impl)],
)
}
#[inline]
pub(crate) fn is_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> bool {
if let None = attrs
.iter()
.find(|attr| attr.path.segments.last().expect("No segments in path").ident == ident)
{
false
} else {
true
}
}
#[inline]
#[allow(dead_code)]
pub(crate) fn get_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Option<syn::Attribute> {
for attr in attrs.iter() {
if is_attr(&vec![attr.clone()], ident) {
return Some(attr.clone())
}
}
None
}
#[inline]
pub(crate) fn remove_attr(attrs: &Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs
.clone()
.into_iter()
.filter_map(|attr| {
if is_attr(&vec![attr.clone()], ident) {
None
} else {
Some(attr)
}
})
.collect()
}
#[inline]
pub(crate) fn extract_attr(attrs: &mut Vec<syn::Attribute>, ident: &str) -> Vec<syn::Attribute> {
attrs.drain_filter(|attr| is_attr(&vec![attr.clone()], ident)).collect()
}
#[inline]
pub(crate) fn new_attribute(attr_stream: TokenStream2) -> syn::Attribute {
syn::parse2::<Attributes>(attr_stream).unwrap().attr()[0].clone()
}
/// Computes the BLAKE-2b 256-bit hash for the given input and stores it in output.
#[inline]
pub fn blake2b_256(input: &[u8], output: &mut [u8]) {
use ::blake2::digest::{
Update as _,
VariableOutput as _,
};
let mut blake2 = blake2::VarBlake2b::new_keyed(&[], 32);
blake2.update(input);
blake2.finalize_variable(|result| output.copy_from_slice(result));
}
#[inline]
pub(crate) fn blake2b_256_str(input: String) -> [u8; 32] {
let mut output: [u8; 32] = [0; 32];
blake2b_256(&input.into_bytes(), &mut output);
output
}
#[inline]
pub(crate) fn sanitize_to_str(input: TokenStream) -> String {
let mut str = input.to_string();
// Remove quotes from the string
str.drain(1..str.len() - 1).collect()
}
| {
Ok(syn::Path {
leading_colon: input.parse()?,
segments: {
let mut segments = syn::punctuated::Punctuated::new();
while input.peek(syn::Ident::peek_any) {
let ident = syn::Ident::parse_any(input)?;
segments.push_value(syn::PathSegment::from(ident));
if !input.peek(syn::Token![::]) {
break
}
let punct = input.parse()?;
segments.push_punct(punct);
}
if segments.is_empty() {
return Err(input.error("expected path"))
} else if segments.trailing_punct() {
return Err(input.error("expected path segment"))
}
segments
},
})
} | identifier_body |
data.py | # -*- coding: utf-8 -*-
"""
coref.data
~~~~~~~~~~~~
File Processor
Parses 'Coreference Resolution Input File'
:copyright: (c) 2012 by Adam Walz, Charmaine Keck
:license:
"""
import re
from os import strerror
from sys import stderr
from time import sleep
from errno import EIO
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import jsonrpc
from simplejson import loads
from nltk import sent_tokenize, word_tokenize
from nltk.tree import Tree
from nltk.corpus import wordnet as wn
from Levenshtein import ratio
from helpers import static_var, vprint
class FileParse():
def __init__(self, filename, pserver):
fparse = mk_fparse(filename.strip(), pserver)
self.parses = [Parse(p) for p in fparse[0]]
self.nps = fparse[1]
self.synsets = fparse[2]
class Parse():
def __init__(self, parse):
self.ptree = parse[0]
self.words = parse[1]
self.dependencies = parse[2]
self.text = parse[3]
class FilenameException(Exception):
"""Raised when file does not have the correct extension"""
pass
def mk_parses(listfile, corenlp_host):
"""Creates a list of FileParse objects for the files listed in the listfile
Args:
listfile: string, path to input listfile (see assignment description
for listfile details)
Returns:
list of FileParse objects
"""
# if not listfile.endswith('.listfile'):
# filetype = 'Co-Reference List file'
# error = 'has incorrect file type'
# raise FilenameException("Error: %s %s" % (filetype, error))
try:
with open(listfile) as f:
pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),
jsonrpc.TransportTcpIp(
addr=(corenlp_host, 8080), limit=1000))
parses = dict([(get_id(path), FileParse(path, pserver))
for path in f.readlines()
if path.lstrip()[0] != '#'])
except IOError:
stderr.write(strerror(EIO)) # stderr.write does not have newlines
stderr.write("\nERROR: Could not open list file\n")
exit(EIO)
else:
return parses
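# Illustrative usage sketch (added for clarity, not part of the original module);
# the list file path and CoreNLP host below are hypothetical placeholders.
def _example_mk_parses_usage():
    parses = mk_parses('documents.listfile', 'localhost')
    for fid, fparse in parses.items():
        print '%s: %d sentences, %d tagged NPs' % (fid, len(fparse.parses), len(fparse.nps))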
def get_id(path):
"""Parses a file path for the filename without extension
Args:
path: string, full (or relative) file path for coreference file.
Must end in .crf
Returns:
string, file id (filename without extension)
>>> path = '/home/user/Desktop/full.crf'
>>> get_id(path)
'full'
>>> path = 'relative.crf'
>>> get_id(path)
'relative'
"""
fid, ext, _ = path.strip().split('/')[-1].partition('.crf')
if not fid or ext != '.crf':
filetype = 'Co-Reference Input file'
error = 'has incorrect file type'
raise FilenameException("Error: %s %s" % (filetype, error))
return fid
def mk_fparse(filename, pserver):
"""Parses input to get list of paragraphs with sentence structure
and a dictionary of noun phrases contained in the COREF tags
Args:
filename: string, path to crf file
pserver: jsonrpc.ServerProxy, stanford corenlp server for parsing
Returns:
tuple, (list_stanford_sent_parses, dict_file_corefs, dict_file_synsets)
"""
parses = []
try:
with open(filename) as f:
vprint('OPEN: %s' % filename)
xml = f.read()
except IOError:
print strerror(EIO)
print("ERROR: Could not open %s" % filename)
return (parses, get_tagged_corefs(''), get_synsets({}))
# remove unwanted characters from xml
vprint('\tPARSE: Parsing file: %s' % filename)
# parse_tries = 0
# while parse_tries < 5:
# try:
# t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))
# parse_tries = 0
# break
# except jsonrpc.RPCTimeoutError:
# vprint('\tERROR: RPCTimeoutError - retrying')
# parse_tries += 3
# except jsonrpc.RPCTransportError:
# vprint('\tERROR: RPCTransportError - retrying')
# data = _normalize_sentence(_remove_tags(xml))
# sentences = [sent for part in data.split('\n\n')
# for sent in sent_tokenize(part)]
# try:
# xml1 = data[:data.find(sentences[len(sentences)/3])]
# xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]
# xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]
# t1 = loads(pserver.parse(xml1))
# t2 = loads(pserver.parse(xml2))
# t3 = loads(pserver.parse(xml3))
# t = dict(t1.items() + t2.items() + t3.items())
# parse_tries = 0
# break
# except Exception:
# parse_tries = -1
# break
# parse_tries += 1
# if parse_tries != 0:
# vprint('\tFATAL: RPCTransportError - skipping')
sentences = [sent for part in xml.split('\n\n')
for sent in sent_tokenize(part)]
vprint('\tPARSE: Parsing sentences: %s' % filename)
for sent in sentences:
sent_corefs = get_tagged_corefs(sent, ordered=True)
# remove unwanted characters from xml
sent = _normalize_sentence(_remove_tags(sent))
parse_tries = 0
while parse_tries < 5:
try:
sparse = loads(pserver.parse(sent))
parse_tries = 0
break
except jsonrpc.RPCTransportError:
vprint('\tERROR: RPCTransportError - retrying')
parse_tries += 1
if parse_tries != 0:
vprint('\tFATAL: RPCTransportError - skipping')
pparse = _process_parse(sparse, sent_corefs)
if pparse:
parses.append(pparse)
pos_tags = {}
for parse in parses:
for word, attr in parse[1]:
tags = pos_tags.get(word, set())
tags.add(attr['PartOfSpeech'])
pos_tags[word] = tags
return parses, get_tagged_corefs(xml), get_synsets(pos_tags)
def tag_ptree(ptree, coreflist):
"""Tags given parse tree with coreferences
Args:
ptree: string, parenthesized string representation of parse tree
coreflist: list of tuples, [('1', {'text': 'dog', 'ref': None})]
Returns:
string, tagged parse tree
>>> ptree = '(S NP( (NN He)) VP( (V ran)))'
>>> coreflist = [('1', {'text': 'He', 'ref': None})]
>>> tag_ptree(ptree, coreflist)
'(S NP( COREF_TAG_1( (NN He))) VP( (V ran)))'
"""
pattern = r"""(?P<lp>\(?\s*) # left parenthesis
(?P<tg>[a-zA-Z$]+)? # POS tag
(?P<data>\s*%s) # subtree of tag
(?P<rp>(?:\s*\))*) # right parenthesis
"""
for cid, coref in coreflist[::-1]:
words = ''.join(word_tokenize(coref['text']))
nltktree = Tree.parse(ptree)
nltktree.reverse() # perform search right to left
data = None
for subtree in nltktree.subtrees(): # BFS
if ''.join(subtree.leaves()) == words: # equal ignoring whitespace
data = subtree.pprint()
break
# If found via breadth-first search of parse tree
if data:
ptree = ptree.replace(data, '( COREF_TAG_%s%s)' % (cid, data))
else: # Try finding via regex matching instead
dpattern = r'\s*'.join([r'\(\s*[a-zA-Z$]+\s+%s\s*\)' % word
for word in word_tokenize(coref['text'])])
found = re.findall(pattern % dpattern, ptree, re.X)
if found:
repl = '%s%s ( COREF_TAG_%s%s) %s' % (found[0][0],
found[0][1],
cid,
found[0][2],
found[0][3])
ptree = re.sub(pattern % dpattern, repl, ptree, 1, re.X)
return ptree
def get_tagged_corefs(xml, ordered=False):
"""Parses xml to find all tagged coreferences contained in COREF tags
Args:
xml: string, xml markedup with COREF tags
ordered: if True, returns a list in the same order that the corefs
appear in the text
Returns:
if ordered
list of tuples, [(coref_id, {coref, ref_id}), ]
if not ordered
dict of dict, {coref_id: (coref, ref_id)
>>> text = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'1': {'text': u'his', 'ref': None}}
>>> get_tagged_corefs(text, ordered=True)
[(u'1', {'text': u'his', 'ref': None})]
>>> text = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'A': {'text': u'John', 'ref': None}, u'1': {'text': u'his', 'ref': u'A'}}
>>> get_tagged_corefs(text, ordered=True) # doctest: +NORMALIZE_WHITESPACE
[(u'A', {'text': u'John', 'ref': None}),
(u'1', {'text': u'his', 'ref': u'A'})]
"""
nps = {}
if ordered:
nps = []
xml = _normalize_malformed_xml(xml)
try:
corefs = parseString(xml).getElementsByTagName('COREF')
except ExpatError:
return nps
for coref in corefs:
try:
cid = coref.attributes['ID'].value
if ordered:
data = {}
for npid, np in nps:
if npid == cid:
data = np
break
else:
data = nps.get(cid, {})
except KeyError:
continue
try:
data['ref'] = coref.attributes['REF'].value
except KeyError:
data['ref'] = None
data['text'] = coref.firstChild.data
if ordered:
nps.append((cid, data))
else:
nps[cid] = data
return nps
def cid_less_than(cid1, cid2):
if cid1.isdigit() and cid2.isdigit():
return int(cid1) < int(cid2)
else:
return True
# elif not (cid1.isdigit() or cid2.isdigit()):
# num1 = int(cid1[:-1])
# num2 = int(cid2[:-1])
# if num1 == num2:
# return cid1[-1] < cid2[-1]
# else:
# return num1 < num2
# elif cid1.isdigit():
# return True
# else:
# return False
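# Illustrative sketch (added) of the comparison rule above: purely numeric ids compare
# as integers, and any id containing letters is conservatively treated as "less than".
def _example_cid_ordering():
    assert cid_less_than('2', '10')
    assert not cid_less_than('10', '2')
    assert cid_less_than('1A', '2')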
def _normalize_sentence(sent):
"""Removes unwanted characters from sentence for parsing
Args:
sent: string, sentence to normalize
Returns:
string, normalized sentence | #sent = sent[sent.find('\n\n\n'):]
removed = r'[\n ]+'
sent = re.sub(removed, ' ', sent)
return sent.strip()
def _normalize_malformed_xml(xml):
"""Ensures that xml begins and ends with <TXT> </TXT> tags
Args:
xml: string, text to be formatted as xml
Returns:
string, formatted xml
>>> _normalize_malformed_xml('The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('The dog.</TXT>')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.</TXT>')
'<TXT>The dog.</TXT>'
"""
xml = xml.strip()
if not xml.startswith('<TXT>'):
xml = '<TXT>' + xml
if not xml.endswith('</TXT>'):
xml = xml + '</TXT>'
return xml
def _remove_tags(xml):
"""Removes xml tags from string, returning non-markedup text
Args:
xml: string, xml markedup text
Returns:
string, text from xml
>>> xml = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
>>> xml = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
"""
chars = list(xml)
i = 0
while i < len(chars):
if chars[i] == '<':
while chars[i] != '>':
chars.pop(i) # pop everything between brackets
chars.pop(i) # pops the right-angle bracket, too
else:
i += 1
return ''.join(chars)
def _process_parse(parse, coreflist):
"""Tags parse tree with corefs and returns the tree, lexicon, dependencies
and raw text as tuple
Args:
parse: list of stanford corenlp parsed sentences
coreflist: list of coreferences from tagged xml
Returns:
tuple, (ptree, lexicon, dependencies, rawtext) if parse contains a
sentence, else returns None
"""
sentence = parse.get('sentences')
if sentence:
ptree = Tree.parse(tag_ptree(sentence[0]['parsetree'], coreflist))
words = [(w[0], w[1]) for w in sentence[0]['words']]
depends = [(d[0], d[1], d[2]) for d in sentence[0]['dependencies']]
text = sentence[0]['text']
return ptree, words, depends, text
else:
return None
def get_synsets(words):
"""Returns sets of cognitive synonyms for each of the input words
Args:
words: dict, {word: (pos1, pos2, ...)}
Returns:
dict, {synset_name: (syn1, syn2, syn3, ...)}
>>> words = {u'apple': (u'NN')}
>>> get_synsets(words) # doctest: +NORMALIZE_WHITESPACE
{'apple.n.01': ('apple',),
'apple.n.02': ('apple', 'orchard_apple_tree', 'Malus_pumila')}
"""
synsets = {}
for word in words:
for syn in wn.synsets(word):
synsets[syn.name] = tuple([lemma.name for lemma in syn.lemmas])
return synsets
@static_var("id", '1A')
def _mk_coref_id():
"""Creates a unique coreference id tag
Note: only unique if input id's are not of the form num+alpha
Returns:
string, alphanumeric unique id
"""
num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]
if alpha == 'Z':
alpha = 'A'
num += 1
else:
alpha = chr(ord(alpha) + 1)
_mk_coref_id.id = '%s%s' % (num, alpha)
return _mk_coref_id.id |
""" | random_line_split |
data.py | # -*- coding: utf-8 -*-
"""
coref.data
~~~~~~~~~~~~
File Processor
Parses 'Coreference Resolution Input File'
:copyright: (c) 2012 by Adam Walz, Charmaine Keck
:license:
"""
import re
from os import strerror
from sys import stderr
from time import sleep
from errno import EIO
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import jsonrpc
from simplejson import loads
from nltk import sent_tokenize, word_tokenize
from nltk.tree import Tree
from nltk.corpus import wordnet as wn
from Levenshtein import ratio
from helpers import static_var, vprint
class FileParse():
def __init__(self, filename, pserver):
fparse = mk_fparse(filename.strip(), pserver)
self.parses = [Parse(p) for p in fparse[0]]
self.nps = fparse[1]
self.synsets = fparse[2]
class Parse():
def __init__(self, parse):
self.ptree = parse[0]
self.words = parse[1]
self.dependencies = parse[2]
self.text = parse[3]
class FilenameException(Exception):
"""Raised when file does not have the correct extension"""
pass
def mk_parses(listfile, corenlp_host):
"""Creates a list of FileParse objects for the files listed in the listfile
Args:
listfile: string, path to input listfile (see assignment description
for listfile details)
Returns:
list of FileParse objects
"""
# if not listfile.endswith('.listfile'):
# filetype = 'Co-Reference List file'
# error = 'has incorrect file type'
# raise FilenameException("Error: %s %s" % (filetype, error))
try:
with open(listfile) as f:
pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),
jsonrpc.TransportTcpIp(
addr=(corenlp_host, 8080), limit=1000))
parses = dict([(get_id(path), FileParse(path, pserver))
for path in f.readlines()
if path.lstrip()[0] != '#'])
except IOError:
stderr.write(strerror(EIO)) # stderr.write does not have newlines
stderr.write("\nERROR: Could not open list file\n")
exit(EIO)
else:
|
def get_id(path):
"""Parses a file path for the filename without extension
Args:
path: string, full (or relative) file path for coreference file.
Must end in .crf
Returns:
string, file id (filename without extension)
>>> path = '/home/user/Desktop/full.crf'
>>> get_id(path)
'full'
>>> path = 'relative.crf'
>>> get_id(path)
'relative'
"""
fid, ext, _ = path.strip().split('/')[-1].partition('.crf')
if not fid or ext != '.crf':
filetype = 'Co-Reference Input file'
error = 'has incorrect file type'
raise FilenameException("Error: %s %s" % (filetype, error))
return fid
def mk_fparse(filename, pserver):
"""Parses input to get list of paragraphs with sentence structure
and a dictionary of noun phrases contained in the COREF tags
Args:
filename: string, path to crf file
pserver: jsonrpc.ServerProxy, stanford corenlp server for parsing
Returns:
tuple, (list_stanford_sent_parses, dict_file_corefs, dict_file_synsets)
"""
parses = []
try:
with open(filename) as f:
vprint('OPEN: %s' % filename)
xml = f.read()
except IOError:
print strerror(EIO)
print("ERROR: Could not open %s" % filename)
return (parses, get_tagged_corefs(''), get_synsets({}))
# remove unwanted characters from xml
vprint('\tPARSE: Parsing file: %s' % filename)
# parse_tries = 0
# while parse_tries < 5:
# try:
# t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))
# parse_tries = 0
# break
# except jsonrpc.RPCTimeoutError:
# vprint('\tERROR: RPCTimeoutError - retrying')
# parse_tries += 3
# except jsonrpc.RPCTransportError:
# vprint('\tERROR: RPCTransportError - retrying')
# data = _normalize_sentence(_remove_tags(xml))
# sentences = [sent for part in data.split('\n\n')
# for sent in sent_tokenize(part)]
# try:
# xml1 = data[:data.find(sentences[len(sentences)/3])]
# xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]
# xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]
# t1 = loads(pserver.parse(xml1))
# t2 = loads(pserver.parse(xml2))
# t3 = loads(pserver.parse(xml3))
# t = dict(t1.items() + t2.items() + t3.items())
# parse_tries = 0
# break
# except Exception:
# parse_tries = -1
# break
# parse_tries += 1
# if parse_tries != 0:
# vprint('\tFATAL: RPCTransportError - skipping')
sentences = [sent for part in xml.split('\n\n')
for sent in sent_tokenize(part)]
vprint('\tPARSE: Parsing sentences: %s' % filename)
for sent in sentences:
sent_corefs = get_tagged_corefs(sent, ordered=True)
# remove unwanted characters from xml
sent = _normalize_sentence(_remove_tags(sent))
parse_tries = 0
while parse_tries < 5:
try:
sparse = loads(pserver.parse(sent))
parse_tries = 0
break
except jsonrpc.RPCTransportError:
vprint('\tERROR: RPCTransportError - retrying')
parse_tries += 1
if parse_tries != 0:
vprint('\tFATAL: RPCTransportError - skipping')
pparse = _process_parse(sparse, sent_corefs)
if pparse:
parses.append(pparse)
pos_tags = {}
for parse in parses:
for word, attr in parse[1]:
tags = pos_tags.get(word, set())
tags.add(attr['PartOfSpeech'])
pos_tags[word] = tags
return parses, get_tagged_corefs(xml), get_synsets(pos_tags)
def tag_ptree(ptree, coreflist):
"""Tags given parse tree with coreferences
Args:
ptree: string, parenthesized string representation of parse tree
coreflist: list of tuples, [('1', {'text': 'dog', 'ref': None})]
Returns:
string, tagged parse tree
>>> ptree = '(S NP( (NN He)) VP( (V ran)))'
>>> coreflist = [('1', {'text': 'He', 'ref': None})]
>>> tag_ptree(ptree, coreflist)
'(S NP( COREF_TAG_1( (NN He))) VP( (V ran)))'
"""
pattern = r"""(?P<lp>\(?\s*) # left parenthesis
(?P<tg>[a-zA-Z$]+)? # POS tag
(?P<data>\s*%s) # subtree of tag
(?P<rp>(?:\s*\))*) # right parenthesis
"""
for cid, coref in coreflist[::-1]:
words = ''.join(word_tokenize(coref['text']))
nltktree = Tree.parse(ptree)
nltktree.reverse() # perform search right to left
data = None
for subtree in nltktree.subtrees(): # BFS
if ''.join(subtree.leaves()) == words: # equal ignoring whitespace
data = subtree.pprint()
break
# If found via breadth-first search of parse tree
if data:
ptree = ptree.replace(data, '( COREF_TAG_%s%s)' % (cid, data))
else: # Try finding via regex matching instead
dpattern = r'\s*'.join([r'\(\s*[a-zA-Z$]+\s+%s\s*\)' % word
for word in word_tokenize(coref['text'])])
found = re.findall(pattern % dpattern, ptree, re.X)
if found:
repl = '%s%s ( COREF_TAG_%s%s) %s' % (found[0][0],
found[0][1],
cid,
found[0][2],
found[0][3])
ptree = re.sub(pattern % dpattern, repl, ptree, 1, re.X)
return ptree
def get_tagged_corefs(xml, ordered=False):
"""Parses xml to find all tagged coreferences contained in COREF tags
Args:
xml: string, xml markedup with COREF tags
ordered: if True, returns a list in the same order that the corefs
appear in the text
Returns:
if ordered
list of tuples, [(coref_id, {coref, ref_id}), ]
if not ordered
dict of dict, {coref_id: (coref, ref_id)
>>> text = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'1': {'text': u'his', 'ref': None}}
>>> get_tagged_corefs(text, ordered=True)
[(u'1', {'text': u'his', 'ref': None})]
>>> text = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'A': {'text': u'John', 'ref': None}, u'1': {'text': u'his', 'ref': u'A'}}
>>> get_tagged_corefs(text, ordered=True) # doctest: +NORMALIZE_WHITESPACE
[(u'A', {'text': u'John', 'ref': None}),
(u'1', {'text': u'his', 'ref': u'A'})]
"""
nps = {}
if ordered:
nps = []
xml = _normalize_malformed_xml(xml)
try:
corefs = parseString(xml).getElementsByTagName('COREF')
except ExpatError:
return nps
for coref in corefs:
try:
cid = coref.attributes['ID'].value
if ordered:
data = {}
for npid, np in nps:
if npid == cid:
data = np
break
else:
data = nps.get(cid, {})
except KeyError:
continue
try:
data['ref'] = coref.attributes['REF'].value
except KeyError:
data['ref'] = None
data['text'] = coref.firstChild.data
if ordered:
nps.append((cid, data))
else:
nps[cid] = data
return nps
def cid_less_than(cid1, cid2):
if cid1.isdigit() and cid2.isdigit():
return int(cid1) < int(cid2)
else:
return True
# elif not (cid1.isdigit() or cid2.isdigit()):
# num1 = int(cid1[:-1])
# num2 = int(cid2[:-1])
# if num1 == num2:
# return cid1[-1] < cid2[-1]
# else:
# return num1 < num2
# elif cid1.isdigit():
# return True
# else:
# return False
def _normalize_sentence(sent):
"""Removes unwanted characters from sentence for parsing
Args:
sent: string, sentence to normalize
Returns:
string, normalized sentence
"""
#sent = sent[sent.find('\n\n\n'):]
removed = r'[\n ]+'
sent = re.sub(removed, ' ', sent)
return sent.strip()
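# Illustrative sketch (added): runs of spaces and newlines collapse to single spaces and
# the result is stripped, keeping each CoreNLP input sentence on a single line.
def _example_normalize_sentence():
    assert _normalize_sentence('The dog\n ran\n\n home. ') == 'The dog ran home.'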
def _normalize_malformed_xml(xml):
"""Ensures that xml begins and ends with <TXT> </TXT> tags
Args:
xml: string, text to be formatted as xml
Returns:
string, formatted xml
>>> _normalize_malformed_xml('The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('The dog.</TXT>')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.</TXT>')
'<TXT>The dog.</TXT>'
"""
xml = xml.strip()
if not xml.startswith('<TXT>'):
xml = '<TXT>' + xml
if not xml.endswith('</TXT>'):
xml = xml + '</TXT>'
return xml
def _remove_tags(xml):
"""Removes xml tags from string, returning non-markedup text
Args:
xml: string, xml markedup text
Returns:
string, text from xml
>>> xml = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
>>> xml = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
"""
chars = list(xml)
i = 0
while i < len(chars):
if chars[i] == '<':
while chars[i] != '>':
chars.pop(i) # pop everything between brackets
chars.pop(i) # pops the right-angle bracket, too
else:
i += 1
return ''.join(chars)
def _process_parse(parse, coreflist):
"""Tags parse tree with corefs and returns the tree, lexicon, dependencies
and raw text as tuple
Args:
parse: list of stanford corenlp parsed sentences
coreflist: list of coreferences from tagged xml
Returns:
tuple, (ptree, lexicon, dependencies, rawtext) if parse contains a
sentence, else returns None
"""
sentence = parse.get('sentences')
if sentence:
ptree = Tree.parse(tag_ptree(sentence[0]['parsetree'], coreflist))
words = [(w[0], w[1]) for w in sentence[0]['words']]
depends = [(d[0], d[1], d[2]) for d in sentence[0]['dependencies']]
text = sentence[0]['text']
return ptree, words, depends, text
else:
return None
def get_synsets(words):
"""Returns sets of cognitive synonyms for each of the input words
Args:
words: dict, {word: (pos1, pos2, ...)}
Returns:
dict, {synset_name: (syn1, syn2, syn3, ...)}
>>> words = {u'apple': (u'NN')}
>>> get_synsets(words) # doctest: +NORMALIZE_WHITESPACE
{'apple.n.01': ('apple',),
'apple.n.02': ('apple', 'orchard_apple_tree', 'Malus_pumila')}
"""
synsets = {}
for word in words:
for syn in wn.synsets(word):
synsets[syn.name] = tuple([lemma.name for lemma in syn.lemmas])
return synsets
@static_var("id", '1A')
def _mk_coref_id():
"""Creates a unique coreference id tag
Note: only unique if input id's are not of the form num+alpha
Returns:
string, alphanumeric unique id
"""
num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]
if alpha == 'Z':
alpha = 'A'
num += 1
else:
alpha = chr(ord(alpha) + 1)
_mk_coref_id.id = '%s%s' % (num, alpha)
return _mk_coref_id.id
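# Illustrative sketch (added): starting from the seed id '1A', successive calls advance
# the letter and roll the number over after 'Z'. The values below assume a fresh import.
def _example_mk_coref_id():
    first = _mk_coref_id()   # '1B' on a freshly imported module
    second = _mk_coref_id()  # '1C'
    return first, second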
| return parses | conditional_block |
data.py | # -*- coding: utf-8 -*-
"""
coref.data
~~~~~~~~~~~~
File Processor
Parses 'Coreference Resolution Input File'
:copyright: (c) 2012 by Adam Walz, Charmaine Keck
:license:
"""
import re
from os import strerror
from sys import stderr
from time import sleep
from errno import EIO
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import jsonrpc
from simplejson import loads
from nltk import sent_tokenize, word_tokenize
from nltk.tree import Tree
from nltk.corpus import wordnet as wn
from Levenshtein import ratio
from helpers import static_var, vprint
class FileParse():
def __init__(self, filename, pserver):
fparse = mk_fparse(filename.strip(), pserver)
self.parses = [Parse(p) for p in fparse[0]]
self.nps = fparse[1]
self.synsets = fparse[2]
class Parse():
def __init__(self, parse):
self.ptree = parse[0]
self.words = parse[1]
self.dependencies = parse[2]
self.text = parse[3]
class FilenameException(Exception):
"""Raised when file does not have the correct extension"""
pass
def mk_parses(listfile, corenlp_host):
"""Creates a list of FileParse objects for the files listed in the listfile
Args:
listfile: string, path to input listfile (see assignment description
for listfile details)
Returns:
list of FileParse objects
"""
# if not listfile.endswith('.listfile'):
# filetype = 'Co-Reference List file'
# error = 'has incorrect file type'
# raise FilenameException("Error: %s %s" % (filetype, error))
try:
with open(listfile) as f:
pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),
jsonrpc.TransportTcpIp(
addr=(corenlp_host, 8080), limit=1000))
parses = dict([(get_id(path), FileParse(path, pserver))
for path in f.readlines()
if path.lstrip()[0] != '#'])
except IOError:
stderr.write(strerror(EIO)) # stderr.write does not have newlines
stderr.write("\nERROR: Could not open list file\n")
exit(EIO)
else:
return parses
def get_id(path):
"""Parses a file path for the filename without extension
Args:
path: string, full (or relative) file path for coreference file.
Must end in .crf
Returns:
string, file id (filename without extension)
>>> path = '/home/user/Desktop/full.crf'
>>> get_id(path)
'full'
>>> path = 'relative.crf'
>>> get_id(path)
'relative'
"""
fid, ext, _ = path.strip().split('/')[-1].partition('.crf')
if not fid or ext != '.crf':
filetype = 'Co-Reference Input file'
error = 'has incorrect file type'
raise FilenameException("Error: %s %s" % (filetype, error))
return fid
def mk_fparse(filename, pserver):
"""Parses input to get list of paragraphs with sentence structure
and a dictionary of noun phrases contained in the COREF tags
Args:
filename: string, path to crf file
pserver: jsonrpc.ServerProxy, stanford corenlp server for parsing
Returns:
tuple, (list_stanford_sent_parses, dict_file_corefs, dict_file_synsets)
"""
parses = []
try:
with open(filename) as f:
vprint('OPEN: %s' % filename)
xml = f.read()
except IOError:
print strerror(EIO)
print("ERROR: Could not open %s" % filename)
return (parses, get_tagged_corefs(''), get_synsets({}))
# remove unwanted characters from xml
vprint('\tPARSE: Parsing file: %s' % filename)
# parse_tries = 0
# while parse_tries < 5:
# try:
# t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))
# parse_tries = 0
# break
# except jsonrpc.RPCTimeoutError:
# vprint('\tERROR: RPCTimeoutError - retrying')
# parse_tries += 3
# except jsonrpc.RPCTransportError:
# vprint('\tERROR: RPCTransportError - retrying')
# data = _normalize_sentence(_remove_tags(xml))
# sentences = [sent for part in data.split('\n\n')
# for sent in sent_tokenize(part)]
# try:
# xml1 = data[:data.find(sentences[len(sentences)/3])]
# xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]
# xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]
# t1 = loads(pserver.parse(xml1))
# t2 = loads(pserver.parse(xml2))
# t3 = loads(pserver.parse(xml3))
# t = dict(t1.items() + t2.items() + t3.items())
# parse_tries = 0
# break
# except Exception:
# parse_tries = -1
# break
# parse_tries += 1
# if parse_tries != 0:
# vprint('\tFATAL: RPCTransportError - skipping')
sentences = [sent for part in xml.split('\n\n')
for sent in sent_tokenize(part)]
vprint('\tPARSE: Parsing sentences: %s' % filename)
for sent in sentences:
sent_corefs = get_tagged_corefs(sent, ordered=True)
# remove unwanted characters from xml
sent = _normalize_sentence(_remove_tags(sent))
parse_tries = 0
while parse_tries < 5:
try:
sparse = loads(pserver.parse(sent))
parse_tries = 0
break
except jsonrpc.RPCTransportError:
vprint('\tERROR: RPCTransportError - retrying')
parse_tries += 1
if parse_tries != 0:
vprint('\tFATAL: RPCTransportError - skipping')
pparse = _process_parse(sparse, sent_corefs)
if pparse:
parses.append(pparse)
pos_tags = {}
for parse in parses:
for word, attr in parse[1]:
tags = pos_tags.get(word, set())
tags.add(attr['PartOfSpeech'])
pos_tags[word] = tags
return parses, get_tagged_corefs(xml), get_synsets(pos_tags)
def tag_ptree(ptree, coreflist):
"""Tags given parse tree with coreferences
Args:
ptree: string, parenthesized string representation of parse tree
coreflist: list of tuples, [('1', {'text': 'dog', 'ref': None})]
Returns:
string, tagged parse tree
>>> ptree = '(S NP( (NN He)) VP( (V ran)))'
>>> coreflist = [('1', {'text': 'He', 'ref': None})]
>>> tag_ptree(ptree, coreflist)
'(S NP( COREF_TAG_1( (NN He))) VP( (V ran)))'
"""
pattern = r"""(?P<lp>\(?\s*) # left parenthesis
(?P<tg>[a-zA-Z$]+)? # POS tag
(?P<data>\s*%s) # subtree of tag
(?P<rp>(?:\s*\))*) # right parenthesis
"""
for cid, coref in coreflist[::-1]:
words = ''.join(word_tokenize(coref['text']))
nltktree = Tree.parse(ptree)
nltktree.reverse() # perform search right to left
data = None
for subtree in nltktree.subtrees(): # BFS
if ''.join(subtree.leaves()) == words: # equal ignoring whitespace
data = subtree.pprint()
break
# If found via breadth-first search of parse tree
if data:
ptree = ptree.replace(data, '( COREF_TAG_%s%s)' % (cid, data))
else: # Try finding via regex matching instead
dpattern = r'\s*'.join([r'\(\s*[a-zA-Z$]+\s+%s\s*\)' % word
for word in word_tokenize(coref['text'])])
found = re.findall(pattern % dpattern, ptree, re.X)
if found:
repl = '%s%s ( COREF_TAG_%s%s) %s' % (found[0][0],
found[0][1],
cid,
found[0][2],
found[0][3])
ptree = re.sub(pattern % dpattern, repl, ptree, 1, re.X)
return ptree
def get_tagged_corefs(xml, ordered=False):
"""Parses xml to find all tagged coreferences contained in COREF tags
Args:
xml: string, xml markedup with COREF tags
ordered: if True, returns a list in the same order that the corefs
appear in the text
Returns:
if ordered
list of tuples, [(coref_id, {coref, ref_id}), ]
if not ordered
dict of dict, {coref_id: (coref, ref_id)
>>> text = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'1': {'text': u'his', 'ref': None}}
>>> get_tagged_corefs(text, ordered=True)
[(u'1', {'text': u'his', 'ref': None})]
>>> text = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'A': {'text': u'John', 'ref': None}, u'1': {'text': u'his', 'ref': u'A'}}
>>> get_tagged_corefs(text, ordered=True) # doctest: +NORMALIZE_WHITESPACE
[(u'A', {'text': u'John', 'ref': None}),
(u'1', {'text': u'his', 'ref': u'A'})]
"""
nps = {}
if ordered:
nps = []
xml = _normalize_malformed_xml(xml)
try:
corefs = parseString(xml).getElementsByTagName('COREF')
except ExpatError:
return nps
for coref in corefs:
try:
cid = coref.attributes['ID'].value
if ordered:
data = {}
for npid, np in nps:
if npid == cid:
data = np
break
else:
data = nps.get(cid, {})
except KeyError:
continue
try:
data['ref'] = coref.attributes['REF'].value
except KeyError:
data['ref'] = None
data['text'] = coref.firstChild.data
if ordered:
nps.append((cid, data))
else:
nps[cid] = data
return nps
def cid_less_than(cid1, cid2):
if cid1.isdigit() and cid2.isdigit():
return int(cid1) < int(cid2)
else:
return True
# elif not (cid1.isdigit() or cid2.isdigit()):
# num1 = int(cid1[:-1])
# num2 = int(cid2[:-1])
# if num1 == num2:
# return cid1[-1] < cid2[-1]
# else:
# return num1 < num2
# elif cid1.isdigit():
# return True
# else:
# return False
def _normalize_sentence(sent):
"""Removes unwanted characters from sentence for parsing
Args:
sent: string, sentence to normalize
Returns:
string, normalized sentence
"""
#sent = sent[sent.find('\n\n\n'):]
removed = r'[\n ]+'
sent = re.sub(removed, ' ', sent)
return sent.strip()
def _normalize_malformed_xml(xml):
"""Ensures that xml begins and ends with <TXT> </TXT> tags
Args:
xml: string, text to be formatted as xml
Returns:
string, formatted xml
>>> _normalize_malformed_xml('The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('The dog.</TXT>')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.</TXT>')
'<TXT>The dog.</TXT>'
"""
xml = xml.strip()
if not xml.startswith('<TXT>'):
xml = '<TXT>' + xml
if not xml.endswith('</TXT>'):
xml = xml + '</TXT>'
return xml
def _remove_tags(xml):
"""Removes xml tags from string, returning non-markedup text
Args:
xml: string, xml markedup text
Returns:
string, text from xml
>>> xml = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
>>> xml = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
"""
chars = list(xml)
i = 0
while i < len(chars):
if chars[i] == '<':
while chars[i] != '>':
chars.pop(i) # pop everything between brackets
chars.pop(i) # pops the right-angle bracket, too
else:
i += 1
return ''.join(chars)
def _process_parse(parse, coreflist):
"""Tags parse tree with corefs and returns the tree, lexicon, dependencies
and raw text as tuple
Args:
parse: list of stanford corenlp parsed sentences
coreflist: list of coreferences from tagged xml
Returns:
tuple, (ptree, lexicon, dependencies, rawtext) if parse contains a
sentence, else returns None
"""
sentence = parse.get('sentences')
if sentence:
ptree = Tree.parse(tag_ptree(sentence[0]['parsetree'], coreflist))
words = [(w[0], w[1]) for w in sentence[0]['words']]
depends = [(d[0], d[1], d[2]) for d in sentence[0]['dependencies']]
text = sentence[0]['text']
return ptree, words, depends, text
else:
return None
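# Illustrative sketch (added) with a minimal, hand-made CoreNLP-style dictionary; the
# field layout mirrors what _process_parse expects and is not real server output.
def _example_process_parse():
    fake = {'sentences': [{
        'parsetree': '(S (NP (NN John)) (VP (V ran)))',
        'words': [('John', {'PartOfSpeech': 'NNP'}), ('ran', {'PartOfSpeech': 'VBD'})],
        'dependencies': [('nsubj', 'ran', 'John')],
        'text': 'John ran'}]}
    return _process_parse(fake, [])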
def get_synsets(words):
"""Returns sets of cognitive synonyms for each of the input words
Args:
words: dict, {word: (pos1, pos2, ...)}
Returns:
dict, {synset_name: (syn1, syn2, syn3, ...)}
>>> words = {u'apple': (u'NN')}
>>> get_synsets(words) # doctest: +NORMALIZE_WHITESPACE
{'apple.n.01': ('apple',),
'apple.n.02': ('apple', 'orchard_apple_tree', 'Malus_pumila')}
"""
synsets = {}
for word in words:
for syn in wn.synsets(word):
synsets[syn.name] = tuple([lemma.name for lemma in syn.lemmas])
return synsets
@static_var("id", '1A')
def | ():
"""Creates a unique coreference id tag
Note: only unique if input id's are not of the form num+alpha
Returns:
string, alphanumeric unique id
"""
num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]
if alpha == 'Z':
alpha = 'A'
num += 1
else:
alpha = chr(ord(alpha) + 1)
_mk_coref_id.id = '%s%s' % (num, alpha)
return _mk_coref_id.id
| _mk_coref_id | identifier_name |
data.py | # -*- coding: utf-8 -*-
"""
coref.data
~~~~~~~~~~~~
File Processor
Parses 'Coreference Resolution Input File'
:copyright: (c) 2012 by Adam Walz, Charmaine Keck
:license:
"""
import re
from os import strerror
from sys import stderr
from time import sleep
from errno import EIO
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import jsonrpc
from simplejson import loads
from nltk import sent_tokenize, word_tokenize
from nltk.tree import Tree
from nltk.corpus import wordnet as wn
from Levenshtein import ratio
from helpers import static_var, vprint
class FileParse():
def __init__(self, filename, pserver):
fparse = mk_fparse(filename.strip(), pserver)
self.parses = [Parse(p) for p in fparse[0]]
self.nps = fparse[1]
self.synsets = fparse[2]
class Parse():
def __init__(self, parse):
|
class FilenameException(Exception):
"""Raised when file does not have the correct extension"""
pass
def mk_parses(listfile, corenlp_host):
"""Creates a list of FileParse objects for the files listed in the listfile
Args:
listfile: string, path to input listfile (see assignment description
for listfile details)
Returns:
list of FileParse objects
"""
# if not listfile.endswith('.listfile'):
# filetype = 'Co-Reference List file'
# error = 'has incorrect file type'
# raise FilenameException("Error: %s %s" % (filetype, error))
try:
with open(listfile) as f:
pserver = jsonrpc.ServerProxy(jsonrpc.JsonRpc20(),
jsonrpc.TransportTcpIp(
addr=(corenlp_host, 8080), limit=1000))
parses = dict([(get_id(path), FileParse(path, pserver))
for path in f.readlines()
if path.strip() and not path.strip().startswith('#')])
except IOError:
stderr.write(strerror(EIO)) # stderr.write does not have newlines
stderr.write("\nERROR: Could not open list file\n")
exit(EIO)
else:
return parses
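# Illustrative usage sketch (not part of the original module): how mk_parses
# might be invoked. It assumes a Stanford CoreNLP JSON-RPC server is listening
# on port 8080 of the given host, and 'docs.listfile' is a made-up listfile
# path containing one .crf path per line.
#
#   parses = mk_parses('docs.listfile', 'localhost')
#   for file_id, fparse in parses.items():
#       print '%s: %d parsed sentence(s)' % (file_id, len(fparse.parses))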
def get_id(path):
"""Parses a file path for the filename without extension
Args:
path: string, full (or relative) file path for coreference file.
Must end in .crf
Returns:
string, file id (filename without extension)
>>> path = '/home/user/Desktop/full.crf'
>>> get_id(path)
'full'
>>> path = 'relative.crf'
>>> get_id(path)
'relative'
"""
fid, ext, _ = path.strip().split('/')[-1].partition('.crf')
if not fid or ext != '.crf':
filetype = 'Co-Reference Input file'
error = 'has incorrect file type'
raise FilenameException("Error: %s %s" % (filetype, error))
return fid
def mk_fparse(filename, pserver):
"""Parses input to get list of paragraphs with sentence structure
and a dictionary of noun phrases contained in the COREF tags
Args:
filename: string, path to crf file
pserver: jsonrpc.ServerProxy, stanford corenlp server for parsing
Returns:
tuple, (list_stanford_sent_parses, dict_file_corefs, dict_file_synsets)
"""
parses = []
try:
with open(filename) as f:
vprint('OPEN: %s' % filename)
xml = f.read()
except IOError:
print strerror(EIO)
print("ERROR: Could not open %s" % filename)
return (parses, get_tagged_corefs(''), get_synsets({}))
# remove unwanted characters from xml
vprint('\tPARSE: Parsing file: %s' % filename)
# parse_tries = 0
# while parse_tries < 5:
# try:
# t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))
# parse_tries = 0
# break
# except jsonrpc.RPCTimeoutError:
# vprint('\tERROR: RPCTimeoutError - retrying')
# parse_tries += 3
# except jsonrpc.RPCTransportError:
# vprint('\tERROR: RPCTransportError - retrying')
# data = _normalize_sentence(_remove_tags(xml))
# sentences = [sent for part in data.split('\n\n')
# for sent in sent_tokenize(part)]
# try:
# xml1 = data[:data.find(sentences[len(sentences)/3])]
# xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]
# xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]
# t1 = loads(pserver.parse(xml1))
# t2 = loads(pserver.parse(xml2))
# t3 = loads(pserver.parse(xml3))
# t = dict(t1.items() + t2.items() + t3.items())
# parse_tries = 0
# break
# except Exception:
# parse_tries = -1
# break
# parse_tries += 1
# if parse_tries != 0:
# vprint('\tFATAL: RPCTransportError - skipping')
sentences = [sent for part in xml.split('\n\n')
for sent in sent_tokenize(part)]
vprint('\tPARSE: Parsing sentences: %s' % filename)
for sent in sentences:
sent_corefs = get_tagged_corefs(sent, ordered=True)
# remove unwanted characters from xml
sent = _normalize_sentence(_remove_tags(sent))
parse_tries = 0
while parse_tries < 5:
try:
sparse = loads(pserver.parse(sent))
parse_tries = 0
break
except jsonrpc.RPCTransportError:
vprint('\tERROR: RPCTransportError - retrying')
parse_tries += 1
if parse_tries != 0:
vprint('\tFATAL: RPCTransportError - skipping')
pparse = _process_parse(sparse, sent_corefs)
if pparse:
parses.append(pparse)
pos_tags = {}
for parse in parses:
for word, attr in parse[1]:
tags = pos_tags.get(word, set())
tags.add(attr['PartOfSpeech'])
pos_tags[word] = tags
return parses, get_tagged_corefs(xml), get_synsets(pos_tags)
def tag_ptree(ptree, coreflist):
"""Tags given parse tree with coreferences
Args:
ptree: string, parenthesized string representation of parse tree
coreflist: list of tuples, [('1', {'text': 'dog', 'ref': None})]
Returns:
string, tagged parse tree
>>> ptree = '(S NP( (NN He)) VP( (V ran)))'
>>> coreflist = [('1', {'text': 'He', 'ref': None})]
>>> tag_ptree(ptree, coreflist)
'(S NP( COREF_TAG_1( (NN He))) VP( (V ran)))'
"""
pattern = r"""(?P<lp>\(?\s*) # left parenthesis
(?P<tg>[a-zA-Z$]+)? # POS tag
(?P<data>\s*%s) # subtree of tag
(?P<rp>(?:\s*\))*) # right parenthesis
"""
for cid, coref in coreflist[::-1]:
words = ''.join(word_tokenize(coref['text']))
nltktree = Tree.parse(ptree)
nltktree.reverse() # perform search right to left
data = None
for subtree in nltktree.subtrees(): # BFS
if ''.join(subtree.leaves()) == words: # equal ignoring whitespace
data = subtree.pprint()
break
# If found via breadth-first search of parse tree
if data:
ptree = ptree.replace(data, '( COREF_TAG_%s%s)' % (cid, data))
else: # Try finding via regex matching instead
dpattern = r'\s*'.join([r'\(\s*[a-zA-Z$]+\s+%s\s*\)' % word
for word in word_tokenize(coref['text'])])
found = re.findall(pattern % dpattern, ptree, re.X)
if found:
repl = '%s%s ( COREF_TAG_%s%s) %s' % (found[0][0],
found[0][1],
cid,
found[0][2],
found[0][3])
ptree = re.sub(pattern % dpattern, repl, ptree, 1, re.X)
return ptree
def get_tagged_corefs(xml, ordered=False):
"""Parses xml to find all tagged coreferences contained in COREF tags
Args:
xml: string, xml markedup with COREF tags
ordered: if True, returns an list in the same order that the corefs
appear in the text
Returns:
if ordered
list of tuples, [(coref_id, {coref, ref_id}), ]
if not ordered
dict of dict, {coref_id: {coref, ref_id}}
>>> text = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'1': {'text': u'his', 'ref': None}}
>>> get_tagged_corefs(text, ordered=True)
[(u'1', {'text': u'his', 'ref': None})]
>>> text = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> get_tagged_corefs(text)
{u'A': {'text': u'John', 'ref': None}, u'1': {'text': u'his', 'ref': u'A'}}
>>> get_tagged_corefs(text, ordered=True) # doctest: +NORMALIZE_WHITESPACE
[(u'A', {'text': u'John', 'ref': None}),
(u'1', {'text': u'his', 'ref': u'A'})]
"""
nps = {}
if ordered:
nps = []
xml = _normalize_malformed_xml(xml)
try:
corefs = parseString(xml).getElementsByTagName('COREF')
except ExpatError:
return nps
for coref in corefs:
try:
cid = coref.attributes['ID'].value
if ordered:
data = {}
for npid, np in nps:
if npid == cid:
data = np
break
else:
data = nps.get(cid, {})
except KeyError:
continue
try:
data['ref'] = coref.attributes['REF'].value
except KeyError:
data['ref'] = None
data['text'] = coref.firstChild.data
if ordered:
nps.append((cid, data))
else:
nps[cid] = data
return nps
def cid_less_than(cid1, cid2):
if cid1.isdigit() and cid2.isdigit():
return int(cid1) < int(cid2)
else:
return True
# elif not (cid1.isdigit() or cid2.isdigit()):
# num1 = int(cid1[:-1])
# num2 = int(cid2[:-1])
# if num1 == num2:
# return cid1[-1] < cid2[-1]
# else:
# return num1 < num2
# elif cid1.isdigit():
# return True
# else:
# return False
def _normalize_sentence(sent):
"""Removes unwanted characters from sentence for parsing
Args:
sent: string, sentence to normalize
Returns
string, normalized sentence
"""
#sent = sent[sent.find('\n\n\n'):]
removed = r'[\n ]+'
sent = re.sub(removed, ' ', sent)
return sent.strip()
def _normalize_malformed_xml(xml):
"""Ensures that xml begins and ends with <TXT> </TXT> tags
Args:
xml: string, text to be formatted as xml
Returns:
string, formatted xml
>>> _normalize_malformed_xml('The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('The dog.</TXT>')
'<TXT>The dog.</TXT>'
>>> _normalize_malformed_xml('<TXT>The dog.</TXT>')
'<TXT>The dog.</TXT>'
"""
xml = xml.strip()
if not xml.startswith('<TXT>'):
xml = '<TXT>' + xml
if not xml.endswith('</TXT>'):
xml = xml + '</TXT>'
return xml
def _remove_tags(xml):
"""Removes xml tags from string, returning non-markedup text
Args:
xml: string, xml markedup text
Returns:
string, text from xml
>>> xml = "<TXT>John stubbed <COREF ID='1'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
>>> xml = "<TXT><COREF ID='A'>John</COREF> stubbed " +\
"<COREF ID='1' REF='A'>his</COREF> toe.</TXT>"
>>> _remove_tags(xml)
'John stubbed his toe.'
"""
chars = list(xml)
i = 0
while i < len(chars):
if chars[i] == '<':
while chars[i] != '>':
chars.pop(i) # pop everything between brackets
chars.pop(i) # pops the right-angle bracket, too
else:
i += 1
return ''.join(chars)
def _process_parse(parse, coreflist):
"""Tags parse tree with corefs and returns the tree, lexicon, dependencies
and raw text as tuple
Args:
parse: list of stanford corenlp parsed sentences
coreflist: list of coreferences from tagged xml
Returns:
tuple, (ptree, lexicon, dependencies, rawtext) if parse contains a
sentence, else returns None
"""
sentence = parse.get('sentences')
if sentence:
ptree = Tree.parse(tag_ptree(sentence[0]['parsetree'], coreflist))
words = [(w[0], w[1]) for w in sentence[0]['words']]
depends = [(d[0], d[1], d[2]) for d in sentence[0]['dependencies']]
text = sentence[0]['text']
return ptree, words, depends, text
else:
return None
def get_synsets(words):
"""Returns sets of cognitive synonyms for each of the input words
Args:
words: dict, {word: (pos1, pos2, ...)}
Returns:
dict, {synset_name: (syn1, syn2, syn3, ...)}
>>> words = {u'apple': (u'NN')}
>>> get_synsets(words) # doctest: +NORMALIZE_WHITESPACE
{'apple.n.01': ('apple',),
'apple.n.02': ('apple', 'orchard_apple_tree', 'Malus_pumila')}
"""
synsets = {}
for word in words:
for syn in wn.synsets(word):
synsets[syn.name] = tuple([lemma.name for lemma in syn.lemmas])
return synsets
@static_var("id", '1A')
def _mk_coref_id():
"""Creates a unique coreference id tag
Note: only unique if input id's are not of the form num+alpha
Returns:
string, alphanumeric unique id
"""
num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]
if alpha == 'Z':
alpha = 'A'
num += 1
else:
alpha = chr(ord(alpha) + 1)
_mk_coref_id.id = '%s%s' % (num, alpha)
return _mk_coref_id.id
| self.ptree = parse[0]
self.words = parse[1]
self.dependencies = parse[2]
self.text = parse[3] | identifier_body |
lucidLog-1.0.0.source.js | seductiveapps.lucidLog = sa.l = {
about : {
whatsThis : 'seductiveapps.lucidLog = sa.l = A better console log for web applications.',
copyright : '(c) (r) 2013-2014 by [the owner of seductiveapps.com] <[email protected]>',
license : 'http://seductiveapps.com/seductiveapps/license.txt',
noWarranty : 'NO WARRANTY EXPRESSED OR IMPLIED. USE ONLY AT YOUR OWN RISK.',
version : '1.0.0',
firstReleased : '2013 January 08',
latestUpdate : '2014 February 5, 10:34 CET',
downloadURL : 'http://seductiveapps.com'
},
globals : {
available : navigator.userAgent.match(/Chrome/),
hideShowSpeed : 777,
corners : {
contentBackground : 'round',
itemBackground : 'round'
}
},
options : {}, // holds the definition of the desktop
settings : {}, // holds any other settings
data : {raw:[]},
init : function () {
return false;
var html =
'<div id="saLucidLog" style="position:absolute;z-index:41000000;width:100%;height:20%;bottom:-21%;opacity:0.01">'
+'<div id="saLucidLog_dragBar" style="position:absolute;width:100%;top:-3px;height:3px;cursor:n-resize;z-index:41000110"> </div>'
+'<div id="saLucidLog_background" class="fhl_content_background" style="position:absolute;z-index:41000010">'
+'<img src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/background.jpg" class="fhl_content_background" style="position:absolute;width:100%;height:100%;"/>'
+'</div>'
+'<img id="saLucidLog_btnRefresh" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/refresh.png" style="position:absolute;top:2px;right:30px;width:20px;z-index:41000200"/>'
+'<img id="saLucidLog_btnHide" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/close.png" style="position:absolute;top:2px;right:5px;width:20px;z-index:41000200"/>'
+'<div id="saLucidLog_btnShowPHP" class="vividButton vividTheme__menu_001" style="position:absolute;left:10px;z-index:41000300"><a href="javascript:sa.l.ui.click.btnShowPHP();">PHP</a></div>'
+'<div id="saLucidLog_btnShowJavascript" class="vividButton vividTheme__menu_001" style="position:absolute;left:250px;z-index:41000300"><a href="javascript:sa.l.ui.click.btnShowJavascript();">Javascript</a></div>'
+'<div id="saLucidLog_btnShowLog" class="vividButton vividTheme__menu_001" style="position:absolute;left:490px;z-index:41000300"><a href="javascript:sa.l.ui.click.btnShowLog();">Console.log</a></div>'
+'<div id="saLucidLog_overlay1" class="fhl_overlay" style="position:absolute;background:black;z-index:41000013;opacity:0.05;filter:Alpha(opacity=5);"> </div>'
+'<div id="saLucidLog_overlay2" style="position:absolute;background:black;z-index:41000014;opacity:0.05;filter:Alpha(opacity=5);"> </div>'
+'<div id="saLucidLog_overlay3" style="position:absolute;background:black;z-index:41000015;opacity:0.05;filter:Alpha(opacity=5);"> </div>'
+'<div id="saLucidLog_content_background1" class="fhl_content_background" style="position:absolute;z-index:41000017;width:100%;height:100%;background:black;opacity:0.15;filter:Alpha(opacity=15);"> </div>'
+'<div id="saLucidLog_content_background2" class="fhl_content_background" style="position:absolute;z-index:41000018;width:100%;height:100%;background:black;opacity:0.18;filter:Alpha(opacity=18);"> </div>'
+'<div id="saLucidLog_content_background3" class="fhl_content_background" style="position:absolute;z-index:41000019;width:100%;height:100%;background:black;opacity:0.20;filter:Alpha(opacity=20);"> </div>'
+'<div id="saLucidLog_content_holder" class="vividScrollpane vividTheme__scroll_black" style="position:absolute;z-index:42000000;">'
+'<div id="saLucidLog_content" style="position:absolute;z-index:50000000;">'
+'<div id="saLucidLog_page_php" class="saLucidLogTabpage" style="position:absolute;width:100%;height:100%;">'
+'</div>'
+'<div id="saLucidLog_page_javascript" class="saLucidLogTabpage" style="position:absolute;width:100%;height:100%;visibility:hidden;">'
+'<div id="saLucidLog_hm_javascript" style="width:100%;height:100%;opacity:0.7"> </div>'
+'</div>'
+'<div id="saLucidLog_page_log" class="saLucidLogTabpage vividScrollpane vividTheme__scroll_black" style="position:absolute;width:100%;height:100%;visibility:hidden;">'
//+ sa.settings.log.entries.join ('<br/>')
//+'<div id="saLucidLog_hm_log" style="width:100%;height:100%;opacity:0.7"> </div>'
+'</div>'
+'</div>'
+'</div>'
+'</div>'
+'</div>'
//+'<img id="saLucidLog_btnRecord" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/btnRecord.png" style="position:absolute;bottom:4px;right:100px;height:21px;z-index:999999990;" title="show LucidLog (PHP + JS trace log)"/>'
+'<img id="saLucidLog_btnShow" class="saBtn_simpleImg" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/btnShow.png" style="position:absolute;width:89px;height:21px;bottom:3px;right:5px;z-index:999999990;" title="show LucidLog (PHP + JS trace log)"/>';
//+'<div id="saLucidLog_btnRecord" class="vividButton vividTheme__playpause_001" style="position:absolute;bottom:3px;right:3px;width:50px;height:50px;" onclick="sa.tracer.disabled=false;" title="record with sa.tracer"> </div>';
jQuery('body').append (html);
if (true)
//setTimeout (sa.m.traceFunction (function () {
sa.vcc.init (jQuery('#saLucidLog')[0], sa.m.traceFunction(function() {sa.l.componentFullyInitialized();}));
//}), 2000);
else {
sa.l.componentFullyInitialized();
};
},
componentFullyInitialized : function(){
jQuery('.fhl_overlay, .fhl_content_background').css ({borderRadius:'.5em'});
jQuery('#saLucidLog').css ({
bottom : 0,
left : 3,
height : ((jQuery(window).height() / 10 ) * 4.5),
width : (jQuery(window).width() - 6)
});
jQuery('#saLucidLog_btnRefresh').click (sa.l.ui.click.btnRefresh);
jQuery('#saLucidLog_btnHide').click (sa.l.ui.click.btnHide);
jQuery('#saLucidLog_btnShow').click (sa.l.ui.click.btnShow);
jQuery(window).resize (sa.l.ui.resize);
sa.l.tools.setupDragAndDrop_forTopBorder();
sa.l.ui.resize();
sa.l.php.initialize();
//sa.l.ui.hide();
jQuery('#saLucidLog').css({opacity:1,display:'none'});
},
log : function () {
if (typeof sa.tracer!=='undefined') {
var ua = sa.tracer.findUA(arguments);
if (ua && ua.pointer) {
var logIdx = ua.pointer.logMessages.length;
//debugger;
ua.pointer.logMessages[logIdx] = arguments;
}// else debugger;
}
},
visualizeTracerData : function () {
var r = {};
for (var uaIdx=0; uaIdx<sa.tracer.traced.length; uaIdx++) {
var ua = sa.tracer.userActions[uaIdx];
delete ua.pointer;
delete ua.stackLevel;
delete ua.callIdx;
delete ua.callJSON;
ua.timings.startTime = '' + ua.timings.startTime;
var uaJSON = sa.json.encode (ua);
r[uaJSON] = sa.tracer.traced[uaIdx];//sa.l.cleanupTracerData (sa.tracer.traced[uaIdx]);
};
hm (r, 'sa.tracer dump', { htmlID : 'saLucidLog_hm_javascript', opacity : 0.7 });
},
cleanupTracerData : function (ua) {
if (ua.logMessages) {
var
lm = ua.logMessages,
lmn = {};
for (var i=0; i<lm.length; i++) {
lmn[lm[i][0]] = lm[i][1][1];
};
ua.logMessages = lmn;
};
if (ua.calls) {
for (var i=0; i<ua.calls.length; i++) {
/*
if (!sa.l.settings.gottaCleanup) sa.l.settings.gottaCleanup = [];
sa.l.settings.gottaCleanup[sa.l.settings.gottaCleanup.length] = {
ua : ua.calls[i]
};
*/
sa.l.cleanupTracerData (ua.calls[i]);
};
// setTimeout (sa.l.processGottaCleanup, 100);
};
return ua;
},
processGottaCleanup : function () {
var
gt = sa.l.settings.gottaCleanup.shift(),
count = 0;
while (gt && count < 100) {
sa.l.cleanupTracerData (gt.ua);
gt = sa.l.settings.gottaCleanup.shift();
count++;
};
if (sa.l.settings.gottaCleanup.length>0) {
setTimeout (function () {
sa.tracer.processGottaTrace();
}, 10);
}
},
ui : {
resize : function () {
jQuery('#saLucidLog').css({width:jQuery(window).width()-6});
var h = jQuery('#saLucidLog').height();
var w = jQuery('#saLucidLog').width();
jQuery('#saLucidLog_background').css ({ height : h, width : w });
jQuery('#saLucidLog_overlay1').css ({ top : 1, left : 1, height : h-2, width : w-2 });
jQuery('#saLucidLog_overlay2').css ({ top : 2, left : 2, height : h-4, width : w-4 });
jQuery('#saLucidLog_overlay3').css ({ top : 3, left : 3, height : h-6, width : w-6 });
jQuery('#saLucidLog_content_background1').css ({ top : 30, left : 4, height : h - 34, width : w-8 });
jQuery('#saLucidLog_content_background2').css ({ top : 31, left : 5, height : h - 36, width : w-10 });
jQuery('#saLucidLog_content_background3').css ({ top : 32, left : 6, height : h - 38, width : w-12 });
if (jQuery('#saLucidLog_content_holder__container').length==1) {
jQuery('#saLucidLog_content_holder__container').css ({ top : 30, left : 4, height : h - 54, width : w-8 });
} else {
jQuery('#saLucidLog_content_holder').css ({ top : 30, left : 4, height : h - 34, width : w-8 });
};
jQuery('#saLucidLog_content').css ({ top : 3, left : 3, height : jQuery('#saLucidLog_content_holder').height()-6, width : jQuery('#saLucidLog_content_holder').width()-6 });
var jQueryhm = jQuery('#saLucidLog_hm_javascript');
if (
jQueryhm[0]
&& jQueryhm[0].children[0]
&& jQueryhm[0].children[0].children[2]
) sa.sp.containerSizeChanged(jQuery('#saLucidLog_hm_javascript')[0].children[0].children[2], true);
sa.sp.containerSizeChanged (jQuery('#saLucidLog_content_holder')[0], true);
jQuery('.fhl_item_content').each (function (idx,el) {
var id = el.id.replace ('fhl_', '').replace('_content','');
jQuery('#fhl_'+id+'_bg').css ({width : jQuery(el).width()+10});
});
jQuery('.tabsContainer').each ( sa.m.traceFunction ( function (idx) {
jQuery(this).css ({
height : jQuery(this).parent().height() - jQuery(this).prev().height(),
});
}));
sa.l.php.tools.resizeWindow(sa.l.php.cmd.cmdID);
},
hide : function (callback) {
jQuery('#saLucidLog').fadeOut (sa.l.globals.hideShowSpeed, callback);
},
show : function (callback) {
jQuery('#saLucidLog').fadeIn (sa.l.globals.hideShowSpeed, callback);
},
toggleShowHide : function() {
if (jQuery('#saLucidLog').css('display')=='none') {
sa.l.ui.show();
} else {
sa.l.ui.hide();
}
},
click : {
// (all click handlers for this web component) :
btnRefresh : function () {
sa.l.redrawRawLog();
},
btnHide : function () {
sa.l.ui.hide();
},
btnShow : function () {
sa.tracer.disabled = true;
sa.l.ui.show();
sa.l.ui.resize();
sa.l.visualizeTracerData();
},
btnShowPHP : function () {
jQuery('#saLucidLog_page_log').fadeOut(500);
jQuery('#saLucidLog_page_javascript').fadeOut(500);
setTimeout (sa.m.traceFunction(function() {
jQuery('#saLucidLog_page_php').css({display:'none',visibility:'visible'}).fadeIn(500);
}), 510);
},
btnShowJavascript : function () {
jQuery('#saLucidLog_page_log').fadeOut(500);
jQuery('#saLucidLog_page_php').fadeOut(500);
setTimeout (sa.m.traceFunction(function() {
jQuery('#saLucidLog_page_javascript').css({display:'none',visibility:'visible'}).fadeIn(500);
}), 510);
},
btnShowLog : function () {
jQuery('#saLucidLog_page_javascript').fadeOut(500);
jQuery('#saLucidLog_page_php').fadeOut(500);
setTimeout (sa.m.traceFunction(function() {
jQuery('#saLucidLog_page_log').html('<div id="saLucidLog_log" style="width:100%;height:100%;"></div>');
hm (sa.settings.log, 'Console.log', { htmlID : 'saLucidLog_log', opacity :0.65 });
jQuery('#saLucidLog_page_log').css({display:'none',visibility:'visible'}).fadeIn(500);
}), 510);
}
} // sa.l.ui.click
}, // sa.l.ui
tools : {
setupDragAndDrop_forTopBorder : function () {
// ripped with thanks from http://jsfiddle.net/gaby/Bek9L/186/ ;
var i = 0;
var dragging = false;
jQuery('#saLucidLog_dragBar').mousedown(function(e){
e.preventDefault();
dragging = true;
var main = jQuery('#saLucidLog');
var ghostbar = jQuery('<div>', {
id:'ghostbar',
css: {
position:'absolute',
background : 'black',
opacity : 0.7,
width: main.outerWidth(),
height : 3,
zIndex : 99999,
top: main.offset().top,
left: main.offset().left
}
}).appendTo('body');
jQuery(window).mousemove(function(e){
ghostbar.css("top",e.pageY+2);
jQuery('#saLucidLog').css("height", jQuery(window).height()- e.pageY);
sa.l.ui.resize();
});
if (document.getElementById ('iframe-content'))
jQuery(document.getElementById ('iframe-content').contentWindow).mousemove(function(e){
ghostbar.css("top",jQuery('#iframe-content', window.parent.document).offset().top + e.pageY+2);
jQuery('#saLucidLog').css("height", jQuery(window).height()- ghostbar.css("top").replace('px',''));
sa.l.ui.resize();
});
});
| if (document.getElementById ('iframe-content'))
jQuery(document.getElementById ('iframe-content').contentWindow).unbind('mousemove');
dragging = false;
}
});
}
} // sa.l.tools
}; |
jQuery(window).mouseup(function(e){
if (dragging) {
jQuery('#ghostbar').remove();
jQuery(window).unbind('mousemove');
| random_line_split |
lucidLog-1.0.0.source.js | seductiveapps.lucidLog = sa.l = {
about : {
whatsThis : 'seductiveapps.lucidLog = sa.l = A better console log for web applications.',
copyright : '(c) (r) 2013-2014 by [the owner of seductiveapps.com] <[email protected]>',
license : 'http://seductiveapps.com/seductiveapps/license.txt',
noWarranty : 'NO WARRANTY EXPRESSED OR IMPLIED. USE ONLY AT YOUR OWN RISK.',
version : '1.0.0',
firstReleased : '2013 January 08',
latestUpdate : '2014 February 5, 10:34 CET',
downloadURL : 'http://seductiveapps.com'
},
globals : {
available : navigator.userAgent.match(/Chrome/),
hideShowSpeed : 777,
corners : {
contentBackground : 'round',
itemBackground : 'round'
}
},
options : {}, // holds the definition of the desktop
settings : {}, // holds any other settings
data : {raw:[]},
init : function () {
return false;
var html =
'<div id="saLucidLog" style="position:absolute;z-index:41000000;width:100%;height:20%;bottom:-21%;opacity:0.01">'
+'<div id="saLucidLog_dragBar" style="position:absolute;width:100%;top:-3px;height:3px;cursor:n-resize;z-index:41000110"> </div>'
+'<div id="saLucidLog_background" class="fhl_content_background" style="position:absolute;z-index:41000010">'
+'<img src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/background.jpg" class="fhl_content_background" style="position:absolute;width:100%;height:100%;"/>'
+'</div>'
+'<img id="saLucidLog_btnRefresh" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/refresh.png" style="position:absolute;top:2px;right:30px;width:20px;z-index:41000200"/>'
+'<img id="saLucidLog_btnHide" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/close.png" style="position:absolute;top:2px;right:5px;width:20px;z-index:41000200"/>'
+'<div id="saLucidLog_btnShowPHP" class="vividButton vividTheme__menu_001" style="position:absolute;left:10px;z-index:41000300"><a href="javascript:sa.l.ui.click.btnShowPHP();">PHP</a></div>'
+'<div id="saLucidLog_btnShowJavascript" class="vividButton vividTheme__menu_001" style="position:absolute;left:250px;z-index:41000300"><a href="javascript:sa.l.ui.click.btnShowJavascript();">Javascript</a></div>'
+'<div id="saLucidLog_btnShowLog" class="vividButton vividTheme__menu_001" style="position:absolute;left:490px;z-index:41000300"><a href="javascript:sa.l.ui.click.btnShowLog();">Console.log</a></div>'
+'<div id="saLucidLog_overlay1" class="fhl_overlay" style="position:absolute;background:black;z-index:41000013;opacity:0.05;filter:Alpha(opacity=5);"> </div>'
+'<div id="saLucidLog_overlay2" style="position:absolute;background:black;z-index:41000014;opacity:0.05;filter:Alpha(opacity=5);"> </div>'
+'<div id="saLucidLog_overlay3" style="position:absolute;background:black;z-index:41000015;opacity:0.05;filter:Alpha(opacity=5);"> </div>'
+'<div id="saLucidLog_content_background1" class="fhl_content_background" style="position:absolute;z-index:41000017;width:100%;height:100%;background:black;opacity:0.15;filter:Alpha(opacity=15);"> </div>'
+'<div id="saLucidLog_content_background2" class="fhl_content_background" style="position:absolute;z-index:41000018;width:100%;height:100%;background:black;opacity:0.18;filter:Alpha(opacity=18);"> </div>'
+'<div id="saLucidLog_content_background3" class="fhl_content_background" style="position:absolute;z-index:41000019;width:100%;height:100%;background:black;opacity:0.20;filter:Alpha(opacity=20);"> </div>'
+'<div id="saLucidLog_content_holder" class="vividScrollpane vividTheme__scroll_black" style="position:absolute;z-index:42000000;">'
+'<div id="saLucidLog_content" style="position:absolute;z-index:50000000;">'
+'<div id="saLucidLog_page_php" class="saLucidLogTabpage" style="position:absolute;width:100%;height:100%;">'
+'</div>'
+'<div id="saLucidLog_page_javascript" class="saLucidLogTabpage" style="position:absolute;width:100%;height:100%;visibility:hidden;">'
+'<div id="saLucidLog_hm_javascript" style="width:100%;height:100%;opacity:0.7"> </div>'
+'</div>'
+'<div id="saLucidLog_page_log" class="saLucidLogTabpage vividScrollpane vividTheme__scroll_black" style="position:absolute;width:100%;height:100%;visibility:hidden;">'
//+ sa.settings.log.entries.join ('<br/>')
//+'<div id="saLucidLog_hm_log" style="width:100%;height:100%;opacity:0.7"> </div>'
+'</div>'
+'</div>'
+'</div>'
+'</div>'
+'</div>'
//+'<img id="saLucidLog_btnRecord" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/btnRecord.png" style="position:absolute;bottom:4px;right:100px;height:21px;z-index:999999990;" title="show LucidLog (PHP + JS trace log)"/>'
+'<img id="saLucidLog_btnShow" class="saBtn_simpleImg" src="'+sa.m.globals.urls.os+'/'+'com/ui/tools/lucidLog/images/btnShow.png" style="position:absolute;width:89px;height:21px;bottom:3px;right:5px;z-index:999999990;" title="show LucidLog (PHP + JS trace log)"/>';
//+'<div id="saLucidLog_btnRecord" class="vividButton vividTheme__playpause_001" style="position:absolute;bottom:3px;right:3px;width:50px;height:50px;" onclick="sa.tracer.disabled=false;" title="record with sa.tracer"> </div>';
jQuery('body').append (html);
if (true)
//setTimeout (sa.m.traceFunction (function () {
sa.vcc.init (jQuery('#saLucidLog')[0], sa.m.traceFunction(function() {sa.l.componentFullyInitialized();}));
//}), 2000);
else {
sa.l.componentFullyInitialized();
};
},
componentFullyInitialized : function(){
jQuery('.fhl_overlay, .fhl_content_background').css ({borderRadius:'.5em'});
jQuery('#saLucidLog').css ({
bottom : 0,
left : 3,
height : ((jQuery(window).height() / 10 ) * 4.5),
width : (jQuery(window).width() - 6)
});
jQuery('#saLucidLog_btnRefresh').click (sa.l.ui.click.btnRefresh);
jQuery('#saLucidLog_btnHide').click (sa.l.ui.click.btnHide);
jQuery('#saLucidLog_btnShow').click (sa.l.ui.click.btnShow);
jQuery(window).resize (sa.l.ui.resize);
sa.l.tools.setupDragAndDrop_forTopBorder();
sa.l.ui.resize();
sa.l.php.initialize();
//sa.l.ui.hide();
jQuery('#saLucidLog').css({opacity:1,display:'none'});
},
log : function () {
if (typeof sa.tracer!=='undefined') {
var ua = sa.tracer.findUA(arguments);
if (ua && ua.pointer) {
var logIdx = ua.pointer.logMessages.length;
//debugger;
ua.pointer.logMessages[logIdx] = arguments;
}// else debugger;
}
},
visualizeTracerData : function () {
var r = {};
for (var uaIdx=0; uaIdx<sa.tracer.traced.length; uaIdx++) {
var ua = sa.tracer.userActions[uaIdx];
delete ua.pointer;
delete ua.stackLevel;
delete ua.callIdx;
delete ua.callJSON;
ua.timings.startTime = '' + ua.timings.startTime;
var uaJSON = sa.json.encode (ua);
r[uaJSON] = sa.tracer.traced[uaIdx];//sa.l.cleanupTracerData (sa.tracer.traced[uaIdx]);
};
hm (r, 'sa.tracer dump', { htmlID : 'saLucidLog_hm_javascript', opacity : 0.7 });
},
cleanupTracerData : function (ua) {
if (ua.logMessages) {
var
lm = ua.logMessages,
lmn = {};
for (var i=0; i<lm.length; i++) | ;
ua.logMessages = lmn;
};
if (ua.calls) {
for (var i=0; i<ua.calls.length; i++) {
/*
if (!sa.l.settings.gottaCleanup) sa.l.settings.gottaCleanup = [];
sa.l.settings.gottaCleanup[sa.l.settings.gottaCleanup.length] = {
ua : ua.calls[i]
};
*/
sa.l.cleanupTracerData (ua.calls[i]);
};
// setTimeout (sa.l.processGottaCleanup, 100);
};
return ua;
},
processGottaCleanup : function () {
var
gt = sa.l.settings.gottaCleanup.shift(),
count = 0;
while (gt && count < 100) {
sa.l.cleanupTracerData (gt.ua);
gt = sa.l.settings.gottaCleanup.shift();
count++;
};
if (sa.l.settings.gottaCleanup.length>0) {
setTimeout (function () {
sa.tracer.processGottaTrace();
}, 10);
}
},
ui : {
resize : function () {
jQuery('#saLucidLog').css({width:jQuery(window).width()-6});
var h = jQuery('#saLucidLog').height();
var w = jQuery('#saLucidLog').width();
jQuery('#saLucidLog_background').css ({ height : h, width : w });
jQuery('#saLucidLog_overlay1').css ({ top : 1, left : 1, height : h-2, width : w-2 });
jQuery('#saLucidLog_overlay2').css ({ top : 2, left : 2, height : h-4, width : w-4 });
jQuery('#saLucidLog_overlay3').css ({ top : 3, left : 3, height : h-6, width : w-6 });
jQuery('#saLucidLog_content_background1').css ({ top : 30, left : 4, height : h - 34, width : w-8 });
jQuery('#saLucidLog_content_background2').css ({ top : 31, left : 5, height : h - 36, width : w-10 });
jQuery('#saLucidLog_content_background3').css ({ top : 32, left : 6, height : h - 38, width : w-12 });
if (jQuery('#saLucidLog_content_holder__container').length==1) {
jQuery('#saLucidLog_content_holder__container').css ({ top : 30, left : 4, height : h - 54, width : w-8 });
} else {
jQuery('#saLucidLog_content_holder').css ({ top : 30, left : 4, height : h - 34, width : w-8 });
};
jQuery('#saLucidLog_content').css ({ top : 3, left : 3, height : jQuery('#saLucidLog_content_holder').height()-6, width : jQuery('#saLucidLog_content_holder').width()-6 });
var jQueryhm = jQuery('#saLucidLog_hm_javascript');
if (
jQueryhm[0]
&& jQueryhm[0].children[0]
&& jQueryhm[0].children[0].children[2]
) sa.sp.containerSizeChanged(jQuery('#saLucidLog_hm_javascript')[0].children[0].children[2], true);
sa.sp.containerSizeChanged (jQuery('#saLucidLog_content_holder')[0], true);
jQuery('.fhl_item_content').each (function (idx,el) {
var id = el.id.replace ('fhl_', '').replace('_content','');
jQuery('#fhl_'+id+'_bg').css ({width : jQuery(el).width()+10});
});
jQuery('.tabsContainer').each ( sa.m.traceFunction ( function (idx) {
jQuery(this).css ({
height : jQuery(this).parent().height() - jQuery(this).prev().height(),
});
}));
sa.l.php.tools.resizeWindow(sa.l.php.cmd.cmdID);
},
hide : function (callback) {
jQuery('#saLucidLog').fadeOut (sa.l.globals.hideShowSpeed, callback);
},
show : function (callback) {
jQuery('#saLucidLog').fadeIn (sa.l.globals.hideShowSpeed, callback);
},
toggleShowHide : function() {
if (jQuery('#saLucidLog').css('display')=='none') {
sa.l.ui.show();
} else {
sa.l.ui.hide();
}
},
click : {
// (all click handlers for this web component) :
btnRefresh : function () {
sa.l.redrawRawLog();
},
btnHide : function () {
sa.l.ui.hide();
},
btnShow : function () {
sa.tracer.disabled = true;
sa.l.ui.show();
sa.l.ui.resize();
sa.l.visualizeTracerData();
},
btnShowPHP : function () {
jQuery('#saLucidLog_page_log').fadeOut(500);
jQuery('#saLucidLog_page_javascript').fadeOut(500);
setTimeout (sa.m.traceFunction(function() {
jQuery('#saLucidLog_page_php').css({display:'none',visibility:'visible'}).fadeIn(500);
}), 510);
},
btnShowJavascript : function () {
jQuery('#saLucidLog_page_log').fadeOut(500);
jQuery('#saLucidLog_page_php').fadeOut(500);
setTimeout (sa.m.traceFunction(function() {
jQuery('#saLucidLog_page_javascript').css({display:'none',visibility:'visible'}).fadeIn(500);
}), 510);
},
btnShowLog : function () {
jQuery('#saLucidLog_page_javascript').fadeOut(500);
jQuery('#saLucidLog_page_php').fadeOut(500);
setTimeout (sa.m.traceFunction(function() {
jQuery('#saLucidLog_page_log').html('<div id="saLucidLog_log" style="width:100%;height:100%;"></div>');
hm (sa.settings.log, 'Console.log', { htmlID : 'saLucidLog_log', opacity :0.65 });
jQuery('#saLucidLog_page_log').css({display:'none',visibility:'visible'}).fadeIn(500);
}), 510);
}
} // sa.l.ui.click
}, // sa.l.ui
tools : {
setupDragAndDrop_forTopBorder : function () {
// ripped with thanks from http://jsfiddle.net/gaby/Bek9L/186/ ;
var i = 0;
var dragging = false;
jQuery('#saLucidLog_dragBar').mousedown(function(e){
e.preventDefault();
dragging = true;
var main = jQuery('#saLucidLog');
var ghostbar = jQuery('<div>', {
id:'ghostbar',
css: {
position:'absolute',
background : 'black',
opacity : 0.7,
width: main.outerWidth(),
height : 3,
zIndex : 99999,
top: main.offset().top,
left: main.offset().left
}
}).appendTo('body');
jQuery(window).mousemove(function(e){
ghostbar.css("top",e.pageY+2);
jQuery('#saLucidLog').css("height", jQuery(window).height()- e.pageY);
sa.l.ui.resize();
});
if (document.getElementById ('iframe-content'))
jQuery(document.getElementById ('iframe-content').contentWindow).mousemove(function(e){
ghostbar.css("top",jQuery('#iframe-content', window.parent.document).offset().top + e.pageY+2);
jQuery('#saLucidLog').css("height", jQuery(window).height()- ghostbar.css("top").replace('px',''));
sa.l.ui.resize();
});
});
jQuery(window).mouseup(function(e){
if (dragging) {
jQuery('#ghostbar').remove();
jQuery(window).unbind('mousemove');
if (document.getElementById ('iframe-content'))
jQuery(document.getElementById ('iframe-content').contentWindow).unbind('mousemove');
dragging = false;
}
});
}
} // sa.l.tools
};
| {
lmn[lm[i][0]] = lm[i][1][1];
} | conditional_block |
index.ts | /** Stores various indices on all files in the vault to make dataview generation fast. */
import { MetadataCache, Vault, TFile } from 'obsidian';
import { Task } from 'src/tasks';
import * as Tasks from 'src/tasks';
/** Aggregate index which has several sub-indices and will initialize all of them. */
export class FullIndex {
/** How often the reload queue is checked for reloads. */
static RELOAD_INTERVAL = 1_000;
/** Generate a full index from the given vault. */
static async generate(vault: Vault, cache: MetadataCache): Promise<FullIndex> {
// TODO: Probably need to do this on a worker thread to actually get any parallelism out of it.
let tags = TagIndex.generate(vault, cache);
let prefix = PrefixIndex.generate(vault);
return Promise.all([tags, prefix]).then(value => {
return new FullIndex(vault, cache, value[0], value[1]);
});
}
// Handle for the interval which does the reloading.
reloadHandle: number;
// Files which are currently in queue to be reloaded.
reloadQueue: TFile[];
// Set of paths being reloaded, used for debouncing.
reloadSet: Set<string>;
// Custom extra reload handlers.
reloadHandlers: ((f: TFile) => Promise<void>)[];
// The set of indices which we update.
tag: TagIndex;
prefix: PrefixIndex;
// Other useful things to hold onto.
vault: Vault;
metadataCache: MetadataCache;
constructor(vault: Vault, metadataCache: MetadataCache, tag: TagIndex, prefix: PrefixIndex) {
this.vault = vault;
this.metadataCache = metadataCache;
this.tag = tag;
this.prefix = prefix;
this.reloadQueue = [];
this.reloadSet = new Set();
this.reloadHandlers = [];
// Background task which regularly checks for reloads.
this.reloadHandle = window.setInterval(() => this.reloadInternal(), FullIndex.RELOAD_INTERVAL);
// TODO: Metadata cache is not updated on modify, but on metadatacache resolve.
vault.on("modify", file => {
if (file instanceof TFile) {
this.queueReload(file);
}
});
}
/** Queue the file for reloading; several fast reloads in a row will be debounced. */
public queueReload(file: TFile) {
if (this.reloadSet.has(file.path)) return;
this.reloadSet.add(file.path);
this.reloadQueue.push(file);
}
public on(event: 'reload', handler: (a: TFile) => Promise<void>) {
this.reloadHandlers.push(handler);
}
/** Utility method which regularly checks the reload queue. */
private async reloadInternal() {
let copy = Array.from(this.reloadQueue);
this.reloadSet.clear();
this.reloadQueue = [];
for (let file of copy) {
await Promise.all([this.tag.reloadFile(file)].concat(this.reloadHandlers.map(f => f(file))));
}
}
}
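// Illustrative usage sketch (not part of the original plugin code): building the
// aggregate index and querying one of its sub-indices. The vault and cache would
// normally come from an Obsidian Plugin instance (e.g. this.app.vault); the tag
// "#project" is a placeholder value.
async function exampleFullIndexUsage(vault: Vault, cache: MetadataCache): Promise<Set<string>> {
    // Build all sub-indices up front, then query by tag.
    const index = await FullIndex.generate(vault, cache);
    // Paths of files tagged #project, including nested tags such as #project/alpha.
    return index.tag.get("#project");
}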
/** Index which efficiently allows querying by tags / subtags. */
export class TagIndex {
/** Parse all subtags out of the given tag. I.e., #hello/i/am would yield [#hello/i/am, #hello/i, #hello]. */
public static parseSubtags(tag: string): string[] {
let result = [tag];
while (tag.contains("/")) {
tag = tag.substring(0, tag.lastIndexOf("/"));
result.push(tag);
}
return result;
}
/** Parse all of the tags for the given file. */
public static parseTags(cache: MetadataCache, path: string): Set<string> {
let fileCache = cache.getCache(path);
if (!fileCache) return new Set<string>();
let allTags = new Set<string>();
// Parse tags from in the file contents.
let tagCache = fileCache.tags;
if (tagCache) {
for (let tag of tagCache) {
if (!tag.tag || !(typeof tag.tag == 'string')) continue;
this.parseSubtags(tag.tag).forEach(t => allTags.add(t));
}
}
// Parse tags from YAML frontmatter.
let frontCache = fileCache.frontmatter;
// Search for the 'tags' field, since it may have weird capitalization ('tags', 'Tags', 'tag', ...).
let tagsName: string | undefined = undefined;
for (let key of Object.keys(frontCache ?? {})) {
if (key.toLowerCase() == "tags" || key.toLowerCase() == "tag")
tagsName = key;
}
if (frontCache && tagsName && frontCache[tagsName]) {
if (Array.isArray(frontCache[tagsName])) {
for (let tag of frontCache[tagsName]) {
if (!(typeof tag == 'string')) continue;
if (!tag.startsWith("#")) tag = "#" + tag;
this.parseSubtags(tag).forEach(t => allTags.add(t));
}
} else if (typeof frontCache[tagsName] === 'string') {
// Assume tags is a comma-separated list.
let tags = (frontCache[tagsName] as string).split(",").map(elem => {
elem = elem.trim();
if (!elem.startsWith("#")) elem = "#" + elem;
return elem;
});
for (let tag of tags) {
this.parseSubtags(tag).forEach(t => allTags.add(t));
}
}
}
return allTags;
}
public static async generate(vault: Vault, cache: MetadataCache): Promise<TagIndex> {
let initialMap = new Map<string, Set<string>>();
let initialInvMap = new Map<string, Set<string>>();
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
let allTags = TagIndex.parseTags(cache, file.path);
initialInvMap.set(file.path, allTags);
for (let subtag of allTags) {
if (!initialMap.has(subtag)) initialMap.set(subtag, new Set<string>());
initialMap.get(subtag)?.add(file.path);
}
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed ${initialMap.size} tags in ${initialInvMap.size} markdown files (${totalTimeMs / 1000.0}s)`);
return new TagIndex(vault, cache, initialMap, initialInvMap);
}
/** Maps tags -> set of files containing that exact tag. */
map: Map<string, Set<string>>;
/** Cached inverse map; maps file -> tags it was last known to contain. */
invMap: Map<string, Set<string>>;
vault: Vault;
cache: MetadataCache;
constructor(vault: Vault, metadataCache: MetadataCache,
map: Map<string, Set<string>>, invMap: Map<string, Set<string>>) {
this.vault = vault;
this.cache = metadataCache;
this.map = map;
this.invMap = invMap;
}
/** Returns all files which have the given tag. */
public get(tag: string): Set<string> {
let result = this.map.get(tag);
if (result) {
return new Set(result);
} else {
return new Set();
}
}
/** Returns all tags the given file has. */
public getInverse(file: string): Set<string> {
let result = this.invMap.get(file);
if (result) {
return new Set(result);
} else {
return new Set();
}
}
async reloadFile(file: TFile) {
this.clearFile(file.path);
let allTags = TagIndex.parseTags(this.cache, file.path);
for (let subtag of allTags) {
if (!this.map.has(subtag)) this.map.set(subtag, new Set<string>());
this.map.get(subtag)?.add(file.path);
}
this.invMap.set(file.path, allTags);
}
/** Clears all tags for the given file so they can be re-added. */
private clearFile(path: string) {
let oldTags = this.invMap.get(path);
if (!oldTags) return;
this.invMap.delete(path);
for (let tag of oldTags) {
this.map.get(tag)?.delete(path);
}
}
}
/** A node in the prefix tree. */
export class PrefixIndexNode {
// TODO: Instead of only storing file paths at the leaf, consider storing them at every level,
// since this will make for faster deletes and gathers in exchange for slightly slower adds and more memory usage.
// since we are optimizing for gather, and file paths tend to be shallow, this should be ok.
files: Set<string>;
element: string;
totalCount: number;
children: Map<string, PrefixIndexNode>;
constructor(element: string) {
this.element = element;
this.files = new Set();
this.totalCount = 0;
this.children = new Map();
}
public static add(root: PrefixIndexNode, path: string) {
let parts = path.split("/");
let node = root;
for (let index = 0; index < parts.length - 1; index++) |
node.totalCount += 1;
node.files.add(path);
}
public static remove(root: PrefixIndexNode, path: string) {
let parts = path.split("/");
let node = root;
let nodes = [];
for (let index = 0; index < parts.length - 1; index++) {
if (!node.children.has(parts[index])) return;
nodes.push(node);
node = node.children.get(parts[index]) as PrefixIndexNode;
}
if (!node.files.has(path)) return;
node.files.delete(path);
node.totalCount -= 1;
for (let p of nodes) p.totalCount -= 1;
}
public static find(root: PrefixIndexNode, prefix: string): PrefixIndexNode | null {
if (prefix.length == 0 || prefix == '/') return root;
let parts = prefix.split("/");
let node = root;
for (let index = 0; index < parts.length; index++) {
if (!node.children.has(parts[index])) return null;
node = node.children.get(parts[index]) as PrefixIndexNode;
}
return node;
}
public static gather(root: PrefixIndexNode): Set<string> {
let result = new Set<string>();
PrefixIndexNode.gatherRec(root, result);
return result;
}
static gatherRec(root: PrefixIndexNode, output: Set<string>) {
for (let file of root.files) output.add(file);
for (let child of root.children.values()) this.gatherRec(child, output);
}
}
/** Indexes files by their full prefix - essentially a simple prefix tree. */
export class PrefixIndex {
public static async generate(vault: Vault): Promise<PrefixIndex> {
let root = new PrefixIndexNode("");
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
PrefixIndexNode.add(root, file.path);
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed all file prefixes (${totalTimeMs / 1000.0}s)`);
return Promise.resolve(new PrefixIndex(vault, root));
}
root: PrefixIndexNode;
vault: Vault;
constructor(vault: Vault, root: PrefixIndexNode) {
this.vault = vault;
this.root = root;
// TODO: I'm not sure if there is an event for all files in a folder, or just the folder.
// I'm assuming the former naively for now until I inevitably fix it.
this.vault.on("delete", file => {
PrefixIndexNode.remove(this.root, file.path);
});
this.vault.on("create", file => {
PrefixIndexNode.add(this.root, file.path);
});
this.vault.on("rename", (file, old) => {
PrefixIndexNode.remove(this.root, old);
PrefixIndexNode.add(this.root, file.path);
});
}
public get(prefix: string): Set<string> {
let node = PrefixIndexNode.find(this.root, prefix);
if (node == null || node == undefined) return new Set();
return PrefixIndexNode.gather(node);
}
}
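// Illustrative usage sketch (not part of the original source): building a
// standalone PrefixIndex and listing every indexed file under a folder prefix.
// The folder name "daily/notes" is a placeholder.
async function examplePrefixQuery(vault: Vault): Promise<void> {
    const prefixIndex = await PrefixIndex.generate(vault);
    for (const path of prefixIndex.get("daily/notes")) {
        console.log(`under daily/notes: ${path}`);
    }
}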
/** Caches tasks for each file to avoid repeated re-loading. */
export class TaskCache {
/** Create a task cache for the given vault. */
static async generate(vault: Vault): Promise<TaskCache> {
let initialCache: Record<string, Task[]> = {};
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
let tasks = await Tasks.findTasksInFile(vault, file);
if (tasks.length == 0) continue;
initialCache[file.path] = tasks;
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed tasks in ${Object.keys(initialCache).length} markdown files (${totalTimeMs / 1000.0}s)`);
return new TaskCache(vault, initialCache);
}
cache: Record<string, Task[]>;
vault: Vault;
constructor(vault: Vault, cache: Record<string, Task[]>) {
this.vault = vault;
this.cache = cache;
}
/** Get the tasks associated with a file path. */
public get(file: string): Task[] | null {
let result = this.cache[file];
if (result === undefined) return null;
else return result;
}
/** Return a map of all files -> tasks in that file. */
public all(): Record<string, Task[]> {
// TODO: Defensive copy.
return this.cache;
}
async reloadFile(file: TFile) {
let tasks = await Tasks.findTasksInFile(this.vault, file);
if (tasks.length == 0) {
delete this.cache[file.path];
} else {
this.cache[file.path] = tasks;
}
}
} | {
if (!node.children.has(parts[index])) node.children.set(parts[index], new PrefixIndexNode(parts[index]));
node.totalCount += 1;
node = node.children.get(parts[index]) as PrefixIndexNode;
} | conditional_block |
index.ts | /** Stores various indices on all files in the vault to make dataview generation fast. */
import { MetadataCache, Vault, TFile } from 'obsidian';
import { Task } from 'src/tasks';
import * as Tasks from 'src/tasks';
/** Aggregate index which has several sub-indices and will initialize all of them. */
export class FullIndex {
/** How often the reload queue is checked for reloads. */
static RELOAD_INTERVAL = 1_000;
/** Generate a full index from the given vault. */
static async generate(vault: Vault, cache: MetadataCache): Promise<FullIndex> {
// TODO: Probably need to do this on a worker thread to actually get any parallelism out of it.
let tags = TagIndex.generate(vault, cache);
let prefix = PrefixIndex.generate(vault);
return Promise.all([tags, prefix]).then(value => {
return new FullIndex(vault, cache, value[0], value[1]);
});
}
// Handle for the interval which does the reloading.
reloadHandle: number;
// Files which are currently in queue to be reloaded.
reloadQueue: TFile[];
// Set of paths being reloaded, used for debouncing.
reloadSet: Set<string>;
// Custom extra reload handlers.
reloadHandlers: ((f: TFile) => Promise<void>)[];
// The set of indices which we update.
tag: TagIndex;
prefix: PrefixIndex;
// Other useful things to hold onto.
vault: Vault;
metadataCache: MetadataCache;
constructor(vault: Vault, metadataCache: MetadataCache, tag: TagIndex, prefix: PrefixIndex) {
this.vault = vault;
this.metadataCache = metadataCache;
this.tag = tag;
this.prefix = prefix;
this.reloadQueue = [];
this.reloadSet = new Set();
this.reloadHandlers = [];
// Background task which regularly checks for reloads.
this.reloadHandle = window.setInterval(() => this.reloadInternal(), FullIndex.RELOAD_INTERVAL);
// TODO: Metadata cache is not updated on modify, but on metadatacache resolve.
vault.on("modify", file => {
if (file instanceof TFile) {
this.queueReload(file);
}
});
}
/** Queue the file for reloading; several fast reloads in a row will be debounced. */
public queueReload(file: TFile) {
if (this.reloadSet.has(file.path)) return;
this.reloadSet.add(file.path);
this.reloadQueue.push(file);
}
public on(event: 'reload', handler: (a: TFile) => Promise<void>) {
this.reloadHandlers.push(handler);
}
/** Utility method which regularly checks the reload queue. */
private async reloadInternal() {
let copy = Array.from(this.reloadQueue);
this.reloadSet.clear();
this.reloadQueue = [];
for (let file of copy) {
await Promise.all([this.tag.reloadFile(file)].concat(this.reloadHandlers.map(f => f(file))));
}
}
}
/** Index which efficiently allows querying by tags / subtags. */
export class TagIndex {
/** Parse all subtags out of the given tag. I.e., #hello/i/am would yield [#hello/i/am, #hello/i, #hello]. */
public static parseSubtags(tag: string): string[] {
let result = [tag];
while (tag.contains("/")) {
tag = tag.substring(0, tag.lastIndexOf("/"));
result.push(tag);
}
return result;
}
/** Parse all of the tags for the given file. */
public static parseTags(cache: MetadataCache, path: string): Set<string> {
let fileCache = cache.getCache(path);
if (!fileCache) return new Set<string>();
let allTags = new Set<string>();
// Parse tags from in the file contents.
let tagCache = fileCache.tags;
if (tagCache) {
for (let tag of tagCache) {
if (!tag.tag || !(typeof tag.tag == 'string')) continue;
this.parseSubtags(tag.tag).forEach(t => allTags.add(t));
}
}
// Parse tags from YAML frontmatter.
let frontCache = fileCache.frontmatter;
// Search for the 'tags' field, since it may have weird capitalization ('tags', 'Tags', 'tag', ...).
let tagsName: string | undefined = undefined;
for (let key of Object.keys(frontCache ?? {})) {
if (key.toLowerCase() == "tags" || key.toLowerCase() == "tag")
tagsName = key;
}
if (frontCache && tagsName && frontCache[tagsName]) {
if (Array.isArray(frontCache[tagsName])) {
for (let tag of frontCache[tagsName]) {
if (!(typeof tag == 'string')) continue;
if (!tag.startsWith("#")) tag = "#" + tag;
this.parseSubtags(tag).forEach(t => allTags.add(t));
}
} else if (typeof frontCache[tagsName] === 'string') {
// Assume tags is a comma-separated list.
let tags = (frontCache[tagsName] as string).split(",").map(elem => {
elem = elem.trim();
if (!elem.startsWith("#")) elem = "#" + elem;
return elem;
});
for (let tag of tags) {
this.parseSubtags(tag).forEach(t => allTags.add(t));
}
}
}
return allTags;
}
public static async generate(vault: Vault, cache: MetadataCache): Promise<TagIndex> {
let initialMap = new Map<string, Set<string>>();
let initialInvMap = new Map<string, Set<string>>();
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
let allTags = TagIndex.parseTags(cache, file.path);
initialInvMap.set(file.path, allTags);
for (let subtag of allTags) {
if (!initialMap.has(subtag)) initialMap.set(subtag, new Set<string>());
initialMap.get(subtag)?.add(file.path);
}
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed ${initialMap.size} tags in ${initialInvMap.size} markdown files (${totalTimeMs / 1000.0}s)`);
return new TagIndex(vault, cache, initialMap, initialInvMap);
}
/** Maps tags -> set of files containing that exact tag. */
map: Map<string, Set<string>>;
/** Cached inverse map; maps file -> tags it was last known to contain. */
invMap: Map<string, Set<string>>;
vault: Vault;
cache: MetadataCache;
constructor(vault: Vault, metadataCache: MetadataCache,
map: Map<string, Set<string>>, invMap: Map<string, Set<string>>) {
this.vault = vault;
this.cache = metadataCache;
this.map = map;
this.invMap = invMap;
}
/** Returns all files which have the given tag. */
public get(tag: string): Set<string> {
let result = this.map.get(tag);
if (result) {
return new Set(result);
} else {
return new Set();
}
}
/** Returns all tags the given file has. */
public getInverse(file: string): Set<string> {
let result = this.invMap.get(file);
if (result) {
return new Set(result);
} else {
return new Set();
}
}
async reloadFile(file: TFile) {
this.clearFile(file.path);
let allTags = TagIndex.parseTags(this.cache, file.path);
for (let subtag of allTags) {
if (!this.map.has(subtag)) this.map.set(subtag, new Set<string>());
this.map.get(subtag)?.add(file.path);
}
this.invMap.set(file.path, allTags);
}
/** Clears all tags for the given file so they can be re-added. */
private clearFile(path: string) {
let oldTags = this.invMap.get(path);
if (!oldTags) return;
this.invMap.delete(path);
for (let tag of oldTags) {
this.map.get(tag)?.delete(path);
}
}
}
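// Illustrative usage sketch (not part of the original source): generating a
// standalone TagIndex and inspecting which tags a single note carries. The file
// path "notes/example.md" is a placeholder.
async function exampleTagsForFile(vault: Vault, cache: MetadataCache): Promise<void> {
    const tagIndex = await TagIndex.generate(vault, cache);
    for (const tag of tagIndex.getInverse("notes/example.md")) {
        console.log(`notes/example.md is tagged ${tag}`);
    }
}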
/** A node in the prefix tree. */
export class | {
// TODO: Instead of only storing file paths at the leaf, consider storing them at every level,
// since this will make for faster deletes and gathers in exchange for slightly slower adds and more memory usage.
// since we are optimizing for gather, and file paths tend to be shallow, this should be ok.
files: Set<string>;
element: string;
totalCount: number;
children: Map<string, PrefixIndexNode>;
constructor(element: string) {
this.element = element;
this.files = new Set();
this.totalCount = 0;
this.children = new Map();
}
public static add(root: PrefixIndexNode, path: string) {
let parts = path.split("/");
let node = root;
for (let index = 0; index < parts.length - 1; index++) {
if (!node.children.has(parts[index])) node.children.set(parts[index], new PrefixIndexNode(parts[index]));
node.totalCount += 1;
node = node.children.get(parts[index]) as PrefixIndexNode;
}
node.totalCount += 1;
node.files.add(path);
}
public static remove(root: PrefixIndexNode, path: string) {
let parts = path.split("/");
let node = root;
let nodes = [];
for (let index = 0; index < parts.length - 1; index++) {
if (!node.children.has(parts[index])) return;
nodes.push(node);
node = node.children.get(parts[index]) as PrefixIndexNode;
}
if (!node.files.has(path)) return;
node.files.delete(path);
node.totalCount -= 1;
for (let p of nodes) p.totalCount -= 1;
}
public static find(root: PrefixIndexNode, prefix: string): PrefixIndexNode | null {
if (prefix.length == 0 || prefix == '/') return root;
let parts = prefix.split("/");
let node = root;
for (let index = 0; index < parts.length; index++) {
if (!node.children.has(parts[index])) return null;
node = node.children.get(parts[index]) as PrefixIndexNode;
}
return node;
}
public static gather(root: PrefixIndexNode): Set<string> {
let result = new Set<string>();
PrefixIndexNode.gatherRec(root, result);
return result;
}
static gatherRec(root: PrefixIndexNode, output: Set<string>) {
for (let file of root.files) output.add(file);
for (let child of root.children.values()) this.gatherRec(child, output);
}
}
/** Indexes files by their full prefix - essentially a simple prefix tree. */
export class PrefixIndex {
public static async generate(vault: Vault): Promise<PrefixIndex> {
let root = new PrefixIndexNode("");
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
PrefixIndexNode.add(root, file.path);
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed all file prefixes (${totalTimeMs / 1000.0}s)`);
return Promise.resolve(new PrefixIndex(vault, root));
}
root: PrefixIndexNode;
vault: Vault;
constructor(vault: Vault, root: PrefixIndexNode) {
this.vault = vault;
this.root = root;
// TODO: I'm not sure if there is an event for all files in a folder, or just the folder.
// I'm assuming the former naively for now until I inevitably fix it.
this.vault.on("delete", file => {
PrefixIndexNode.remove(this.root, file.path);
});
this.vault.on("create", file => {
PrefixIndexNode.add(this.root, file.path);
});
this.vault.on("rename", (file, old) => {
PrefixIndexNode.remove(this.root, old);
PrefixIndexNode.add(this.root, file.path);
});
}
public get(prefix: string): Set<string> {
let node = PrefixIndexNode.find(this.root, prefix);
if (node == null || node == undefined) return new Set();
return PrefixIndexNode.gather(node);
}
}
/** Caches tasks for each file to avoid repeated re-loading. */
export class TaskCache {
/** Create a task cache for the given vault. */
static async generate(vault: Vault): Promise<TaskCache> {
let initialCache: Record<string, Task[]> = {};
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
let tasks = await Tasks.findTasksInFile(vault, file);
if (tasks.length == 0) continue;
initialCache[file.path] = tasks;
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed tasks in ${Object.keys(initialCache).length} markdown files (${totalTimeMs / 1000.0}s)`);
return new TaskCache(vault, initialCache);
}
cache: Record<string, Task[]>;
vault: Vault;
constructor(vault: Vault, cache: Record<string, Task[]>) {
this.vault = vault;
this.cache = cache;
}
/** Get the tasks associated with a file path. */
public get(file: string): Task[] | null {
let result = this.cache[file];
if (result === undefined) return null;
else return result;
}
/** Return a map of all files -> tasks in that file. */
public all(): Record<string, Task[]> {
// TODO: Defensive copy.
return this.cache;
}
async reloadFile(file: TFile) {
let tasks = await Tasks.findTasksInFile(this.vault, file);
if (tasks.length == 0) {
delete this.cache[file.path];
} else {
this.cache[file.path] = tasks;
}
}
} | PrefixIndexNode | identifier_name |
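The PrefixIndexNode methods above are a plain path-segment trie: add() walks the folder parts of a path, find() resolves a prefix to a node, and gather() collects every file at or below it. A rough, self-contained Python sketch of the same behaviour follows; the class and function names are illustrative only and are not part of the plugin.

# Minimal, illustrative sketch of the path-prefix trie used above.
class PrefixNode:
    def __init__(self, element=""):
        self.element = element
        self.files = set()       # full paths stored at their folder node
        self.total_count = 0     # files at or below this node
        self.children = {}

def add(root, path):
    node = root
    for part in path.split("/")[:-1]:      # walk folder segments only
        node.total_count += 1
        node = node.children.setdefault(part, PrefixNode(part))
    node.total_count += 1
    node.files.add(path)

def find(root, prefix):
    if prefix in ("", "/"):
        return root
    node = root
    for part in prefix.split("/"):
        node = node.children.get(part)
        if node is None:
            return None
    return node

def gather(node):
    result = set(node.files)
    for child in node.children.values():
        result |= gather(child)
    return result

root = PrefixNode()
add(root, "notes/daily/2021-01-01.md")
add(root, "notes/projects/plan.md")
print(sorted(gather(find(root, "notes"))))   # both paths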
index.ts | /** Stores various indices on all files in the vault to make dataview generation fast. */
import { MetadataCache, Vault, TFile } from 'obsidian';
import { Task } from 'src/tasks';
import * as Tasks from 'src/tasks';
/** Aggregate index which has several sub-indices and will initialize all of them. */
export class FullIndex {
/** How often the reload queue is checked for reloads. */
static RELOAD_INTERVAL = 1_000;
/** Generate a full index from the given vault. */
static async generate(vault: Vault, cache: MetadataCache): Promise<FullIndex> {
// TODO: Probably need to do this on a worker thread to actually get any benefit.
let tags = TagIndex.generate(vault, cache);
let prefix = PrefixIndex.generate(vault);
return Promise.all([tags, prefix]).then(value => {
return new FullIndex(vault, cache, value[0], value[1]);
});
}
// Handle for the interval which does the reloading.
reloadHandle: number;
// Files which are currently in queue to be reloaded.
reloadQueue: TFile[];
// Set of paths being reloaded, used for debouncing.
reloadSet: Set<string>;
// Custom extra reload handlers.
reloadHandlers: ((f: TFile) => Promise<void>)[];
// The set of indices which we update.
tag: TagIndex;
prefix: PrefixIndex;
// Other useful things to hold onto.
vault: Vault;
metadataCache: MetadataCache;
constructor(vault: Vault, metadataCache: MetadataCache, tag: TagIndex, prefix: PrefixIndex) {
this.vault = vault;
this.metadataCache = metadataCache;
this.tag = tag;
this.prefix = prefix;
this.reloadQueue = [];
this.reloadSet = new Set();
this.reloadHandlers = [];
// Background task which regularly checks for reloads.
this.reloadHandle = window.setInterval(() => this.reloadInternal(), FullIndex.RELOAD_INTERVAL);
// TODO: Metadata cache is not updated on modify, but on metadatacache resolve.
vault.on("modify", file => {
if (file instanceof TFile) {
this.queueReload(file);
}
});
}
/** Queue the file for reloading; several fast reloads in a row will be debounced. */
public queueReload(file: TFile) {
if (this.reloadSet.has(file.path)) return;
this.reloadSet.add(file.path);
this.reloadQueue.push(file);
}
public on(event: 'reload', handler: (a: TFile) => Promise<void>) {
this.reloadHandlers.push(handler);
}
/** Utility method which regularly checks the reload queue. */
private async reloadInternal() {
let copy = Array.from(this.reloadQueue);
this.reloadSet.clear();
this.reloadQueue = [];
for (let file of copy) {
await Promise.all([this.tag.reloadFile(file)].concat(this.reloadHandlers.map(f => f(file))));
}
}
}
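The reload path above is a debounce: a queue plus a de-duplicating set, flushed on a fixed interval. A small Python sketch of that pattern, assuming a placeholder reload callback (none of these names come from the Obsidian API):

class ReloadQueue:
    def __init__(self, reload_fn):
        self.reload_fn = reload_fn   # called once per path at flush time
        self.queue = []              # paths waiting to be reloaded, in order
        self.seen = set()            # paths already queued (debounce)

    def request(self, path):
        if path in self.seen:        # several fast requests collapse into one
            return
        self.seen.add(path)
        self.queue.append(path)

    def flush(self):                 # run periodically, like reloadInternal()
        pending, self.queue, self.seen = self.queue, [], set()
        for path in pending:
            self.reload_fn(path)

q = ReloadQueue(lambda p: print("reloading", p))
q.request("a.md"); q.request("a.md"); q.request("b.md")
q.flush()                            # reloads a.md and b.md once each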
/** Index which efficiently allows querying by tags / subtags. */
export class TagIndex {
/** Parse all subtags out of the given tag. I.e., #hello/i/am would yield [#hello/i/am, #hello/i, #hello]. */
public static parseSubtags(tag: string): string[] {
let result = [tag];
while (tag.contains("/")) {
tag = tag.substring(0, tag.lastIndexOf("/"));
result.push(tag);
}
return result;
}
/** Parse all of the tags for the given file. */
public static parseTags(cache: MetadataCache, path: string): Set<string> {
let fileCache = cache.getCache(path);
if (!fileCache) return new Set<string>();
let allTags = new Set<string>();
// Parse tags from in the file contents.
let tagCache = fileCache.tags;
if (tagCache) {
for (let tag of tagCache) {
if (!tag.tag || !(typeof tag.tag == 'string')) continue;
this.parseSubtags(tag.tag).forEach(t => allTags.add(t));
}
}
// Parse tags from YAML frontmatter.
let frontCache = fileCache.frontmatter;
// Search for the 'tags' field, since it may have weird capitalization or be named 'tag'.
let tagsName: string | undefined = undefined;
for (let key of Object.keys(frontCache ?? {})) {
if (key.toLowerCase() == "tags" || key.toLowerCase() == "tag")
tagsName = key;
}
if (frontCache && tagsName && frontCache[tagsName]) {
if (Array.isArray(frontCache[tagsName])) {
for (let tag of frontCache[tagsName]) {
if (!(typeof tag == 'string')) continue;
if (!tag.startsWith("#")) tag = "#" + tag;
this.parseSubtags(tag).forEach(t => allTags.add(t));
}
} else if (typeof frontCache[tagsName] === 'string') {
// Assume tags is a comma-separated list.
let tags = (frontCache[tagsName] as string).split(",").map(elem => {
elem = elem.trim();
if (!elem.startsWith("#")) elem = "#" + elem;
return elem;
});
for (let tag of tags) {
this.parseSubtags(tag).forEach(t => allTags.add(t));
}
}
}
return allTags;
}
public static async generate(vault: Vault, cache: MetadataCache): Promise<TagIndex> {
let initialMap = new Map<string, Set<string>>();
let initialInvMap = new Map<string, Set<string>>();
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
let allTags = TagIndex.parseTags(cache, file.path);
initialInvMap.set(file.path, allTags);
for (let subtag of allTags) {
if (!initialMap.has(subtag)) initialMap.set(subtag, new Set<string>());
initialMap.get(subtag)?.add(file.path);
}
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed ${initialMap.size} tags in ${initialInvMap.size} markdown files (${totalTimeMs / 1000.0}s)`);
return new TagIndex(vault, cache, initialMap, initialInvMap);
}
/** Maps tags -> set of files containing that exact tag. */
map: Map<string, Set<string>>;
/** Cached inverse map; maps file -> tags it was last known to contain. */
invMap: Map<string, Set<string>>;
vault: Vault;
cache: MetadataCache;
constructor(vault: Vault, metadataCache: MetadataCache,
map: Map<string, Set<string>>, invMap: Map<string, Set<string>>) {
this.vault = vault;
this.cache = metadataCache;
this.map = map;
this.invMap = invMap;
}
/** Returns all files which have the given tag. */
public get(tag: string): Set<string> {
let result = this.map.get(tag);
if (result) {
return new Set(result);
} else {
return new Set();
}
}
/** Returns all tags the given file has. */
public getInverse(file: string): Set<string> {
let result = this.invMap.get(file);
if (result) {
return new Set(result);
} else {
return new Set();
}
}
async reloadFile(file: TFile) {
this.clearFile(file.path);
let allTags = TagIndex.parseTags(this.cache, file.path);
for (let subtag of allTags) {
if (!this.map.has(subtag)) this.map.set(subtag, new Set<string>());
this.map.get(subtag)?.add(file.path);
}
this.invMap.set(file.path, allTags);
}
/** Clears all tags for the given file so they can be re-added. */
private clearFile(path: string) {
let oldTags = this.invMap.get(path);
if (!oldTags) return;
this.invMap.delete(path);
for (let tag of oldTags) {
this.map.get(tag)?.delete(path);
}
}
}
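parseSubtags() and parseTags() above expand each tag into its ancestor tags and normalize frontmatter values that may be a list or a comma-separated string. A hedged Python sketch of the same expansion and normalization; the helper names are invented for illustration:

def parse_subtags(tag):
    # "#hello/i/am" -> ["#hello/i/am", "#hello/i", "#hello"], as documented above.
    result = [tag]
    while "/" in tag:
        tag = tag[:tag.rfind("/")]
        result.append(tag)
    return result

def normalize_frontmatter_tags(value):
    # Accept either a list or a comma-separated string and force a leading '#'.
    items = value if isinstance(value, list) else str(value).split(",")
    tags = set()
    for item in items:
        item = str(item).strip()
        if not item:
            continue
        if not item.startswith("#"):
            item = "#" + item
        tags.update(parse_subtags(item))
    return tags

print(parse_subtags("#hello/i/am"))
print(sorted(normalize_frontmatter_tags("work, home/chores")))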
/** A node in the prefix tree. */
export class PrefixIndexNode {
// TODO: Instead of only storing file paths at the leaf, consider storing them at every level,
// since this will make for faster deletes and gathers in exchange for slightly slower adds and more memory usage.
// Since we are optimizing for gather, and file paths tend to be shallow, this should be OK.
files: Set<string>;
element: string;
totalCount: number;
children: Map<string, PrefixIndexNode>;
constructor(element: string) {
this.element = element;
this.files = new Set();
this.totalCount = 0;
this.children = new Map();
}
public static add(root: PrefixIndexNode, path: string) {
let parts = path.split("/");
let node = root;
for (let index = 0; index < parts.length - 1; index++) {
if (!node.children.has(parts[index])) node.children.set(parts[index], new PrefixIndexNode(parts[index]));
node.totalCount += 1;
node = node.children.get(parts[index]) as PrefixIndexNode;
}
node.totalCount += 1;
node.files.add(path);
}
public static remove(root: PrefixIndexNode, path: string) {
let parts = path.split("/");
let node = root;
let nodes = [];
for (let index = 0; index < parts.length - 1; index++) {
if (!node.children.has(parts[index])) return;
nodes.push(node);
node = node.children.get(parts[index]) as PrefixIndexNode;
}
if (!node.files.has(path)) return;
node.files.delete(path);
node.totalCount -= 1;
for (let p of nodes) p.totalCount -= 1;
}
public static find(root: PrefixIndexNode, prefix: string): PrefixIndexNode | null {
if (prefix.length == 0 || prefix == '/') return root;
let parts = prefix.split("/");
let node = root;
for (let index = 0; index < parts.length; index++) {
if (!node.children.has(parts[index])) return null;
node = node.children.get(parts[index]) as PrefixIndexNode;
}
return node;
}
public static gather(root: PrefixIndexNode): Set<string> {
let result = new Set<string>();
PrefixIndexNode.gatherRec(root, result);
return result;
}
static gatherRec(root: PrefixIndexNode, output: Set<string>) {
for (let file of root.files) output.add(file);
for (let child of root.children.values()) this.gatherRec(child, output);
}
}
/** Indexes files by their full prefix - essentially a simple prefix tree. */
export class PrefixIndex {
public static async generate(vault: Vault): Promise<PrefixIndex> {
let root = new PrefixIndexNode("");
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
PrefixIndexNode.add(root, file.path);
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed all file prefixes (${totalTimeMs / 1000.0}s)`);
return Promise.resolve(new PrefixIndex(vault, root));
}
root: PrefixIndexNode;
vault: Vault;
constructor(vault: Vault, root: PrefixIndexNode) {
this.vault = vault;
this.root = root;
// TODO: I'm not sure if there is an event for all files in a folder, or just the folder.
// I'm assuming the former naively for now until I inevitably fix it.
this.vault.on("delete", file => {
PrefixIndexNode.remove(this.root, file.path);
});
this.vault.on("create", file => {
PrefixIndexNode.add(this.root, file.path);
});
this.vault.on("rename", (file, old) => {
PrefixIndexNode.remove(this.root, old);
PrefixIndexNode.add(this.root, file.path);
});
}
public get(prefix: string): Set<string> {
let node = PrefixIndexNode.find(this.root, prefix);
if (node == null || node == undefined) return new Set();
return PrefixIndexNode.gather(node);
}
}
/** Caches tasks for each file to avoid repeated re-loading. */
export class TaskCache {
/** Create a task cache for the given vault. */
static async generate(vault: Vault): Promise<TaskCache> {
let initialCache: Record<string, Task[]> = {};
let timeStart = new Date().getTime();
// First time load...
for (let file of vault.getMarkdownFiles()) {
let tasks = await Tasks.findTasksInFile(vault, file);
if (tasks.length == 0) continue;
initialCache[file.path] = tasks;
}
let totalTimeMs = new Date().getTime() - timeStart;
console.log(`Dataview: Parsed tasks in ${Object.keys(initialCache).length} markdown files (${totalTimeMs / 1000.0}s)`);
return new TaskCache(vault, initialCache);
}
cache: Record<string, Task[]>;
vault: Vault;
constructor(vault: Vault, cache: Record<string, Task[]>) |
/** Get the tasks associated with a file path. */
public get(file: string): Task[] | null {
let result = this.cache[file];
if (result === undefined) return null;
else return result;
}
/** Return a map of all files -> tasks in that file. */
public all(): Record<string, Task[]> {
// TODO: Defensive copy.
return this.cache;
}
async reloadFile(file: TFile) {
let tasks = await Tasks.findTasksInFile(this.vault, file);
if (tasks.length == 0) {
delete this.cache[file.path];
} else {
this.cache[file.path] = tasks;
}
}
} | {
this.vault = vault;
this.cache = cache;
} | identifier_body |
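The TaskCache above keeps one entry per file and drops the entry whenever a reload finds no tasks, so get() returns null both for never-scanned files and for files without tasks. A brief Python sketch of those semantics; find_tasks stands in for Tasks.findTasksInFile, and unlike the original, all() returns the defensive copy the TODO asks for:

class SimpleTaskCache:
    def __init__(self, find_tasks):
        self.find_tasks = find_tasks   # callable: path -> list of tasks
        self.cache = {}

    def get(self, path):
        return self.cache.get(path)    # None when the path has no entry

    def all(self):
        return dict(self.cache)        # defensive copy

    def reload_file(self, path):
        tasks = self.find_tasks(path)
        if tasks:
            self.cache[path] = tasks
        else:
            self.cache.pop(path, None) # an empty result removes the entry

cache = SimpleTaskCache(lambda p: ["- [ ] demo task"] if p == "todo.md" else [])
cache.reload_file("todo.md")
cache.reload_file("empty.md")
print(cache.get("todo.md"), cache.get("empty.md"))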
simple-job-board-public.js | /**
* Simple Job Board Core Front-end JS File - V 1.4.0
*
* @author PressTigers <[email protected]>, 2016
*
* Actions List
* - Job Application Submission Callbacks
* - Date Picker Initialization
* - Validate Email
* - Initialize TelInput Plugin
* - Validate Phone Number
* - Allowable Uploaded File's Extensions
* - Validate Required Inputs ( Attachment, Phone & Email )
* - Checkbox Group Required Attribute Callbacks
* - Custom Styling of File Upload Button
*/
(function ($) {
'use strict';
$(document).ready(function () {
var jobpost_submit_button = $('.app-submit');
$(".jobpost-form").on("submit", function (event) {
var jobpost_form_status = $('#jobpost_form_status');
var datastring = new FormData(document.getElementById("sjb-application-form"));
/**
* Application Form Submit -> Validate Email & Phone
* @since 2.2.0
*/
var is_valid_email = sjb_is_valid_input(event, "email", "sjb-email-address");
var is_valid_phone = sjb_is_valid_input(event, "phone", "sjb-phone-number");
var is_attachment = sjb_is_attachment(event);
/* Stop Form Submission on Invalid Phone, Email & File Attachment */
if ( !is_valid_email || !is_valid_phone || !is_attachment ) {
return false;
}
$.ajax({
url: application_form.ajaxurl,
type: 'POST',
dataType: 'json',
data: datastring,
async: false,
cache: false,
contentType: false,
processData: false,
beforeSend: function () {
jobpost_form_status.html('Submitting.....');
jobpost_submit_button.attr('disabled', 'disabled');
},
success: function ( response ) {
if ( response['success'] == true ) {
$('.jobpost-form').slideUp();
/* Translation Ready String Through Script Localization */
jobpost_form_status.html(response['success_alert']);
}
if ( response['success'] == false ) {
/* Translation Ready String Through Script Localization */
jobpost_form_status.html( response['error'] + ' ' + application_form.jquery_alerts['application_not_submitted'] + '</div>' );
jobpost_submit_button.removeAttr( 'disabled' );
}
}
});
return false;
});
/* Date Picker */
$('.sjb-datepicker').datepicker({
dateFormat: 'dd-mm-yy',
changeMonth: true,
changeYear: true
});
/**
* Application Form -> On Input Email Validation
*
* @since 2.2.0
*/
$('.sjb-email-address').on('input', function () {
var input = $(this);
var re = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
var is_email = re.test(input.val());
var error_element = $("span", $(this).parent());
if (is_email) {
input.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
input.removeClass("valid").addClass("invalid");
}
});
/**
* Initialize TelInput Plugin
*
* @since 2.2.0
*/
if ($('.sjb-phone-number').length) {
var telInput_id = $('.sjb-phone-number').map(function () {
return this.id;
}).get();
for (var input_ID in telInput_id) {
var telInput = $('#' + telInput_id[input_ID]);
telInput.intlTelInput({
initialCountry: "auto",
geoIpLookup: function (callback) {
$.get('http://ipinfo.io', function () {
}, "jsonp").always(function (resp) {
var countryCode = (resp && resp.country) ? resp.country : "";
callback(countryCode);
});
},
});
}
}
/**
* Application Form -> Phone Number Validation
*
* @since 2.2.0
*/
$('.sjb-phone-number').on('input', function () {
var telInput = $(this);
var telInput_id = $(this).attr('id');
var error_element = $("#" + telInput_id + "-invalid-phone");
error_element.hide();
// Validate Phone Number
if ($.trim(telInput.val())) {
if (telInput.intlTelInput("isValidNumber")) {
telInput.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
telInput.removeClass("valid").addClass("invalid");
}
}
});
/**
* Check for Allowable Extensions of Uploaded File
*
* @since 2.3.0
*/
$('.sjb-attachment').on('change', function () {
var input = $(this);
var file = $("#" + $(this).attr("id"));
var error_element = file.parent().next("span");
error_element.text('');
error_element.hide();
// Validate on File Attachment
if ( 0 != file.get(0).files.length ) {
/**
* Uploaded File Extension Checks
* Get Uploaded File Extension
*/
var file_ext = file.val().split('.').pop().toLowerCase();
// All Allowed File Extensions
var allowed_file_exts = application_form.allowed_extensions;
// Settings File Extensions && Getting value From Script Localization
var settings_file_exts = application_form.setting_extensions;
var selected_file_exts = (('yes' === application_form.all_extensions_check) || null == settings_file_exts) ? allowed_file_exts : settings_file_exts;
// File Extension Validation
if ($.inArray(file_ext, selected_file_exts) > -1) {
jobpost_submit_button.attr( 'disabled', false );
input.removeClass("invalid").addClass("valid");
} else {
/* Translation Ready String Through Script Localization */
error_element.text(application_form.jquery_alerts['invalid_extension']);
error_element.show();
input.removeClass("valid").addClass("invalid");
}
}
});
/**
* Stop Form Submission -> On Required Attachments
*
* @since 2.3.0
*/
function | ( event ) {
var error_free = true;
$(".sjb-attachment").each(function () {
var element = $("#" + $(this).attr("id"));
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
// Set Error Indicator on Invalid Attachment
if (!valid) {
if (!(is_required_class && 0 === element.get(0).files.length)) {
error_free = false;
}
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
});
return error_free;
}
/**
* Stop Form Submission -> On Invalid Email/Phone
*
* @since 2.2.0
*/
function sjb_is_valid_input(event, input_type, input_class) {
var jobpost_form_inputs = $("." + input_class).serializeArray();
var error_free = true;
for (var i in jobpost_form_inputs) {
var element = $("#" + jobpost_form_inputs[i]['name']);
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
if (!(is_required_class && "" === jobpost_form_inputs[i]['value'])) {
if ("email" === input_type) {
var error_element = $("span", element.parent());
} else if ("phone" === input_type) {
var error_element = $("#" + jobpost_form_inputs[i]['name'] + "-invalid-phone");
}
// Set Error Indicator on Invalid Input
if (!valid) {
error_element.show();
error_free = false;
}
else {
error_element.hide();
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
}
}
return error_free;
}
/**
* Remove Required Attribute from Checkbox Group -> When one of the options is selected.
* Add Required Attribute to Checkbox Group -> When none of the options is selected.
*
* @since 2.3.0
*/
var requiredCheckboxes = $(':checkbox[required]');
requiredCheckboxes.on('change', function () {
var checkboxGroup = requiredCheckboxes.filter('[name="' + $(this).attr('name') + '"]');
var isChecked = checkboxGroup.is(':checked');
checkboxGroup.prop('required', !isChecked);
});
});
/*
* Custom Styling of Upload Field Button
*
* @since 2.4.0
*/
var file = {
maxlength: 20, // maximum length of filename before it's trimmed
convert: function () {
// Convert all file type inputs.
$('input[type=file].sjb-attachment').each(function () {
$(this).wrap('<div class="file" />');
$(this).parent().prepend('<div>'+ application_form.file['browse']+'</div>');
$(this).parent().prepend('<span>'+ application_form.file['no_file_chosen']+'</span>');
$(this).fadeTo(0, 0);
$(this).attr('size', '50'); // Use this to adjust width for FireFox.
});
},
update: function (x) {
// Update the filename display.
var filename = x.val().replace(/^.*\\/g, '');
if (filename.length > file.maxlength) {
var trim_start = file.maxlength / 2 - 1;
var trim_end = trim_start + filename.length - file.maxlength + 1;
filename = filename.substr(0, trim_start) + '…' + filename.substr(trim_end);
}
if (filename == '')
filename = application_form.file['no_file_chosen'];
x.siblings('span').html(filename);
}
}
$(document).ready(function () {
file.convert();
$('input[type=file].sjb-attachment').change(function () {
file.update($(this));
});
});
})(jQuery); | sjb_is_attachment | identifier_name |
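The attachment handler above lowercases the final file extension and tests it against a whitelist supplied through script localization. A standalone Python sketch of that check; the extension list here is a made-up example, not the plugin's real setting:

ALLOWED_EXTENSIONS = {"pdf", "doc", "docx", "odt", "rtf", "txt"}  # illustrative list

def has_allowed_extension(filename, allowed=ALLOWED_EXTENSIONS):
    # Mirrors file.val().split('.').pop().toLowerCase() followed by the whitelist test.
    ext = filename.rsplit(".", 1)[-1].lower() if "." in filename else ""
    return ext in allowed

print(has_allowed_extension("resume.PDF"))    # True
print(has_allowed_extension("payload.exe"))   # False
print(has_allowed_extension("no_extension"))  # False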
simple-job-board-public.js | /**
* Simple Job Board Core Front-end JS File - V 1.4.0
*
* @author PressTigers <[email protected]>, 2016
*
* Actions List
* - Job Application Submission Callbacks
* - Date Picker Initialization
* - Validate Email
* - Initialize TelInput Plugin
* - Validate Phone Number
* - Allowable Uploaded File's Extensions
* - Validate Required Inputs ( Attachment, Phone & Email )
* - Checkbox Group Required Attribute Callbacks
* - Custom Styling of File Upload Button
*/
(function ($) {
'use strict';
$(document).ready(function () {
var jobpost_submit_button = $('.app-submit');
$(".jobpost-form").on("submit", function (event) {
var jobpost_form_status = $('#jobpost_form_status');
var datastring = new FormData(document.getElementById("sjb-application-form"));
/**
* Application Form Submit -> Validate Email & Phone
* @since 2.2.0
*/
var is_valid_email = sjb_is_valid_input(event, "email", "sjb-email-address");
var is_valid_phone = sjb_is_valid_input(event, "phone", "sjb-phone-number");
var is_attachment = sjb_is_attachment(event);
/* Stop Form Submission on Invalid Phone, Email & File Attachment */
if ( !is_valid_email || !is_valid_phone || !is_attachment ) {
return false;
}
$.ajax({
url: application_form.ajaxurl,
type: 'POST',
dataType: 'json',
data: datastring,
async: false,
cache: false,
contentType: false,
processData: false,
beforeSend: function () {
jobpost_form_status.html('Submitting.....');
jobpost_submit_button.attr('disabled', 'disabled');
},
success: function ( response ) {
if ( response['success'] == true ) {
$('.jobpost-form').slideUp();
/* Translation Ready String Through Script Localization */
jobpost_form_status.html(response['success_alert']);
}
if ( response['success'] == false ) {
/* Translation Ready String Through Script Localization */
jobpost_form_status.html( response['error'] + ' ' + application_form.jquery_alerts['application_not_submitted'] + '</div>' );
jobpost_submit_button.removeAttr( 'disabled' );
}
}
});
return false;
});
/* Date Picker */
$('.sjb-datepicker').datepicker({
dateFormat: 'dd-mm-yy',
changeMonth: true,
changeYear: true
});
/**
* Application Form -> On Input Email Validation
*
* @since 2.2.0
*/
$('.sjb-email-address').on('input', function () {
var input = $(this);
var re = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
var is_email = re.test(input.val());
var error_element = $("span", $(this).parent());
if (is_email) {
input.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
input.removeClass("valid").addClass("invalid"); | }
});
/**
* Initialize TelInput Plugin
*
* @since 2.2.0
*/
if ($('.sjb-phone-number').length) {
var telInput_id = $('.sjb-phone-number').map(function () {
return this.id;
}).get();
for (var input_ID in telInput_id) {
var telInput = $('#' + telInput_id[input_ID]);
telInput.intlTelInput({
initialCountry: "auto",
geoIpLookup: function (callback) {
$.get('http://ipinfo.io', function () {
}, "jsonp").always(function (resp) {
var countryCode = (resp && resp.country) ? resp.country : "";
callback(countryCode);
});
},
});
}
}
/**
* Application Form -> Phone Number Validation
*
* @since 2.2.0
*/
$('.sjb-phone-number').on('input', function () {
var telInput = $(this);
var telInput_id = $(this).attr('id');
var error_element = $("#" + telInput_id + "-invalid-phone");
error_element.hide();
// Validate Phone Number
if ($.trim(telInput.val())) {
if (telInput.intlTelInput("isValidNumber")) {
telInput.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
telInput.removeClass("valid").addClass("invalid");
}
}
});
/**
* Check for Allowable Extensions of Uploaded File
*
* @since 2.3.0
*/
$('.sjb-attachment').on('change', function () {
var input = $(this);
var file = $("#" + $(this).attr("id"));
var error_element = file.parent().next("span");
error_element.text('');
error_element.hide();
// Validate on File Attachment
if ( 0 != file.get(0).files.length ) {
/**
* Uploaded File Extension Checks
* Get Uploaded File Extension
*/
var file_ext = file.val().split('.').pop().toLowerCase();
// All Allowed File Extensions
var allowed_file_exts = application_form.allowed_extensions;
// Settings File Extensions && Getting value From Script Localization
var settings_file_exts = application_form.setting_extensions;
var selected_file_exts = (('yes' === application_form.all_extensions_check) || null == settings_file_exts) ? allowed_file_exts : settings_file_exts;
// File Extension Validation
if ($.inArray(file_ext, selected_file_exts) > -1) {
jobpost_submit_button.attr( 'disabled', false );
input.removeClass("invalid").addClass("valid");
} else {
/* Translation Ready String Through Script Localization */
error_element.text(application_form.jquery_alerts['invalid_extension']);
error_element.show();
input.removeClass("valid").addClass("invalid");
}
}
});
/**
* Stop Form Submission -> On Required Attachments
*
* @since 2.3.0
*/
function sjb_is_attachment( event ) {
var error_free = true;
$(".sjb-attachment").each(function () {
var element = $("#" + $(this).attr("id"));
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
// Set Error Indicator on Invalid Attachment
if (!valid) {
if (!(is_required_class && 0 === element.get(0).files.length)) {
error_free = false;
}
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
});
return error_free;
}
/**
* Stop Form Submission -> On Invalid Email/Phone
*
* @since 2.2.0
*/
function sjb_is_valid_input(event, input_type, input_class) {
var jobpost_form_inputs = $("." + input_class).serializeArray();
var error_free = true;
for (var i in jobpost_form_inputs) {
var element = $("#" + jobpost_form_inputs[i]['name']);
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
if (!(is_required_class && "" === jobpost_form_inputs[i]['value'])) {
if ("email" === input_type) {
var error_element = $("span", element.parent());
} else if ("phone" === input_type) {
var error_element = $("#" + jobpost_form_inputs[i]['name'] + "-invalid-phone");
}
// Set Error Indicator on Invalid Input
if (!valid) {
error_element.show();
error_free = false;
}
else {
error_element.hide();
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
}
}
return error_free;
}
/**
* Remove Required Attribute from Checkbox Group -> When one of the options is selected.
* Add Required Attribute to Checkbox Group -> When none of the options is selected.
*
* @since 2.3.0
*/
var requiredCheckboxes = $(':checkbox[required]');
requiredCheckboxes.on('change', function () {
var checkboxGroup = requiredCheckboxes.filter('[name="' + $(this).attr('name') + '"]');
var isChecked = checkboxGroup.is(':checked');
checkboxGroup.prop('required', !isChecked);
});
});
/*
* Custom Styling of Upload Field Button
*
* @since 2.4.0
*/
var file = {
maxlength: 20, // maximum length of filename before it's trimmed
convert: function () {
// Convert all file type inputs.
$('input[type=file].sjb-attachment').each(function () {
$(this).wrap('<div class="file" />');
$(this).parent().prepend('<div>'+ application_form.file['browse']+'</div>');
$(this).parent().prepend('<span>'+ application_form.file['no_file_chosen']+'</span>');
$(this).fadeTo(0, 0);
$(this).attr('size', '50'); // Use this to adjust width for FireFox.
});
},
update: function (x) {
// Update the filename display.
var filename = x.val().replace(/^.*\\/g, '');
if (filename.length > file.maxlength) {
var trim_start = file.maxlength / 2 - 1;
var trim_end = trim_start + filename.length - file.maxlength + 1;
filename = filename.substr(0, trim_start) + '…' + filename.substr(trim_end);
}
if (filename == '')
filename = application_form.file['no_file_chosen'];
x.siblings('span').html(filename);
}
}
$(document).ready(function () {
file.convert();
$('input[type=file].sjb-attachment').change(function () {
file.update($(this));
});
});
})(jQuery); | random_line_split |
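file.update() above shortens long filenames around an ellipsis before showing them next to the styled upload button. A hedged Python port of that display logic, keeping the same trimming arithmetic and the 20-character default:

def display_name(filename, maxlength=20):
    # Keep the first maxlength/2 - 1 characters, insert an ellipsis, then keep
    # the tail so the result is maxlength characters long, as in file.update().
    if not filename:
        return "No file chosen"
    if len(filename) <= maxlength:
        return filename
    trim_start = maxlength // 2 - 1
    trim_end = trim_start + len(filename) - maxlength + 1
    return filename[:trim_start] + "…" + filename[trim_end:]

print(display_name("a_really_long_resume_filename_2016.pdf"))
print(display_name("cv.pdf"))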
|
simple-job-board-public.js | /**
* Simple Job Board Core Front-end JS File - V 1.4.0
*
* @author PressTigers <[email protected]>, 2016
*
* Actions List
* - Job Application Submission Callbacks
* - Date Picker Initialization
* - Validate Email
* - Initialize TelInput Plugin
* - Validate Phone Number
* - Allowable Uploaded File's Extensions
* - Validate Required Inputs ( Attachment, Phone & Email )
* - Checkbox Group Required Attribute Callbacks
* - Custom Styling of File Upload Button
*/
(function ($) {
'use strict';
$(document).ready(function () {
var jobpost_submit_button = $('.app-submit');
$(".jobpost-form").on("submit", function (event) {
var jobpost_form_status = $('#jobpost_form_status');
var datastring = new FormData(document.getElementById("sjb-application-form"));
/**
* Application Form Submit -> Validate Email & Phone
* @since 2.2.0
*/
var is_valid_email = sjb_is_valid_input(event, "email", "sjb-email-address");
var is_valid_phone = sjb_is_valid_input(event, "phone", "sjb-phone-number");
var is_attachment = sjb_is_attachment(event);
/* Stop Form Submission on Invalid Phone, Email & File Attachment */
if ( !is_valid_email || !is_valid_phone || !is_attachment ) {
return false;
}
$.ajax({
url: application_form.ajaxurl,
type: 'POST',
dataType: 'json',
data: datastring,
async: false,
cache: false,
contentType: false,
processData: false,
beforeSend: function () {
jobpost_form_status.html('Submitting.....');
jobpost_submit_button.attr('disabled', 'disabled');
},
success: function ( response ) {
if ( response['success'] == true ) {
$('.jobpost-form').slideUp();
/* Translation Ready String Through Script Localization */
jobpost_form_status.html(response['success_alert']);
}
if ( response['success'] == false ) {
/* Translation Ready String Through Script Localization */
jobpost_form_status.html( response['error'] + ' ' + application_form.jquery_alerts['application_not_submitted'] + '</div>' );
jobpost_submit_button.removeAttr( 'disabled' );
}
}
});
return false;
});
/* Date Picker */
$('.sjb-datepicker').datepicker({
dateFormat: 'dd-mm-yy',
changeMonth: true,
changeYear: true
});
/**
* Application Form -> On Input Email Validation
*
* @since 2.2.0
*/
$('.sjb-email-address').on('input', function () {
var input = $(this);
var re = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
var is_email = re.test(input.val());
var error_element = $("span", $(this).parent());
if (is_email) {
input.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
input.removeClass("valid").addClass("invalid");
}
});
/**
* Initialize TelInput Plugin
*
* @since 2.2.0
*/
if ($('.sjb-phone-number').length) {
var telInput_id = $('.sjb-phone-number').map(function () {
return this.id;
}).get();
for (var input_ID in telInput_id) {
var telInput = $('#' + telInput_id[input_ID]);
telInput.intlTelInput({
initialCountry: "auto",
geoIpLookup: function (callback) {
$.get('http://ipinfo.io', function () {
}, "jsonp").always(function (resp) {
var countryCode = (resp && resp.country) ? resp.country : "";
callback(countryCode);
});
},
});
}
}
/**
* Application Form -> Phone Number Validation
*
* @since 2.2.0
*/
$('.sjb-phone-number').on('input', function () {
var telInput = $(this);
var telInput_id = $(this).attr('id');
var error_element = $("#" + telInput_id + "-invalid-phone");
error_element.hide();
// Validate Phone Number
if ($.trim(telInput.val())) {
if (telInput.intlTelInput("isValidNumber")) {
telInput.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
telInput.removeClass("valid").addClass("invalid");
}
}
});
/**
* Check for Allowable Extensions of Uploaded File
*
* @since 2.3.0
*/
$('.sjb-attachment').on('change', function () {
var input = $(this);
var file = $("#" + $(this).attr("id"));
var error_element = file.parent().next("span");
error_element.text('');
error_element.hide();
// Validate on File Attachment
if ( 0 != file.get(0).files.length ) {
/**
* Uploaded File Extension Checks
* Get Uploaded File Extension
*/
var file_ext = file.val().split('.').pop().toLowerCase();
// All Allowed File Extensions
var allowed_file_exts = application_form.allowed_extensions;
// Settings File Extensions && Getting value From Script Localization
var settings_file_exts = application_form.setting_extensions;
var selected_file_exts = (('yes' === application_form.all_extensions_check) || null == settings_file_exts) ? allowed_file_exts : settings_file_exts;
// File Extension Validation
if ($.inArray(file_ext, selected_file_exts) > -1) {
jobpost_submit_button.attr( 'disabled', false );
input.removeClass("invalid").addClass("valid");
} else {
/* Translation Ready String Through Script Localization */
error_element.text(application_form.jquery_alerts['invalid_extension']);
error_element.show();
input.removeClass("valid").addClass("invalid");
}
}
});
/**
* Stop Form Submission -> On Required Attachments
*
* @since 2.3.0
*/
function sjb_is_attachment( event ) {
var error_free = true;
$(".sjb-attachment").each(function () {
var element = $("#" + $(this).attr("id"));
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
// Set Error Indicator on Invalid Attachment
if (!valid) {
if (!(is_required_class && 0 === element.get(0).files.length)) {
error_free = false;
}
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
});
return error_free;
}
/**
* Stop Form Submission -> On Invalid Email/Phone
*
* @since 2.2.0
*/
function sjb_is_valid_input(event, input_type, input_class) |
/**
* Remove Required Attribute from Checkbox Group -> When one of the options is selected.
* Add Required Attribute to Checkbox Group -> When none of the options is selected.
*
* @since 2.3.0
*/
var requiredCheckboxes = $(':checkbox[required]');
requiredCheckboxes.on('change', function () {
var checkboxGroup = requiredCheckboxes.filter('[name="' + $(this).attr('name') + '"]');
var isChecked = checkboxGroup.is(':checked');
checkboxGroup.prop('required', !isChecked);
});
});
/*
* Custom Styling of Upload Field Button
*
* @since 2.4.0
*/
var file = {
maxlength: 20, // maximum length of filename before it's trimmed
convert: function () {
// Convert all file type inputs.
$('input[type=file].sjb-attachment').each(function () {
$(this).wrap('<div class="file" />');
$(this).parent().prepend('<div>'+ application_form.file['browse']+'</div>');
$(this).parent().prepend('<span>'+ application_form.file['no_file_chosen']+'</span>');
$(this).fadeTo(0, 0);
$(this).attr('size', '50'); // Use this to adjust width for FireFox.
});
},
update: function (x) {
// Update the filename display.
var filename = x.val().replace(/^.*\\/g, '');
if (filename.length > file.maxlength) {
var trim_start = file.maxlength / 2 - 1;
var trim_end = trim_start + filename.length - file.maxlength + 1;
filename = filename.substr(0, trim_start) + '…' + filename.substr(trim_end);
}
if (filename == '')
filename = application_form.file['no_file_chosen'];
x.siblings('span').html(filename);
}
}
$(document).ready(function () {
file.convert();
$('input[type=file].sjb-attachment').change(function () {
file.update($(this));
});
});
})(jQuery); | {
var jobpost_form_inputs = $("." + input_class).serializeArray();
var error_free = true;
for (var i in jobpost_form_inputs) {
var element = $("#" + jobpost_form_inputs[i]['name']);
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
if (!(is_required_class && "" === jobpost_form_inputs[i]['value'])) {
if ("email" === input_type) {
var error_element = $("span", element.parent());
} else if ("phone" === input_type) {
var error_element = $("#" + jobpost_form_inputs[i]['name'] + "-invalid-phone");
}
// Set Error Indicator on Invalid Input
if (!valid) {
error_element.show();
error_free = false;
}
else {
error_element.hide();
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
}
}
return error_free;
} | identifier_body |
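The on-input email check above is a single regular expression test. The same pattern ported to Python for reference; the sample addresses are made up:

import re

EMAIL_RE = re.compile(
    r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$"
)

def is_valid_email(value):
    return bool(EMAIL_RE.match(value))

print(is_valid_email("[email protected]"))  # True
print(is_valid_email("not an email"))         # False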
simple-job-board-public.js | /**
* Simple Job Board Core Front-end JS File - V 1.4.0
*
* @author PressTigers <[email protected]>, 2016
*
* Actions List
* - Job Application Submission Callbacks
* - Date Picker Initialization
* - Validate Email
* - Initialize TelInput Plugin
* - Validate Phone Number
* - Allowable Uploaded File's Extensions
* - Validate Required Inputs ( Attachment, Phone & Email )
* - Checkbox Group Required Attribute Callbacks
* - Custom Styling of File Upload Button
*/
(function ($) {
'use strict';
$(document).ready(function () {
var jobpost_submit_button = $('.app-submit');
$(".jobpost-form").on("submit", function (event) {
var jobpost_form_status = $('#jobpost_form_status');
var datastring = new FormData(document.getElementById("sjb-application-form"));
/**
* Application Form Submit -> Validate Email & Phone
* @since 2.2.0
*/
var is_valid_email = sjb_is_valid_input(event, "email", "sjb-email-address");
var is_valid_phone = sjb_is_valid_input(event, "phone", "sjb-phone-number");
var is_attachment = sjb_is_attachment(event);
/* Stop Form Submission on Invalid Phone, Email & File Attachment */
if ( !is_valid_email || !is_valid_phone || !is_attachment ) {
return false;
}
$.ajax({
url: application_form.ajaxurl,
type: 'POST',
dataType: 'json',
data: datastring,
async: false,
cache: false,
contentType: false,
processData: false,
beforeSend: function () {
jobpost_form_status.html('Submitting.....');
jobpost_submit_button.attr('disabled', 'disabled');
},
success: function ( response ) {
if ( response['success'] == true ) {
$('.jobpost-form').slideUp();
/* Translation Ready String Through Script Localization */
jobpost_form_status.html(response['success_alert']);
}
if ( response['success'] == false ) {
/* Translation Ready String Through Script Localization */
jobpost_form_status.html( response['error'] + ' ' + application_form.jquery_alerts['application_not_submitted'] + '</div>' );
jobpost_submit_button.removeAttr( 'disabled' );
}
}
});
return false;
});
/* Date Picker */
$('.sjb-datepicker').datepicker({
dateFormat: 'dd-mm-yy',
changeMonth: true,
changeYear: true
});
/**
* Application Form -> On Input Email Validation
*
* @since 2.2.0
*/
$('.sjb-email-address').on('input', function () {
var input = $(this);
var re = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
var is_email = re.test(input.val());
var error_element = $("span", $(this).parent());
if (is_email) | else {
input.removeClass("valid").addClass("invalid");
}
});
/**
* Initialize TelInput Plugin
*
* @since 2.2.0
*/
if ($('.sjb-phone-number').length) {
var telInput_id = $('.sjb-phone-number').map(function () {
return this.id;
}).get();
for (var input_ID in telInput_id) {
var telInput = $('#' + telInput_id[input_ID]);
telInput.intlTelInput({
initialCountry: "auto",
geoIpLookup: function (callback) {
$.get('http://ipinfo.io', function () {
}, "jsonp").always(function (resp) {
var countryCode = (resp && resp.country) ? resp.country : "";
callback(countryCode);
});
},
});
}
}
/**
* Application Form -> Phone Number Validation
*
* @since 2.2.0
*/
$('.sjb-phone-number').on('input', function () {
var telInput = $(this);
var telInput_id = $(this).attr('id');
var error_element = $("#" + telInput_id + "-invalid-phone");
error_element.hide();
// Validate Phone Number
if ($.trim(telInput.val())) {
if (telInput.intlTelInput("isValidNumber")) {
telInput.removeClass("invalid").addClass("valid");
error_element.hide();
} else {
telInput.removeClass("valid").addClass("invalid");
}
}
});
/**
* Check for Allowable Extensions of Uploaded File
*
* @since 2.3.0
*/
$('.sjb-attachment').on('change', function () {
var input = $(this);
var file = $("#" + $(this).attr("id"));
var error_element = file.parent().next("span");
error_element.text('');
error_element.hide();
// Validate on File Attachment
if ( 0 != file.get(0).files.length ) {
/**
* Uploaded File Extension Checks
* Get Uploaded File Extension
*/
var file_ext = file.val().split('.').pop().toLowerCase();
// All Allowed File Extensions
var allowed_file_exts = application_form.allowed_extensions;
// Settings File Extensions && Getting value From Script Localization
var settings_file_exts = application_form.setting_extensions;
var selected_file_exts = (('yes' === application_form.all_extensions_check) || null == settings_file_exts) ? allowed_file_exts : settings_file_exts;
// File Extension Validation
if ($.inArray(file_ext, selected_file_exts) > -1) {
jobpost_submit_button.attr( 'disabled', false );
input.removeClass("invalid").addClass("valid");
} else {
/* Translation Ready String Through Script Localization */
error_element.text(application_form.jquery_alerts['invalid_extension']);
error_element.show();
input.removeClass("valid").addClass("invalid");
}
}
});
/**
* Stop Form Submission -> On Required Attachments
*
* @since 2.3.0
*/
function sjb_is_attachment( event ) {
var error_free = true;
$(".sjb-attachment").each(function () {
var element = $("#" + $(this).attr("id"));
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
// Set Error Indicator on Invalid Attachment
if (!valid) {
if (!(is_required_class && 0 === element.get(0).files.length)) {
error_free = false;
}
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
});
return error_free;
}
/**
* Stop Form Submission -> On Invalid Email/Phone
*
* @since 2.2.0
*/
function sjb_is_valid_input(event, input_type, input_class) {
var jobpost_form_inputs = $("." + input_class).serializeArray();
var error_free = true;
for (var i in jobpost_form_inputs) {
var element = $("#" + jobpost_form_inputs[i]['name']);
var valid = element.hasClass("valid");
var is_required_class = element.hasClass("sjb-not-required");
if (!(is_required_class && "" === jobpost_form_inputs[i]['value'])) {
if ("email" === input_type) {
var error_element = $("span", element.parent());
} else if ("phone" === input_type) {
var error_element = $("#" + jobpost_form_inputs[i]['name'] + "-invalid-phone");
}
// Set Error Indicator on Invalid Input
if (!valid) {
error_element.show();
error_free = false;
}
else {
error_element.hide();
}
// Stop Form Submission
if (!error_free) {
event.preventDefault();
}
}
}
return error_free;
}
/**
* Remove Required Attribute from Checkbox Group -> When one of the options is selected.
* Add Required Attribute to Checkbox Group -> When none of the options is selected.
*
* @since 2.3.0
*/
var requiredCheckboxes = $(':checkbox[required]');
requiredCheckboxes.on('change', function () {
var checkboxGroup = requiredCheckboxes.filter('[name="' + $(this).attr('name') + '"]');
var isChecked = checkboxGroup.is(':checked');
checkboxGroup.prop('required', !isChecked);
});
});
/*
* Custom Styling of Upload Field Button
*
* @since 2.4.0
*/
var file = {
maxlength: 20, // maximum length of filename before it's trimmed
convert: function () {
// Convert all file type inputs.
$('input[type=file].sjb-attachment').each(function () {
$(this).wrap('<div class="file" />');
$(this).parent().prepend('<div>'+ application_form.file['browse']+'</div>');
$(this).parent().prepend('<span>'+ application_form.file['no_file_chosen']+'</span>');
$(this).fadeTo(0, 0);
$(this).attr('size', '50'); // Use this to adjust width for FireFox.
});
},
update: function (x) {
// Update the filename display.
var filename = x.val().replace(/^.*\\/g, '');
if (filename.length > file.maxlength) {
var trim_start = file.maxlength / 2 - 1;
var trim_end = trim_start + filename.length - file.maxlength + 1;
filename = filename.substr(0, trim_start) + '…' + filename.substr(trim_end);
}
if (filename == '')
filename = application_form.file['no_file_chosen'];
x.siblings('span').html(filename);
}
}
$(document).ready(function () {
file.convert();
$('input[type=file].sjb-attachment').change(function () {
file.update($(this));
});
});
})(jQuery); | {
input.removeClass("invalid").addClass("valid");
error_element.hide();
} | conditional_block |
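The submit handler above only fires the AJAX request once every validator (email, phone, attachment) passes, and the checkbox-group handler keeps the required flag only while no box in the group is checked. A compact Python sketch of both rules with deliberately simplified stand-in validators:

def form_is_submittable(fields, validators):
    # All validators must pass before submission, as in the submit handler above.
    return all(check(fields) for check in validators)

def email_present(fields):
    return "@" in fields.get("email", "")

def attachment_present(fields):
    return bool(fields.get("attachment"))

def group_still_required(checkboxes):
    # A required checkbox group stays required until at least one box is checked.
    return not any(checkboxes)

print(form_is_submittable({"email": "[email protected]", "attachment": "cv.pdf"},
                          [email_present, attachment_present]))   # True
print(group_still_required([False, False]))                        # True
print(group_still_required([False, True]))                         # False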
train_timeline.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import os
import time
import sys
sys.path.insert(0,'lib')
sys.path.insert(0,'networks')
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.contrib import slim
import tensorflow.contrib.data as tf_data
from tensorflow.python.client import timeline
from collections import Counter
import numpy as np
import importlib
import itertools
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_v1, resnet_v2
import argparse
import utils
import sphere_network as network
from data_generator import DataGenerator
#import lfw
import pdb
#import cv2
#import pylab as plt
debug = False
softmax_ind = 0
from tensorflow.python.ops import data_flow_ops
def _from_tensor_slices(tensors_x,tensors_y):
#return TensorSliceDataset((tensors_x,tensors_y))
return tf_data.Dataset.from_tensor_slices((tensors_x,tensors_y))
def main(args):
#network = importlib.import_module(args.model_def)
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
# Write arguments to a text file
utils.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
utils.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
train_set = utils.get_dataset(args.data_dir)
nrof_classes = len(train_set)
print('nrof_classes: ',nrof_classes)
image_list, label_list = utils.get_image_paths_and_labels(train_set)
image_list = np.array(image_list)
print('total images: {}'.format(len(image_list)))
label_list = np.array(label_list,dtype=np.int32)
dataset_size = len(image_list)
data_reader = DataGenerator(image_list,label_list,args.batch_size)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
if args.pretrained_model:
print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False,name='global_step')
# Placeholder for the learning rate
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
images_placeholder = tf.placeholder(tf.float32,[None,112,96,3], name='images_placeholder')
#images_placeholder = tf.Variable(np.ones([args.batch_size,112,96,3]), dtype=np.float32,name='images_placeholder')
labels_placeholder = tf.placeholder(tf.int32,[None], name='labels_placeholder')
#labels_placeholder = tf.Variable(np.ones([args.batch_size,]),dtype=tf.int32, name='labels_placeholder')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
print('Using optimizer: {}'.format(args.optimizer))
if args.optimizer == 'ADAGRAD':
opt = tf.train.AdagradOptimizer(learning_rate)
elif args.optimizer == 'MOM':
opt = tf.train.MomentumOptimizer(learning_rate,0.9)
if args.network == 'sphere_network':
prelogits = network.infer(images_placeholder)
else:
raise Exception('Not supported network: {}'.format(args.network))
if args.loss_type == 'softmax':
cross_entropy_mean = utils.softmax_loss(prelogits,labels_placeholder, len(train_set),args.weight_decay,False)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
#loss = cross_entropy_mean + args.weight_decay*tf.add_n(regularization_losses)
loss = cross_entropy_mean + args.weight_decay*tf.add_n(regularization_losses)
#loss = cross_entropy_mean
else:
raise Exception('Not supported loss type: {}'.format(args.loss_type))
#loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
losses = {}
losses['total_loss'] = loss
losses['softmax_loss'] = cross_entropy_mean
debug_info = {}
debug_info ['prelogits'] = prelogits
grads = opt.compute_gradients(loss,tf.trainable_variables())
train_op = opt.apply_gradients(grads,global_step=global_step)
#save_vars = [var for var in tf.global_variables() if 'Adagrad' not in var.name and 'global_step' not in var.name]
save_vars = tf.global_variables()
#saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
saver = tf.train.Saver(save_vars, max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,allow_soft_placement=True))
# Initialize variables
sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder:True})
sess.run(tf.local_variables_initializer(), feed_dict={phase_train_placeholder:True})
run_metadata = tf.RunMetadata()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
with sess.as_default():
#pdb.set_trace()
if args.pretrained_model:
print('Restoring pretrained model: %s' % args.pretrained_model)
saver.restore(sess, os.path.expanduser(args.pretrained_model))
# Training and validation loop
epoch = 0
while epoch < args.max_nrof_epochs:
step = sess.run(global_step, feed_dict=None)
epoch = step // args.epoch_size
# Train for one epoch
train(args, sess, epoch, images_placeholder, labels_placeholder, data_reader,run_metadata,run_options,
learning_rate_placeholder, global_step,
losses, train_op, args.learning_rate_schedule_file)
# Save variables and the metagraph if it doesn't exist already
model_dir = args.models_base_dir
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % 'softmax')
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
# Evaluate on LFW
return model_dir
def train(args, sess, epoch, images_placeholder, labels_placeholder, data_reader,run_metadata, run_options,
learning_rate_placeholder, global_step,
loss, train_op, learning_rate_schedule_file):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = utils.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
while batch_number < args.epoch_size:
start_time = time.time()
#print('Running forward pass on sampled images: ', end='')
start_time = time.time()
images, labels = data_reader.next_batch(args.image_height,args.image_width)
print("Loaded {} images in {:.3f} s".format(images.shape[0], time.time() - start_time))
#if epoch < 10:
# continue
#if batch_number > 49:
# pdb.set_trace()
# print(labels)
#print(images.shape,labels)
feed_dict = {learning_rate_placeholder: lr ,images_placeholder:images, labels_placeholder:labels}
#feed_dict = {learning_rate_placeholder: lr}
start_time = time.time()
#total_err, softmax_err, _, step = sess.run([loss['total_loss'], loss['softmax_loss'], train_op, global_step ], feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
_ = sess.run(train_op, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('json_placeholder/tl-{}.json'.format(batch_number),'w') as wd:
wd.write(ctf)
duration = time.time() - start_time
#print('Epoch: [%d][%d/%d]\tTime %.3f\tTotal Loss %2.3f\tSoftmax Loss %2.3f, lr %2.5f' %
# (epoch, batch_number+1, args.epoch_size, duration, total_err, softmax_err, lr))
batch_number += 1
return batch_number
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
def | (filename, epoch):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split('#', 1)[0]
if line:
par = line.strip().split(':')
e = int(par[0])
lr = float(par[1])
if e <= epoch:
learning_rate = lr
else:
return learning_rate
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='logs/facenet_ms_mp')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='models/facenet_ms_mp')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=.9)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--loss_type', type=str,
help='Which type loss to be used.',default='softmax')
parser.add_argument('--network', type=str,
help='which network is used to extract feature.',default='resnet50')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches. Multiple directories are separated with colon.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=100)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--image_src_size', type=int,
help='Src Image size (height, width) in pixels.', default=256)
parser.add_argument('--image_height', type=int,
help='Image size (height, width) in pixels.', default=112)
parser.add_argument('--image_width', type=int,
help='Image size (height, width) in pixels.', default=96)
parser.add_argument('--people_per_batch', type=int,
help='Number of people per batch.', default=30)
parser.add_argument('--num_gpus', type=int,
help='Number of gpus.', default=4)
parser.add_argument('--images_per_person', type=int,
help='Number of images per person.', default=5)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=600)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=256)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM','SGD'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to to -1.', default='data/learning_rate_schedule.txt')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| get_learning_rate_from_file | identifier_name |
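get_learning_rate_from_file() above reads a plain-text schedule of epoch: learning_rate lines with # comments and keeps the rate of the last epoch boundary that has been reached. A standalone sketch with a made-up schedule; unlike the original, it also returns the final rate when the requested epoch is past the last entry:

SCHEDULE_TEXT = """
# epoch: learning rate (illustrative values only)
0: 0.1
100: 0.01
200: 0.001
"""

def lr_from_schedule(text, epoch):
    learning_rate = None
    for line in text.splitlines():
        line = line.split("#", 1)[0].strip()
        if not line:
            continue
        start_epoch, rate = line.split(":")
        if int(start_epoch) <= epoch:
            learning_rate = float(rate)
        else:
            break
    return learning_rate

print(lr_from_schedule(SCHEDULE_TEXT, 150))  # 0.01
print(lr_from_schedule(SCHEDULE_TEXT, 500))  # 0.001 (past the last boundary)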
train_timeline.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import os
import time
import sys
sys.path.insert(0,'lib')
sys.path.insert(0,'networks')
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.contrib import slim
import tensorflow.contrib.data as tf_data
from collections import Counter
import numpy as np
import importlib
import itertools
from tensorflow.contrib.slim.nets import resnet_v1, resnet_v2
import argparse
import utils
import sphere_network as network
from data_generator import DataGenerator
#import lfw
import pdb
#import cv2
#import pylab as plt
debug = False
softmax_ind = 0
from tensorflow.python.ops import data_flow_ops
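# Helper that wraps in-memory image/label tensors in a tf.data Dataset (TF 1.x contrib API).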
def _from_tensor_slices(tensors_x,tensors_y):
    #return TensorSliceDataset((tensors_x,tensors_y))
    return tf_data.Dataset.from_tensor_slices((tensors_x,tensors_y))
def main(args):
#network = importlib.import_module(args.model_def)
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
# Write arguments to a text file
utils.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
utils.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
train_set = utils.get_dataset(args.data_dir)
nrof_classes = len(train_set)
print('nrof_classes: ',nrof_classes)
image_list, label_list = utils.get_image_paths_and_labels(train_set)
image_list = np.array(image_list)
print('total images: {}'.format(len(image_list)))
label_list = np.array(label_list,dtype=np.int32)
dataset_size = len(image_list)
data_reader = DataGenerator(image_list,label_list,args.batch_size)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
if args.pretrained_model:
print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False,name='global_step')
# Placeholder for the learning rate
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
images_placeholder = tf.placeholder(tf.float32,[None,112,96,3], name='images_placeholder')
#images_placeholder = tf.Variable(np.ones([args.batch_size,112,96,3]), dtype=np.float32,name='images_placeholder')
labels_placeholder = tf.placeholder(tf.int32,[None], name='labels_placeholder')
#labels_placeholder = tf.Variable(np.ones([args.batch_size,]),dtype=tf.int32, name='labels_placeholder')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
print('Using optimizer: {}'.format(args.optimizer))
        if args.optimizer == 'ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif args.optimizer == 'MOM':
            opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
        else:
            raise Exception('Not supported optimizer: {}'.format(args.optimizer))
if args.network == 'sphere_network':
prelogits = network.infer(images_placeholder)
        else:
            raise Exception('Not supported network: {}'.format(args.network))
if args.loss_type == 'softmax':
cross_entropy_mean = utils.softmax_loss(prelogits,labels_placeholder, len(train_set),args.weight_decay,False)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
#loss = cross_entropy_mean + args.weight_decay*tf.add_n(regularization_losses)
loss = cross_entropy_mean + args.weight_decay*tf.add_n(regularization_losses)
#loss = cross_entropy_mean
else:
raise Exception('Not supported loss type: {}'.format(args.loss_type))
#loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
losses = {}
losses['total_loss'] = loss
losses['softmax_loss'] = cross_entropy_mean
debug_info = {}
debug_info ['prelogits'] = prelogits
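        # Compute gradients for all trainable variables and apply them; global_step is incremented once per step.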
grads = opt.compute_gradients(loss,tf.trainable_variables())
train_op = opt.apply_gradients(grads,global_step=global_step)
#save_vars = [var for var in tf.global_variables() if 'Adagrad' not in var.name and 'global_step' not in var.name]
save_vars = tf.global_variables()
#saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
saver = tf.train.Saver(save_vars, max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,allow_soft_placement=True))
# Initialize variables
sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder:True})
sess.run(tf.local_variables_initializer(), feed_dict={phase_train_placeholder:True})
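        # Request full runtime traces so every training step can be profiled with the TF timeline.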
run_metadata = tf.RunMetadata()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
with sess.as_default():
#pdb.set_trace()
if args.pretrained_model:
print('Restoring pretrained model: %s' % args.pretrained_model)
saver.restore(sess, os.path.expanduser(args.pretrained_model))
# Training and validation loop
epoch = 0
while epoch < args.max_nrof_epochs:
step = sess.run(global_step, feed_dict=None)
epoch = step // args.epoch_size
# Train for one epoch
train(args, sess, epoch, images_placeholder, labels_placeholder, data_reader,run_metadata,run_options,
learning_rate_placeholder, global_step,
losses, train_op, args.learning_rate_schedule_file)
# Save variables and the metagraph if it doesn't exist already
model_dir = args.models_base_dir
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % 'softmax')
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
# Evaluate on LFW
return model_dir
def train(args, sess, epoch, images_placeholder, labels_placeholder, data_reader,run_metadata, run_options,
learning_rate_placeholder, global_step,
loss, train_op, learning_rate_schedule_file):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = utils.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
while batch_number < args.epoch_size:
start_time = time.time()
#print('Running forward pass on sampled images: ', end='')
start_time = time.time()
images, labels = data_reader.next_batch(args.image_height,args.image_width)
print("Load {} images cost time: {}".format(images.shape[0],time.time()-start_time))
#if epoch < 10:
# continue
#if batch_number > 49:
# pdb.set_trace()
# print(labels)
#print(images.shape,labels)
feed_dict = {learning_rate_placeholder: lr ,images_placeholder:images, labels_placeholder:labels}
#feed_dict = {learning_rate_placeholder: lr}
start_time = time.time()
#total_err, softmax_err, _, step = sess.run([loss['total_loss'], loss['softmax_loss'], train_op, global_step ], feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
_ = sess.run(train_op, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
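        # Export this step's trace in Chrome trace format (viewable via chrome://tracing).
        # NOTE: the 'json_placeholder' directory must already exist; open() will not create it.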
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('json_placeholder/tl-{}.json'.format(batch_number),'w') as wd:
wd.write(ctf)
duration = time.time() - start_time
#print('Epoch: [%d][%d/%d]\tTime %.3f\tTotal Loss %2.3f\tSoftmax Loss %2.3f, lr %2.5f' %
# (epoch, batch_number+1, args.epoch_size, duration, total_err, softmax_err, lr))
batch_number += 1
return batch_number
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
def get_learning_rate_from_file(filename, epoch):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split('#', 1)[0]
if line:
par = line.strip().split(':')
e = int(par[0])
lr = float(par[1])
if e <= epoch:
learning_rate = lr
                else:
                    return learning_rate
    return learning_rate
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='logs/facenet_ms_mp')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='models/facenet_ms_mp')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=.9)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--loss_type', type=str,
help='Which type of loss to use.',default='softmax')
parser.add_argument('--network', type=str,
help='Which network is used to extract features.',default='resnet50')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches. Multiple directories are separated with colon.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=100)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--image_src_size', type=int,
help='Src Image size (height, width) in pixels.', default=256)
parser.add_argument('--image_height', type=int,
help='Image size (height, width) in pixels.', default=112)
parser.add_argument('--image_width', type=int,
help='Image size (height, width) in pixels.', default=96)
parser.add_argument('--people_per_batch', type=int,
help='Number of people per batch.', default=30)
parser.add_argument('--num_gpus', type=int,
help='Number of gpus.', default=4)
parser.add_argument('--images_per_person', type=int,
help='Number of images per person.', default=5)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=600)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=256)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM','SGD'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to -1.', default='data/learning_rate_schedule.txt')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
index.js
//weather search city
let apiKeyWeather = "bd7e1a6abf699f2eca2f3fae90b453ff";
let apiCallWeather = "https://api.openweathermap.org/data/2.5/weather?";
let lastSearchedCityWeather = {};
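// Cache of the most recent current-weather API response; reused when toggling units and when fetching the forecast.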
//Format the date into text
function fDate(currentDate) {
let fDays = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
];
let fDay = fDays[currentDate.getDay()];
let fMonths = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
let fMonth = fMonths[currentDate.getMonth()];
return (
fDay + ", " +
fMonth +
" " +
currentDate.getDate() +
" " +
currentDate.getFullYear()
);
}
function setDateTime(){
//Get the current date & time
let now = new Date();
//get and set the time using "theTime" id
let time = document.querySelector("#theTime");
let mins = now.getMinutes();
//Add a Zero to keep mins double digits
if (mins < 10) {
mins = "0" + mins;
}
let theCurrentTime = `${now.getHours()}:${mins}`;
time.innerHTML = theCurrentTime;
//get and set the date using "todaysDate" id
let dateToday = fDate(now);
let today = document.querySelector("#todaysDate");
today.innerHTML = dateToday.toUpperCase();
}
setDateTime(); //Set the current date time for theTime & todaysDate elements
//update the current city name to match what was searched/submitted
function searchYourCity(event) {
event.preventDefault();
let input = document.querySelector("#citySearch");
let cityName = document.querySelector("#searchedCity");
if(input.value.length > 0)
{
cityName.innerHTML = input.value.toUpperCase();
checkWeatherByCity(input.value,checkUnitsSelected());
}
else
{
alert("Please enter a city");
}
}
function checkUnitsSelected(){
let units = "metric";
let unitC = document.querySelector("#tempC");
//If °C is selected
if (unitC.classList.contains("selectedTempUnit")==true)
{
units = "metric";
}
//If °F is selected
else
{
units = "imperial";
}
return units;
}
//Temperature conversion
function convertFtoC(tempF){
return Math.floor(((tempF -32)*5/9)*100)/100;
}
function convertCtoF(tempC){
return Math.floor(((tempC*9/5) + 32)*100)/100;
}
//Unit conversions
function convertUnits(){
let windSpeed = document.querySelector("#wind_Speed");
let feels_like = document.querySelector("#feels_Like");
let rainAmount = document.querySelector("#rain_Amount");
if (checkUnitsSelected()==="metric")
{
windSpeed.innerHTML = Math.floor(windSpeed.innerHTML.substring(0,windSpeed.innerHTML.length-3)*1.609344*100)/100;
feels_like.innerHTML = convertFtoC(feels_like.innerHTML.substring(0,feels_like.innerHTML.length-2));
rainAmount.innerHTML = Math.floor(rainAmount.innerHTML.substring(0,rainAmount.innerHTML.length-1)*25.4*100)/100;
}
//else °F is selected
else
{
windSpeed.innerHTML = Math.floor(windSpeed.innerHTML.substring(0,windSpeed.innerHTML.length-3)*0.621371*100)/100;
feels_like.innerHTML = convertCtoF(feels_like.innerHTML.substring(0,feels_like.innerHTML.length-2));
rainAmount.innerHTML = Math.floor(rainAmount.innerHTML.substring(0,rainAmount.innerHTML.length-2)/25.4*100)/100;
}
addUnits();
}
//Update temperature display
function displayTemp(currentTemp){
let current_temp = document.querySelector("#currentCityTemp");
//If °C is selected
if (checkUnitsSelected()==="metric")
{
current_temp.innerHTML = currentTemp+"°C";
}
//else °F is selected
else
{
current_temp.innerHTML = currentTemp+"°F";
}
}
function changeUnit(){
let unitC = document.querySelector("#tempC");
let unitF = document.querySelector("#tempF");
let temp = document.querySelector("#currentCityTemp");
//If °F is selected
if (unitC.classList.contains("selectedTempUnit")==false)
{
unitF.classList.remove("selectedTempUnit");
unitC.classList.add("selectedTempUnit");
temp.innerHTML = convertFtoC(temp.innerHTML.substring(0,temp.innerHTML.length-2))+"°C";
convertUnits();
}
//else °C is selected
else
{
unitC.classList.remove("selectedTempUnit");
unitF.classList.add("selectedTempUnit");
temp.innerHTML = convertCtoF(temp.innerHTML.substring(0,temp.innerHTML.length-2))+"°F";
convertUnits();
}
getForecast();
}
//Capture user temperature unit selection
let tempC = document.querySelector("#tempC");
tempC.addEventListener("click", changeUnit);
let tempF = document.querySelector("#tempF");
tempF.addEventListener("click", changeUnit);
//create URL
function checkWeatherByCity(city,units){
//api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
let weatherCheckUrl = apiCallWeather +"q="+ city + "&appid=" + apiKeyWeather + "&units=" + units;
//Get weather information from URL and then display weather
axios.get(weatherCheckUrl).then(displayWeather);
}
//Display Temperature of URL
function displayWeather(response) {
lastSearchedCityWeather = response;
displayTemp(lastSearchedCityWeather.data.main.temp);
// Update city name
let cityName = document.querySelector("#searchedCity");
cityName.innerHTML = lastSearchedCityWeather.data.name.toUpperCase();
// Update wind speed
document.querySelector("#wind_Speed").innerHTML = lastSearchedCityWeather.data.wind.speed;
// Update Rain
if (lastSearchedCityWeather.data.rain === undefined) {
document.querySelector("#rain_Amount").innerHTML = 0;
} else {
document.querySelector("#rain_Amount").innerHTML =
lastSearchedCityWeather.data.rain["1h"];
}
// Update weather description
document.querySelector("#weather_Description").innerHTML = lastSearchedCityWeather.data.weather[0].description.toUpperCase();
// Update weather icon
let currentWeatherIcon = document.querySelector("#today_Icon");
currentWeatherIcon.setAttribute("src",`https://openweathermap.org/img/wn/${lastSearchedCityWeather.data.weather[0].icon}@2x.png`);
currentWeatherIcon.setAttribute("alt",lastSearchedCityWeather.data.weather[0].description);
// Update feels like
document.querySelector("#feels_Like").innerHTML =lastSearchedCityWeather.data.main.feels_like;
// Update humidity
document.querySelector("#humidity").innerHTML =lastSearchedCityWeather.data.main.humidity + "%";
addUnits();
//Forecast
getForecast();
}
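// Fetch the daily forecast for the cached city's coordinates via the One Call endpoint.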
function getForecast() {
let coordinates = lastSearchedCityWeather.data.coord;
let apiUrl = `https://api.openweathermap.org/data/2.5/onecall?lat=${coordinates.lat}&lon=${coordinates.lon}&appid=${apiKeyWeather}&units=${checkUnitsSelected()}`;
axios.get(apiUrl).then(displayForecast);
}
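// Convert a Unix timestamp (seconds) to a short weekday name.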
function formatDay(timestamp) {
let date = new Date(timestamp * 1000);
let day = date.getDay();
let days = ["Sun", "Mon", "Tues", "Wed", "Thurs", "Fri", "Sat"];
return days [day];
}
function displayForecast(response) {
let forecast = response.data.daily;
let forecastElement = document.querySelector("#forecast");
let forecastHTML = `<div class="row">`;
forecast.forEach(function(forecastDay, index) {
if (index <6) {
forecastHTML =
forecastHTML +
`
<div class="col-2">
<div class="weather-forecast-dayName">${formatDay(forecastDay.dt)}</div>
<img
src="http://openweathermap.org/img/wn/${forecastDay.weather[0].icon}@2x.png"
alt=""
width="75"
class="forecastIcon"
/>
<div class="weather-forecast-tempertures">
<span class="weather-forecast-temperature-max"> ${Math.round(forecastDay.temp.max)}°
</span>
<span class="weather-forecast-temperature-min"> ${Math.round(forecastDay.temp.min)}°
</span>
</div>
</div>
`;
}
})
forecastHTML = forecastHTML + `</div>`;
forecastElement.innerHTML = `<div class="forecast-title">
<h5>In the next few days, you can expect...</h5>
</div>`
+ forecastHTML;
}
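// Append unit suffixes (°C/kph/mm or °F/mph/inches) that match the selected temperature scale.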
function addUnits(){
if (checkUnitsSelected()==="metric")
{
document.querySelector("#feels_Like").innerHTML = document.querySelector("#feels_Like").innerHTML+"°C";
document.querySelector("#wind_Speed").innerHTML = document.querySelector("#wind_Speed").innerHTML + "kph";
document.querySelector("#rain_Amount").innerHTML = document.querySelector("#rain_Amount").innerHTML + "mm";
}
//If °F is selected
else
{
document.querySelector("#feels_Like").innerHTML = document.querySelector("#feels_Like").innerHTML+"°F";
document.querySelector("#wind_Speed").innerHTML = document.querySelector("#wind_Speed").innerHTML + "mph";
document.querySelector("#rain_Amount").innerHTML = document.querySelector("#rain_Amount").innerHTML + '"';
}
}
//geo location
function showPosition(position) {
let lat = position.coords.latitude;
let lon = position.coords.longitude;
//Get weather information from URL and then display temp
checkWeatherByLocation(lat,lon,checkUnitsSelected());
}
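// Build the current-weather request from geographic coordinates and fetch it.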
function checkWeatherByLocation(lat,lon,units){
let currentLat = `lat=${lat}`;
let currentLon = `lon=${lon}`;
//api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={API key}
let weatherCheckUrl = apiCallWeather + currentLat + "&" + currentLon + "&appid=" + apiKeyWeather + "&units=" + units;
//Get weather information from URL and then display weather
axios.get(weatherCheckUrl).then(displayWeather);
}
//Get the position
function findLocation(){
navigator.geolocation.getCurrentPosition(showPosition);
}
let searchCity = document.querySelector("#searchButton");
searchCity.addEventListener("click", searchYourCity);
let searchLocation = document.querySelector("#locationButton");
searchLocation.addEventListener("click", findLocation);
let form = document.querySelector("#search-form");
form.addEventListener("submit", searchYourCity);
//Default city set to Perth, AU with metric units
checkWeatherByCity("Perth","metric");
index.js | //weather search city
let apiKeyWeather = "bd7e1a6abf699f2eca2f3fae90b453ff";
let apiCallWeather = "https://api.openweathermap.org/data/2.5/weather?";
let lastSearchedCityWeather = {};
//Format the date into text
function fDate(currentDate) {
let fDays = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
];
let fDay = fDays[currentDate.getDay()];
let fMonths = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
let fMonth = fMonths[currentDate.getMonth()];
return (
fDay + ", " +
fMonth +
" " +
currentDate.getDate() +
" " +
currentDate.getFullYear()
);
}
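// Worked example (hypothetical date, for illustration only):
// fDate(new Date(2021, 0, 4)) returns "Monday, January 4 2021", i.e.
// weekday name, month name, day of month and year, with no comma before the year.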
function setDateTime(){
//Get the current date & time
let now = new Date();
//get and set the time using "theTime" id
let time = document.querySelector("#theTime");
let mins = now.getMinutes();
//Add a Zero to keep mins double digits
if (mins < 10) {
mins = "0" + mins;
}
let theCurrentTime = `${now.getHours()}:${mins}`;
time.innerHTML = theCurrentTime;
//get and set the date using "todaysDate" id
let dateToday = fDate(now);
let today = document.querySelector("#todaysDate");
today.innerHTML = dateToday.toUpperCase();
}
setDateTime(); //Set the current date time for theTime & todaysDate elements
//update the current city name to match what was searched/submitted
function searchYourCity(event) {
event.preventDefault();
let input = document.querySelector("#citySearch");
let cityName = document.querySelector("#searchedCity");
if(input.value.length > 0)
{
cityName.innerHTML = input.value.toUpperCase();
checkWeatherByCity(input.value,checkUnitsSelected());
}
else
{
alert("Please enter a city");
}
}
function checkUnitsSelected(){
let units = "metric";
let unitC = document.querySelector("#tempC");
//If °C is selected
if (unitC.classList.contains("selectedTempUnit")==true)
{ | //If °F is selected
else
{
units = "imperial";
}
return units;
}
//Temperature conversion
function convertFtoC(tempF){
return Math.floor(((tempF -32)*5/9)*100)/100;
}
function convertCtoF(tempC){
return Math.floor(((tempC*9/5) + 32)*100)/100;
}
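// Worked example (hypothetical values): convertCtoF(20) returns 68 and
// convertFtoC(68) returns 20. Both truncate to two decimals via
// Math.floor(x * 100) / 100, so convertFtoC(70) returns 21.11 rather than 21.111...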
//Unit conversions
function convertUnits(){
let windSpeed = document.querySelector("#wind_Speed");
let feels_like = document.querySelector("#feels_Like");
let rainAmount = document.querySelector("#rain_Amount");
if (checkUnitsSelected()==="metric")
{
windSpeed.innerHTML = Math.floor(windSpeed.innerHTML.substring(0,windSpeed.innerHTML.length-3)*1.609344*100)/100;
feels_like.innerHTML = convertFtoC(feels_like.innerHTML.substring(0,feels_like.innerHTML.length-2));
rainAmount.innerHTML = Math.floor(rainAmount.innerHTML.substring(0,rainAmount.innerHTML.length-1)*25.4*100)/100;
}
//else °F is selected
else
{
windSpeed.innerHTML = Math.floor(windSpeed.innerHTML.substring(0,windSpeed.innerHTML.length-3)*0.621371*100)/100;
feels_like.innerHTML = convertCtoF(feels_like.innerHTML.substring(0,feels_like.innerHTML.length-2));
rainAmount.innerHTML = Math.floor(rainAmount.innerHTML.substring(0,rainAmount.innerHTML.length-2)/25.4*100)/100;
}
addUnits();
}
//Update temperature display
function displayTemp(currentTemp){
let current_temp = document.querySelector("#currentCityTemp");
//If °C is selected
if (checkUnitsSelected()==="metric")
{
current_temp.innerHTML = currentTemp+"°C";
}
//else °F is selected
else
{
current_temp.innerHTML = currentTemp+"°F";
}
}
function changeUnit(){
let unitC = document.querySelector("#tempC");
let unitF = document.querySelector("#tempF");
let temp = document.querySelector("#currentCityTemp");
//If °F is selected
if (unitC.classList.contains("selectedTempUnit")==false)
{
unitF.classList.remove("selectedTempUnit");
unitC.classList.add("selectedTempUnit");
temp.innerHTML = convertFtoC(temp.innerHTML.substring(0,temp.innerHTML.length-2))+"°C";
convertUnits();
}
//else °C is selected
else
{
unitC.classList.remove("selectedTempUnit");
unitF.classList.add("selectedTempUnit");
temp.innerHTML = convertCtoF(temp.innerHTML.substring(0,temp.innerHTML.length-2))+"°F";
convertUnits();
}
getForecast();
}
//Capture user temperature unit selection
let tempC = document.querySelector("#tempC");
tempC.addEventListener("click", changeUnit);
let tempF = document.querySelector("#tempF");
tempF.addEventListener("click", changeUnit);
//create URL
function checkWeatherByCity(city,units){
//api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
let weatherCheckUrl = apiCallWeather +"q="+ city + "&appid=" + apiKeyWeather + "&units=" + units;
//Get weather information from URL and then display weather
axios.get(weatherCheckUrl).then(displayWeather);
}
//Display Temperature of URL
function displayWeather(response) {
lastSearchedCityWeather = response;
displayTemp(lastSearchedCityWeather.data.main.temp);
// Update city name
let cityName = document.querySelector("#searchedCity");
cityName.innerHTML = lastSearchedCityWeather.data.name.toUpperCase();
// Update wind speed
document.querySelector("#wind_Speed").innerHTML = lastSearchedCityWeather.data.wind.speed;
// Update Rain
if (lastSearchedCityWeather.data.rain === undefined) {
document.querySelector("#rain_Amount").innerHTML = 0;
} else {
document.querySelector("#rain_Amount").innerHTML =
lastSearchedCityWeather.data.rain["1h"];
}
// Update weather description
document.querySelector("#weather_Description").innerHTML = lastSearchedCityWeather.data.weather[0].description.toUpperCase();
// Update weather icon
let currentWeatherIcon = document.querySelector("#today_Icon");
currentWeatherIcon.setAttribute("src",`https://openweathermap.org/img/wn/${lastSearchedCityWeather.data.weather[0].icon}@2x.png`);
currentWeatherIcon.setAttribute("alt",lastSearchedCityWeather.data.weather[0].description);
// Update feels like
document.querySelector("#feels_Like").innerHTML =lastSearchedCityWeather.data.main.feels_like;
// Update humidity
document.querySelector("#humidity").innerHTML =lastSearchedCityWeather.data.main.humidity + "%";
addUnits();
//Forecast
getForecast();
}
function getForecast() {
let coordinates = lastSearchedCityWeather.data.coord;
let apiUrl = `https://api.openweathermap.org/data/2.5/onecall?lat=${coordinates.lat}&lon=${coordinates.lon}&appid=${apiKeyWeather}&units=${checkUnitsSelected()}`;
axios.get(apiUrl).then(displayForecast);
}
function formatDay(timestamp) {
let date = new Date(timestamp * 1000);
let day = date.getDay();
let days = ["Mon", "Tues", "Wed", "Thurs", "Fri", "Sat", "Sun"];
return days [day];
}
function displayForecast(response) {
let forecast = response.data.daily;
let forecastElement = document.querySelector("#forecast");
let forecastHTML = `<div class="row">`;
forecast.forEach(function(forecastDay, index) {
if (index <6) {
forecastHTML =
forecastHTML +
`
<div class="col-2">
<div class="weather-forecast-dayName">${formatDay(forecastDay.dt)}</div>
<img
src="http://openweathermap.org/img/wn/${forecastDay.weather[0].icon}@2x.png"
alt=""
width="75"
class="forecastIcon"
/>
<div class="weather-forecast-tempertures">
<span class="weather-forecast-temperature-max"> ${Math.round(forecastDay.temp.max)}°
</span>
<span class="weather-forecast-temperature-min"> ${Math.round(forecastDay.temp.min)}°
</span>
</div>
</div>
`;
}
})
forecastHTML = forecastHTML + `</div>`;
forecastElement.innerHTML = `<div class="forecast-title">
<h5>In the next few days, you can expect...</h5>
</div>`
+ forecastHTML;
}
function addUnits(){
if (checkUnitsSelected()==="metric")
{
document.querySelector("#feels_Like").innerHTML = document.querySelector("#feels_Like").innerHTML+"°C";
document.querySelector("#wind_Speed").innerHTML = document.querySelector("#wind_Speed").innerHTML + "kph";
document.querySelector("#rain_Amount").innerHTML = document.querySelector("#rain_Amount").innerHTML + "mm";
}
//If °F is selected
else
{
document.querySelector("#feels_Like").innerHTML = document.querySelector("#feels_Like").innerHTML+"°F";
document.querySelector("#wind_Speed").innerHTML = document.querySelector("#wind_Speed").innerHTML + "mph";
document.querySelector("#rain_Amount").innerHTML = document.querySelector("#rain_Amount").innerHTML + '"';
}
}
//geo location
function showPosition(position) {
let lat = position.coords.latitude;
let lon = position.coords.longitude;
//Get weather information from URL and then display temp
checkWeatherByLocation(lat,lon,checkUnitsSelected());
}
function checkWeatherByLocation(lat,lon,units){
let currentLat = `lat=${lat}`;
let currentLon = `lon=${lon}`;
//api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={API key}
let weatherCheckUrl = apiCallWeather + currentLat + "&" + currentLon + "&appid=" + apiKeyWeather + "&units=" + units;
//Get weather information from URL and then display weather
axios.get(weatherCheckUrl).then(displayWeather);
}
//Get the position
function findLocation(){
navigator.geolocation.getCurrentPosition(showPosition);
}
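// Note: getCurrentPosition prompts the user for permission and, in current
// browsers, only works in a secure context (HTTPS or localhost). No error
// callback is passed here, so if permission is denied showPosition is simply
// never called.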
let searchCity = document.querySelector("#searchButton");
searchCity.addEventListener("click", searchYourCity);
let searchLocation = document.querySelector("#locationButton");
searchLocation.addEventListener("click", findLocation);
let form = document.querySelector("#search-form");
form.addEventListener("submit", searchYourCity);
//Default city set to Perth, AU with metric units
checkWeatherByCity("Perth","metric");
|
units = "metric";
}
| conditional_block |
index.js | //weather search city
let apiKeyWeather = "bd7e1a6abf699f2eca2f3fae90b453ff";
let apiCallWeather = "https://api.openweathermap.org/data/2.5/weather?";
let lastSearchedCityWeather = {};
//Format the date into text
function fDate(currentDate) {
let fDays = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
];
let fDay = fDays[currentDate.getDay()];
let fMonths = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
];
let fMonth = fMonths[currentDate.getMonth()];
return (
fDay + ", " +
fMonth +
" " +
currentDate.getDate() +
" " +
currentDate.getFullYear()
);
}
function setDateTime(){
//Get the current date & time
let now = new Date();
//get and set the time using "theTime" id
let time = document.querySelector("#theTime");
let mins = now.getMinutes();
//Add a Zero to keep mins double digits
if (mins < 10) {
mins = "0" + mins;
}
let theCurrentTime = `${now.getHours()}:${mins}`;
time.innerHTML = theCurrentTime;
//get and set the date using "todaysDate" id
let dateToday = fDate(now);
let today = document.querySelector("#todaysDate");
today.innerHTML = dateToday.toUpperCase();
}
setDateTime(); //Set the current date time for theTime & todaysDate elements
//update the current city name to match what was searched/submitted
function searchYourCity(event) {
event.preventDefault();
let input = document.querySelector("#citySearch");
let cityName = document.querySelector("#searchedCity");
if(input.value.length > 0)
{
cityName.innerHTML = input.value.toUpperCase();
checkWeatherByCity(input.value,checkUnitsSelected());
}
else
{
alert("Please enter a city");
}
}
function checkUnitsSelected(){
let units = "metric";
let unitC = document.querySelector("#tempC");
//If °C is selected
if (unitC.classList.contains("selectedTempUnit")==true)
{
units = "metric";
}
//If °F is selected
else
{
units = "imperial"; | return units;
}
//Temperature conversion
function convertFtoC(tempF){
return Math.floor(((tempF -32)*5/9)*100)/100;
}
function convertCtoF(tempC){
return Math.floor(((tempC*9/5) + 32)*100)/100;
}
//Unit conversions
function convertUnits(){
let windSpeed = document.querySelector("#wind_Speed");
let feels_like = document.querySelector("#feels_Like");
let rainAmount = document.querySelector("#rain_Amount");
if (checkUnitsSelected()==="metric")
{
windSpeed.innerHTML = Math.floor(windSpeed.innerHTML.substring(0,windSpeed.innerHTML.length-3)*1.609344*100)/100;
feels_like.innerHTML = convertFtoC(feels_like.innerHTML.substring(0,feels_like.innerHTML.length-2));
rainAmount.innerHTML = Math.floor(rainAmount.innerHTML.substring(0,rainAmount.innerHTML.length-1)*25.4*100)/100;
}
//else °F is selected
else
{
windSpeed.innerHTML = Math.floor(windSpeed.innerHTML.substring(0,windSpeed.innerHTML.length-3)*0.621371*100)/100;
feels_like.innerHTML = convertCtoF(feels_like.innerHTML.substring(0,feels_like.innerHTML.length-2));
rainAmount.innerHTML = Math.floor(rainAmount.innerHTML.substring(0,rainAmount.innerHTML.length-2)/25.4*100)/100;
}
addUnits();
}
//Update temperature display
function displayTemp(currentTemp){
let current_temp = document.querySelector("#currentCityTemp");
//If °C is selected
if (checkUnitsSelected()==="metric")
{
current_temp.innerHTML = currentTemp+"°C";
}
//else °F is selected
else
{
current_temp.innerHTML = currentTemp+"°F";
}
}
function changeUnit(){
let unitC = document.querySelector("#tempC");
let unitF = document.querySelector("#tempF");
let temp = document.querySelector("#currentCityTemp");
//If °F is selected
if (unitC.classList.contains("selectedTempUnit")==false)
{
unitF.classList.remove("selectedTempUnit");
unitC.classList.add("selectedTempUnit");
temp.innerHTML = convertFtoC(temp.innerHTML.substring(0,temp.innerHTML.length-2))+"°C";
convertUnits();
}
//else °C is selected
else
{
unitC.classList.remove("selectedTempUnit");
unitF.classList.add("selectedTempUnit");
temp.innerHTML = convertCtoF(temp.innerHTML.substring(0,temp.innerHTML.length-2))+"°F";
convertUnits();
}
getForecast();
}
//Capture user temperature unit selection
let tempC = document.querySelector("#tempC");
tempC.addEventListener("click", changeUnit);
let tempF = document.querySelector("#tempF");
tempF.addEventListener("click", changeUnit);
//create URL
function checkWeatherByCity(city,units){
//api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
let weatherCheckUrl = apiCallWeather +"q="+ city + "&appid=" + apiKeyWeather + "&units=" + units;
//Get weather information from URL and then display weather
axios.get(weatherCheckUrl).then(displayWeather);
}
//Display Temperature of URL
function displayWeather(response) {
lastSearchedCityWeather = response;
displayTemp(lastSearchedCityWeather.data.main.temp);
// Update city name
let cityName = document.querySelector("#searchedCity");
cityName.innerHTML = lastSearchedCityWeather.data.name.toUpperCase();
// Update wind speed
document.querySelector("#wind_Speed").innerHTML = lastSearchedCityWeather.data.wind.speed;
// Update Rain
if (lastSearchedCityWeather.data.rain === undefined) {
document.querySelector("#rain_Amount").innerHTML = 0;
} else {
document.querySelector("#rain_Amount").innerHTML =
lastSearchedCityWeather.data.rain["1h"];
}
// Update weather description
document.querySelector("#weather_Description").innerHTML = lastSearchedCityWeather.data.weather[0].description.toUpperCase();
// Update weather icon
let currentWeatherIcon = document.querySelector("#today_Icon");
currentWeatherIcon.setAttribute("src",`https://openweathermap.org/img/wn/${lastSearchedCityWeather.data.weather[0].icon}@2x.png`);
currentWeatherIcon.setAttribute("alt",lastSearchedCityWeather.data.weather[0].description);
// Update feels like
document.querySelector("#feels_Like").innerHTML =lastSearchedCityWeather.data.main.feels_like;
// Update humidity
document.querySelector("#humidity").innerHTML =lastSearchedCityWeather.data.main.humidity + "%";
addUnits();
//Forecast
getForecast();
}
function getForecast() {
let coordinates = lastSearchedCityWeather.data.coord;
let apiUrl = `https://api.openweathermap.org/data/2.5/onecall?lat=${coordinates.lat}&lon=${coordinates.lon}&appid=${apiKeyWeather}&units=${checkUnitsSelected()}`;
axios.get(apiUrl).then(displayForecast);
}
function formatDay(timestamp) {
let date = new Date(timestamp * 1000);
let day = date.getDay();
let days = ["Mon", "Tues", "Wed", "Thurs", "Fri", "Sat", "Sun"];
return days [day];
}
function displayForecast(response) {
let forecast = response.data.daily;
let forecastElement = document.querySelector("#forecast");
let forecastHTML = `<div class="row">`;
forecast.forEach(function(forecastDay, index) {
if (index <6) {
forecastHTML =
forecastHTML +
`
<div class="col-2">
<div class="weather-forecast-dayName">${formatDay(forecastDay.dt)}</div>
<img
src="http://openweathermap.org/img/wn/${forecastDay.weather[0].icon}@2x.png"
alt=""
width="75"
class="forecastIcon"
/>
<div class="weather-forecast-tempertures">
<span class="weather-forecast-temperature-max"> ${Math.round(forecastDay.temp.max)}°
</span>
<span class="weather-forecast-temperature-min"> ${Math.round(forecastDay.temp.min)}°
</span>
</div>
</div>
`;
}
})
forecastHTML = forecastHTML + `</div>`;
forecastElement.innerHTML = `<div class="forecast-title">
<h5>In the next few days, you can expect...</h5>
</div>`
+ forecastHTML;
}
function addUnits(){
if (checkUnitsSelected()==="metric")
{
document.querySelector("#feels_Like").innerHTML = document.querySelector("#feels_Like").innerHTML+"°C";
document.querySelector("#wind_Speed").innerHTML = document.querySelector("#wind_Speed").innerHTML + "kph";
document.querySelector("#rain_Amount").innerHTML = document.querySelector("#rain_Amount").innerHTML + "mm";
}
//If °F is selected
else
{
document.querySelector("#feels_Like").innerHTML = document.querySelector("#feels_Like").innerHTML+"°F";
document.querySelector("#wind_Speed").innerHTML = document.querySelector("#wind_Speed").innerHTML + "mph";
document.querySelector("#rain_Amount").innerHTML = document.querySelector("#rain_Amount").innerHTML + '"';
}
}
//geo location
function showPosition(position) {
let lat = position.coords.latitude;
let lon = position.coords.longitude;
//Get weather information from URL and then display temp
checkWeatherByLocation(lat,lon,checkUnitsSelected());
}
function checkWeatherByLocation(lat,lon,units){
let currentLat = `lat=${lat}`;
let currentLon = `lon=${lon}`;
//api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={API key}
let weatherCheckUrl = apiCallWeather + currentLat + "&" + currentLon + "&appid=" + apiKeyWeather + "&units=" + units;
//Get weather information from URL and then display weather
axios.get(weatherCheckUrl).then(displayWeather);
}
//Get the position
function findLocation(){
navigator.geolocation.getCurrentPosition(showPosition);
}
let searchCity = document.querySelector("#searchButton");
searchCity.addEventListener("click", searchYourCity);
let searchLocation = document.querySelector("#locationButton");
searchLocation.addEventListener("click", findLocation);
let form = document.querySelector("#search-form");
form.addEventListener("submit", searchYourCity);
//Default city set to Perth, AU with metric units
checkWeatherByCity("Perth","metric"); | } | random_line_split |
web.go | package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/smtp"
"os"
"reflect"
"regexp"
"strconv"
"sync"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
)
type Mail struct {
Subject string `json:"subject"`
FullName string `json:"fullname"`
Email string `json:"email"`
Business string `json:"business"`
Body string `json:"body"`
AdditionalDetails string `json:"details"`
}
type Config struct {
Smtp *Smtp
Cwd string
Hostname string
Port int
}
type Smtp struct {
Email string
To string
Password string
Hostname string
Port int
}
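// A matching /config/app.json could look like the following sketch; every
// value below is a placeholder, not a real credential, and encoding/json
// matches the field names case-insensitively:
//
//	{
//	  "Hostname": "localhost",
//	  "Port": 8080,
//	  "Smtp": {
//	    "Email": "sender@example.com",
//	    "To": "recipient@example.com",
//	    "Password": "app-password",
//	    "Hostname": "smtp.example.com",
//	    "Port": 587
//	  }
//	}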
type FieldLength struct {
Min int
Max int
}
type FieldRegex struct {
Pattern string
Error string
}
type Field struct {
Name string
DisplayName string
Value string
Length *FieldLength
Regex *FieldRegex
}
const (
// Regex patterns for the inputted fields
VALID_NAME string = "^\\p{L}{2,}(?:\\x20\\p{L}{2,}){1,5}$"
VALID_SENTENCE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}]*$"
VALID_MESSAGE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}\\s]+$"
// This email regex has to get better, missing _ and a proper . check before the @
VALID_EMAIL string = "^[a-z\\x2E\\x5F]+\\x2B?[a-z]*[^\\x2B]\\x40(?:[a-z]+[a-z\\x2D\\x2E]?)+[^\\x2D]\\x2E[a-z]{2,5}$"
// Validation errors
EMAIL_ERROR string = "%s must be a valid email address"
NAME_ERROR string = "%s has to have at least one middle or last name"
SENTENCE_ERROR string = "%s must only contain language letters and ascii symbols"
LENGTH_ERROR string = "%s length mustn't be shorter than %d characters or longer than %d characters"
// Validations were successful
SUCCESS_MSG string = "Alright! The email has been sent."
// Amount of bytes to read from the body of a request
PAYLOAD_MAX_SIZE int64 = 8192
)
var (
config *Config
homepage *[]byte
cwd *string
mailmu sync.Mutex
)
// Concatenate a given hostname and port
func GetAddress(h *string, p *int) string {
return *h + ":" + strconv.FormatUint(uint64(*p), 10)
}
// Append an error message to slice if value fails the checks
func validateField(f *Field, hash map[string]string) {
length := 0
if f.Value != "" {
length = len(f.Value)
}
switch {
case length < f.Length.Min || length > f.Length.Max:
hash[f.Name] = fmt.Sprintf(LENGTH_ERROR, f.DisplayName, f.Length.Min, f.Length.Max)
case f.Regex != nil:
matches, _ := regexp.MatchString(f.Regex.Pattern, f.Value)
if !matches {
hash[f.Name] = fmt.Sprintf(f.Regex.Error, f.DisplayName)
}
}
}
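// Behaviour sketch (hypothetical input): a "subject" value of "Hi" fails the
// length check (Min is 12) and a LENGTH_ERROR message is stored under
// hash["subject"]; only values that pass the length check are then matched
// against the field's regex, and a mismatch stores the regex's own error text.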
// Encode content to JSON and write it to the response
func JSONResponse(w http.ResponseWriter, content interface{}, statusCode int) {
resp, err := json.Marshal(content)
if err != nil |
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
w.WriteHeader(statusCode)
w.Write(resp)
}
func (c *Config) Populate() {
content, err := ioutil.ReadFile(path("/config/app.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Couldn't read file /config/app.json")
}
if err := json.Unmarshal(content, &c); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to decode config.json")
}
}
func path(p string) string {
return *cwd + p
}
func (m *Mail) Validate() map[string]string {
errors := make(map[string]string)
validateField(&Field{
Name: "subject",
DisplayName: "Subject",
Value: m.Subject,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "fullname",
DisplayName: "Full name",
Value: m.FullName,
Length: &FieldLength{
Min: 5,
Max: 48,
},
Regex: &FieldRegex{
Pattern: VALID_NAME,
Error: NAME_ERROR,
},
}, errors)
validateField(&Field{
Name: "email",
DisplayName: "Email",
Value: m.Email,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_EMAIL,
Error: EMAIL_ERROR,
},
}, errors)
validateField(&Field{
Name: "business",
DisplayName: "Business",
Value: m.Business,
Length: &FieldLength{
Min: 3,
Max: 32,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "body",
DisplayName: "Message",
Value: m.Body,
Length: &FieldLength{
Min: 64,
Max: 4096,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "details",
DisplayName: "Additional details",
Value: m.AdditionalDetails,
Length: &FieldLength{
Min: 4,
Max: 1024,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
return errors
}
func (m *Mail) Send() {
err := smtp.SendMail(
GetAddress(&config.Smtp.Hostname, &config.Smtp.Port),
smtp.PlainAuth("", config.Smtp.Email, config.Smtp.Password, config.Smtp.Hostname),
config.Smtp.Email, []string{config.Smtp.To}, []byte(m.Body))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error(fmt.Sprintf("Failed to send email from <%s> to <%s>", config.Smtp.Email, config.Smtp.To))
// Don't log success below when sending failed
return
}
log.WithFields(log.Fields{
"action": "Send email",
}).Info(fmt.Sprintf("Successfully sent email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) Save() {
mailmu.Lock()
defer mailmu.Unlock()
info, err := os.Stat(path("/storage.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to fetch info of storage.json")
return
}
file, err := os.OpenFile(path("/storage.json"), os.O_RDWR, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to open storage.json")
return
}
// Defer the close only after a successful open
defer file.Close()
mail, _ := json.Marshal(m)
buffer := make([]byte, 2)
var toWrite []byte
if _, err = file.ReadAt(buffer, info.Size()-2); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to read file storage.json")
}
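// storage.json always holds a JSON array: if its last two bytes are "[]" the
// array is empty and this mail becomes the first element; if they are "}]" an
// element already exists, so a comma is prepended. The write below then
// overwrites the old closing bracket one byte before the end of the file.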
switch {
case reflect.DeepEqual(buffer, []byte{'[', ']'}):
toWrite = append(toWrite, mail...)
case reflect.DeepEqual(buffer, []byte{'}', ']'}):
toWrite = append(toWrite, append([]byte{','}, mail...)...)
}
if _, err = file.WriteAt(append(toWrite, ']'), info.Size()-1); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to save email in storage.json")
}
log.WithFields(log.Fields{
"action": "Save email",
}).Info(fmt.Sprintf("Successfully saved email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) SaveAndSend() {
m.Save()
m.Send()
}
func handleWriteEmail(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(io.LimitReader(r.Body, PAYLOAD_MAX_SIZE))
if err != nil {
http.Error(w, "Failed to read body from HTTP request", http.StatusInternalServerError)
return
}
mail := new(Mail)
err = json.Unmarshal(body, &mail)
if err != nil {
http.Error(w, "Failed to parse payload", http.StatusBadRequest)
return
}
validationErrors := mail.Validate()
if len(validationErrors) != 0 {
JSONResponse(w, map[string](map[string]string){"errors": validationErrors}, http.StatusBadRequest)
return
}
go mail.SaveAndSend()
w.WriteHeader(200)
}
func handleHomepage(w http.ResponseWriter, _ *http.Request) {
w.Write(*homepage)
}
func createIfNotExists(p string, content []byte) {
if _, err := os.Stat(p); os.IsNotExist(err) {
err = ioutil.WriteFile(p, content, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to create new file " + p)
}
}
}
func bootstrap() *os.File {
wd, err := os.Getwd()
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to retrieve the current working directory")
}
cwd = &wd
// Create storage for mails as backup and to double check sent mails
createIfNotExists(path("/storage.json"), []byte{'[', ']'})
// Create log files, logging is gud
createIfNotExists(path("/logs/actions.log"), nil)
createIfNotExists(path("/logs/requests.log"), nil)
// Create config file
createIfNotExists(path("/config/app.json"), []byte{'{', '}'})
// Initialize logger and write new entries to /logs/actions.log
logFile, err := os.OpenFile(path("/logs/actions.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/actions.log")
}
log.SetOutput(logFile)
log.SetLevel(log.DebugLevel)
// Allocate and initialize config
config = new(Config)
config.Populate()
return logFile
}
func server() {
cachedHomepage, err := ioutil.ReadFile(path("/ui/views/index.html"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open homepage")
}
homepage = &cachedHomepage
router := mux.NewRouter()
addr := GetAddress(&config.Hostname, &config.Port)
router.HandleFunc("/", handleHomepage).Methods("GET")
router.HandleFunc("/write-email", handleWriteEmail).Methods("POST")
router.PathPrefix("/static").Handler(http.StripPrefix("/static", http.FileServer(http.Dir(path("/build")))))
log.Info("Starting HTTP server on " + addr)
fmt.Println("Starting HTTP server on " + addr)
logFile, err := os.OpenFile(path("/logs/requests.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/requests.log")
}
defer logFile.Close()
logger := handlers.LoggingHandler(logFile, router)
log.Fatal(http.ListenAndServe(addr, logger))
}
func main() {
logger := bootstrap()
defer logger.Close()
server()
}
| {
http.Error(w, "Failed to encode content to JSON", http.StatusInternalServerError)
return
} | conditional_block |
web.go | package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/smtp"
"os"
"reflect"
"regexp"
"strconv"
"sync"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
)
type Mail struct {
Subject string `json:"subject"`
FullName string `json:"fullname"`
Email string `json:"email"`
Business string `json:"business"`
Body string `json:"body"`
AdditionalDetails string `json:"details"`
}
type Config struct {
Smtp *Smtp
Cwd string
Hostname string
Port int
}
type Smtp struct {
Email string
To string
Password string
Hostname string
Port int
}
type FieldLength struct {
Min int
Max int
}
type FieldRegex struct {
Pattern string
Error string
}
type Field struct {
Name string
DisplayName string
Value string
Length *FieldLength
Regex *FieldRegex
}
const (
// Regex patterns for the inputted fields
VALID_NAME string = "^\\p{L}{2,}(?:\\x20\\p{L}{2,}){1,5}$"
VALID_SENTENCE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}]*$"
VALID_MESSAGE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}\\s]+$"
// This email regex has to get better, missing _ and a proper . check before the @
VALID_EMAIL string = "^[a-z\\x2E\\x5F]+\\x2B?[a-z]*[^\\x2B]\\x40(?:[a-z]+[a-z\\x2D\\x2E]?)+[^\\x2D]\\x2E[a-z]{2,5}$"
// Validation errors
EMAIL_ERROR string = "%s must be a valid email address"
NAME_ERROR string = "%s has to have at least one middle or last name"
SENTENCE_ERROR string = "%s must only contain language letters and ascii symbols"
LENGTH_ERROR string = "%s length mustn't be shorter than %d characters or longer than %d characters"
// Validations were successful
SUCCESS_MSG string = "Alright! The email has been sent."
// Amount of bytes to read from the body of a request
PAYLOAD_MAX_SIZE int64 = 8192
)
var (
config *Config
homepage *[]byte
cwd *string
mailmu sync.Mutex
)
// Concatenate a given hostname and port
func GetAddress(h *string, p *int) string {
return *h + ":" + strconv.FormatUint(uint64(*p), 10)
} |
// Append an error message to slice if value fails the checks
func validateField(f *Field, hash map[string]string) {
length := 0
if f.Value != "" {
length = len(f.Value)
}
switch {
case length < f.Length.Min || length > f.Length.Max:
hash[f.Name] = fmt.Sprintf(LENGTH_ERROR, f.DisplayName, f.Length.Min, f.Length.Max)
case f.Regex != nil:
matches, _ := regexp.MatchString(f.Regex.Pattern, f.Value)
if !matches {
hash[f.Name] = fmt.Sprintf(f.Regex.Error, f.DisplayName)
}
}
}
// Encode content to JSON and write it to the response
func JSONResponse(w http.ResponseWriter, content interface{}, statusCode int) {
resp, err := json.Marshal(content)
if err != nil {
http.Error(w, "Failed to encode content to JSON", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
w.WriteHeader(statusCode)
w.Write(resp)
}
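// Usage sketch (hypothetical call):
//	JSONResponse(w, map[string]string{"status": "ok"}, http.StatusOK)
// writes {"status":"ok"} with the JSON content type and CORS headers set above.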
func (c *Config) Populate() {
content, err := ioutil.ReadFile(path("/config/app.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Couldn't read file /config/app.json")
}
if err := json.Unmarshal(content, &c); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to decode config.json")
}
}
func path(p string) string {
return *cwd + p
}
func (m *Mail) Validate() map[string]string {
errors := make(map[string]string)
validateField(&Field{
Name: "subject",
DisplayName: "Subject",
Value: m.Subject,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "fullname",
DisplayName: "Full name",
Value: m.FullName,
Length: &FieldLength{
Min: 5,
Max: 48,
},
Regex: &FieldRegex{
Pattern: VALID_NAME,
Error: NAME_ERROR,
},
}, errors)
validateField(&Field{
Name: "email",
DisplayName: "Email",
Value: m.Email,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_EMAIL,
Error: EMAIL_ERROR,
},
}, errors)
validateField(&Field{
Name: "business",
DisplayName: "Business",
Value: m.Business,
Length: &FieldLength{
Min: 3,
Max: 32,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "body",
DisplayName: "Message",
Value: m.Body,
Length: &FieldLength{
Min: 64,
Max: 4096,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "details",
DisplayName: "Additional details",
Value: m.AdditionalDetails,
Length: &FieldLength{
Min: 4,
Max: 1024,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
return errors
}
func (m *Mail) Send() {
err := smtp.SendMail(
GetAddress(&config.Smtp.Hostname, &config.Smtp.Port),
smtp.PlainAuth("", config.Smtp.Email, config.Smtp.Password, config.Smtp.Hostname),
config.Smtp.Email, []string{config.Smtp.To}, []byte(m.Body))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error(fmt.Sprintf("Failed to send email from <%s> to <%s>", config.Smtp.Email, config.Smtp.To))
// Don't log success below when sending failed
return
}
log.WithFields(log.Fields{
"action": "Send email",
}).Info(fmt.Sprintf("Successfully sent email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) Save() {
mailmu.Lock()
defer mailmu.Unlock()
info, err := os.Stat(path("/storage.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to fetch info of storage.json")
return
}
file, err := os.OpenFile(path("/storage.json"), os.O_RDWR, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to open storage.json")
return
}
// Defer the close only after a successful open
defer file.Close()
mail, _ := json.Marshal(m)
buffer := make([]byte, 2)
var toWrite []byte
if _, err = file.ReadAt(buffer, info.Size()-2); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to read file storage.json")
}
switch {
case reflect.DeepEqual(buffer, []byte{'[', ']'}):
toWrite = append(toWrite, mail...)
case reflect.DeepEqual(buffer, []byte{'}', ']'}):
toWrite = append(toWrite, append([]byte{','}, mail...)...)
}
if _, err = file.WriteAt(append(toWrite, ']'), info.Size()-1); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to save email in storage.json")
}
log.WithFields(log.Fields{
"action": "Save email",
}).Info(fmt.Sprintf("Successfully saved email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) SaveAndSend() {
m.Save()
m.Send()
}
func handleWriteEmail(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(io.LimitReader(r.Body, PAYLOAD_MAX_SIZE))
if err != nil {
http.Error(w, "Failed to read body from HTTP request", http.StatusInternalServerError)
return
}
mail := new(Mail)
err = json.Unmarshal(body, &mail)
if err != nil {
http.Error(w, "Failed to parse payload", http.StatusBadRequest)
return
}
validationErrors := mail.Validate()
if len(validationErrors) != 0 {
JSONResponse(w, map[string](map[string]string){"errors": validationErrors}, http.StatusBadRequest)
return
}
go mail.SaveAndSend()
w.WriteHeader(200)
}
func handleHomepage(w http.ResponseWriter, _ *http.Request) {
w.Write(*homepage)
}
func createIfNotExists(p string, content []byte) {
if _, err := os.Stat(p); os.IsNotExist(err) {
err = ioutil.WriteFile(p, content, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to create new file " + p)
}
}
}
func bootstrap() *os.File {
wd, err := os.Getwd()
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to retrieve the current working directory")
}
cwd = &wd
// Create storage for mails as backup and to double check sent mails
createIfNotExists(path("/storage.json"), []byte{'[', ']'})
// Create log files, logging is gud
createIfNotExists(path("/logs/actions.log"), nil)
createIfNotExists(path("/logs/requests.log"), nil)
// Create config file
createIfNotExists(path("/config/app.json"), []byte{'{', '}'})
// Initialize logger and write new entries to /logs/actions.log
logFile, err := os.OpenFile(path("/logs/actions.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/actions.log")
}
log.SetOutput(logFile)
log.SetLevel(log.DebugLevel)
// Allocate and initialize config
config = new(Config)
config.Populate()
return logFile
}
func server() {
cachedHomepage, err := ioutil.ReadFile(path("/ui/views/index.html"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open homepage")
}
homepage = &cachedHomepage
router := mux.NewRouter()
addr := GetAddress(&config.Hostname, &config.Port)
router.HandleFunc("/", handleHomepage).Methods("GET")
router.HandleFunc("/write-email", handleWriteEmail).Methods("POST")
router.PathPrefix("/static").Handler(http.StripPrefix("/static", http.FileServer(http.Dir(path("/build")))))
log.Info("Starting HTTP server on " + addr)
fmt.Println("Starting HTTP server on " + addr)
logFile, err := os.OpenFile(path("/logs/requests.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/requests.log")
}
defer logFile.Close()
logger := handlers.LoggingHandler(logFile, router)
log.Fatal(http.ListenAndServe(addr, logger))
}
func main() {
logger := bootstrap()
defer logger.Close()
server()
} | random_line_split |
|
web.go | package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/smtp"
"os"
"reflect"
"regexp"
"strconv"
"sync"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
)
type Mail struct {
Subject string `json:"subject"`
FullName string `json:"fullname"`
Email string `json:"email"`
Business string `json:"business"`
Body string `json:"body"`
AdditionalDetails string `json:"details"`
}
type Config struct {
Smtp *Smtp
Cwd string
Hostname string
Port int
}
type Smtp struct {
Email string
To string
Password string
Hostname string
Port int
}
type FieldLength struct {
Min int
Max int
}
type FieldRegex struct {
Pattern string
Error string
}
type Field struct {
Name string
DisplayName string
Value string
Length *FieldLength
Regex *FieldRegex
}
const (
// Regex patterns for the inputted fields
VALID_NAME string = "^\\p{L}{2,}(?:\\x20\\p{L}{2,}){1,5}$"
VALID_SENTENCE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}]*$"
VALID_MESSAGE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}\\s]+$"
// This email regex has to get better, missing _ and a proper . check before the @
VALID_EMAIL string = "^[a-z\\x2E\\x5F]+\\x2B?[a-z]*[^\\x2B]\\x40(?:[a-z]+[a-z\\x2D\\x2E]?)+[^\\x2D]\\x2E[a-z]{2,5}$"
// Validation errors
EMAIL_ERROR string = "%s must be a valid email address"
NAME_ERROR string = "%s has to have at least one middle or last name"
SENTENCE_ERROR string = "%s must only contain language letters and ascii symbols"
LENGTH_ERROR string = "%s length mustn't be shorter than %d characters or longer than %d characters"
// Validations were successful
SUCCESS_MSG string = "Alright! The email has been sent."
// Amount of bytes to read from the body of a request
PAYLOAD_MAX_SIZE int64 = 8192
)
var (
config *Config
homepage *[]byte
cwd *string
mailmu sync.Mutex
)
// Concatenate a given hostname and port
func GetAddress(h *string, p *int) string |
// Append an error message to slice if value fails the checks
func validateField(f *Field, hash map[string]string) {
length := 0
if f.Value != "" {
length = len(f.Value)
}
switch {
case length < f.Length.Min || length > f.Length.Max:
hash[f.Name] = fmt.Sprintf(LENGTH_ERROR, f.DisplayName, f.Length.Min, f.Length.Max)
case f.Regex != nil:
matches, _ := regexp.MatchString(f.Regex.Pattern, f.Value)
if !matches {
hash[f.Name] = fmt.Sprintf(f.Regex.Error, f.DisplayName)
}
}
}
// Encode content to JSON and write it to the response
func JSONResponse(w http.ResponseWriter, content interface{}, statusCode int) {
resp, err := json.Marshal(content)
if err != nil {
http.Error(w, "Failed to encode content to JSON", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
w.WriteHeader(statusCode)
w.Write(resp)
}
func (c *Config) Populate() {
content, err := ioutil.ReadFile(path("/config/app.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Couldn't read file /config/app.json")
}
if err := json.Unmarshal(content, &c); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to decode config.json")
}
}
func path(p string) string {
return *cwd + p
}
func (m *Mail) Validate() map[string]string {
errors := make(map[string]string)
validateField(&Field{
Name: "subject",
DisplayName: "Subject",
Value: m.Subject,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "fullname",
DisplayName: "Full name",
Value: m.FullName,
Length: &FieldLength{
Min: 5,
Max: 48,
},
Regex: &FieldRegex{
Pattern: VALID_NAME,
Error: NAME_ERROR,
},
}, errors)
validateField(&Field{
Name: "email",
DisplayName: "Email",
Value: m.Email,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_EMAIL,
Error: EMAIL_ERROR,
},
}, errors)
validateField(&Field{
Name: "business",
DisplayName: "Business",
Value: m.Business,
Length: &FieldLength{
Min: 3,
Max: 32,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "body",
DisplayName: "Message",
Value: m.Body,
Length: &FieldLength{
Min: 64,
Max: 4096,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "details",
DisplayName: "Additional details",
Value: m.AdditionalDetails,
Length: &FieldLength{
Min: 4,
Max: 1024,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
return errors
}
func (m *Mail) Send() {
err := smtp.SendMail(
GetAddress(&config.Smtp.Hostname, &config.Smtp.Port),
smtp.PlainAuth("", config.Smtp.Email, config.Smtp.Password, config.Smtp.Hostname),
config.Smtp.Email, []string{config.Smtp.To}, []byte(m.Body))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error(fmt.Sprintf("Failed to send email from <%s> to <%s>", config.Smtp.Email, config.Smtp.To))
// Don't log success below when sending failed
return
}
log.WithFields(log.Fields{
"action": "Send email",
}).Info(fmt.Sprintf("Successfully sent email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) Save() {
mailmu.Lock()
defer mailmu.Unlock()
info, err := os.Stat(path("/storage.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to fetch info of storage.json")
return
}
file, err := os.OpenFile(path("/storage.json"), os.O_RDWR, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to open storage.json")
return
}
// Defer the close only after a successful open
defer file.Close()
mail, _ := json.Marshal(m)
buffer := make([]byte, 2)
var toWrite []byte
if _, err = file.ReadAt(buffer, info.Size()-2); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to read file storage.json")
}
switch {
case reflect.DeepEqual(buffer, []byte{'[', ']'}):
toWrite = append(toWrite, mail...)
case reflect.DeepEqual(buffer, []byte{'}', ']'}):
toWrite = append(toWrite, append([]byte{','}, mail...)...)
}
if _, err = file.WriteAt(append(toWrite, ']'), info.Size()-1); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to save email in storage.json")
}
log.WithFields(log.Fields{
"action": "Save email",
}).Info(fmt.Sprintf("Successfully saved email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) SaveAndSend() {
m.Save()
m.Send()
}
func handleWriteEmail(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(io.LimitReader(r.Body, PAYLOAD_MAX_SIZE))
if err != nil {
http.Error(w, "Failed to read body from HTTP request", http.StatusInternalServerError)
return
}
mail := new(Mail)
err = json.Unmarshal(body, &mail)
if err != nil {
http.Error(w, "Failed to parse payload", http.StatusBadRequest)
return
}
validationErrors := mail.Validate()
if len(validationErrors) != 0 {
JSONResponse(w, map[string](map[string]string){"errors": validationErrors}, http.StatusBadRequest)
return
}
go mail.SaveAndSend()
w.WriteHeader(200)
}
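// Request bodies are read through io.LimitReader, so at most PAYLOAD_MAX_SIZE
// (8 KiB) bytes are considered; anything beyond that is silently truncated
// before JSON decoding. A hypothetical client would POST the Mail fields as
// JSON to /write-email and get 200 on success or a JSON "errors" map with
// status 400 on validation failure.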
func handleHomepage(w http.ResponseWriter, _ *http.Request) {
w.Write(*homepage)
}
func createIfNotExists(p string, content []byte) {
if _, err := os.Stat(p); os.IsNotExist(err) {
err = ioutil.WriteFile(p, content, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to create new file " + p)
}
}
}
func bootstrap() *os.File {
wd, err := os.Getwd()
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to retrieve the current working directory")
}
cwd = &wd
// Create storage for mails as backup and to double check sent mails
createIfNotExists(path("/storage.json"), []byte{'[', ']'})
// Create log files, logging is gud
createIfNotExists(path("/logs/actions.log"), nil)
createIfNotExists(path("/logs/requests.log"), nil)
// Create config file
createIfNotExists(path("/config/app.json"), []byte{'{', '}'})
// Initialize logger and write new entries to /logs/actions.log
logFile, err := os.OpenFile(path("/logs/actions.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/actions.log")
}
log.SetOutput(logFile)
log.SetLevel(log.DebugLevel)
// Allocate and initialize config
config = new(Config)
config.Populate()
return logFile
}
func server() {
cachedHomepage, err := ioutil.ReadFile(path("/ui/views/index.html"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open homepage")
}
homepage = &cachedHomepage
router := mux.NewRouter()
addr := GetAddress(&config.Hostname, &config.Port)
router.HandleFunc("/", handleHomepage).Methods("GET")
router.HandleFunc("/write-email", handleWriteEmail).Methods("POST")
router.PathPrefix("/static").Handler(http.StripPrefix("/static", http.FileServer(http.Dir(path("/build")))))
log.Info("Starting HTTP server on " + addr)
fmt.Println("Starting HTTP server on " + addr)
logFile, err := os.OpenFile(path("/logs/requests.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/requests.log")
}
defer logFile.Close()
logger := handlers.LoggingHandler(logFile, router)
log.Fatal(http.ListenAndServe(addr, logger))
}
func main() {
logger := bootstrap()
defer logger.Close()
server()
}
| {
return *h + ":" + strconv.FormatUint(uint64(*p), 10)
} | identifier_body |
web.go | package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/smtp"
"os"
"reflect"
"regexp"
"strconv"
"sync"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
)
type Mail struct {
Subject string `json:"subject"`
FullName string `json:"fullname"`
Email string `json:"email"`
Business string `json:"business"`
Body string `json:"body"`
AdditionalDetails string `json:"details"`
}
type Config struct {
Smtp *Smtp
Cwd string
Hostname string
Port int
}
type Smtp struct {
Email string
To string
Password string
Hostname string
Port int
}
type FieldLength struct {
Min int
Max int
}
type FieldRegex struct {
Pattern string
Error string
}
type Field struct {
Name string
DisplayName string
Value string
Length *FieldLength
Regex *FieldRegex
}
const (
// Regex patterns for the inputted fields
VALID_NAME string = "^\\p{L}{2,}(?:\\x20\\p{L}{2,}){1,5}$"
VALID_SENTENCE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}]*$"
VALID_MESSAGE string = "^[\\p{L}\\d\\x20-\\x2F\\x3A-\\x40\\x5B-\\x60\\x7B-\\x7E\\x{00B4}\\s]+$"
// This email regex has to get better, missing _ and a proper . check before the @
VALID_EMAIL string = "^[a-z\\x2E\\x5F]+\\x2B?[a-z]*[^\\x2B]\\x40(?:[a-z]+[a-z\\x2D\\x2E]?)+[^\\x2D]\\x2E[a-z]{2,5}$"
// Validation errors
EMAIL_ERROR string = "%s must be a valid email address"
NAME_ERROR string = "%s has to have at least one middle or last name"
SENTENCE_ERROR string = "%s must only contain language letters and ascii symbols"
LENGTH_ERROR string = "%s length mustn't be shorter than %d characters or longer than %d characters"
// Validations were successful
SUCCESS_MSG string = "Alright! The email has been sent."
// Amount of bytes to read from the body of a request
PAYLOAD_MAX_SIZE int64 = 8192
)
var (
config *Config
homepage *[]byte
cwd *string
mailmu sync.Mutex
)
// Concatenate a given hostname and port
func GetAddress(h *string, p *int) string {
return *h + ":" + strconv.FormatUint(uint64(*p), 10)
}
// Append an error message to slice if value fails the checks
func validateField(f *Field, hash map[string]string) {
length := 0
if f.Value != "" {
length = len(f.Value)
}
switch {
case length < f.Length.Min || length > f.Length.Max:
hash[f.Name] = fmt.Sprintf(LENGTH_ERROR, f.DisplayName, f.Length.Min, f.Length.Max)
case f.Regex != nil:
matches, _ := regexp.MatchString(f.Regex.Pattern, f.Value)
if !matches {
hash[f.Name] = fmt.Sprintf(f.Regex.Error, f.DisplayName)
}
}
}
// Encode content to JSON and write it to the response
func JSONResponse(w http.ResponseWriter, content interface{}, statusCode int) {
resp, err := json.Marshal(content)
if err != nil {
http.Error(w, "Failed to encode content to JSON", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
w.WriteHeader(statusCode)
w.Write(resp)
}
func (c *Config) Populate() {
content, err := ioutil.ReadFile(path("/config/app.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Couldn't read file /config/app.json")
}
if err := json.Unmarshal(content, &c); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to decode config.json")
}
}
func path(p string) string {
return *cwd + p
}
func (m *Mail) Validate() map[string]string {
errors := make(map[string]string)
validateField(&Field{
Name: "subject",
DisplayName: "Subject",
Value: m.Subject,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "fullname",
DisplayName: "Full name",
Value: m.FullName,
Length: &FieldLength{
Min: 5,
Max: 48,
},
Regex: &FieldRegex{
Pattern: VALID_NAME,
Error: NAME_ERROR,
},
}, errors)
validateField(&Field{
Name: "email",
DisplayName: "Email",
Value: m.Email,
Length: &FieldLength{
Min: 12,
Max: 128,
},
Regex: &FieldRegex{
Pattern: VALID_EMAIL,
Error: EMAIL_ERROR,
},
}, errors)
validateField(&Field{
Name: "business",
DisplayName: "Business",
Value: m.Business,
Length: &FieldLength{
Min: 3,
Max: 32,
},
Regex: &FieldRegex{
Pattern: VALID_SENTENCE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "body",
DisplayName: "Message",
Value: m.Body,
Length: &FieldLength{
Min: 64,
Max: 4096,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
validateField(&Field{
Name: "details",
DisplayName: "Additional details",
Value: m.AdditionalDetails,
Length: &FieldLength{
Min: 4,
Max: 1024,
},
Regex: &FieldRegex{
Pattern: VALID_MESSAGE,
Error: SENTENCE_ERROR,
},
}, errors)
return errors
}
func (m *Mail) Send() {
err := smtp.SendMail(
GetAddress(&config.Smtp.Hostname, &config.Smtp.Port),
smtp.PlainAuth("", config.Smtp.Email, config.Smtp.Password, config.Smtp.Hostname),
config.Smtp.Email, []string{config.Smtp.To}, []byte(m.Body))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error(fmt.Sprintf("Failed to send email from <%s> to <%s>", config.Smtp.Email, config.Smtp.To))
// Don't log success below when sending failed
return
}
log.WithFields(log.Fields{
"action": "Send email",
}).Info(fmt.Sprintf("Successfully sent email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) Save() {
mailmu.Lock()
defer mailmu.Unlock()
info, err := os.Stat(path("/storage.json"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to fetch info of storage.json")
return
}
file, err := os.OpenFile(path("/storage.json"), os.O_RDWR, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to open storage.json")
return
}
// Defer the close only after a successful open
defer file.Close()
mail, _ := json.Marshal(m)
buffer := make([]byte, 2)
var toWrite []byte
if _, err = file.ReadAt(buffer, info.Size()-2); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to read file storage.json")
}
switch {
case reflect.DeepEqual(buffer, []byte{'[', ']'}):
toWrite = append(toWrite, mail...)
case reflect.DeepEqual(buffer, []byte{'}', ']'}):
toWrite = append(toWrite, append([]byte{','}, mail...)...)
}
if _, err = file.WriteAt(append(toWrite, ']'), info.Size()-1); err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Error("Failed to save email in storage.json")
}
log.WithFields(log.Fields{
"action": "Save email",
}).Info(fmt.Sprintf("Successfully saved email written by %s <%s>", m.FullName, m.Email))
}
func (m *Mail) SaveAndSend() {
m.Save()
m.Send()
}
func | (w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(io.LimitReader(r.Body, PAYLOAD_MAX_SIZE))
if err != nil {
http.Error(w, "Failed to read body from HTTP request", http.StatusInternalServerError)
return
}
mail := new(Mail)
err = json.Unmarshal(body, &mail)
if err != nil {
http.Error(w, "Failed to parse payload", http.StatusBadRequest)
return
}
validationErrors := mail.Validate()
if len(validationErrors) != 0 {
JSONResponse(w, map[string](map[string]string){"errors": validationErrors}, http.StatusBadRequest)
return
}
go mail.SaveAndSend()
w.WriteHeader(200)
}
func handleHomepage(w http.ResponseWriter, _ *http.Request) {
w.Write(*homepage)
}
func createIfNotExists(p string, content []byte) {
if _, err := os.Stat(p); os.IsNotExist(err) {
err = ioutil.WriteFile(p, content, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to create new file " + p)
}
}
}
func bootstrap() *os.File {
wd, err := os.Getwd()
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to retrieve the current working directory")
}
cwd = &wd
// Create storage for mails as backup and to double check sent mails
createIfNotExists(path("/storage.json"), []byte{'[', ']'})
// Create log files, logging is gud
createIfNotExists(path("/logs/actions.log"), nil)
createIfNotExists(path("/logs/requests.log"), nil)
// Create config file
createIfNotExists(path("/config/app.json"), []byte{'{', '}'})
// Initialize logger and write new entries to /logs/actions.log
logFile, err := os.OpenFile(path("/logs/actions.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/actions.log")
}
log.SetOutput(logFile)
log.SetLevel(log.DebugLevel)
// Allocate and initialize config
config = new(Config)
config.Populate()
return logFile
}
func server() {
cachedHomepage, err := ioutil.ReadFile(path("/ui/views/index.html"))
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open homepage")
}
homepage = &cachedHomepage
router := mux.NewRouter()
addr := GetAddress(&config.Hostname, &config.Port)
router.HandleFunc("/", handleHomepage).Methods("GET")
router.HandleFunc("/write-email", handleWriteEmail).Methods("POST")
router.PathPrefix("/static").Handler(http.StripPrefix("/static", http.FileServer(http.Dir(path("/build")))))
log.Info("Starting HTTP server on " + addr)
fmt.Println("Starting HTTP server on " + addr)
logFile, err := os.OpenFile(path("/logs/requests.log"), os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.WithFields(log.Fields{
"error": err.Error(),
}).Fatal("Failed to open file /logs/requests.log")
}
defer logFile.Close()
logger := handlers.LoggingHandler(logFile, router)
log.Fatal(http.ListenAndServe(addr, logger))
}
func main() {
logger := bootstrap()
defer logger.Close()
server()
}
| handleWriteEmail | identifier_name |
scraper.py | #!/usr/bin/env python
"""
Provides client interface to Plans.
"""
import sys
if sys.version_info >= (3,3):
from urllib.parse import urlparse, parse_qsl
from http.cookiejar import LWPCookieJar
from html.parser import HTMLParser
elif sys.version_info < (3,):
from urlparse import urlparse, parse_qsl
from cookielib import LWPCookieJar
from HTMLParser import HTMLParser
str = unicode
import json
import re
import bs4
import requests
from .util import plans_md5, convert_endings, parse_plans_date
class PlansError(Exception):
"""Exception raised when there is an error talking to plans."""
pass
class PlansPageParser(HTMLParser):
"""HTML parser for GrinnellPlans pages."""
def handle_starttag(self, tag, attrs):
if tag == 'body':
# parse out id of body tag
# (can use to identify page)
self.page_id = dict(attrs).get('id', None)
if self.page_id is None:
# old interface <body> tag has no attrs
raise PlansError('Postmodern interface required')
if tag == 'a':
# parse out username to see who we're logged in as.
# amazingly, the only place this reliably appears
# is in the bug report link at the bottom of every page.
attrs = dict(attrs)
href = urlparse(attrs.get('href'))
if (href.netloc == 'github.com'
and href.path == '/grinnellplans/grinnellplans-php/issues/new'):
# if this is the bug submission link
query = parse_qsl(href.query)
comment = dict(query)['body']
# find username in submission content using brackets
start, stop = comment.index('['), comment.index(']')
self.username = comment[start + 1:stop]
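        # Illustrative sketch only -- the URL and username below are invented.
        # The bug-report href this parser keys on looks roughly like
        #   https://github.com/grinnellplans/grinnellplans-php/issues/new?body=Submitted+by+[alice]+from+read.php
        # so slicing between '[' and ']' in the decoded body yields "alice".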
# -------------------------------------------
# PLANS SCRAPEY-I
# get it? like "api" except... oh, never mind
# -------------------------------------------
class PlansConnection(object):
"""
Encapsulates an active login to plans.
"""
def __init__(self, cookiejar=None,
base_url="https://www.grinnellplans.com",
server_tz='US/Central'):
"""
Create a new plans connection.
Optional keyword arguments:
cookiejar -- an existing cookielib.CookieJar to store
credentials in.
base_url -- URL at which to access plans, no trailing slash.
server_tz -- Name of the timezone used by the server.
This class will convert dates to UTC.
"""
self.base_url = base_url
self.server_tz = server_tz
if cookiejar is None:
self.cookiejar = LWPCookieJar()
else:
self.cookiejar = cookiejar
self.session = requests.Session()
self.session.cookies = self.cookiejar
self.parser = PlansPageParser()
self.username = None
def _get_page(self, name, get=None, post=None):
"""
Retrieve an HTML page from plans.
"""
method = 'GET' if post is None else 'POST'
url = '/'.join((self.base_url, name))
req = requests.Request(method, url, params=get, data=post)
prepped = self.session.prepare_request(req)
try:
handle = self.session.send(prepped,
verify=url.startswith('https'))
except requests.exceptions.ConnectionError:
err = "Check your internet connection. Plans could also be down."
raise PlansError(err)
return handle
def _parse_message(self, soup):
"""
Scrape details from an infomessage or alertmessage div.
Returns a dictionary of the message parameters.
"""
kind, = soup.attrs[u'class']
title = soup.findChild().text
body = ''.join(t.text for t in soup.findChildren()[1:])
message = dict(kind=kind, title=title, body=body)
for val in message.values():
assert type(val) == str
return message
def _canonicalize_plantext(self, plan):
"""
Modify reserialized plan text to match what was served.
For consistency, we want to return plan text *exactly* how it
is formatted when served. However, variants of certain tags
(e.g. <br> vs <br/>) are syntactically equivalent in HTML, and
may be interchanged when parsed and reserialized.
This function manually changes the formatting returned by our
parser to more closely match that given by plans.
"""
# Our parser will correct <hr> and <br> to self closing tags
plan = plan.replace('<br/>', '<br>')
plan = plan.replace('<hr/>', '<hr>')
# put attributes in the right order because I have OCD
plan = re.sub(r'<a class="([^\s]*)" href="([^\s]*)">',
r'<a href="\2" class="\1">', plan)
# to avoid playing whack-a-mole, we should configure the
# parser to not do this, or else treat contents of
# <div class="plan_text"> tags as plain text
# (not sure if this is possible)
return plan
@staticmethod
def _html_esc(string):
"""
Replaces certain characters with html escape sequences.
Meant to be passed to the BS4 'decode' method as kwarg 'formatter'.
By default, BS4 only replaces angle brackets and ampersands with
the html escape sequence, but text served by plans also replaces
        double quotes. Using this function as BS4's decoding formatter
        reproduces that behavior.
"""
repls = {
'<': 'lt',
'>': 'gt',
'&': 'amp',
'"': 'quot',
}
def repl(matchobj):
return "&%s;" % repls[matchobj.group(0)]
regex = "([%s])" % ''.join(repls.keys())
return re.sub(regex, repl, string)
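    # Quick sanity check of the formatter above (input string invented here):
    #   PlansConnection._html_esc('say "<hi>"')  ->  'say &quot;&lt;hi&gt;&quot;'
    # i.e. the same four characters plans escapes when serving text.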
def plans_login(self, username='', password=''):
"""
Log into plans.
Returns True on success, False on failure. Leave username and
password blank to check an existing login.
"""
# the provided username and password ONLY get checked
# by the plans server if our cookie is expired.
# hence, if we've logged in recently, this will return True even
# if un/pw are not provided or are otherwise bad.
login_info = {'username': username,
'password': password,
'submit': 'Login'}
response = self._get_page('index.php', post=login_info)
# if login is successful, we'll be redirected to home
success = response.url[-9:] == '/home.php'
if success:
self.parser.feed(response.text) # parse out username
self.username = self.parser.username
return success
def get_edit_text(self):
"""
Retrieve contents of the edit plan field.
Returns the edit_text of the plan and its md5 hash,
as computed on the server side.
"""
# grab edit page
response = self._get_page('edit.php')
html = response.text
# parse out existing plan
soup = bs4.BeautifulSoup(html, 'html5lib')
plan = soup.find('textarea')
if plan is None:
raise PlansError("Couldn't get edit text, are we logged in?")
else:
plan = u'' + plan.contents[0]
# prepending the empty string somehow prevents BS from
# escaping all the HTML characters (weird)
assert type(plan) == str
# convert to CRLF line endings
plan = convert_endings(plan, 'CRLF')
# parse out plan md5
md5sum = soup.find('input',
attrs={'name': 'edit_text_md5'}).attrs['value']
# also, explicitly compute the hash, for kicks
assert md5sum == plans_md5(plan)
# verify that username has not changed
assert self.username == self.parser.username
return plan, md5sum
def set_edit_text(self, newtext, md5):
"""
Update plan with new content.
To prevent errors, the server does a hash check on the existing
plan before replacing it with the new one. We provide an
md5 sum to confirm that yes, we really want to update the plan.
Returns info message.
"""
# convert to CRLF line endings
newtext = convert_endings(newtext, 'CRLF')
newtext = newtext.encode('utf8')
edit_info = {'plan': newtext,
'edit_text_md5': md5,
'submit': 'Change Plan'}
response = self._get_page('edit.php', post=edit_info)
soup = bs4.BeautifulSoup(response.text, "html5lib")
alert = soup.find('div', {'class': 'alertmessage'})
info = soup.find('div', {'class': 'infomessage'})
if alert is not None:
# some kind of error
msg = self._parse_message(alert)
raise PlansError(msg['body'])
elif info is None:
raise PlansError('Plans did not verify update')
else:
# probably success
msg = self._parse_message(info)
return msg['body']
def | (self):
"""
Retrieve all levels of the autofinger (autoread) list.
Returns a dictionary where the keys are the group names
"Level 1", "Level 2", etc. and the values are a list of
usernames waiting to be read.
"""
# this actually doesn't scrape; there's a function for it
# in the old JSON API.
get = {'task': 'autofingerlist'}
response = self._get_page('api/1/index.php', get=get)
data = json.loads(response.text)
# the returned JSON is crufty; clean it up
autofinger = {}
for group in data['autofingerList']:
name = "Level %s" % group['level']
autofinger[name] = group['usernames']
return autofinger
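    # Shape of the cleaned-up result, with invented usernames:
    #   {"Level 1": ["alice", "bob"], "Level 2": [], "Level 3": ["carol"]}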
def read_plan(self, plan):
"""
Retrieve the contents of the specified plan.
Returns two objects: the plan header (as a python dictionary)
        and the plan text (in HTML format)
"""
get = {'searchname': plan}
response = self._get_page('read.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
header = soup.find('div', {'id': 'header'})
text = soup.find('div', {'class': 'plan_text'})
if text is None or header is None:
# probably a nonexistent user
alert = soup.find('div', {'class': 'alertmessage'})
msg = self._parse_message(alert)
raise PlansError(msg['title'])
# convert header html into a python dictionary
header_dict = {}
for key in ('username', 'planname'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).contents
value = str(content[0]) if len(content) > 0 else None
header_dict[key] = value
for key in ('lastupdated', 'lastlogin'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).find(
'span', {'class': 'long'}
).contents
if len(content) > 0:
value = str(content[0])
value = parse_plans_date(value, tz_name=self.server_tz)
else:
value = None
header_dict[key] = value
text.hidden = True # prevents BS from wrapping contents in
# <div> upon conversion to unicode string
plan = text.decode(formatter=self._html_esc) # soup to unicode
assert plan[0] == '\n' # drop leading newline
plan = self._canonicalize_plantext(plan[1:])
return header_dict, plan
def search_plans(self, term, planlove=False):
"""
Search plans for the provided ``term``.
If ``planlove`` is ``True``, ``term`` is a username, and the
search will be for incidences of planlove for that user.
returns: list of plans upon which the search term was found.
each list element is a 3-tuple:
- plan name
- number of occurrences of search term on the plan
- list of plan excerpts giving context
the length of the excerpt list may be equal to or less than
the number of occurrences of the search term, since
overlapping excerpts are consolidated.
"""
get = {'mysearch': term,
'planlove': int(bool(planlove))}
response = self._get_page('search.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'search_results'})
if results is None:
return [] # no results
# results are grouped by the plan
# on which the result was found
user_groups = results.findAll(
'div', {'class': 'result_user_group'})
resultlist = []
for group in user_groups:
user = group.find('a', {'class': 'planlove'}).contents[0]
count = group.find('span').contents[0]
# now extract snippets
snippetlist = group.findAll('li')
snippets = []
for li in snippetlist:
tag = li.find('span')
tag.hidden = True # prevents BS from wrapping contents in
# <span> upon conversion to unicode string
snip = tag.decode(formatter=self._html_esc) # soup to unicode
snip = self._canonicalize_plantext(snip)
snippets.append(snip)
resultlist.append((str(user), int(count), snippets))
return resultlist
def planwatch(self, hours=12):
"""
Return plans updated in the last ``hours`` hours.
The result is a list of (username, timestamp) 2-tuples.
"""
post = {'mytime': str(hours)}
response = self._get_page('planwatch.php', post=post)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'new_plan_list'})
new_plans = results.findAll('div', {'class': 'newplan'})
resultlist = []
for div in new_plans:
user = div.find('a', {'class': 'planlove'}).contents[0]
time = div.find('span').contents[0]
time = parse_plans_date(time, tz_name=self.server_tz)
resultlist.append((user, time))
return resultlist
| get_autofinger | identifier_name |
scraper.py | #!/usr/bin/env python
"""
Provides client interface to Plans.
"""
import sys
if sys.version_info >= (3,3):
from urllib.parse import urlparse, parse_qsl
from http.cookiejar import LWPCookieJar
from html.parser import HTMLParser
elif sys.version_info < (3,):
from urlparse import urlparse, parse_qsl
from cookielib import LWPCookieJar
from HTMLParser import HTMLParser
str = unicode
import json
import re
import bs4
import requests
from .util import plans_md5, convert_endings, parse_plans_date
class PlansError(Exception):
"""Exception raised when there is an error talking to plans."""
pass
class PlansPageParser(HTMLParser):
"""HTML parser for GrinnellPlans pages."""
def handle_starttag(self, tag, attrs):
if tag == 'body':
# parse out id of body tag
# (can use to identify page)
self.page_id = dict(attrs).get('id', None)
if self.page_id is None:
# old interface <body> tag has no attrs
raise PlansError('Postmodern interface required')
if tag == 'a':
# parse out username to see who we're logged in as.
# amazingly, the only place this reliably appears
# is in the bug report link at the bottom of every page.
attrs = dict(attrs)
href = urlparse(attrs.get('href'))
if (href.netloc == 'github.com'
and href.path == '/grinnellplans/grinnellplans-php/issues/new'):
# if this is the bug submission link
query = parse_qsl(href.query)
comment = dict(query)['body']
# find username in submission content using brackets
start, stop = comment.index('['), comment.index(']')
self.username = comment[start + 1:stop]
# -------------------------------------------
# PLANS SCRAPEY-I
# get it? like "api" except... oh, never mind
# -------------------------------------------
class PlansConnection(object):
"""
Encapsulates an active login to plans.
"""
def __init__(self, cookiejar=None,
base_url="https://www.grinnellplans.com",
server_tz='US/Central'):
"""
Create a new plans connection.
Optional keyword arguments:
cookiejar -- an existing cookielib.CookieJar to store
credentials in.
base_url -- URL at which to access plans, no trailing slash.
server_tz -- Name of the timezone used by the server.
This class will convert dates to UTC.
"""
self.base_url = base_url
self.server_tz = server_tz
if cookiejar is None:
self.cookiejar = LWPCookieJar()
else:
self.cookiejar = cookiejar
self.session = requests.Session()
self.session.cookies = self.cookiejar
self.parser = PlansPageParser()
self.username = None
def _get_page(self, name, get=None, post=None):
"""
Retrieve an HTML page from plans.
"""
method = 'GET' if post is None else 'POST' | verify=url.startswith('https'))
except requests.exceptions.ConnectionError:
err = "Check your internet connection. Plans could also be down."
raise PlansError(err)
return handle
def _parse_message(self, soup):
"""
Scrape details from an infomessage or alertmessage div.
Returns a dictionary of the message parameters.
"""
kind, = soup.attrs[u'class']
title = soup.findChild().text
body = ''.join(t.text for t in soup.findChildren()[1:])
message = dict(kind=kind, title=title, body=body)
for val in message.values():
assert type(val) == str
return message
def _canonicalize_plantext(self, plan):
"""
Modify reserialized plan text to match what was served.
For consistency, we want to return plan text *exactly* how it
is formatted when served. However, variants of certain tags
(e.g. <br> vs <br/>) are syntactically equivalent in HTML, and
may be interchanged when parsed and reserialized.
This function manually changes the formatting returned by our
parser to more closely match that given by plans.
"""
# Our parser will correct <hr> and <br> to self closing tags
plan = plan.replace('<br/>', '<br>')
plan = plan.replace('<hr/>', '<hr>')
# put attributes in the right order because I have OCD
plan = re.sub(r'<a class="([^\s]*)" href="([^\s]*)">',
r'<a href="\2" class="\1">', plan)
# to avoid playing whack-a-mole, we should configure the
# parser to not do this, or else treat contents of
# <div class="plan_text"> tags as plain text
# (not sure if this is possible)
return plan
@staticmethod
def _html_esc(string):
"""
Replaces certain characters with html escape sequences.
Meant to be passed to the BS4 'decode' method as kwarg 'formatter'.
By default, BS4 only replaces angle brackets and ampersands with
the html escape sequence, but text served by plans also replaces
        double quotes. Using this function as BS4's decoding formatter
        reproduces that behavior.
"""
repls = {
'<': 'lt',
'>': 'gt',
'&': 'amp',
'"': 'quot',
}
def repl(matchobj):
return "&%s;" % repls[matchobj.group(0)]
regex = "([%s])" % ''.join(repls.keys())
return re.sub(regex, repl, string)
def plans_login(self, username='', password=''):
"""
Log into plans.
Returns True on success, False on failure. Leave username and
password blank to check an existing login.
"""
# the provided username and password ONLY get checked
# by the plans server if our cookie is expired.
# hence, if we've logged in recently, this will return True even
# if un/pw are not provided or are otherwise bad.
login_info = {'username': username,
'password': password,
'submit': 'Login'}
response = self._get_page('index.php', post=login_info)
# if login is successful, we'll be redirected to home
success = response.url[-9:] == '/home.php'
if success:
self.parser.feed(response.text) # parse out username
self.username = self.parser.username
return success
def get_edit_text(self):
"""
Retrieve contents of the edit plan field.
Returns the edit_text of the plan and its md5 hash,
as computed on the server side.
"""
# grab edit page
response = self._get_page('edit.php')
html = response.text
# parse out existing plan
soup = bs4.BeautifulSoup(html, 'html5lib')
plan = soup.find('textarea')
if plan is None:
raise PlansError("Couldn't get edit text, are we logged in?")
else:
plan = u'' + plan.contents[0]
# prepending the empty string somehow prevents BS from
# escaping all the HTML characters (weird)
assert type(plan) == str
# convert to CRLF line endings
plan = convert_endings(plan, 'CRLF')
# parse out plan md5
md5sum = soup.find('input',
attrs={'name': 'edit_text_md5'}).attrs['value']
# also, explicitly compute the hash, for kicks
assert md5sum == plans_md5(plan)
# verify that username has not changed
assert self.username == self.parser.username
return plan, md5sum
def set_edit_text(self, newtext, md5):
"""
Update plan with new content.
To prevent errors, the server does a hash check on the existing
plan before replacing it with the new one. We provide an
md5 sum to confirm that yes, we really want to update the plan.
Returns info message.
"""
# convert to CRLF line endings
newtext = convert_endings(newtext, 'CRLF')
newtext = newtext.encode('utf8')
edit_info = {'plan': newtext,
'edit_text_md5': md5,
'submit': 'Change Plan'}
response = self._get_page('edit.php', post=edit_info)
soup = bs4.BeautifulSoup(response.text, "html5lib")
alert = soup.find('div', {'class': 'alertmessage'})
info = soup.find('div', {'class': 'infomessage'})
if alert is not None:
# some kind of error
msg = self._parse_message(alert)
raise PlansError(msg['body'])
elif info is None:
raise PlansError('Plans did not verify update')
else:
# probably success
msg = self._parse_message(info)
return msg['body']
def get_autofinger(self):
"""
Retrieve all levels of the autofinger (autoread) list.
Returns a dictionary where the keys are the group names
"Level 1", "Level 2", etc. and the values are a list of
usernames waiting to be read.
"""
# this actually doesn't scrape; there's a function for it
# in the old JSON API.
get = {'task': 'autofingerlist'}
response = self._get_page('api/1/index.php', get=get)
data = json.loads(response.text)
# the returned JSON is crufty; clean it up
autofinger = {}
for group in data['autofingerList']:
name = "Level %s" % group['level']
autofinger[name] = group['usernames']
return autofinger
def read_plan(self, plan):
"""
Retrieve the contents of the specified plan.
Returns two objects: the plan header (as a python dictionary)
        and the plan text (in HTML format)
"""
get = {'searchname': plan}
response = self._get_page('read.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
header = soup.find('div', {'id': 'header'})
text = soup.find('div', {'class': 'plan_text'})
if text is None or header is None:
# probably a nonexistent user
alert = soup.find('div', {'class': 'alertmessage'})
msg = self._parse_message(alert)
raise PlansError(msg['title'])
# convert header html into a python dictionary
header_dict = {}
for key in ('username', 'planname'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).contents
value = str(content[0]) if len(content) > 0 else None
header_dict[key] = value
for key in ('lastupdated', 'lastlogin'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).find(
'span', {'class': 'long'}
).contents
if len(content) > 0:
value = str(content[0])
value = parse_plans_date(value, tz_name=self.server_tz)
else:
value = None
header_dict[key] = value
text.hidden = True # prevents BS from wrapping contents in
# <div> upon conversion to unicode string
plan = text.decode(formatter=self._html_esc) # soup to unicode
assert plan[0] == '\n' # drop leading newline
plan = self._canonicalize_plantext(plan[1:])
return header_dict, plan
def search_plans(self, term, planlove=False):
"""
Search plans for the provided ``term``.
If ``planlove`` is ``True``, ``term`` is a username, and the
search will be for incidences of planlove for that user.
returns: list of plans upon which the search term was found.
each list element is a 3-tuple:
- plan name
- number of occurrences of search term on the plan
- list of plan excerpts giving context
the length of the excerpt list may be equal to or less than
the number of occurrences of the search term, since
overlapping excerpts are consolidated.
"""
get = {'mysearch': term,
'planlove': int(bool(planlove))}
response = self._get_page('search.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'search_results'})
if results is None:
return [] # no results
# results are grouped by the plan
# on which the result was found
user_groups = results.findAll(
'div', {'class': 'result_user_group'})
resultlist = []
for group in user_groups:
user = group.find('a', {'class': 'planlove'}).contents[0]
count = group.find('span').contents[0]
# now extract snippets
snippetlist = group.findAll('li')
snippets = []
for li in snippetlist:
tag = li.find('span')
tag.hidden = True # prevents BS from wrapping contents in
# <span> upon conversion to unicode string
snip = tag.decode(formatter=self._html_esc) # soup to unicode
snip = self._canonicalize_plantext(snip)
snippets.append(snip)
resultlist.append((str(user), int(count), snippets))
return resultlist
def planwatch(self, hours=12):
"""
Return plans updated in the last ``hours`` hours.
The result is a list of (username, timestamp) 2-tuples.
"""
post = {'mytime': str(hours)}
response = self._get_page('planwatch.php', post=post)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'new_plan_list'})
new_plans = results.findAll('div', {'class': 'newplan'})
resultlist = []
for div in new_plans:
user = div.find('a', {'class': 'planlove'}).contents[0]
time = div.find('span').contents[0]
time = parse_plans_date(time, tz_name=self.server_tz)
resultlist.append((user, time))
return resultlist | url = '/'.join((self.base_url, name))
req = requests.Request(method, url, params=get, data=post)
prepped = self.session.prepare_request(req)
try:
handle = self.session.send(prepped, | random_line_split |
scraper.py | #!/usr/bin/env python
"""
Provides client interface to Plans.
"""
import sys
if sys.version_info >= (3,3):
from urllib.parse import urlparse, parse_qsl
from http.cookiejar import LWPCookieJar
from html.parser import HTMLParser
elif sys.version_info < (3,):
from urlparse import urlparse, parse_qsl
from cookielib import LWPCookieJar
from HTMLParser import HTMLParser
str = unicode
import json
import re
import bs4
import requests
from .util import plans_md5, convert_endings, parse_plans_date
class PlansError(Exception):
"""Exception raised when there is an error talking to plans."""
pass
class PlansPageParser(HTMLParser):
"""HTML parser for GrinnellPlans pages."""
def handle_starttag(self, tag, attrs):
if tag == 'body':
# parse out id of body tag
# (can use to identify page)
self.page_id = dict(attrs).get('id', None)
if self.page_id is None:
# old interface <body> tag has no attrs
raise PlansError('Postmodern interface required')
if tag == 'a':
# parse out username to see who we're logged in as.
# amazingly, the only place this reliably appears
# is in the bug report link at the bottom of every page.
attrs = dict(attrs)
href = urlparse(attrs.get('href'))
if (href.netloc == 'github.com'
and href.path == '/grinnellplans/grinnellplans-php/issues/new'):
# if this is the bug submission link
query = parse_qsl(href.query)
comment = dict(query)['body']
# find username in submission content using brackets
start, stop = comment.index('['), comment.index(']')
self.username = comment[start + 1:stop]
# -------------------------------------------
# PLANS SCRAPEY-I
# get it? like "api" except... oh, never mind
# -------------------------------------------
class PlansConnection(object):
"""
Encapsulates an active login to plans.
"""
def __init__(self, cookiejar=None,
base_url="https://www.grinnellplans.com",
server_tz='US/Central'):
"""
Create a new plans connection.
Optional keyword arguments:
cookiejar -- an existing cookielib.CookieJar to store
credentials in.
base_url -- URL at which to access plans, no trailing slash.
server_tz -- Name of the timezone used by the server.
This class will convert dates to UTC.
"""
self.base_url = base_url
self.server_tz = server_tz
if cookiejar is None:
self.cookiejar = LWPCookieJar()
else:
|
self.session = requests.Session()
self.session.cookies = self.cookiejar
self.parser = PlansPageParser()
self.username = None
def _get_page(self, name, get=None, post=None):
"""
Retrieve an HTML page from plans.
"""
method = 'GET' if post is None else 'POST'
url = '/'.join((self.base_url, name))
req = requests.Request(method, url, params=get, data=post)
prepped = self.session.prepare_request(req)
try:
handle = self.session.send(prepped,
verify=url.startswith('https'))
except requests.exceptions.ConnectionError:
err = "Check your internet connection. Plans could also be down."
raise PlansError(err)
return handle
def _parse_message(self, soup):
"""
Scrape details from an infomessage or alertmessage div.
Returns a dictionary of the message parameters.
"""
kind, = soup.attrs[u'class']
title = soup.findChild().text
body = ''.join(t.text for t in soup.findChildren()[1:])
message = dict(kind=kind, title=title, body=body)
for val in message.values():
assert type(val) == str
return message
def _canonicalize_plantext(self, plan):
"""
Modify reserialized plan text to match what was served.
For consistency, we want to return plan text *exactly* how it
is formatted when served. However, variants of certain tags
(e.g. <br> vs <br/>) are syntactically equivalent in HTML, and
may be interchanged when parsed and reserialized.
This function manually changes the formatting returned by our
parser to more closely match that given by plans.
"""
# Our parser will correct <hr> and <br> to self closing tags
plan = plan.replace('<br/>', '<br>')
plan = plan.replace('<hr/>', '<hr>')
# put attributes in the right order because I have OCD
plan = re.sub(r'<a class="([^\s]*)" href="([^\s]*)">',
r'<a href="\2" class="\1">', plan)
# to avoid playing whack-a-mole, we should configure the
# parser to not do this, or else treat contents of
# <div class="plan_text"> tags as plain text
# (not sure if this is possible)
return plan
@staticmethod
def _html_esc(string):
"""
Replaces certain characters with html escape sequences.
Meant to be passed to the BS4 'decode' method as kwarg 'formatter'.
By default, BS4 only replaces angle brackets and ampersands with
the html escape sequence, but text served by plans also replaces
        double quotes. Using this function as BS4's decoding formatter
        reproduces that behavior.
"""
repls = {
'<': 'lt',
'>': 'gt',
'&': 'amp',
'"': 'quot',
}
def repl(matchobj):
return "&%s;" % repls[matchobj.group(0)]
regex = "([%s])" % ''.join(repls.keys())
return re.sub(regex, repl, string)
def plans_login(self, username='', password=''):
"""
Log into plans.
Returns True on success, False on failure. Leave username and
password blank to check an existing login.
"""
# the provided username and password ONLY get checked
# by the plans server if our cookie is expired.
# hence, if we've logged in recently, this will return True even
# if un/pw are not provided or are otherwise bad.
login_info = {'username': username,
'password': password,
'submit': 'Login'}
response = self._get_page('index.php', post=login_info)
# if login is successful, we'll be redirected to home
success = response.url[-9:] == '/home.php'
if success:
self.parser.feed(response.text) # parse out username
self.username = self.parser.username
return success
def get_edit_text(self):
"""
Retrieve contents of the edit plan field.
Returns the edit_text of the plan and its md5 hash,
as computed on the server side.
"""
# grab edit page
response = self._get_page('edit.php')
html = response.text
# parse out existing plan
soup = bs4.BeautifulSoup(html, 'html5lib')
plan = soup.find('textarea')
if plan is None:
raise PlansError("Couldn't get edit text, are we logged in?")
else:
plan = u'' + plan.contents[0]
# prepending the empty string somehow prevents BS from
# escaping all the HTML characters (weird)
assert type(plan) == str
# convert to CRLF line endings
plan = convert_endings(plan, 'CRLF')
# parse out plan md5
md5sum = soup.find('input',
attrs={'name': 'edit_text_md5'}).attrs['value']
# also, explicitly compute the hash, for kicks
assert md5sum == plans_md5(plan)
# verify that username has not changed
assert self.username == self.parser.username
return plan, md5sum
def set_edit_text(self, newtext, md5):
"""
Update plan with new content.
To prevent errors, the server does a hash check on the existing
plan before replacing it with the new one. We provide an
md5 sum to confirm that yes, we really want to update the plan.
Returns info message.
"""
# convert to CRLF line endings
newtext = convert_endings(newtext, 'CRLF')
newtext = newtext.encode('utf8')
edit_info = {'plan': newtext,
'edit_text_md5': md5,
'submit': 'Change Plan'}
response = self._get_page('edit.php', post=edit_info)
soup = bs4.BeautifulSoup(response.text, "html5lib")
alert = soup.find('div', {'class': 'alertmessage'})
info = soup.find('div', {'class': 'infomessage'})
if alert is not None:
# some kind of error
msg = self._parse_message(alert)
raise PlansError(msg['body'])
elif info is None:
raise PlansError('Plans did not verify update')
else:
# probably success
msg = self._parse_message(info)
return msg['body']
def get_autofinger(self):
"""
Retrieve all levels of the autofinger (autoread) list.
Returns a dictionary where the keys are the group names
"Level 1", "Level 2", etc. and the values are a list of
usernames waiting to be read.
"""
# this actually doesn't scrape; there's a function for it
# in the old JSON API.
get = {'task': 'autofingerlist'}
response = self._get_page('api/1/index.php', get=get)
data = json.loads(response.text)
# the returned JSON is crufty; clean it up
autofinger = {}
for group in data['autofingerList']:
name = "Level %s" % group['level']
autofinger[name] = group['usernames']
return autofinger
def read_plan(self, plan):
"""
Retrieve the contents of the specified plan.
Returns two objects: the plan header (as a python dictionary)
        and the plan text (in HTML format)
"""
get = {'searchname': plan}
response = self._get_page('read.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
header = soup.find('div', {'id': 'header'})
text = soup.find('div', {'class': 'plan_text'})
if text is None or header is None:
# probably a nonexistent user
alert = soup.find('div', {'class': 'alertmessage'})
msg = self._parse_message(alert)
raise PlansError(msg['title'])
# convert header html into a python dictionary
header_dict = {}
for key in ('username', 'planname'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).contents
value = str(content[0]) if len(content) > 0 else None
header_dict[key] = value
for key in ('lastupdated', 'lastlogin'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).find(
'span', {'class': 'long'}
).contents
if len(content) > 0:
value = str(content[0])
value = parse_plans_date(value, tz_name=self.server_tz)
else:
value = None
header_dict[key] = value
text.hidden = True # prevents BS from wrapping contents in
# <div> upon conversion to unicode string
plan = text.decode(formatter=self._html_esc) # soup to unicode
assert plan[0] == '\n' # drop leading newline
plan = self._canonicalize_plantext(plan[1:])
return header_dict, plan
def search_plans(self, term, planlove=False):
"""
Search plans for the provided ``term``.
If ``planlove`` is ``True``, ``term`` is a username, and the
search will be for incidences of planlove for that user.
returns: list of plans upon which the search term was found.
each list element is a 3-tuple:
- plan name
- number of occurrences of search term on the plan
- list of plan excerpts giving context
the length of the excerpt list may be equal to or less than
the number of occurrences of the search term, since
overlapping excerpts are consolidated.
"""
get = {'mysearch': term,
'planlove': int(bool(planlove))}
response = self._get_page('search.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'search_results'})
if results is None:
return [] # no results
# results are grouped by the plan
# on which the result was found
user_groups = results.findAll(
'div', {'class': 'result_user_group'})
resultlist = []
for group in user_groups:
user = group.find('a', {'class': 'planlove'}).contents[0]
count = group.find('span').contents[0]
# now extract snippets
snippetlist = group.findAll('li')
snippets = []
for li in snippetlist:
tag = li.find('span')
tag.hidden = True # prevents BS from wrapping contents in
# <span> upon conversion to unicode string
snip = tag.decode(formatter=self._html_esc) # soup to unicode
snip = self._canonicalize_plantext(snip)
snippets.append(snip)
resultlist.append((str(user), int(count), snippets))
return resultlist
def planwatch(self, hours=12):
"""
Return plans updated in the last ``hours`` hours.
The result is a list of (username, timestamp) 2-tuples.
"""
post = {'mytime': str(hours)}
response = self._get_page('planwatch.php', post=post)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'new_plan_list'})
new_plans = results.findAll('div', {'class': 'newplan'})
resultlist = []
for div in new_plans:
user = div.find('a', {'class': 'planlove'}).contents[0]
time = div.find('span').contents[0]
time = parse_plans_date(time, tz_name=self.server_tz)
resultlist.append((user, time))
return resultlist
| self.cookiejar = cookiejar | conditional_block |
scraper.py | #!/usr/bin/env python
"""
Provides client interface to Plans.
"""
import sys
if sys.version_info >= (3,3):
from urllib.parse import urlparse, parse_qsl
from http.cookiejar import LWPCookieJar
from html.parser import HTMLParser
elif sys.version_info < (3,):
from urlparse import urlparse, parse_qsl
from cookielib import LWPCookieJar
from HTMLParser import HTMLParser
str = unicode
import json
import re
import bs4
import requests
from .util import plans_md5, convert_endings, parse_plans_date
class PlansError(Exception):
"""Exception raised when there is an error talking to plans."""
pass
class PlansPageParser(HTMLParser):
"""HTML parser for GrinnellPlans pages."""
def handle_starttag(self, tag, attrs):
if tag == 'body':
# parse out id of body tag
# (can use to identify page)
self.page_id = dict(attrs).get('id', None)
if self.page_id is None:
# old interface <body> tag has no attrs
raise PlansError('Postmodern interface required')
if tag == 'a':
# parse out username to see who we're logged in as.
# amazingly, the only place this reliably appears
# is in the bug report link at the bottom of every page.
attrs = dict(attrs)
href = urlparse(attrs.get('href'))
if (href.netloc == 'github.com'
and href.path == '/grinnellplans/grinnellplans-php/issues/new'):
# if this is the bug submission link
query = parse_qsl(href.query)
comment = dict(query)['body']
# find username in submission content using brackets
start, stop = comment.index('['), comment.index(']')
self.username = comment[start + 1:stop]
# -------------------------------------------
# PLANS SCRAPEY-I
# get it? like "api" except... oh, never mind
# -------------------------------------------
class PlansConnection(object):
"""
Encapsulates an active login to plans.
"""
def __init__(self, cookiejar=None,
base_url="https://www.grinnellplans.com",
server_tz='US/Central'):
"""
Create a new plans connection.
Optional keyword arguments:
cookiejar -- an existing cookielib.CookieJar to store
credentials in.
base_url -- URL at which to access plans, no trailing slash.
server_tz -- Name of the timezone used by the server.
This class will convert dates to UTC.
"""
self.base_url = base_url
self.server_tz = server_tz
if cookiejar is None:
self.cookiejar = LWPCookieJar()
else:
self.cookiejar = cookiejar
self.session = requests.Session()
self.session.cookies = self.cookiejar
self.parser = PlansPageParser()
self.username = None
def _get_page(self, name, get=None, post=None):
"""
Retrieve an HTML page from plans.
"""
method = 'GET' if post is None else 'POST'
url = '/'.join((self.base_url, name))
req = requests.Request(method, url, params=get, data=post)
prepped = self.session.prepare_request(req)
try:
handle = self.session.send(prepped,
verify=url.startswith('https'))
except requests.exceptions.ConnectionError:
err = "Check your internet connection. Plans could also be down."
raise PlansError(err)
return handle
def _parse_message(self, soup):
"""
Scrape details from an infomessage or alertmessage div.
Returns a dictionary of the message parameters.
"""
kind, = soup.attrs[u'class']
title = soup.findChild().text
body = ''.join(t.text for t in soup.findChildren()[1:])
message = dict(kind=kind, title=title, body=body)
for val in message.values():
assert type(val) == str
return message
def _canonicalize_plantext(self, plan):
"""
Modify reserialized plan text to match what was served.
For consistency, we want to return plan text *exactly* how it
is formatted when served. However, variants of certain tags
(e.g. <br> vs <br/>) are syntactically equivalent in HTML, and
may be interchanged when parsed and reserialized.
This function manually changes the formatting returned by our
parser to more closely match that given by plans.
"""
# Our parser will correct <hr> and <br> to self closing tags
plan = plan.replace('<br/>', '<br>')
plan = plan.replace('<hr/>', '<hr>')
# put attributes in the right order because I have OCD
plan = re.sub(r'<a class="([^\s]*)" href="([^\s]*)">',
r'<a href="\2" class="\1">', plan)
# to avoid playing whack-a-mole, we should configure the
# parser to not do this, or else treat contents of
# <div class="plan_text"> tags as plain text
# (not sure if this is possible)
return plan
@staticmethod
def _html_esc(string):
"""
Replaces certain characters with html escape sequences.
Meant to be passed to the BS4 'decode' method as kwarg 'formatter'.
By default, BS4 only replaces angle brackets and ampersands with
the html escape sequence, but text served by plans also replaces
        double quotes. Using this function as BS4's decoding formatter
        reproduces that behavior.
"""
repls = {
'<': 'lt',
'>': 'gt',
'&': 'amp',
'"': 'quot',
}
def repl(matchobj):
return "&%s;" % repls[matchobj.group(0)]
regex = "([%s])" % ''.join(repls.keys())
return re.sub(regex, repl, string)
def plans_login(self, username='', password=''):
"""
Log into plans.
Returns True on success, False on failure. Leave username and
password blank to check an existing login.
"""
# the provided username and password ONLY get checked
# by the plans server if our cookie is expired.
# hence, if we've logged in recently, this will return True even
# if un/pw are not provided or are otherwise bad.
login_info = {'username': username,
'password': password,
'submit': 'Login'}
response = self._get_page('index.php', post=login_info)
# if login is successful, we'll be redirected to home
success = response.url[-9:] == '/home.php'
if success:
self.parser.feed(response.text) # parse out username
self.username = self.parser.username
return success
def get_edit_text(self):
"""
Retrieve contents of the edit plan field.
Returns the edit_text of the plan and its md5 hash,
as computed on the server side.
"""
# grab edit page
response = self._get_page('edit.php')
html = response.text
# parse out existing plan
soup = bs4.BeautifulSoup(html, 'html5lib')
plan = soup.find('textarea')
if plan is None:
raise PlansError("Couldn't get edit text, are we logged in?")
else:
plan = u'' + plan.contents[0]
# prepending the empty string somehow prevents BS from
# escaping all the HTML characters (weird)
assert type(plan) == str
# convert to CRLF line endings
plan = convert_endings(plan, 'CRLF')
# parse out plan md5
md5sum = soup.find('input',
attrs={'name': 'edit_text_md5'}).attrs['value']
# also, explicitly compute the hash, for kicks
assert md5sum == plans_md5(plan)
# verify that username has not changed
assert self.username == self.parser.username
return plan, md5sum
def set_edit_text(self, newtext, md5):
"""
Update plan with new content.
To prevent errors, the server does a hash check on the existing
plan before replacing it with the new one. We provide an
md5 sum to confirm that yes, we really want to update the plan.
Returns info message.
"""
# convert to CRLF line endings
newtext = convert_endings(newtext, 'CRLF')
newtext = newtext.encode('utf8')
edit_info = {'plan': newtext,
'edit_text_md5': md5,
'submit': 'Change Plan'}
response = self._get_page('edit.php', post=edit_info)
soup = bs4.BeautifulSoup(response.text, "html5lib")
alert = soup.find('div', {'class': 'alertmessage'})
info = soup.find('div', {'class': 'infomessage'})
if alert is not None:
# some kind of error
msg = self._parse_message(alert)
raise PlansError(msg['body'])
elif info is None:
raise PlansError('Plans did not verify update')
else:
# probably success
msg = self._parse_message(info)
return msg['body']
def get_autofinger(self):
"""
Retrieve all levels of the autofinger (autoread) list.
Returns a dictionary where the keys are the group names
"Level 1", "Level 2", etc. and the values are a list of
usernames waiting to be read.
"""
# this actually doesn't scrape; there's a function for it
# in the old JSON API.
get = {'task': 'autofingerlist'}
response = self._get_page('api/1/index.php', get=get)
data = json.loads(response.text)
# the returned JSON is crufty; clean it up
autofinger = {}
for group in data['autofingerList']:
name = "Level %s" % group['level']
autofinger[name] = group['usernames']
return autofinger
def read_plan(self, plan):
"""
Retrieve the contents of the specified plan.
Returns two objects: the plan header (as a python dictionary)
        and the plan text (in HTML format)
"""
get = {'searchname': plan}
response = self._get_page('read.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
header = soup.find('div', {'id': 'header'})
text = soup.find('div', {'class': 'plan_text'})
if text is None or header is None:
# probably a nonexistent user
alert = soup.find('div', {'class': 'alertmessage'})
msg = self._parse_message(alert)
raise PlansError(msg['title'])
# convert header html into a python dictionary
header_dict = {}
for key in ('username', 'planname'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).contents
value = str(content[0]) if len(content) > 0 else None
header_dict[key] = value
for key in ('lastupdated', 'lastlogin'):
content = header.find(
'li', {'class': key}
).find(
'span', {'class': 'value'}
).find(
'span', {'class': 'long'}
).contents
if len(content) > 0:
value = str(content[0])
value = parse_plans_date(value, tz_name=self.server_tz)
else:
value = None
header_dict[key] = value
text.hidden = True # prevents BS from wrapping contents in
# <div> upon conversion to unicode string
plan = text.decode(formatter=self._html_esc) # soup to unicode
assert plan[0] == '\n' # drop leading newline
plan = self._canonicalize_plantext(plan[1:])
return header_dict, plan
def search_plans(self, term, planlove=False):
"""
Search plans for the provided ``term``.
If ``planlove`` is ``True``, ``term`` is a username, and the
search will be for incidences of planlove for that user.
returns: list of plans upon which the search term was found.
each list element is a 3-tuple:
- plan name
- number of occurrences of search term on the plan
- list of plan excerpts giving context
the length of the excerpt list may be equal to or less than
the number of occurrences of the search term, since
overlapping excerpts are consolidated.
"""
get = {'mysearch': term,
'planlove': int(bool(planlove))}
response = self._get_page('search.php', get=get)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'search_results'})
if results is None:
return [] # no results
# results are grouped by the plan
# on which the result was found
user_groups = results.findAll(
'div', {'class': 'result_user_group'})
resultlist = []
for group in user_groups:
user = group.find('a', {'class': 'planlove'}).contents[0]
count = group.find('span').contents[0]
# now extract snippets
snippetlist = group.findAll('li')
snippets = []
for li in snippetlist:
tag = li.find('span')
tag.hidden = True # prevents BS from wrapping contents in
# <span> upon conversion to unicode string
snip = tag.decode(formatter=self._html_esc) # soup to unicode
snip = self._canonicalize_plantext(snip)
snippets.append(snip)
resultlist.append((str(user), int(count), snippets))
return resultlist
def planwatch(self, hours=12):
| """
Return plans updated in the last ``hours`` hours.
The result is a list of (username, timestamp) 2-tuples.
"""
post = {'mytime': str(hours)}
response = self._get_page('planwatch.php', post=post)
soup = bs4.BeautifulSoup(response.text, 'html5lib')
results = soup.find('ul', {'id': 'new_plan_list'})
new_plans = results.findAll('div', {'class': 'newplan'})
resultlist = []
for div in new_plans:
user = div.find('a', {'class': 'planlove'}).contents[0]
time = div.find('span').contents[0]
time = parse_plans_date(time, tz_name=self.server_tz)
resultlist.append((user, time))
return resultlist | identifier_body |
|
AnimePageFetcher.py | import requests
import re
import sys
from bs4 import BeautifulSoup
import time
from datetime import datetime
import urllib
# Finds instances of #24332
rank_regex = re.compile(r"#(\d+)")
# Finds instances of 342,543,212
number_regex = re.compile(r"[\d,]+")
# Finds instances of 4 hr.
hours_regex = re.compile(r"(\d+) hr\.")
# Finds instances of 34 min.
min_regex = re.compile(r"(\d+) min\.")
# Finds instances of 12 sec.
sec_regex = re.compile(r"(\d+) sec\.")
# Finds <h2>Information</h2>
info_regex = re.compile(r"<h2>\s*Information\s*</h2>")
# Finds <h2>Statistics</h2>
stats_regex = re.compile(r"<h2>\s*Statistics\s*</h2>")
# Finds <h2>External Links</h2>
stats_end_regex = re.compile(r'<div class="clearfix mauto mt16"')
# Finds <h2>Related Anime</h2>
related_anime_regex = re.compile(r"<h2>\s*Related Anime\s*</h2>")
# Finds <h2>Summary Stats</h2>
stats_summary_regex = re.compile(r"<h2>\s*Summary Stats\s*</h2>")
# Finds <h2>Score Stats</h2>
stats_score_regex = re.compile(r"<h2>\s*Score Stats\s*</h2>")
favorites_regex = re.compile(r"""<div>\s*<span class="dark_text">\s*Favorites:\s*</span>\s*([\d,]+)\s*</div>""")
# Returns an int version of a comma-separated number in a string.
# Assumes that the string has a number in it.
# e.g. extract_comma_number("Members \n 433,312") returns 433312 (int)
def extract_comma_number(str):
return int(number_regex.search(str).group(0).replace(",", ""))
def get_safe_url(url):
url = url.encode("utf8")
slash_index = url.rfind("/")
url_title = url[slash_index+1:]
if "%" not in url_title:
url = url[:slash_index+1] + urllib.quote(url_title)
return url
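# Rough example with a made-up title: get_safe_url(u"https://myanimelist.net/anime/30/Neon Genesis")
# quotes only the final path segment, giving ".../anime/30/Neon%20Genesis";
# titles that already contain "%" are assumed pre-encoded and left untouched.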
def get_html(url, verbose=False):
if verbose: sys.stdout.write("Making get request to " + url + " ... ")
r = requests.get(url)
if r.status_code != requests.codes.ok:
if verbose:
print "[ERROR] request.get returned non-OK status. Got:", r.status_code
return None
else:
if verbose:
sys.stdout.write("OK.\n")
sys.stdout.flush()
return r.text.encode('utf8')
def save_html(url, out_file, render_first=False):
html = ""
print "Making get request to", url
r = requests.get(url)
if r.status_code == requests.codes.ok:
print "OK"
html = r.text.encode('utf8')
else:
print "request.get returned non-OK status"
return None
print "Writing html to", out_file
print "Type is", type(html)
with open(out_file, 'w') as f:
soup = BeautifulSoup(html, 'html.parser')
html = soup.prettify()
f.write(html.encode("utf8"))
return html
def load_html_from_file(file):
with open(file, 'r') as f:
html = f.read()
return html
#these are the stats found on the stat page of an anime
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/stats
def getGeneralStatistics(soup, aggregate_dict={}):
return aggregate_dict
# Returns (type, id) where type is str in ["anime", "manga"], and id an int
def getCategoryAndIDFromUrl(url):
url = str(url)
result = url.split("myanimelist.net/")
if(len(result) > 1):
result = result[1].split("/")
content_category = result[0]
content_id = int(result[1])
return (content_category, content_id)
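# e.g. the Evangelion URL used in the comments below,
# "https://myanimelist.net/anime/30/Neon_Genesis_Evangelion", yields ("anime", 30).
# URLs without "myanimelist.net/" fall through and implicitly return None.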
#these are generic info like the producers and the source found in the
#sidebar of an anime page
#Example https://myanimelist.net/anime/30/Neon_Genesis_Evangelion
def getGeneralInformation(html, aggregate_dict={}):
soup = BeautifulSoup(html, 'html.parser')
# Title
title = soup.find("h1", class_="h1").get_text()
aggregate_dict["title"] = title
# Score
score_tag = soup.find("div", class_="fl-l score")
score_users = extract_comma_number(score_tag["data-user"])
score_value = 0.0
try:
score_value = float(score_tag.get_text().strip())
except:
score_value = 0.0
aggregate_dict["score_users"] = score_users
aggregate_dict["score"] = score_value
# Rank
rank_text = soup.find("span", class_="numbers ranked").get_text()
rank_match = rank_regex.search(rank_text)
if rank_match is None:
aggregate_dict["rank"] = None
else:
rank_value = int(rank_match.group(1))
aggregate_dict["rank"] = rank_value
# Popularity
popularity_text = soup.find("span", class_="numbers popularity").get_text()
popularity_value = int(rank_regex.search(popularity_text).group(1))
aggregate_dict["popularity"] = popularity_value
# Members
members_text = soup.find("span", class_="numbers members").get_text().strip()
members_value = int(extract_comma_number(members_text))
aggregate_dict["members"] = members_value
    # Synopsis
synopsis_soup = soup.find("span", itemprop="description")
if synopsis_soup is None:
aggregate_dict["synopsis"] = None
else:
synopsis_text = " ".join(synopsis_soup.strings)
aggregate_dict["synopsis"] = synopsis_text
#RelatedAnime
related_table = soup.find_all("table", class_="anime_detail_related_anime")
if len(related_table) == 0:
aggregate_dict["related_ids"] = []
else:
related_entries = [t['href'].strip() for t in related_table[0].find_all("a")]
related_entries = filter(lambda x: "/anime/" in x and len(x.split("/")[2]) > 0, related_entries)
related_titles = map(lambda x: int(x.split("/")[2]), related_entries)
aggregate_dict["related_ids"] = related_titles
# Statistics/Favorites (we have everything else)
favorites_match = favorites_regex.search(html)
favorites_text = favorites_match.group(1)
aggregate_dict["favorites"] = int(favorites_text.replace(",", ""))
# Information section
info_dict = extract_info_section(html)
# Info/Type
aggregate_dict["type"] = info_dict.get("Type")
# Info/Episodes
if "Episodes" in info_dict:
episodes_value = 0
# Some anime pages have "Unknown" for number of episodes.
# 0 represents unknown, because no anime can truly have 0 episodes.
try:
episodes_value = int(info_dict["Episodes"])
except:
episodes_value = 0
aggregate_dict["episodes"] = episodes_value
else:
aggregate_dict["episodes"] = None
# Info/Status
aggregate_dict["status"] = info_dict.get("Status")
# Info/Aired
aired_start = None
aired_end = None
if "Aired" in info_dict:
aired_text = info_dict["Aired"]
# Some animes are aired on one date. Others run for some time period.
# Those that run over a duration have "to" in the text.
if "to" in aired_text:
start_end_split = aired_text.split("to")
aired_start = parse_date(start_end_split[0])
# Some currently running animes have ? for their end date.
if "?" in start_end_split[1]:
aired_end = None
else:
aired_end = parse_date(start_end_split[1])
else:
aired_start = parse_date(aired_text)
aired_end = aired_start
aggregate_dict["aired_start"] = aired_start
aggregate_dict["aired_end"] = aired_end
# Info/Premiered
aggregate_dict["premiered"] = info_dict.get("Premiered")
# Info/Broadcast
aggregate_dict["broadcast"] = info_dict.get("Broadcast")
# Info/Producers
if "Producers" in info_dict:
aggregate_dict["producers"] = parse_info_list(info_dict["Producers"])
else:
aggregate_dict["producers"] = None
# Info/Licensors
if "Licensors" in info_dict:
aggregate_dict["licensors"] = parse_info_list(info_dict["Licensors"])
else:
aggregate_dict["licensors"] = None
# Info/Studios
if "Studios" in info_dict:
aggregate_dict["studios"] = parse_info_list(info_dict["Studios"])
else:
aggregate_dict["studios"] = studios_list
# Info/Source
aggregate_dict["source"] = info_dict.get("Source")
# Info/Genres
if "Genres" in info_dict:
aggregate_dict["genres"] = parse_info_list(info_dict["Genres"])
else:
aggregate_dict["genres"] = None
# Info/Duration
if "Duration" in info_dict:
duration_text = info_dict["Duration"]
mins = 0.0
# Hours
match = hours_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) * 60.0
# Minutes
match = min_regex.search(duration_text)
if match is not None:
mins += float(match.group(1))
# Seconds
match = sec_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) / 60.0
aggregate_dict["duration"] = mins
else:
aggregate_dict["duration"] = None
# Info/Rating
if "Rating" in info_dict:
rating_text = info_dict["Rating"]
rating_shorthand = rating_text.split(" - ")[0].strip()
aggregate_dict["rating"] = rating_shorthand
else:
aggregate_dict["rating"] = None
return aggregate_dict
#Fetches the production staff associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters#staff
def getStaff(html):
return
#Fetches the characters and voice actors associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters
def getCharactersAndJapaneseCast(html):
return
#Fetches the list of other related anime, same series etc.
def getRelatedTitles(html):
return
def cooldown():
COOLDOWN_IN_SECONDS = 0.5
time.sleep(COOLDOWN_IN_SECONDS)
def bs_preprocess(html):
"""remove distracting whitespaces and newline characters"""
pat = re.compile('(^[\s]+)|([\s]+$)', re.MULTILINE)
html = re.sub(pat, '', html) # remove leading and trailing whitespaces
html = re.sub('\n', ' ', html) # convert newlines to spaces
# this preserves newline delimiters
html = re.sub('[\s]+<', '<', html) # remove whitespaces before opening tags
html = re.sub('>[\s]+', '>', html) # remove whitespaces after closing tags
return html
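# Toy illustration (not real MAL markup): bs_preprocess("<div>\n  <b> hi </b>\n</div>")
# comes back as roughly "<div><b>hi</b></div>" -- one line, with whitespace
# next to tags stripped so the regex section-splitting above is not thrown off
# by formatting-only whitespace.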
# Returns dictionary with data
def scrape_main_page(html, aggregate_data={}):
getGeneralInformation(html, aggregate_data)
return aggregate_data
# Returns dictionary with data
def | (html, aggregate_data={}):
getStatSummary(html, aggregate_data)
getStatDistribution(html, aggregate_data)
return aggregate_data
def getStatSummary(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_summary_regex.search(html).end()
end_index = stats_score_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
for field in field_list[:6]:
t = "users_" + str(field.split(':')[0]).lower().replace(" ", "_").replace("-","")
count = str(field.split(':')[1]).replace(',','')
aggregate_dict[t] = int(count)
return aggregate_dict
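# Assuming the summary block lists the usual MAL fields (Watching, Completed,
# On-Hold, Dropped, Plan to Watch, Total), the keys built above come out as
# users_watching, users_completed, users_onhold, users_dropped,
# users_plan_to_watch and users_total (lowercased, spaces -> underscores,
# hyphens removed).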
def getStatDistribution(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_score_regex.search(html).end()
stat_soup = BeautifulSoup(html[start_index:], 'html.parser')
table = stat_soup.find("table")
field_list = [t.get_text().strip() for t in table.find_all("tr")]
field_list = filter(lambda x: "votes" in x, field_list)
voteSum = 0
for text in field_list:
idx = extract_comma_number(text)
key = "score_" + str(idx) + "_votes"
start_idx = text.index("(")+1
end_idx = text.index(" vote")
count = text[start_idx:end_idx]
aggregate_dict[key] = int(count)
voteSum += int(count)
# Add in any missing scores
for i in xrange(1, 11):
if "score_" + str(i) + "_votes" not in aggregate_dict:
aggregate_dict["score_" + str(i) + "_votes"] = 0
aggregate_dict["score_total_votes"] = voteSum
#aggregate_dict["Watching"] = score_users
#aggregate_dict[""] = score_value
return aggregate_dict
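# After this runs, aggregate_dict carries score_1_votes .. score_10_votes plus
# score_total_votes; buckets absent from the page are filled in as 0, so all
# eleven keys are always present regardless of the fetched page.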
# Returns pair (success, data)
# If success if False, then some error occurred and data may be None or corrupted.
def getAllDataFromUrl(url):
success = True
# Make URL
url = get_safe_url(url)
data = {}
data["url"] = url
# Get main page html
try:
html = get_html(url, True)
retries = 0
while html is None:
print "Retrying after 5 seconds..."
time.sleep(5)
html = get_html(url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Page category and ID (i.e. ("anime", 345))
# Used for primary keys
page_category, page_id = getCategoryAndIDFromUrl(url)
data["category"] = page_category
data["id"] = page_id
# Scrape data from the html of the main page
try:
scrape_main_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' terminated early. Exception:", e.message
success = False
# Get stat html
stat_url = url + "/stats"
try:
html = get_html(stat_url, True)
retries = 0
while html is None:
print "Retrying fetching stats after 5 seconds..."
time.sleep(5)
html = get_html(stat_url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Scrape data from the html of the stats page
try:
scrape_stats_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' stats terminated early. Exception:", e.message
success = False
return (success, data)
# Returns the data in the Information section as a dict.
# The keys are the bolded text, and the values are everything that follows.
# e.g. if d is the return dictionary, and the page had
# "Type:\n Music\n Episodes:\n 1 \n Licensors: \n None found, add some\n"
# you would get:
# d["Type"] = "Music"
# d["Episodes"] = "1"
# d["Licensors"] = "None found, add some"
def extract_info_section(html):
info_match = info_regex.search(html)
stat_match = stats_regex.search(html)
info_html = html[info_match.end():stat_match.start()]
info_soup = BeautifulSoup(info_html, 'html.parser')
info_list = [t.get_text().strip() for t in info_soup.find_all("div")]
info_dict = {}
for info in info_list:
try:
split_data = info.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
info_dict[key] = value
except:
print "[ERROR] Cannot split Information field. Got:", info
return info_dict
def extract_stat_section(html):
start_index = stats_regex.search(html).end()
end_index = stats_end_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
field_dict = {}
for field in field_list:
try:
split_data = field.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
field_dict[key] = value
except:
print "[ERROR] Cannot split Statistics field. Got:", field
return field_dict
def parse_info_list(str):
if "none found" in str.lower():
return []
else:
return str.split(",")
def parse_date(s):
try:
return datetime.strptime(s.strip(), "%b %d, %Y").isoformat()
except:
print "[WARNING] Could not parse date normally. Got:", s
try:
d = datetime.strptime(s.strip(), "%b, %Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not get month, year format."
try:
d = datetime.strptime(s.strip(), "%Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not parse date at all. Returning None."
                return None
def example():
print("Start")
url = "https://myanimelist.net/anime/13391/Rakuen_Tsuihou__Expelled_from_Paradise"
data = getAllDataFromUrl(url)
print data[1]
print("Done")
needed_keys = set([
'aired_end',
'aired_start',
'broadcast',
'category',
'duration',
'episodes',
'favorites',
'genres',
'id',
'licensors',
'members',
'popularity',
'premiered',
'producers',
'rank',
'rating',
'related_ids',
'score',
'score_10_votes',
'score_1_votes',
'score_2_votes',
'score_3_votes',
'score_4_votes',
'score_5_votes',
'score_6_votes',
'score_7_votes',
'score_8_votes',
'score_9_votes',
'score_total_votes',
'score_users',
'source',
'status',
'studios',
'synopsis',
'title',
'type',
'url',
'users_completed',
'users_dropped',
'users_onhold',
'users_plan_to_watch',
'users_total',
'users_watching'
])
def validate(data):
return needed_keys.issubset(set(data.keys()))
#TODO Remove this when convenient
# d = getAllDataFromUrl("https://myanimelist.net/anime/10797/Kayoe_Chuugaku")[1]
# for k in d:
# print "{}: {}".format(k, d[k])
| scrape_stats_page | identifier_name |
AnimePageFetcher.py | import requests
import re
import sys
from bs4 import BeautifulSoup
import time
from datetime import datetime
import urllib
# Finds instances of #24332
rank_regex = re.compile(r"#(\d+)")
# Finds instances of 342,543,212
number_regex = re.compile(r"[\d,]+")
# Finds instances of 4 hr.
hours_regex = re.compile(r"(\d+) hr\.")
# Finds instances of 34 min.
min_regex = re.compile(r"(\d+) min\.")
# Finds instances of 12 sec.
sec_regex = re.compile(r"(\d+) sec\.")
# Finds <h2>Information</h2>
info_regex = re.compile(r"<h2>\s*Information\s*</h2>")
# Finds <h2>Statistics</h2>
stats_regex = re.compile(r"<h2>\s*Statistics\s*</h2>")
# Finds <h2>External Links</h2>
stats_end_regex = re.compile(r'<div class="clearfix mauto mt16"')
# Finds <h2>Related Anime</h2>
related_anime_regex = re.compile(r"<h2>\s*Related Anime\s*</h2>")
# Finds <h2>Summary Stats</h2>
stats_summary_regex = re.compile(r"<h2>\s*Summary Stats\s*</h2>")
# Finds <h2>Score Stats</h2>
stats_score_regex = re.compile(r"<h2>\s*Score Stats\s*</h2>")
favorites_regex = re.compile(r"""<div>\s*<span class="dark_text">\s*Favorites:\s*</span>\s*([\d,]+)\s*</div>""")
# Returns an int version of a comma separated number in a string
# Assumes that the string has a number in it.
# e.g. extract_comma_number("Members \n 433,312") returns 433312 (int)
def extract_comma_number(str):
return int(number_regex.search(str).group(0).replace(",", ""))
def get_safe_url(url):
url = url.encode("utf8")
slash_index = url.rfind("/")
url_title = url[slash_index+1:]
if "%" not in url_title:
url = url[:slash_index+1] + urllib.quote(url_title)
return url
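# A minimal sketch (hypothetical helper, not called by the scraper): get_safe_url only
# percent-encodes the final path segment, so plain ASCII URLs pass through unchanged.
def _example_safe_url():
    raw = "https://myanimelist.net/anime/30/Neon_Genesis_Evangelion"
    return get_safe_url(raw)  # unchanged here; non-ASCII titles would be percent-encoded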
def get_html(url, verbose=False):
if verbose: sys.stdout.write("Making get request to " + url + " ... ")
r = requests.get(url)
if r.status_code != requests.codes.ok:
if verbose:
print "[ERROR] request.get returned non-OK status. Got:", r.status_code
return None
else:
if verbose:
sys.stdout.write("OK.\n")
sys.stdout.flush()
return r.text.encode('utf8')
def save_html(url, out_file, render_first=False):
html = ""
print "Making get request to", url
r = requests.get(url)
if r.status_code == requests.codes.ok:
print "OK"
html = r.text.encode('utf8')
else:
print "request.get returned non-OK status"
return None | with open(out_file, 'w') as f:
soup = BeautifulSoup(html, 'html.parser')
html = soup.prettify()
f.write(html.encode("utf8"))
return html
def load_html_from_file(file):
with open(file, 'r') as f:
html = f.read()
return html
#these are the stats found on the stat page of an anime
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/stats
def getGeneralStatistics(soup, aggregate_dict={}):
return aggregate_dict
# Returns (type, id) where type is str in ["anime", "manga"], and id an int
def getCategoryAndIDFromUrl(url):
url = str(url)
result = url.split("myanimelist.net/")
if(len(result) > 1):
result = result[1].split("/")
content_category = result[0]
content_id = int(result[1])
return (content_category, content_id)
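# A minimal sketch (hypothetical helper, not called by the scraper): the Evangelion URL
# used in the comments above maps to ("anime", 30).
def _example_category_and_id():
    return getCategoryAndIDFromUrl("https://myanimelist.net/anime/30/Neon_Genesis_Evangelion")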
#these are generic info like the producers and the source found in the
#sidebar of an anime page
#Example https://myanimelist.net/anime/30/Neon_Genesis_Evangelion
def getGeneralInformation(html, aggregate_dict={}):
soup = BeautifulSoup(html, 'html.parser')
# Title
title = soup.find("h1", class_="h1").get_text()
aggregate_dict["title"] = title
# Score
score_tag = soup.find("div", class_="fl-l score")
score_users = extract_comma_number(score_tag["data-user"])
score_value = 0.0
try:
score_value = float(score_tag.get_text().strip())
except:
score_value = 0.0
aggregate_dict["score_users"] = score_users
aggregate_dict["score"] = score_value
# Rank
rank_text = soup.find("span", class_="numbers ranked").get_text()
rank_match = rank_regex.search(rank_text)
if rank_match is None:
aggregate_dict["rank"] = None
else:
rank_value = int(rank_match.group(1))
aggregate_dict["rank"] = rank_value
# Popularity
popularity_text = soup.find("span", class_="numbers popularity").get_text()
popularity_value = int(rank_regex.search(popularity_text).group(1))
aggregate_dict["popularity"] = popularity_value
# Members
members_text = soup.find("span", class_="numbers members").get_text().strip()
members_value = int(extract_comma_number(members_text))
aggregate_dict["members"] = members_value
    # Synopsis
synopsis_soup = soup.find("span", itemprop="description")
if synopsis_soup is None:
aggregate_dict["synopsis"] = None
else:
synopsis_text = " ".join(synopsis_soup.strings)
aggregate_dict["synopsis"] = synopsis_text
#RelatedAnime
related_table = soup.find_all("table", class_="anime_detail_related_anime")
if len(related_table) == 0:
aggregate_dict["related_ids"] = []
else:
related_entries = [t['href'].strip() for t in related_table[0].find_all("a")]
related_entries = filter(lambda x: "/anime/" in x and len(x.split("/")[2]) > 0, related_entries)
related_titles = map(lambda x: int(x.split("/")[2]), related_entries)
aggregate_dict["related_ids"] = related_titles
# Statistics/Favorites (we have everything else)
favorites_match = favorites_regex.search(html)
favorites_text = favorites_match.group(1)
aggregate_dict["favorites"] = int(favorites_text.replace(",", ""))
# Information section
info_dict = extract_info_section(html)
# Info/Type
aggregate_dict["type"] = info_dict.get("Type")
# Info/Episodes
if "Episodes" in info_dict:
episodes_value = 0
# Some anime pages have "Unknown" for number of episodes.
# 0 represents unknown, because no anime can truly have 0 episodes.
try:
episodes_value = int(info_dict["Episodes"])
except:
episodes_value = 0
aggregate_dict["episodes"] = episodes_value
else:
aggregate_dict["episodes"] = None
# Info/Status
aggregate_dict["status"] = info_dict.get("Status")
# Info/Aired
aired_start = None
aired_end = None
if "Aired" in info_dict:
aired_text = info_dict["Aired"]
# Some animes are aired on one date. Others run for some time period.
# Those that run over a duration have "to" in the text.
if "to" in aired_text:
start_end_split = aired_text.split("to")
aired_start = parse_date(start_end_split[0])
# Some currently running animes have ? for their end date.
if "?" in start_end_split[1]:
aired_end = None
else:
aired_end = parse_date(start_end_split[1])
else:
aired_start = parse_date(aired_text)
aired_end = aired_start
aggregate_dict["aired_start"] = aired_start
aggregate_dict["aired_end"] = aired_end
# Info/Premiered
aggregate_dict["premiered"] = info_dict.get("Premiered")
# Info/Broadcast
aggregate_dict["broadcast"] = info_dict.get("Broadcast")
# Info/Producers
if "Producers" in info_dict:
aggregate_dict["producers"] = parse_info_list(info_dict["Producers"])
else:
aggregate_dict["producers"] = None
# Info/Licensors
if "Licensors" in info_dict:
aggregate_dict["licensors"] = parse_info_list(info_dict["Licensors"])
else:
aggregate_dict["licensors"] = None
# Info/Studios
if "Studios" in info_dict:
aggregate_dict["studios"] = parse_info_list(info_dict["Studios"])
else:
aggregate_dict["studios"] = studios_list
# Info/Source
aggregate_dict["source"] = info_dict.get("Source")
# Info/Genres
if "Genres" in info_dict:
aggregate_dict["genres"] = parse_info_list(info_dict["Genres"])
else:
aggregate_dict["genres"] = None
# Info/Duration
if "Duration" in info_dict:
duration_text = info_dict["Duration"]
mins = 0.0
# Hours
match = hours_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) * 60.0
# Minutes
match = min_regex.search(duration_text)
if match is not None:
mins += float(match.group(1))
# Seconds
match = sec_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) / 60.0
aggregate_dict["duration"] = mins
else:
aggregate_dict["duration"] = None
# Info/Rating
if "Rating" in info_dict:
rating_text = info_dict["Rating"]
rating_shorthand = rating_text.split(" - ")[0].strip()
aggregate_dict["rating"] = rating_shorthand
else:
aggregate_dict["rating"] = None
return aggregate_dict
#Fetches the production staff associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters#staff
def getStaff(html):
return
#Fetches the characters and voice actors associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters
def getCharactersAndJapaneseCast(html):
return
#Fetches the list of other related anime, same series etc.
def getRelatedTitles(html):
return
def cooldown():
COOLDOWN_IN_SECONDS = 0.5
time.sleep(COOLDOWN_IN_SECONDS)
def bs_preprocess(html):
"""remove distracting whitespaces and newline characters"""
pat = re.compile('(^[\s]+)|([\s]+$)', re.MULTILINE)
html = re.sub(pat, '', html) # remove leading and trailing whitespaces
html = re.sub('\n', ' ', html) # convert newlines to spaces
# this preserves newline delimiters
html = re.sub('[\s]+<', '<', html) # remove whitespaces before opening tags
html = re.sub('>[\s]+', '>', html) # remove whitespaces after closing tags
return html
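# A minimal sketch (hypothetical markup, helper not called by the scraper): bs_preprocess
# collapses whitespace around tags so the regex-based section splits see contiguous markup.
def _example_bs_preprocess():
    return bs_preprocess("<div>\n  Type:\n  Music\n</div>")  # "<div>Type: Music</div>"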
# Returns dictionary with data
def scrape_main_page(html, aggregate_data={}):
getGeneralInformation(html, aggregate_data)
return aggregate_data
# Returns dictionary with data
def scrape_stats_page(html, aggregate_data={}):
getStatSummary(html, aggregate_data)
getStatDistribution(html, aggregate_data)
return aggregate_data
def getStatSummary(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_summary_regex.search(html).end()
end_index = stats_score_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
for field in field_list[:6]:
t = "users_" + str(field.split(':')[0]).lower().replace(" ", "_").replace("-","")
count = str(field.split(':')[1]).replace(',','')
aggregate_dict[t] = int(count)
return aggregate_dict
def getStatDistribution(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_score_regex.search(html).end()
stat_soup = BeautifulSoup(html[start_index:], 'html.parser')
table = stat_soup.find("table")
field_list = [t.get_text().strip() for t in table.find_all("tr")]
field_list = filter(lambda x: "votes" in x, field_list)
voteSum = 0
for text in field_list:
idx = extract_comma_number(text)
key = "score_" + str(idx) + "_votes"
start_idx = text.index("(")+1
end_idx = text.index(" vote")
count = text[start_idx:end_idx]
aggregate_dict[key] = int(count)
voteSum += int(count)
# Add in any missing scores
for i in xrange(1, 11):
if "score_" + str(i) + "_votes" not in aggregate_dict:
aggregate_dict["score_" + str(i) + "_votes"] = 0
aggregate_dict["score_total_votes"] = voteSum
#aggregate_dict["Watching"] = score_users
#aggregate_dict[""] = score_value
return aggregate_dict
# Returns pair (success, data)
# If success is False, then some error occurred and data may be None or corrupted.
def getAllDataFromUrl(url):
success = True
# Make URL
url = get_safe_url(url)
data = {}
data["url"] = url
# Get main page html
try:
html = get_html(url, True)
retries = 0
while html is None:
print "Retrying after 5 seconds..."
time.sleep(5)
html = get_html(url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Page category and ID (i.e. ("anime", 345))
# Used for primary keys
page_category, page_id = getCategoryAndIDFromUrl(url)
data["category"] = page_category
data["id"] = page_id
# Scrape data from the html of the main page
try:
scrape_main_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' terminated early. Exception:", e.message
success = False
# Get stat html
stat_url = url + "/stats"
try:
html = get_html(stat_url, True)
retries = 0
while html is None:
print "Retrying fetching stats after 5 seconds..."
time.sleep(5)
html = get_html(stat_url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Scrape data from the html of the stats page
try:
scrape_stats_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' stats terminated early. Exception:", e.message
success = False
return (success, data)
# Returns the data in the Information section as a dict.
# The keys are the bolded text, and the values are everything that follows.
# e.g. if d is the return dictionary, and the page had
# "Type:\n Music\n Episodes:\n 1 \n Licensors: \n None found, add some\n"
# you would get:
# d["Type"] = "Music"
# d["Episodes"] = "1"
# d["Licensors"] = "None found, add some"
def extract_info_section(html):
info_match = info_regex.search(html)
stat_match = stats_regex.search(html)
info_html = html[info_match.end():stat_match.start()]
info_soup = BeautifulSoup(info_html, 'html.parser')
info_list = [t.get_text().strip() for t in info_soup.find_all("div")]
info_dict = {}
for info in info_list:
try:
split_data = info.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
info_dict[key] = value
except:
print "[ERROR] Cannot split Information field. Got:", info
return info_dict
def extract_stat_section(html):
start_index = stats_regex.search(html).end()
end_index = stats_end_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
field_dict = {}
for field in field_list:
try:
split_data = field.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
field_dict[key] = value
except:
print "[ERROR] Cannot split Statistics field. Got:", field
return field_dict
def parse_info_list(str):
if "none found" in str.lower():
return []
else:
return str.split(",")
def parse_date(s):
try:
return datetime.strptime(s.strip(), "%b %d, %Y").isoformat()
except:
print "[WARNING] Could not parse date normally. Got:", s
try:
d = datetime.strptime(s.strip(), "%b, %Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not get month, year format."
try:
d = datetime.strptime(s.strip(), "%Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not parse date at all. Returning None."
                return None
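# A minimal sketch (hypothetical inputs, helper not called by the scraper): parse_date
# falls back from the full date format to month/year and then year-only formats.
def _example_parse_date():
    return parse_date("Oct 4, 1995"), parse_date("Oct, 1995"), parse_date("1995")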
def example():
print("Start")
url = "https://myanimelist.net/anime/13391/Rakuen_Tsuihou__Expelled_from_Paradise"
data = getAllDataFromUrl(url)
print data[1]
print("Done")
needed_keys = set([
'aired_end',
'aired_start',
'broadcast',
'category',
'duration',
'episodes',
'favorites',
'genres',
'id',
'licensors',
'members',
'popularity',
'premiered',
'producers',
'rank',
'rating',
'related_ids',
'score',
'score_10_votes',
'score_1_votes',
'score_2_votes',
'score_3_votes',
'score_4_votes',
'score_5_votes',
'score_6_votes',
'score_7_votes',
'score_8_votes',
'score_9_votes',
'score_total_votes',
'score_users',
'source',
'status',
'studios',
'synopsis',
'title',
'type',
'url',
'users_completed',
'users_dropped',
'users_onhold',
'users_plan_to_watch',
'users_total',
'users_watching'
])
def validate(data):
return needed_keys.issubset(set(data.keys()))
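# A minimal sketch (hypothetical helper, not called by the scraper): a crawl loop would
# pair getAllDataFromUrl with validate before persisting a record.
def _example_crawl_one():
    ok, record = getAllDataFromUrl("https://myanimelist.net/anime/30/Neon_Genesis_Evangelion")
    return ok and validate(record)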
#TODO Remove this when convenient
# d = getAllDataFromUrl("https://myanimelist.net/anime/10797/Kayoe_Chuugaku")[1]
# for k in d:
# print "{}: {}".format(k, d[k]) | print "Writing html to", out_file
print "Type is", type(html) | random_line_split |
AnimePageFetcher.py | import requests
import re
import sys
from bs4 import BeautifulSoup
import time
from datetime import datetime
import urllib
# Finds instances of #24332
rank_regex = re.compile(r"#(\d+)")
# Finds instances of 342,543,212
number_regex = re.compile(r"[\d,]+")
# Finds instances of 4 hr.
hours_regex = re.compile(r"(\d+) hr\.")
# Finds instances of 34 min.
min_regex = re.compile(r"(\d+) min\.")
# Finds instances of 12 sec.
sec_regex = re.compile(r"(\d+) sec\.")
# Finds <h2>Information</h2>
info_regex = re.compile(r"<h2>\s*Information\s*</h2>")
# Finds <h2>Statistics</h2>
stats_regex = re.compile(r"<h2>\s*Statistics\s*</h2>")
# Finds <h2>External Links</h2>
stats_end_regex = re.compile(r'<div class="clearfix mauto mt16"')
# Finds <h2>Related Anime</h2>
related_anime_regex = re.compile(r"<h2>\s*Related Anime\s*</h2>")
# Finds <h2>Summary Stats</h2>
stats_summary_regex = re.compile(r"<h2>\s*Summary Stats\s*</h2>")
# Finds <h2>Score Stats</h2>
stats_score_regex = re.compile(r"<h2>\s*Score Stats\s*</h2>")
favorites_regex = re.compile(r"""<div>\s*<span class="dark_text">\s*Favorites:\s*</span>\s*([\d,]+)\s*</div>""")
# Returns an int version of a comma separated number in a string
# Assumes that the string has a number in it.
# e.g. extract_comma_number("Members \n 433,312") returns 433312 (int)
def extract_comma_number(str):
return int(number_regex.search(str).group(0).replace(",", ""))
def get_safe_url(url):
url = url.encode("utf8")
slash_index = url.rfind("/")
url_title = url[slash_index+1:]
if "%" not in url_title:
url = url[:slash_index+1] + urllib.quote(url_title)
return url
def get_html(url, verbose=False):
if verbose: sys.stdout.write("Making get request to " + url + " ... ")
r = requests.get(url)
if r.status_code != requests.codes.ok:
if verbose:
print "[ERROR] request.get returned non-OK status. Got:", r.status_code
return None
else:
if verbose:
sys.stdout.write("OK.\n")
sys.stdout.flush()
return r.text.encode('utf8')
def save_html(url, out_file, render_first=False):
html = ""
print "Making get request to", url
r = requests.get(url)
if r.status_code == requests.codes.ok:
print "OK"
html = r.text.encode('utf8')
else:
print "request.get returned non-OK status"
return None
print "Writing html to", out_file
print "Type is", type(html)
with open(out_file, 'w') as f:
soup = BeautifulSoup(html, 'html.parser')
html = soup.prettify()
f.write(html.encode("utf8"))
return html
def load_html_from_file(file):
with open(file, 'r') as f:
html = f.read()
return html
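# A minimal sketch (hypothetical file name, helper not called by the scraper): save_html and
# load_html_from_file allow scraping a cached copy offline instead of re-fetching the page.
def _example_offline_scrape():
    html = load_html_from_file("cached_evangelion.html")
    return scrape_main_page(bs_preprocess(html), {})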
#these are the stats found on the stat page of an anime
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/stats
def getGeneralStatistics(soup, aggregate_dict={}):
return aggregate_dict
# Returns (type, id) where type is str in ["anime", "manga"], and id an int
def getCategoryAndIDFromUrl(url):
url = str(url)
result = url.split("myanimelist.net/")
if(len(result) > 1):
result = result[1].split("/")
content_category = result[0]
content_id = int(result[1])
return (content_category, content_id)
#these are generic info like the producers and the source found in the
#sidebar of an anime page
#Example https://myanimelist.net/anime/30/Neon_Genesis_Evangelion
def getGeneralInformation(html, aggregate_dict={}):
soup = BeautifulSoup(html, 'html.parser')
# Title
title = soup.find("h1", class_="h1").get_text()
aggregate_dict["title"] = title
# Score
score_tag = soup.find("div", class_="fl-l score")
score_users = extract_comma_number(score_tag["data-user"])
score_value = 0.0
try:
score_value = float(score_tag.get_text().strip())
except:
score_value = 0.0
aggregate_dict["score_users"] = score_users
aggregate_dict["score"] = score_value
# Rank
rank_text = soup.find("span", class_="numbers ranked").get_text()
rank_match = rank_regex.search(rank_text)
if rank_match is None:
aggregate_dict["rank"] = None
else:
rank_value = int(rank_match.group(1))
aggregate_dict["rank"] = rank_value
# Popularity
popularity_text = soup.find("span", class_="numbers popularity").get_text()
popularity_value = int(rank_regex.search(popularity_text).group(1))
aggregate_dict["popularity"] = popularity_value
# Members
members_text = soup.find("span", class_="numbers members").get_text().strip()
members_value = int(extract_comma_number(members_text))
aggregate_dict["members"] = members_value
    # Synopsis
synopsis_soup = soup.find("span", itemprop="description")
if synopsis_soup is None:
aggregate_dict["synopsis"] = None
else:
synopsis_text = " ".join(synopsis_soup.strings)
aggregate_dict["synopsis"] = synopsis_text
#RelatedAnime
related_table = soup.find_all("table", class_="anime_detail_related_anime")
if len(related_table) == 0:
aggregate_dict["related_ids"] = []
else:
related_entries = [t['href'].strip() for t in related_table[0].find_all("a")]
related_entries = filter(lambda x: "/anime/" in x and len(x.split("/")[2]) > 0, related_entries)
related_titles = map(lambda x: int(x.split("/")[2]), related_entries)
aggregate_dict["related_ids"] = related_titles
# Statistics/Favorites (we have everything else)
favorites_match = favorites_regex.search(html)
favorites_text = favorites_match.group(1)
aggregate_dict["favorites"] = int(favorites_text.replace(",", ""))
# Information section
info_dict = extract_info_section(html)
# Info/Type
aggregate_dict["type"] = info_dict.get("Type")
# Info/Episodes
if "Episodes" in info_dict:
episodes_value = 0
# Some anime pages have "Unknown" for number of episodes.
# 0 represents unknown, because no anime can truly have 0 episodes.
try:
episodes_value = int(info_dict["Episodes"])
except:
episodes_value = 0
aggregate_dict["episodes"] = episodes_value
else:
aggregate_dict["episodes"] = None
# Info/Status
aggregate_dict["status"] = info_dict.get("Status")
# Info/Aired
aired_start = None
aired_end = None
if "Aired" in info_dict:
aired_text = info_dict["Aired"]
# Some animes are aired on one date. Others run for some time period.
# Those that run over a duration have "to" in the text.
if "to" in aired_text:
start_end_split = aired_text.split("to")
aired_start = parse_date(start_end_split[0])
# Some currently running animes have ? for their end date.
if "?" in start_end_split[1]:
aired_end = None
else:
aired_end = parse_date(start_end_split[1])
else:
aired_start = parse_date(aired_text)
aired_end = aired_start
aggregate_dict["aired_start"] = aired_start
aggregate_dict["aired_end"] = aired_end
# Info/Premiered
aggregate_dict["premiered"] = info_dict.get("Premiered")
# Info/Broadcast
aggregate_dict["broadcast"] = info_dict.get("Broadcast")
# Info/Producers
if "Producers" in info_dict:
aggregate_dict["producers"] = parse_info_list(info_dict["Producers"])
else:
aggregate_dict["producers"] = None
# Info/Licensors
if "Licensors" in info_dict:
aggregate_dict["licensors"] = parse_info_list(info_dict["Licensors"])
else:
aggregate_dict["licensors"] = None
# Info/Studios
if "Studios" in info_dict:
aggregate_dict["studios"] = parse_info_list(info_dict["Studios"])
else:
aggregate_dict["studios"] = studios_list
# Info/Source
aggregate_dict["source"] = info_dict.get("Source")
# Info/Genres
if "Genres" in info_dict:
aggregate_dict["genres"] = parse_info_list(info_dict["Genres"])
else:
aggregate_dict["genres"] = None
# Info/Duration
if "Duration" in info_dict:
|
else:
aggregate_dict["duration"] = None
# Info/Rating
if "Rating" in info_dict:
rating_text = info_dict["Rating"]
rating_shorthand = rating_text.split(" - ")[0].strip()
aggregate_dict["rating"] = rating_shorthand
else:
aggregate_dict["rating"] = None
return aggregate_dict
#Fetches the production staff associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters#staff
def getStaff(html):
return
#Fetches the characters and voice actors associted with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters
def getCharactersAndJapaneseCast(html):
return
#Fetches the list of other related anime, same series etc.
def getRelatedTitles(html):
return
def cooldown():
COOLDOWN_IN_SECONDS = 0.5
time.sleep(COOLDOWN_IN_SECONDS)
def bs_preprocess(html):
"""remove distracting whitespaces and newline characters"""
pat = re.compile('(^[\s]+)|([\s]+$)', re.MULTILINE)
html = re.sub(pat, '', html) # remove leading and trailing whitespaces
html = re.sub('\n', ' ', html) # convert newlines to spaces
# this preserves newline delimiters
html = re.sub('[\s]+<', '<', html) # remove whitespaces before opening tags
html = re.sub('>[\s]+', '>', html) # remove whitespaces after closing tags
return html
# Returns dictionary with data
def scrape_main_page(html, aggregate_data={}):
getGeneralInformation(html, aggregate_data)
return aggregate_data
# Returns dictionary with data
def scrape_stats_page(html, aggregate_data={}):
getStatSummary(html, aggregate_data)
getStatDistribution(html, aggregate_data)
return aggregate_data
def getStatSummary(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_summary_regex.search(html).end()
end_index = stats_score_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
for field in field_list[:6]:
t = "users_" + str(field.split(':')[0]).lower().replace(" ", "_").replace("-","")
count = str(field.split(':')[1]).replace(',','')
aggregate_dict[t] = int(count)
return aggregate_dict
def getStatDistribution(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_score_regex.search(html).end()
stat_soup = BeautifulSoup(html[start_index:], 'html.parser')
table = stat_soup.find("table")
field_list = [t.get_text().strip() for t in table.find_all("tr")]
field_list = filter(lambda x: "votes" in x, field_list)
voteSum = 0
for text in field_list:
idx = extract_comma_number(text)
key = "score_" + str(idx) + "_votes"
start_idx = text.index("(")+1
end_idx = text.index(" vote")
count = text[start_idx:end_idx]
aggregate_dict[key] = int(count)
voteSum += int(count)
# Add in any missing scores
for i in xrange(1, 11):
if "score_" + str(i) + "_votes" not in aggregate_dict:
aggregate_dict["score_" + str(i) + "_votes"] = 0
aggregate_dict["score_total_votes"] = voteSum
#aggregate_dict["Watching"] = score_users
#aggregate_dict[""] = score_value
return aggregate_dict
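# A minimal sketch (hypothetical row text, helper not called by the scraper): each score row
# contributes its leading score and the vote count inside the parentheses.
def _example_score_row():
    text = "10 (5000 votes)"
    score = extract_comma_number(text)  # 10, the first number in the row
    votes = int(text[text.index("(") + 1:text.index(" vote")])  # 5000
    return score, votes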
# Returns pair (success, data)
# If success is False, then some error occurred and data may be None or corrupted.
def getAllDataFromUrl(url):
success = True
# Make URL
url = get_safe_url(url)
data = {}
data["url"] = url
# Get main page html
try:
html = get_html(url, True)
retries = 0
while html is None:
print "Retrying after 5 seconds..."
time.sleep(5)
html = get_html(url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Page category and ID (i.e. ("anime", 345))
# Used for primary keys
page_category, page_id = getCategoryAndIDFromUrl(url)
data["category"] = page_category
data["id"] = page_id
# Scrape data from the html of the main page
try:
scrape_main_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' terminated early. Exception:", e.message
success = False
# Get stat html
stat_url = url + "/stats"
try:
html = get_html(stat_url, True)
retries = 0
while html is None:
print "Retrying fetching stats after 5 seconds..."
time.sleep(5)
html = get_html(stat_url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Scrape data from the html of the stats page
try:
scrape_stats_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' stats terminated early. Exception:", e.message
success = False
return (success, data)
# Returns the data in the Information section as a dict.
# The keys are the bolded text, and the values are everything that follows.
# e.g. if d is the return dictionary, and the page had
# "Type:\n Music\n Episodes:\n 1 \n Licensors: \n None found, add some\n"
# you would get:
# d["Type"] = "Music"
# d["Episodes"] = "1"
# d["Licensors"] = "None found, add some"
def extract_info_section(html):
info_match = info_regex.search(html)
stat_match = stats_regex.search(html)
info_html = html[info_match.end():stat_match.start()]
info_soup = BeautifulSoup(info_html, 'html.parser')
info_list = [t.get_text().strip() for t in info_soup.find_all("div")]
info_dict = {}
for info in info_list:
try:
split_data = info.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
info_dict[key] = value
except:
print "[ERROR] Cannot split Information field. Got:", info
return info_dict
def extract_stat_section(html):
start_index = stats_regex.search(html).end()
end_index = stats_end_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
field_dict = {}
for field in field_list:
try:
split_data = field.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
field_dict[key] = value
except:
print "[ERROR] Cannot split Statistics field. Got:", field
return field_dict
def parse_info_list(str):
if "none found" in str.lower():
return []
else:
return str.split(",")
def parse_date(s):
try:
return datetime.strptime(s.strip(), "%b %d, %Y").isoformat()
except:
print "[WARNING] Could not parse date normally. Got:", s
try:
d = datetime.strptime(s.strip(), "%b, %Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not get month, year format."
try:
d = datetime.strptime(s.strip(), "%Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not parse date at all. Returning None."
return d
def example():
print("Start")
url = "https://myanimelist.net/anime/13391/Rakuen_Tsuihou__Expelled_from_Paradise"
data = getAllDataFromUrl(url)
print data[1]
print("Done")
needed_keys = set([
'aired_end',
'aired_start',
'broadcast',
'category',
'duration',
'episodes',
'favorites',
'genres',
'id',
'licensors',
'members',
'popularity',
'premiered',
'producers',
'rank',
'rating',
'related_ids',
'score',
'score_10_votes',
'score_1_votes',
'score_2_votes',
'score_3_votes',
'score_4_votes',
'score_5_votes',
'score_6_votes',
'score_7_votes',
'score_8_votes',
'score_9_votes',
'score_total_votes',
'score_users',
'source',
'status',
'studios',
'synopsis',
'title',
'type',
'url',
'users_completed',
'users_dropped',
'users_onhold',
'users_plan_to_watch',
'users_total',
'users_watching'
])
def validate(data):
return needed_keys.issubset(set(data.keys()))
#TODO Remove this when convenient
# d = getAllDataFromUrl("https://myanimelist.net/anime/10797/Kayoe_Chuugaku")[1]
# for k in d:
# print "{}: {}".format(k, d[k])
| duration_text = info_dict["Duration"]
mins = 0.0
# Hours
match = hours_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) * 60.0
# Minutes
match = min_regex.search(duration_text)
if match is not None:
mins += float(match.group(1))
# Seconds
match = sec_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) / 60.0
aggregate_dict["duration"] = mins | conditional_block |
AnimePageFetcher.py | import requests
import re
import sys
from bs4 import BeautifulSoup
import time
from datetime import datetime
import urllib
# Finds instances of #24332
rank_regex = re.compile(r"#(\d+)")
# Finds instances of 342,543,212
number_regex = re.compile(r"[\d,]+")
# Finds instances of 4 hr.
hours_regex = re.compile(r"(\d+) hr\.")
# Finds instances of 34 min.
min_regex = re.compile(r"(\d+) min\.")
# Finds instances of 12 sec.
sec_regex = re.compile(r"(\d+) sec\.")
# Finds <h2>Information</h2>
info_regex = re.compile(r"<h2>\s*Information\s*</h2>")
# Finds <h2>Statistics</h2>
stats_regex = re.compile(r"<h2>\s*Statistics\s*</h2>")
# Finds <h2>External Links</h2>
stats_end_regex = re.compile(r'<div class="clearfix mauto mt16"')
# Finds <h2>Related Anime</h2>
related_anime_regex = re.compile(r"<h2>\s*Related Anime\s*</h2>")
# Finds <h2>Summary Stats</h2>
stats_summary_regex = re.compile(r"<h2>\s*Summary Stats\s*</h2>")
# Finds <h2>Score Stats</h2>
stats_score_regex = re.compile(r"<h2>\s*Score Stats\s*</h2>")
favorites_regex = re.compile(r"""<div>\s*<span class="dark_text">\s*Favorites:\s*</span>\s*([\d,]+)\s*</div>""")
# Returns an int version of a comma separated number in a string
# Assumes that the string has a number in it.
# e.g. extract_comma_number("Members \n 433,312") returns 433312 (int)
def extract_comma_number(str):
return int(number_regex.search(str).group(0).replace(",", ""))
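# A minimal sketch (hypothetical helper, not called by the scraper): extract_comma_number
# pulls the first comma-separated number out of a label such as the Members counter.
def _example_members_count():
    return extract_comma_number("Members 433,312")  # 433312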
def get_safe_url(url):
url = url.encode("utf8")
slash_index = url.rfind("/")
url_title = url[slash_index+1:]
if "%" not in url_title:
url = url[:slash_index+1] + urllib.quote(url_title)
return url
def get_html(url, verbose=False):
if verbose: sys.stdout.write("Making get request to " + url + " ... ")
r = requests.get(url)
if r.status_code != requests.codes.ok:
if verbose:
print "[ERROR] request.get returned non-OK status. Got:", r.status_code
return None
else:
if verbose:
sys.stdout.write("OK.\n")
sys.stdout.flush()
return r.text.encode('utf8')
def save_html(url, out_file, render_first=False):
html = ""
print "Making get request to", url
r = requests.get(url)
if r.status_code == requests.codes.ok:
print "OK"
html = r.text.encode('utf8')
else:
print "request.get returned non-OK status"
return None
print "Writing html to", out_file
print "Type is", type(html)
with open(out_file, 'w') as f:
soup = BeautifulSoup(html, 'html.parser')
html = soup.prettify()
f.write(html.encode("utf8"))
return html
def load_html_from_file(file):
with open(file, 'r') as f:
html = f.read()
return html
#these are the stats found on the stat page of an anime
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/stats
def getGeneralStatistics(soup, aggregate_dict={}):
return aggregate_dict
# Returns (type, id) where type is str in ["anime", "manga"], and id an int
def getCategoryAndIDFromUrl(url):
url = str(url)
result = url.split("myanimelist.net/")
if(len(result) > 1):
result = result[1].split("/")
content_category = result[0]
content_id = int(result[1])
return (content_category, content_id)
#these are generic info like the producers and the source found in the
#sidebar of an anime page
#Example https://myanimelist.net/anime/30/Neon_Genesis_Evangelion
def getGeneralInformation(html, aggregate_dict={}):
soup = BeautifulSoup(html, 'html.parser')
# Title
title = soup.find("h1", class_="h1").get_text()
aggregate_dict["title"] = title
# Score
score_tag = soup.find("div", class_="fl-l score")
score_users = extract_comma_number(score_tag["data-user"])
score_value = 0.0
try:
score_value = float(score_tag.get_text().strip())
except:
score_value = 0.0
aggregate_dict["score_users"] = score_users
aggregate_dict["score"] = score_value
# Rank
rank_text = soup.find("span", class_="numbers ranked").get_text()
rank_match = rank_regex.search(rank_text)
if rank_match is None:
aggregate_dict["rank"] = None
else:
rank_value = int(rank_match.group(1))
aggregate_dict["rank"] = rank_value
# Popularity
popularity_text = soup.find("span", class_="numbers popularity").get_text()
popularity_value = int(rank_regex.search(popularity_text).group(1))
aggregate_dict["popularity"] = popularity_value
# Members
members_text = soup.find("span", class_="numbers members").get_text().strip()
members_value = int(extract_comma_number(members_text))
aggregate_dict["members"] = members_value
    # Synopsis
synopsis_soup = soup.find("span", itemprop="description")
if synopsis_soup is None:
aggregate_dict["synopsis"] = None
else:
synopsis_text = " ".join(synopsis_soup.strings)
aggregate_dict["synopsis"] = synopsis_text
#RelatedAnime
related_table = soup.find_all("table", class_="anime_detail_related_anime")
if len(related_table) == 0:
aggregate_dict["related_ids"] = []
else:
related_entries = [t['href'].strip() for t in related_table[0].find_all("a")]
related_entries = filter(lambda x: "/anime/" in x and len(x.split("/")[2]) > 0, related_entries)
related_titles = map(lambda x: int(x.split("/")[2]), related_entries)
aggregate_dict["related_ids"] = related_titles
# Statistics/Favorites (we have everything else)
favorites_match = favorites_regex.search(html)
favorites_text = favorites_match.group(1)
aggregate_dict["favorites"] = int(favorites_text.replace(",", ""))
# Information section
info_dict = extract_info_section(html)
# Info/Type
aggregate_dict["type"] = info_dict.get("Type")
# Info/Episodes
if "Episodes" in info_dict:
episodes_value = 0
# Some anime pages have "Unknown" for number of episodes.
# 0 represents unknown, because no anime can truly have 0 episodes.
try:
episodes_value = int(info_dict["Episodes"])
except:
episodes_value = 0
aggregate_dict["episodes"] = episodes_value
else:
aggregate_dict["episodes"] = None
# Info/Status
aggregate_dict["status"] = info_dict.get("Status")
# Info/Aired
aired_start = None
aired_end = None
if "Aired" in info_dict:
aired_text = info_dict["Aired"]
# Some animes are aired on one date. Others run for some time period.
# Those that run over a duration have "to" in the text.
if "to" in aired_text:
start_end_split = aired_text.split("to")
aired_start = parse_date(start_end_split[0])
# Some currently running animes have ? for their end date.
if "?" in start_end_split[1]:
aired_end = None
else:
aired_end = parse_date(start_end_split[1])
else:
aired_start = parse_date(aired_text)
aired_end = aired_start
aggregate_dict["aired_start"] = aired_start
aggregate_dict["aired_end"] = aired_end
# Info/Premiered
aggregate_dict["premiered"] = info_dict.get("Premiered")
# Info/Broadcast
aggregate_dict["broadcast"] = info_dict.get("Broadcast")
# Info/Producers
if "Producers" in info_dict:
aggregate_dict["producers"] = parse_info_list(info_dict["Producers"])
else:
aggregate_dict["producers"] = None
# Info/Licensors
if "Licensors" in info_dict:
aggregate_dict["licensors"] = parse_info_list(info_dict["Licensors"])
else:
aggregate_dict["licensors"] = None
# Info/Studios
if "Studios" in info_dict:
aggregate_dict["studios"] = parse_info_list(info_dict["Studios"])
else:
aggregate_dict["studios"] = studios_list
# Info/Source
aggregate_dict["source"] = info_dict.get("Source")
# Info/Genres
if "Genres" in info_dict:
aggregate_dict["genres"] = parse_info_list(info_dict["Genres"])
else:
aggregate_dict["genres"] = None
# Info/Duration
if "Duration" in info_dict:
duration_text = info_dict["Duration"]
mins = 0.0
# Hours
match = hours_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) * 60.0
# Minutes
match = min_regex.search(duration_text)
if match is not None:
mins += float(match.group(1))
# Seconds
match = sec_regex.search(duration_text)
if match is not None:
mins += float(match.group(1)) / 60.0
aggregate_dict["duration"] = mins
else:
aggregate_dict["duration"] = None
# Info/Rating
if "Rating" in info_dict:
rating_text = info_dict["Rating"]
rating_shorthand = rating_text.split(" - ")[0].strip()
aggregate_dict["rating"] = rating_shorthand
else:
aggregate_dict["rating"] = None
return aggregate_dict
#Fetches the production staff associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters#staff
def getStaff(html):
|
#Fetches the characters and voice actors associated with a show
#Example: https://myanimelist.net/anime/30/Neon_Genesis_Evangelion/characters
def getCharactersAndJapaneseCast(html):
return
#Fetches the list of other related anime, same series etc.
def getRelatedTitles(html):
return
def cooldown():
COOLDOWN_IN_SECONDS = 0.5
time.sleep(COOLDOWN_IN_SECONDS)
def bs_preprocess(html):
"""remove distracting whitespaces and newline characters"""
pat = re.compile('(^[\s]+)|([\s]+$)', re.MULTILINE)
html = re.sub(pat, '', html) # remove leading and trailing whitespaces
html = re.sub('\n', ' ', html) # convert newlines to spaces
# this preserves newline delimiters
html = re.sub('[\s]+<', '<', html) # remove whitespaces before opening tags
html = re.sub('>[\s]+', '>', html) # remove whitespaces after closing tags
return html
# Returns dictionary with data
def scrape_main_page(html, aggregate_data={}):
getGeneralInformation(html, aggregate_data)
return aggregate_data
# Returns dictionary with data
def scrape_stats_page(html, aggregate_data={}):
getStatSummary(html, aggregate_data)
getStatDistribution(html, aggregate_data)
return aggregate_data
def getStatSummary(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_summary_regex.search(html).end()
end_index = stats_score_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
for field in field_list[:6]:
t = "users_" + str(field.split(':')[0]).lower().replace(" ", "_").replace("-","")
count = str(field.split(':')[1]).replace(',','')
aggregate_dict[t] = int(count)
return aggregate_dict
def getStatDistribution(html, aggregate_dict={}):
#soup = BeautifulSoup(html, 'html.parser')
start_index = stats_score_regex.search(html).end()
stat_soup = BeautifulSoup(html[start_index:], 'html.parser')
table = stat_soup.find("table")
field_list = [t.get_text().strip() for t in table.find_all("tr")]
field_list = filter(lambda x: "votes" in x, field_list)
voteSum = 0
for text in field_list:
idx = extract_comma_number(text)
key = "score_" + str(idx) + "_votes"
start_idx = text.index("(")+1
end_idx = text.index(" vote")
count = text[start_idx:end_idx]
aggregate_dict[key] = int(count)
voteSum += int(count)
# Add in any missing scores
for i in xrange(1, 11):
if "score_" + str(i) + "_votes" not in aggregate_dict:
aggregate_dict["score_" + str(i) + "_votes"] = 0
aggregate_dict["score_total_votes"] = voteSum
#aggregate_dict["Watching"] = score_users
#aggregate_dict[""] = score_value
return aggregate_dict
# Returns pair (success, data)
# If success is False, then some error occurred and data may be None or corrupted.
def getAllDataFromUrl(url):
success = True
# Make URL
url = get_safe_url(url)
data = {}
data["url"] = url
# Get main page html
try:
html = get_html(url, True)
retries = 0
while html is None:
print "Retrying after 5 seconds..."
time.sleep(5)
html = get_html(url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Page category and ID (i.e. ("anime", 345))
# Used for primary keys
page_category, page_id = getCategoryAndIDFromUrl(url)
data["category"] = page_category
data["id"] = page_id
# Scrape data from the html of the main page
try:
scrape_main_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' terminated early. Exception:", e.message
success = False
# Get stat html
stat_url = url + "/stats"
try:
html = get_html(stat_url, True)
retries = 0
while html is None:
print "Retrying fetching stats after 5 seconds..."
time.sleep(5)
html = get_html(stat_url, True)
retries += 1
if retries >= 3:
return (False, data)
html = bs_preprocess(html)
except:
return (False, data)
# Scrape data from the html of the stats page
try:
scrape_stats_page(html, data)
except Exception as e:
print "[ERROR] Fetching '", data.get("title", url), "' stats terminated early. Exception:", e.message
success = False
return (success, data)
# Returns the data in the Information section as a dict.
# The keys are the bolded text, and the values are everything that follows.
# e.g. if d is the return dictionary, and the page had
# "Type:\n Music\n Episodes:\n 1 \n Licensors: \n None found, add some\n"
# you would get:
# d["Type"] = "Music"
# d["Episodes"] = "1"
# d["Licensors"] = "None found, add some"
def extract_info_section(html):
info_match = info_regex.search(html)
stat_match = stats_regex.search(html)
info_html = html[info_match.end():stat_match.start()]
info_soup = BeautifulSoup(info_html, 'html.parser')
info_list = [t.get_text().strip() for t in info_soup.find_all("div")]
info_dict = {}
for info in info_list:
try:
split_data = info.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
info_dict[key] = value
except:
print "[ERROR] Cannot split Information field. Got:", info
return info_dict
def extract_stat_section(html):
start_index = stats_regex.search(html).end()
end_index = stats_end_regex.search(html).start()
subs = html[start_index:end_index]
stat_soup = BeautifulSoup(html[start_index:end_index], 'html.parser')
field_list = [t.get_text().strip() for t in stat_soup.find_all("div")]
field_dict = {}
for field in field_list:
try:
split_data = field.split(":", 1)
key, value = split_data[0].strip(), split_data[1].strip()
field_dict[key] = value
except:
print "[ERROR] Cannot split Statistics field. Got:", field
return field_dict
def parse_info_list(str):
if "none found" in str.lower():
return []
else:
return str.split(",")
def parse_date(s):
try:
return datetime.strptime(s.strip(), "%b %d, %Y").isoformat()
except:
print "[WARNING] Could not parse date normally. Got:", s
try:
d = datetime.strptime(s.strip(), "%b, %Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not get month, year format."
try:
d = datetime.strptime(s.strip(), "%Y").isoformat()
print "[WARNING] Using alternative:", d
return d
except:
print "[WARNING] Could not parse date at all. Returning None."
                return None
def example():
print("Start")
url = "https://myanimelist.net/anime/13391/Rakuen_Tsuihou__Expelled_from_Paradise"
data = getAllDataFromUrl(url)
print data[1]
print("Done")
needed_keys = set([
'aired_end',
'aired_start',
'broadcast',
'category',
'duration',
'episodes',
'favorites',
'genres',
'id',
'licensors',
'members',
'popularity',
'premiered',
'producers',
'rank',
'rating',
'related_ids',
'score',
'score_10_votes',
'score_1_votes',
'score_2_votes',
'score_3_votes',
'score_4_votes',
'score_5_votes',
'score_6_votes',
'score_7_votes',
'score_8_votes',
'score_9_votes',
'score_total_votes',
'score_users',
'source',
'status',
'studios',
'synopsis',
'title',
'type',
'url',
'users_completed',
'users_dropped',
'users_onhold',
'users_plan_to_watch',
'users_total',
'users_watching'
])
def validate(data):
return needed_keys.issubset(set(data.keys()))
#TODO Remove this when convenient
# d = getAllDataFromUrl("https://myanimelist.net/anime/10797/Kayoe_Chuugaku")[1]
# for k in d:
# print "{}: {}".format(k, d[k])
| return | identifier_body |
spider.js | //casperjs --disk-cache=yes spider.js --web-security=no
var casper = require('casper').create({
pageSettings: {
//navigationRequested: true,
        // wait time for resource requests
resourceTimeout: 1000,
loadImages: false,
//loadPlugins: false,
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
},
verbose: true,
logLevel: 'info'
});
var utils = require('utils');
var host = "http://172.17.150.115:8080/today";
var cityIds = [
101010100,
101010200,
101010300,
101010400,
101010500,
101010600,
101010700,
101010800,
101010900,
101011000,
101011100,
101011200,
101011300,
101011400,
101011500,
101020100,
101020200,
101020300,
101020500,
101020600,
101020700,
101020800,
101020900,
101021000,
101021100,
101021200,
101021300,
101030100,
101030200,
101030300,
101030400,
101030500,
101030600,
101030700,
101030800,
101030900,
101031000,
101031100,
101031200,
101031400,
101040100,
101040200,
101040300,
101040400,
101040500,
101040600,
101040700,
101040800,
101040900,
101041000,
101041100,
101041300,
101041400,
101041500,
101041600,
101041700,
101041800,
101041900,
101042000,
101042100,
101042200,
101042300,
101042400,
101042500,
101042600,
101042700,
101042800,
101042900,
101043000,
101043100,
101043200,
101043300,
101043400,
101043600,
101050101,
101050102,
101050103,
101050104,
101050105,
101050106,
101050107,
101050108,
101050109,
101050110,
101050111,
101050112,
101050113,
101050201,
101050202,
101050203
];
// var dump = require("utils").dump;
// script argument
// casper.log("Casper CLI passed args:",'info');
// dump(casper.cli.args);
// filter the png & jpg, to speed up
casper.on('resource.requested', function (request) {
//if (/\.(png|jpg)$/i.test(request.url)) {
    // filter out ad links
if (/(google|tanx|toruk|tq121|tongji|googlesyndication|taobao|taobaocdn|googlesyndication|doubleclick|baidu)/i.test(request.url)) {
//this.log("Abort resource.requested:" + request.url,'info');
request.abort();
} else {
//this.log("Resource.requested:" + request.url, 'info');
}
});
//casper.on('navigation.requested', function (url, navigationType, navigationLocked, isMainFrame) {
// //this.log("navigation.requested:" + url + " " + navigationType + " " + navigationLocked + " " + isMainFrame, 'info');
//});
//
//casper.on('page.resource.requested', function (requestData, request) {
// //this.log("page.resource.requested:" + requestData.url, 'info');
//
//});
//
//casper.on('load.finished', function (status) {
// //this.log("load.finished:" + status, 'info');
//
//});
casper.getHTML = function getHTML(selector, outer) {
"use strict";
this.checkStarted();
if (!selector) {
return this.page.frameContent;
}
return this.evaluate(function getSelectorHTML(selector, outer) {
var element = __utils__.findOne(selector);
return outer ? element.outerHTML : element.innerHTML;
}, selector, !!outer);
};
function send(host, result) {
try {
return __utils__.sendAJAX(host, 'POST', result, false,
{
contentType: 'application/x-www-form-urlencoded; charset=UTF-8'
});
} catch (e) {
__utils__.log("Server error:" + e, 'error');
}
}
function grab(cityId) {
var start = new Date().getTime();
var result = {};
this.waitForSelector("div.fl h1", (function () {
// ------------- Today weather --------------
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
result["temp"] = this.getHTML('p.tem span');
        // wind direction
var wd = this.getElementAttr('span.sp2', 'title');
if (wd != null) {
result["wd"] = wd;
}
        // wind scale
var ws = this.getHTML('span.sp2');
if (ws != null) {
result["ws"] = ws.replace(/[^0-9]/ig, "");
}
        // humidity
var sd = this.getHTML('span.sp1');
if (sd != null) {
result["sd"] = sd.replace(/[^0-9]/ig, "");
}
var time = this.getHTML('#today span');
if (time != null) {
result["time"] = time.replace(/[^0-9:]/ig, "");
}
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
// ------------- Today sunrise & sunset ------
var sunrise = this.getHTML("p.sunUp");
if (sunrise != null) {
result["sunrise"] = sunrise.replace(/[^0-9:]/ig, "");
}
var sunset = this.getHTML("p.sunDown");
if (sunset != null) {
result["sunset"] = sunset.replace(/[^0-9:]/ig, "");
}
// ------------- Today suggestion ------------
        // clothing index
result["cyint"] = this.getHTML("section.mask section.ct b");
result["cydes"] = this.getHTML("section.mask section.ct aside").split("</b>")[1];
        // cold/flu index
result["gmint"] = this.getHTML("section.mask section.gm b");
result["gmdes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // UV index
result["uvint"] = this.getHTML("section.mask section.uv b");
result["uvdes"] = this.getHTML("section.mask section.uv aside").split("</b>")[1];
        // car washing index
result["xcint"] = this.getHTML("section.mask section.xc b");
result["xcdes"] = this.getHTML("section.mask section.xc aside").split("</b>")[1];
        // sunglasses index
result["tyjint"] = this.getHTML("section.mask section.gl b");
result["tyjdes"] = this.getHTML("section.mask section.gl aside").split("</b>")[1];
        // travel index
result["trint"] = this.getHTML("section.mask section.tr b");
result["trdes"] = this.getHTML("section.mask section.tr aside").split("</b>")[1];
        // beauty index
result["mint"] = this.getHTML("section.mask section.gm b");
result["nydes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // morning exercise index
result["clint"] = this.getHTML("section.mask section.cl b");
result["cldes"] = this.getHTML("section.mask section.cl aside").split("</b>")[1];
        // allergy index
result["agint"] = this.getHTML("section.mask section.ag b");
result["agdes"] = this.getHTML("section.mask section.ag aside").split("</b>")[1];
        // umbrella index
result["ysint"] = this.getHTML("section.mask section.ys b");
result["ysdes"] = this.getHTML("section.mask section.ys aside").split("</b>")[1];
        // sports index
result["ydint"] = this.getHTML("section.mask section.yd b");
result["iddes"] = this.getHTML("section.mask section.yd aside").split("</b>")[1];
        // makeup index
result["hzint"] = this.getHTML("section.mask section.pp b");
result["hzdes"] = this.getHTML("section.mask section.pp aside").split("</b>")[1];
        // comfort index
result["ssdint"] = this.getHTML("section.mask section.co b");
result["ssdes"] = this.getHTML("section.mask section.co aside").split("</b>")[1];
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
        // ---------------- send the result to the collector ---------------
//var response = this.evaluate(send, {host: host, result: result});
//this.log("Server response:" + response, 'debug');
        //----------------- debug output -----------
utils.dump(result);
}), (function () {
this.die(this.getCurrentUrl() + " timeout reached.");
}), 12000);
}
//mobile version
//function grabFuture(cityId) {
// var result = {};
// result["cityId"] = cityId;
// this.evaluate(function (host, result) {
// var futures = __utils__.querySelectorAll("div.days7 li");
// for (var i = 0, j = futures.length; i < j; i++) {
//            // weather icon title, e.g. cloudy or sunny
// var img = futures[i].querySelectorAll("i img"); | // if (before === after) {
// result["weather" + (i + 1)] = before;
// } else {
// result["weather" + (i + 1)] = before + "转" + after;
// }
//            // temperature, e.g. 5°/10°
// result["temp" + (i + 1)] = futures[i].querySelector("span").innerHTML;
//            // weather icon number
// var link1 = img[0].getAttribute("src");
// var link2 = img[1].getAttribute("src");
// result["img" + (i + 1)] = link1.charAt(link1.length - 5);
// result["img" + (i + 2)] = link2.charAt(link1.length - 5);
//
// }
//
// }, {host: host, result: result})
//}
// desktop-site version
function grabFuture(cityId) {
var start = new Date().getTime();
var result = {};
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["date"] = this.getHTML('#tabDays > p').replace(/[^0-9:-\s]/ig, "");
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
this.evaluate(function (host, result) {
var futures = __utils__.querySelectorAll("ul.t.clearfix li.dn");
for (var i = 0, j = futures.length; i < j; i++) {
            // weather icon title, e.g. cloudy or sunny
var img_title = futures[i].querySelector("p.wea").innerHTML;
if (img_title.indexOf("转") != -1) {
var titles = img_title.split("转");
result["img_title" + (i + 1)] = titles[0];
result["img_title" + (i + 2)] = titles[1];
} else {
result["img_title" + (i + 1)] = img_title;
result["img_title" + (i + 2)] = img_title;
}
            // weather transition, e.g. cloudy turning sunny
result["weather" + (i + 1)] = img_title;
            // temperature, e.g. 5°/10°
var temp = futures[i].querySelectorAll("p.tem span");
result["temp" + (i + 1)] = temp[0].innerHTML + '/' + temp[1].innerHTML;
            // weather icon number
var img = futures[i].querySelectorAll("big");
var num1 = img[0].getAttribute("class").split(" ")[1].charAt(2);
var num2 = img[1].getAttribute("class").split(" ")[1].charAt(2);
result["img" + (i + 1)] = num1;
result["img" + (i + 2)] = num2;
            // wind direction
var fx = futures[i].querySelectorAll("p.win span");
result["fx" + (i + 1)] = fx[0].getAttribute("title");
result["fx" + (i + 2)] = fx[1].getAttribute("title");
result["fl" + (i + 1)] = futures[i].querySelector("p.win i").innerHTML;
result["fl" + (i + 2)] = futures[i].querySelector("p.win i").innerHTML;
}
utils.dump(result);
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
}, {host: host, result: result});
}
casper.start();
// run the same-day weather scrape
casper.each(cityIds, function (self, cityId) {
var link = "http://www.weather.com.cn/weather1d/" + cityId + ".shtml";
self.log("-----------today weather link:" + link, 'info');
self.thenOpen(link, function () {
grab.call(this, cityId)
});
});
// run the 7-day forecast weather scrape
//casper.each(cityIds, function (self, cityId) {
// var link = "http://www.weather.com.cn/weather/" + cityId + ".shtml";
// self.log("-----------future weather link:" + link, 'info');
// self.thenOpen(link, function () {
// grabFuture.call(this, cityId)
// });
//});
// run
casper.run(function () {
    // exit once finished
casper.exit();
}); | // var before = img[0].getAttribute("alt");
// var after = img[1].getAttribute("alt");
// result["img_title" + (i + 1)] = before;
// result["img_title" + (i + 2)] = after;
// //天气转变,多云转晴 | random_line_split |
spider.js | //casperjs --disk-cache=yes spider.js --web-security=no
var casper = require('casper').create({
pageSettings: {
//navigationRequested: true,
        // resource request timeout (ms)
resourceTimeout: 1000,
loadImages: false,
//loadPlugins: false,
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
},
verbose: true,
logLevel: 'info'
});
var utils = require('utils');
var host = "http://172.17.150.115:8080/today";
var cityIds = [
101010100,
101010200,
101010300,
101010400,
101010500,
101010600,
101010700,
101010800,
101010900,
101011000,
101011100,
101011200,
101011300,
101011400,
101011500,
101020100,
101020200,
101020300,
101020500,
101020600,
101020700,
101020800,
101020900,
101021000,
101021100,
101021200,
101021300,
101030100,
101030200,
101030300,
101030400,
101030500,
101030600,
101030700,
101030800,
101030900,
101031000,
101031100,
101031200,
101031400,
101040100,
101040200,
101040300,
101040400,
101040500,
101040600,
101040700,
101040800,
101040900,
101041000,
101041100,
101041300,
101041400,
101041500,
101041600,
101041700,
101041800,
101041900,
101042000,
101042100,
101042200,
101042300,
101042400,
101042500,
101042600,
101042700,
101042800,
101042900,
101043000,
101043100,
101043200,
101043300,
101043400,
101043600,
101050101,
101050102,
101050103,
101050104,
101050105,
101050106,
101050107,
101050108,
101050109,
101050110,
101050111,
101050112,
101050113,
101050201,
101050202,
101050203
];
// var dump = require("utils").dump;
// script argument
// casper.log("Casper CLI passed args:",'info');
// dump(casper.cli.args);
// filter the png & jpg, to speed up
casper.on('resource.requested', function (request) {
//if (/\.(png|jpg)$/i.test(request.url)) {
    // filter out ad links
if (/(google|tanx|toruk|tq121|tongji|googlesyndication|taobao|taobaocdn|googlesyndication|doubleclick|baidu)/i.test(request.url)) {
//this.log("Abort resource.requested:" + request.url,'info');
request.abort();
} else {
//this.log("Resource.requested:" + request.url, 'info');
}
});
//casper.on('navigation.requested', function (url, navigationType, navigationLocked, isMainFrame) {
// //this.log("navigation.requested:" + url + " " + navigationType + " " + navigationLocked + " " + isMainFrame, 'info');
//});
//
//casper.on('page.resource.requested', function (requestData, request) {
// //this.log("page.resource.requested:" + requestData.url, 'info');
//
//});
//
//casper.on('load.finished', function (status) {
// //this.log("load.finished:" + status, 'info');
//
//});
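// Local override of casper.getHTML(): with no selector it returns the whole frame content,
// otherwise the inner (or outer) HTML of the first element matched by the selector.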
casper.getHTML = function getHTML(selector, outer) {
"use strict";
this.checkStarted();
if (!selector) {
return this.page.frameContent;
}
return this.evaluate(function getSelectorHTML(selector, outer) {
var element = __utils__.findOne(selector);
return outer ? element.outerHTML : element.innerHTML;
}, selector, !!outer);
};
function send(host, result) {
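    // Meant to be called in the page context via casper.evaluate();
    // __utils__ is the client-side utility object CasperJS injects into the page.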
try {
return _ | var start = new Date().getTime();
var result = {};
this.waitForSelector("div.fl h1", (function () {
// ------------- Today weather --------------
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
result["temp"] = this.getHTML('p.tem span');
        // wind direction
var wd = this.getElementAttr('span.sp2', 'title');
if (wd != null) {
result["wd"] = wd;
}
        // wind force level
var ws = this.getHTML('span.sp2');
if (ws != null) {
result["ws"] = ws.replace(/[^0-9]/ig, "");
}
        // humidity
var sd = this.getHTML('span.sp1');
if (sd != null) {
result["sd"] = sd.replace(/[^0-9]/ig, "");
}
var time = this.getHTML('#today span');
if (time != null) {
result["time"] = time.replace(/[^0-9:]/ig, "");
}
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
// ------------- Today sunrise & sunset ------
var sunrise = this.getHTML("p.sunUp");
if (sunrise != null) {
result["sunrise"] = sunrise.replace(/[^0-9:]/ig, "");
}
var sunset = this.getHTML("p.sunDown");
if (sunset != null) {
result["sunset"] = sunset.replace(/[^0-9:]/ig, "");
}
// ------------- Today suggestion ------------
        // dressing index
result["cyint"] = this.getHTML("section.mask section.ct b");
result["cydes"] = this.getHTML("section.mask section.ct aside").split("</b>")[1];
        // cold/flu index
result["gmint"] = this.getHTML("section.mask section.gm b");
result["gmdes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // UV index
result["uvint"] = this.getHTML("section.mask section.uv b");
result["uvdes"] = this.getHTML("section.mask section.uv aside").split("</b>")[1];
        // car-wash index
result["xcint"] = this.getHTML("section.mask section.xc b");
result["xcdes"] = this.getHTML("section.mask section.xc aside").split("</b>")[1];
        // sunglasses index
result["tyjint"] = this.getHTML("section.mask section.gl b");
result["tyjdes"] = this.getHTML("section.mask section.gl aside").split("</b>")[1];
        // travel index
result["trint"] = this.getHTML("section.mask section.tr b");
result["trdes"] = this.getHTML("section.mask section.tr aside").split("</b>")[1];
        // beauty index
result["mint"] = this.getHTML("section.mask section.gm b");
result["nydes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // morning-exercise index
result["clint"] = this.getHTML("section.mask section.cl b");
result["cldes"] = this.getHTML("section.mask section.cl aside").split("</b>")[1];
        // allergy index
result["agint"] = this.getHTML("section.mask section.ag b");
result["agdes"] = this.getHTML("section.mask section.ag aside").split("</b>")[1];
        // umbrella index
result["ysint"] = this.getHTML("section.mask section.ys b");
result["ysdes"] = this.getHTML("section.mask section.ys aside").split("</b>")[1];
        // sports index
result["ydint"] = this.getHTML("section.mask section.yd b");
result["iddes"] = this.getHTML("section.mask section.yd aside").split("</b>")[1];
        // makeup index
result["hzint"] = this.getHTML("section.mask section.pp b");
result["hzdes"] = this.getHTML("section.mask section.pp aside").split("</b>")[1];
        // comfort index
result["ssdint"] = this.getHTML("section.mask section.co b");
result["ssdes"] = this.getHTML("section.mask section.co aside").split("</b>")[1];
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
        // ---------------- send to the collector server ---------------
//var response = this.evaluate(send, {host: host, result: result});
//this.log("Server response:" + response, 'debug');
        //----------------- debug output -----------
utils.dump(result);
}), (function () {
this.die(this.getCurrentUrl() + " timeout reached.");
}), 12000);
}
// mobile-site version
//function grabFuture(cityId) {
// var result = {};
// result["cityId"] = cityId;
// this.evaluate(function (host, result) {
// var futures = __utils__.querySelectorAll("div.days7 li");
// for (var i = 0, j = futures.length; i < j; i++) {
//            // weather icon title, e.g. cloudy or sunny
// var img = futures[i].querySelectorAll("i img");
// var before = img[0].getAttribute("alt");
// var after = img[1].getAttribute("alt");
// result["img_title" + (i + 1)] = before;
// result["img_title" + (i + 2)] = after;
//            // weather transition, e.g. cloudy turning sunny
// if (before === after) {
// result["weather" + (i + 1)] = before;
// } else {
// result["weather" + (i + 1)] = before + "转" + after;
// }
//            // temperature, e.g. 5°/10°
// result["temp" + (i + 1)] = futures[i].querySelector("span").innerHTML;
//            // weather icon number
// var link1 = img[0].getAttribute("src");
// var link2 = img[1].getAttribute("src");
// result["img" + (i + 1)] = link1.charAt(link1.length - 5);
// result["img" + (i + 2)] = link2.charAt(link1.length - 5);
//
// }
//
// }, {host: host, result: result})
//}
// desktop-site version
function grabFuture(cityId) {
var start = new Date().getTime();
var result = {};
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["date"] = this.getHTML('#tabDays > p').replace(/[^0-9:-\s]/ig, "");
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
this.evaluate(function (host, result) {
var futures = __utils__.querySelectorAll("ul.t.clearfix li.dn");
for (var i = 0, j = futures.length; i < j; i++) {
            // weather icon title, e.g. cloudy or sunny
var img_title = futures[i].querySelector("p.wea").innerHTML;
if (img_title.indexOf("转") != -1) {
var titles = img_title.split("转");
result["img_title" + (i + 1)] = titles[0];
result["img_title" + (i + 2)] = titles[1];
} else {
result["img_title" + (i + 1)] = img_title;
result["img_title" + (i + 2)] = img_title;
}
            // weather transition, e.g. cloudy turning sunny
result["weather" + (i + 1)] = img_title;
            // temperature, e.g. 5°/10°
var temp = futures[i].querySelectorAll("p.tem span");
result["temp" + (i + 1)] = temp[0].innerHTML + '/' + temp[1].innerHTML;
            // weather icon number
var img = futures[i].querySelectorAll("big");
var num1 = img[0].getAttribute("class").split(" ")[1].charAt(2);
var num2 = img[1].getAttribute("class").split(" ")[1].charAt(2);
result["img" + (i + 1)] = num1;
result["img" + (i + 2)] = num2;
            // wind direction
var fx = futures[i].querySelectorAll("p.win span");
result["fx" + (i + 1)] = fx[0].getAttribute("title");
result["fx" + (i + 2)] = fx[1].getAttribute("title");
result["fl" + (i + 1)] = futures[i].querySelector("p.win i").innerHTML;
result["fl" + (i + 2)] = futures[i].querySelector("p.win i").innerHTML;
}
utils.dump(result);
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
}, {host: host, result: result});
}
casper.start();
// run the same-day weather scrape
casper.each(cityIds, function (self, cityId) {
var link = "http://www.weather.com.cn/weather1d/" + cityId + ".shtml";
self.log("-----------today weather link:" + link, 'info');
self.thenOpen(link, function () {
grab.call(this, cityId)
});
});
// run the 7-day forecast weather scrape
//casper.each(cityIds, function (self, cityId) {
// var link = "http://www.weather.com.cn/weather/" + cityId + ".shtml";
// self.log("-----------future weather link:" + link, 'info');
// self.thenOpen(link, function () {
// grabFuture.call(this, cityId)
// });
//});
// run
casper.run(function () {
    // exit once finished
casper.exit();
});
| _utils__.sendAJAX(host, 'POST', result, false,
{
contentType: 'application/x-www-form-urlencoded; charset=UTF-8'
});
} catch (e) {
__utils__.log("Server error:" + e, 'error');
}
}
function grab(cityId) {
| identifier_body |
spider.js | //casperjs --disk-cache=yes spider.js --web-security=no
var casper = require('casper').create({
pageSettings: {
//navigationRequested: true,
        // resource request timeout (ms)
resourceTimeout: 1000,
loadImages: false,
//loadPlugins: false,
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
},
verbose: true,
logLevel: 'info'
});
var utils = require('utils');
var host = "http://172.17.150.115:8080/today";
var cityIds = [
101010100,
101010200,
101010300,
101010400,
101010500,
101010600,
101010700,
101010800,
101010900,
101011000,
101011100,
101011200,
101011300,
101011400,
101011500,
101020100,
101020200,
101020300,
101020500,
101020600,
101020700,
101020800,
101020900,
101021000,
101021100,
101021200,
101021300,
101030100,
101030200,
101030300,
101030400,
101030500,
101030600,
101030700,
101030800,
101030900,
101031000,
101031100,
101031200,
101031400,
101040100,
101040200,
101040300,
101040400,
101040500,
101040600,
101040700,
101040800,
101040900,
101041000,
101041100,
101041300,
101041400,
101041500,
101041600,
101041700,
101041800,
101041900,
101042000,
101042100,
101042200,
101042300,
101042400,
101042500,
101042600,
101042700,
101042800,
101042900,
101043000,
101043100,
101043200,
101043300,
101043400,
101043600,
101050101,
101050102,
101050103,
101050104,
101050105,
101050106,
101050107,
101050108,
101050109,
101050110,
101050111,
101050112,
101050113,
101050201,
101050202,
101050203
];
// var dump = require("utils").dump;
// script argument
// casper.log("Casper CLI passed args:",'info');
// dump(casper.cli.args);
// filter the png & jpg, to speed up
casper.on('resource.requested', function (request) {
//if (/\.(png|jpg)$/i.test(request.url)) {
    // filter out ad links
if (/(google|tanx|toruk|tq121|tongji|googlesyndication|taobao|taobaocdn|googlesyndication|doubleclick|baidu)/i.test(request.url)) {
//this.log("Abort | Resource.requested:" + request.url, 'info');
}
});
//casper.on('navigation.requested', function (url, navigationType, navigationLocked, isMainFrame) {
// //this.log("navigation.requested:" + url + " " + navigationType + " " + navigationLocked + " " + isMainFrame, 'info');
//});
//
//casper.on('page.resource.requested', function (requestData, request) {
// //this.log("page.resource.requested:" + requestData.url, 'info');
//
//});
//
//casper.on('load.finished', function (status) {
// //this.log("load.finished:" + status, 'info');
//
//});
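// Local override of casper.getHTML(): with no selector it returns the whole frame content,
// otherwise the inner (or outer) HTML of the first element matched by the selector.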
casper.getHTML = function getHTML(selector, outer) {
"use strict";
this.checkStarted();
if (!selector) {
return this.page.frameContent;
}
return this.evaluate(function getSelectorHTML(selector, outer) {
var element = __utils__.findOne(selector);
return outer ? element.outerHTML : element.innerHTML;
}, selector, !!outer);
};
function send(host, result) {
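    // Meant to be called in the page context via casper.evaluate();
    // __utils__ is the client-side utility object CasperJS injects into the page.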
try {
return __utils__.sendAJAX(host, 'POST', result, false,
{
contentType: 'application/x-www-form-urlencoded; charset=UTF-8'
});
} catch (e) {
__utils__.log("Server error:" + e, 'error');
}
}
function grab(cityId) {
var start = new Date().getTime();
var result = {};
this.waitForSelector("div.fl h1", (function () {
// ------------- Today weather --------------
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
result["temp"] = this.getHTML('p.tem span');
        // wind direction
var wd = this.getElementAttr('span.sp2', 'title');
if (wd != null) {
result["wd"] = wd;
}
        // wind force level
var ws = this.getHTML('span.sp2');
if (ws != null) {
result["ws"] = ws.replace(/[^0-9]/ig, "");
}
        // humidity
var sd = this.getHTML('span.sp1');
if (sd != null) {
result["sd"] = sd.replace(/[^0-9]/ig, "");
}
var time = this.getHTML('#today span');
if (time != null) {
result["time"] = time.replace(/[^0-9:]/ig, "");
}
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
// ------------- Today sunrise & sunset ------
var sunrise = this.getHTML("p.sunUp");
if (sunrise != null) {
result["sunrise"] = sunrise.replace(/[^0-9:]/ig, "");
}
var sunset = this.getHTML("p.sunDown");
if (sunset != null) {
result["sunset"] = sunset.replace(/[^0-9:]/ig, "");
}
// ------------- Today suggestion ------------
        // dressing index
result["cyint"] = this.getHTML("section.mask section.ct b");
result["cydes"] = this.getHTML("section.mask section.ct aside").split("</b>")[1];
        // cold/flu index
result["gmint"] = this.getHTML("section.mask section.gm b");
result["gmdes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // UV index
result["uvint"] = this.getHTML("section.mask section.uv b");
result["uvdes"] = this.getHTML("section.mask section.uv aside").split("</b>")[1];
        // car-wash index
result["xcint"] = this.getHTML("section.mask section.xc b");
result["xcdes"] = this.getHTML("section.mask section.xc aside").split("</b>")[1];
        // sunglasses index
result["tyjint"] = this.getHTML("section.mask section.gl b");
result["tyjdes"] = this.getHTML("section.mask section.gl aside").split("</b>")[1];
        // travel index
result["trint"] = this.getHTML("section.mask section.tr b");
result["trdes"] = this.getHTML("section.mask section.tr aside").split("</b>")[1];
        // beauty index
result["mint"] = this.getHTML("section.mask section.gm b");
result["nydes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // morning-exercise index
result["clint"] = this.getHTML("section.mask section.cl b");
result["cldes"] = this.getHTML("section.mask section.cl aside").split("</b>")[1];
        // allergy index
result["agint"] = this.getHTML("section.mask section.ag b");
result["agdes"] = this.getHTML("section.mask section.ag aside").split("</b>")[1];
        // umbrella index
result["ysint"] = this.getHTML("section.mask section.ys b");
result["ysdes"] = this.getHTML("section.mask section.ys aside").split("</b>")[1];
        // sports index
result["ydint"] = this.getHTML("section.mask section.yd b");
result["iddes"] = this.getHTML("section.mask section.yd aside").split("</b>")[1];
        // makeup index
result["hzint"] = this.getHTML("section.mask section.pp b");
result["hzdes"] = this.getHTML("section.mask section.pp aside").split("</b>")[1];
        // comfort index
result["ssdint"] = this.getHTML("section.mask section.co b");
result["ssdes"] = this.getHTML("section.mask section.co aside").split("</b>")[1];
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
        // ---------------- send to the collector server ---------------
//var response = this.evaluate(send, {host: host, result: result});
//this.log("Server response:" + response, 'debug');
        //----------------- debug output -----------
utils.dump(result);
}), (function () {
this.die(this.getCurrentUrl() + " timeout reached.");
}), 12000);
}
// mobile-site version
//function grabFuture(cityId) {
// var result = {};
// result["cityId"] = cityId;
// this.evaluate(function (host, result) {
// var futures = __utils__.querySelectorAll("div.days7 li");
// for (var i = 0, j = futures.length; i < j; i++) {
//            // weather icon title, e.g. cloudy or sunny
// var img = futures[i].querySelectorAll("i img");
// var before = img[0].getAttribute("alt");
// var after = img[1].getAttribute("alt");
// result["img_title" + (i + 1)] = before;
// result["img_title" + (i + 2)] = after;
//            // weather transition, e.g. cloudy turning sunny
// if (before === after) {
// result["weather" + (i + 1)] = before;
// } else {
// result["weather" + (i + 1)] = before + "转" + after;
// }
//            // temperature, e.g. 5°/10°
// result["temp" + (i + 1)] = futures[i].querySelector("span").innerHTML;
//            // weather icon number
// var link1 = img[0].getAttribute("src");
// var link2 = img[1].getAttribute("src");
// result["img" + (i + 1)] = link1.charAt(link1.length - 5);
// result["img" + (i + 2)] = link2.charAt(link1.length - 5);
//
// }
//
// }, {host: host, result: result})
//}
// desktop-site version
function grabFuture(cityId) {
var start = new Date().getTime();
var result = {};
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["date"] = this.getHTML('#tabDays > p').replace(/[^0-9:-\s]/ig, "");
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
this.evaluate(function (host, result) {
var futures = __utils__.querySelectorAll("ul.t.clearfix li.dn");
for (var i = 0, j = futures.length; i < j; i++) {
            // weather icon title, e.g. cloudy or sunny
var img_title = futures[i].querySelector("p.wea").innerHTML;
if (img_title.indexOf("转") != -1) {
var titles = img_title.split("转");
result["img_title" + (i + 1)] = titles[0];
result["img_title" + (i + 2)] = titles[1];
} else {
result["img_title" + (i + 1)] = img_title;
result["img_title" + (i + 2)] = img_title;
}
            // weather transition, e.g. cloudy turning sunny
result["weather" + (i + 1)] = img_title;
            // temperature, e.g. 5°/10°
var temp = futures[i].querySelectorAll("p.tem span");
result["temp" + (i + 1)] = temp[0].innerHTML + '/' + temp[1].innerHTML;
            // weather icon number
var img = futures[i].querySelectorAll("big");
var num1 = img[0].getAttribute("class").split(" ")[1].charAt(2);
var num2 = img[1].getAttribute("class").split(" ")[1].charAt(2);
result["img" + (i + 1)] = num1;
result["img" + (i + 2)] = num2;
            // wind direction
var fx = futures[i].querySelectorAll("p.win span");
result["fx" + (i + 1)] = fx[0].getAttribute("title");
result["fx" + (i + 2)] = fx[1].getAttribute("title");
result["fl" + (i + 1)] = futures[i].querySelector("p.win i").innerHTML;
result["fl" + (i + 2)] = futures[i].querySelector("p.win i").innerHTML;
}
utils.dump(result);
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
}, {host: host, result: result});
}
casper.start();
// run the same-day weather scrape
casper.each(cityIds, function (self, cityId) {
var link = "http://www.weather.com.cn/weather1d/" + cityId + ".shtml";
self.log("-----------today weather link:" + link, 'info');
self.thenOpen(link, function () {
grab.call(this, cityId)
});
});
// run the 7-day forecast weather scrape
//casper.each(cityIds, function (self, cityId) {
// var link = "http://www.weather.com.cn/weather/" + cityId + ".shtml";
// self.log("-----------future weather link:" + link, 'info');
// self.thenOpen(link, function () {
// grabFuture.call(this, cityId)
// });
//});
// run
casper.run(function () {
    // exit once finished
casper.exit();
});
| resource.requested:" + request.url,'info');
request.abort();
} else {
//this.log(" | conditional_block |
spider.js | //casperjs --disk-cache=yes spider.js --web-security=no
var casper = require('casper').create({
pageSettings: {
//navigationRequested: true,
        // resource request timeout (ms)
resourceTimeout: 1000,
loadImages: false,
//loadPlugins: false,
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
},
verbose: true,
logLevel: 'info'
});
var utils = require('utils');
var host = "http://172.17.150.115:8080/today";
var cityIds = [
101010100,
101010200,
101010300,
101010400,
101010500,
101010600,
101010700,
101010800,
101010900,
101011000,
101011100,
101011200,
101011300,
101011400,
101011500,
101020100,
101020200,
101020300,
101020500,
101020600,
101020700,
101020800,
101020900,
101021000,
101021100,
101021200,
101021300,
101030100,
101030200,
101030300,
101030400,
101030500,
101030600,
101030700,
101030800,
101030900,
101031000,
101031100,
101031200,
101031400,
101040100,
101040200,
101040300,
101040400,
101040500,
101040600,
101040700,
101040800,
101040900,
101041000,
101041100,
101041300,
101041400,
101041500,
101041600,
101041700,
101041800,
101041900,
101042000,
101042100,
101042200,
101042300,
101042400,
101042500,
101042600,
101042700,
101042800,
101042900,
101043000,
101043100,
101043200,
101043300,
101043400,
101043600,
101050101,
101050102,
101050103,
101050104,
101050105,
101050106,
101050107,
101050108,
101050109,
101050110,
101050111,
101050112,
101050113,
101050201,
101050202,
101050203
];
// var dump = require("utils").dump;
// script argument
// casper.log("Casper CLI passed args:",'info');
// dump(casper.cli.args);
// filter the png & jpg, to speed up
casper.on('resource.requested', function (request) {
//if (/\.(png|jpg)$/i.test(request.url)) {
    // filter out ad links
if (/(google|tanx|toruk|tq121|tongji|googlesyndication|taobao|taobaocdn|googlesyndication|doubleclick|baidu)/i.test(request.url)) {
//this.log("Abort resource.requested:" + request.url,'info');
request.abort();
} else {
//this.log("Resource.requested:" + request.url, 'info');
}
});
//casper.on('navigation.requested', function (url, navigationType, navigationLocked, isMainFrame) {
// //this.log("navigation.requested:" + url + " " + navigationType + " " + navigationLocked + " " + isMainFrame, 'info');
//});
//
//casper.on('page.resource.requested', function (requestData, request) {
// //this.log("page.resource.requested:" + requestData.url, 'info');
//
//});
//
//casper.on('load.finished', function (status) {
// //this.log("load.finished:" + status, 'info');
//
//});
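// Local override of casper.getHTML(): with no selector it returns the whole frame content,
// otherwise the inner (or outer) HTML of the first element matched by the selector.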
casper.getHTML = function getHTML(selector, outer) {
"use strict";
this.checkStarted();
if (!selector) {
return this.page.frameContent;
}
return this.evaluate(function getSelectorHTML(selector, outer) {
var element = __utils__.findOne(selector);
return outer ? element.outerHTML : element.innerHTML;
}, selector, !!outer);
};
function send(host, result) {
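    // Meant to be called in the page context via casper.evaluate();
    // __utils__ is the client-side utility object CasperJS injects into the page.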
try {
return __utils__.sendAJAX(host, 'POST', result, false,
{
contentType: 'application/x-www-form-urlencoded; charset=UTF-8'
});
} catch (e) {
__utils__.log("Server error:" + e, 'error');
}
}
function grab(cityId) {
var start = new Date().getTime();
var result = {};
this.waitForSelector("div.fl h1", (function () {
// ------------- Today weather --------------
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === null) {
result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
result["temp"] = this.getHTML('p.tem span');
        // wind direction
var wd = this.getElementAttr('span.sp2', 'title');
if (wd != null) {
result["wd"] = wd;
}
        // wind force level
var ws = this.getHTML('span.sp2');
if (ws != null) {
result["ws"] = ws.replace(/[^0-9]/ig, "");
}
        // humidity
var sd = this.getHTML('span.sp1');
if (sd != null) {
result["sd"] = sd.replace(/[^0-9]/ig, "");
}
var time = this.getHTML('#today span');
if (time != null) {
result["time"] = time.replace(/[^0-9:]/ig, "");
}
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
// ------------- Today sunrise & sunset ------
var sunrise = this.getHTML("p.sunUp");
if (sunrise != null) {
result["sunrise"] = sunrise.replace(/[^0-9:]/ig, "");
}
var sunset = this.getHTML("p.sunDown");
if (sunset != null) {
result["sunset"] = sunset.replace(/[^0-9:]/ig, "");
}
// ------------- Today suggestion ------------
        // dressing index
result["cyint"] = this.getHTML("section.mask section.ct b");
result["cydes"] = this.getHTML("section.mask section.ct aside").split("</b>")[1];
        // cold/flu index
result["gmint"] = this.getHTML("section.mask section.gm b");
result["gmdes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // UV index
result["uvint"] = this.getHTML("section.mask section.uv b");
result["uvdes"] = this.getHTML("section.mask section.uv aside").split("</b>")[1];
        // car-wash index
result["xcint"] = this.getHTML("section.mask section.xc b");
result["xcdes"] = this.getHTML("section.mask section.xc aside").split("</b>")[1];
        // sunglasses index
result["tyjint"] = this.getHTML("section.mask section.gl b");
result["tyjdes"] = this.getHTML("section.mask section.gl aside").split("</b>")[1];
        // travel index
result["trint"] = this.getHTML("section.mask section.tr b");
result["trdes"] = this.getHTML("section.mask section.tr aside").split("</b>")[1];
        // beauty index
result["mint"] = this.getHTML("section.mask section.gm b");
result["nydes"] = this.getHTML("section.mask section.gm aside").split("</b>")[1];
        // morning-exercise index
result["clint"] = this.getHTML("section.mask section.cl b");
result["cldes"] = this.getHTML("section.mask section.cl aside").split("</b>")[1];
        // allergy index
result["agint"] = this.getHTML("section.mask section.ag b");
result["agdes"] = this.getHTML("section.mask section.ag aside").split("</b>")[1];
        // umbrella index
result["ysint"] = this.getHTML("section.mask section.ys b");
result["ysdes"] = this.getHTML("section.mask section.ys aside").split("</b>")[1];
        // sports index
result["ydint"] = this.getHTML("section.mask section.yd b");
result["iddes"] = this.getHTML("section.mask section.yd aside").split("</b>")[1];
        // makeup index
result["hzint"] = this.getHTML("section.mask section.pp b");
result["hzdes"] = this.getHTML("section.mask section.pp aside").split("</b>")[1];
        // comfort index
result["ssdint"] = this.getHTML("section.mask section.co b");
result["ssdes"] = this.getHTML("section.mask section.co aside").split("</b>")[1];
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
        // ---------------- send to the collector server ---------------
//var response = this.evaluate(send, {host: host, result: result});
//this.log("Server response:" + response, 'debug');
        //----------------- debug output -----------
utils.dump(result);
}), (function () {
this.die(this.getCurrentUrl() + " timeout reached.");
}), 12000);
}
// mobile-site version
//function grabFuture(cityId) {
// var result = {};
// result["cityId"] = cityId;
// this.evaluate(function (host, result) {
// var futures = __utils__.querySelectorAll("div.days7 li");
// for (var i = 0, j = futures.length; i < j; i++) {
//            // weather icon title, e.g. cloudy or sunny
// var img = futures[i].querySelectorAll("i img");
// var before = img[0].getAttribute("alt");
// var after = img[1].getAttribute("alt");
// result["img_title" + (i + 1)] = before;
// result["img_title" + (i + 2)] = after;
//            // weather transition, e.g. cloudy turning sunny
// if (before === after) {
// result["weather" + (i + 1)] = before;
// } else {
// result["weather" + (i + 1)] = before + "转" + after;
// }
//            // temperature, e.g. 5°/10°
// result["temp" + (i + 1)] = futures[i].querySelector("span").innerHTML;
//            // weather icon number
// var link1 = img[0].getAttribute("src");
// var link2 = img[1].getAttribute("src");
// result["img" + (i + 1)] = link1.charAt(link1.length - 5);
// result["img" + (i + 2)] = link2.charAt(link1.length - 5);
//
// }
//
// }, {host: host, result: result})
//}
// desktop-site version
function grabFuture(cityId) {
var start = new Date().getTime();
var result = {};
result["cityId"] = cityId;
var city = this.getHTML('div.cityName.clearfix div.fl h2');
if (city === | result["city"] = this.getHTML('div.cityName.clearfix div.fl h3');
} else {
result['city'] = city;
}
result["date"] = this.getHTML('#tabDays > p').replace(/[^0-9:-\s]/ig, "");
result["district"] = this.getHTML('div.cityName.clearfix div.fl h1');
var now = new Date();
result["sysdate"] = now.getHours() + ":" + now.getMinutes();
this.evaluate(function (host, result) {
var futures = __utils__.querySelectorAll("ul.t.clearfix li.dn");
for (var i = 0, j = futures.length; i < j; i++) {
            // weather icon title, e.g. cloudy or sunny
var img_title = futures[i].querySelector("p.wea").innerHTML;
if (img_title.indexOf("转") != -1) {
var titles = img_title.split("转");
result["img_title" + (i + 1)] = titles[0];
result["img_title" + (i + 2)] = titles[1];
} else {
result["img_title" + (i + 1)] = img_title;
result["img_title" + (i + 2)] = img_title;
}
            // weather transition, e.g. cloudy turning sunny
result["weather" + (i + 1)] = img_title;
            // temperature, e.g. 5°/10°
var temp = futures[i].querySelectorAll("p.tem span");
result["temp" + (i + 1)] = temp[0].innerHTML + '/' + temp[1].innerHTML;
            // weather icon number
var img = futures[i].querySelectorAll("big");
var num1 = img[0].getAttribute("class").split(" ")[1].charAt(2);
var num2 = img[1].getAttribute("class").split(" ")[1].charAt(2);
result["img" + (i + 1)] = num1;
result["img" + (i + 2)] = num2;
            // wind direction
var fx = futures[i].querySelectorAll("p.win span");
result["fx" + (i + 1)] = fx[0].getAttribute("title");
result["fx" + (i + 2)] = fx[1].getAttribute("title");
result["fl" + (i + 1)] = futures[i].querySelector("p.win i").innerHTML;
result["fl" + (i + 2)] = futures[i].querySelector("p.win i").innerHTML;
}
utils.dump(result);
this.log('Grab data in ' + (now.getTime() - start) + 'ms', 'info');
}, {host: host, result: result});
}
casper.start();
// run the same-day weather scrape
casper.each(cityIds, function (self, cityId) {
var link = "http://www.weather.com.cn/weather1d/" + cityId + ".shtml";
self.log("-----------today weather link:" + link, 'info');
self.thenOpen(link, function () {
grab.call(this, cityId)
});
});
// run the 7-day forecast weather scrape
//casper.each(cityIds, function (self, cityId) {
// var link = "http://www.weather.com.cn/weather/" + cityId + ".shtml";
// self.log("-----------future weather link:" + link, 'info');
// self.thenOpen(link, function () {
// grabFuture.call(this, cityId)
// });
//});
// run
casper.run(function () {
    // exit once finished
casper.exit();
});
| null) {
| identifier_name |
real_time_plotting_new.py | '''
file: live_plotting.py
Created by: Curtis Puetz 2018-07-08
Improved by: Kimberlee Dube 2019-07-16
note to users:
1) you must hard code in the location you want to LOAD from your log files within the
read_last_line_in_data_log() function on the line:
log_file_path = r"C:\\Users\puetz\Desktop\Telemtry_logs"
2) the 'plot_pause_for_interactive' variable should be a little bit faster than the rate of
data being written to the log files in serial_communication.py (i.e. 'check_for_data_delay'
variable). It can easily be 1 second. This allows for CPU usage to remain low, since the program
does not check the log file as often.
3) you must be generating data for this program to do anything. So either serial_communication.py
needs to be running and receiving data from the balloon, or generate_dummy_logs.py needs to be
running to generate artificial data. In the latter, a text file needs to be supplied to
generate_dummy_logs.py with reasonable data, and the log_file_paths in both 'generate_dummy_logs.py'
and this program need to be appropriate.
4) the png files used for the longitude latitude maps need to be set to your location (you also
need to generate the constraints of the picture manually)
'''
import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.image as mpimg
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# **** VARIABLES TO DEFINE BEFORE FLIGHT **************
location_of_base_image = r'C:/Users/kimdu/Documents/ph549/basemap.png'
home_lat = 52.4904018
home_lon = -105.719035
# coordinates of background image
left_lon = -107.7
right_lon = -103.7
bottom_lat = 50.7
top_lat = 54.3
approx_start_time = datetime.datetime(2019, 7, 17, 1, 1, 1)
approx_end_time = datetime.datetime(2019, 7, 17, 23, 1, 1)
# ****************************************************
plt.style.use('plotstyle.mplstyle')
def | (projection=ccrs.PlateCarree()):
"""
Code from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
"""
fig, ax = plt.subplots(figsize=(9, 13),
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
def save_map_image(loc):
"""
Code adapted from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
Grab google maps image covering geographic area given by 'extent'
and save image to file for use as background map.
Use this function to generate the basemap image. It is also possible to plot data
directly on the cartopy object (map_ax) which provides higher resolution,
however that requires an internet connection onsite.
:param loc: path defining location to save image
:return: nothing
"""
import cartopy.io.img_tiles as cimgt
extent = [left_lon, right_lon, bottom_lat, top_lat]
request = cimgt.GoogleTiles()
map_fig, map_ax = make_map(projection=request.crs)
map_ax.set_extent(extent)
map_ax.add_image(request, 10)
# put star at launch site
map_ax.plot(home_lon, home_lat, marker='*', color='black', markersize=10,
transform=ccrs.Geodetic())
    map_fig.savefig(loc)
def set_up_plots():
'''
    Set up the axes of the desired plots
Written by Curtis Puetz 2018-07-07
Completely changed by Kimberlee Dube 2019-07-17
:return: None
'''
fig, axes = plt.subplots(2, 2, figsize=(20, 15), num=1,
sharex=False, sharey=False)
ax0 = axes[0, 0]
ax0.set_title('Altitude')
ax0.set_xlabel('Time')
ax0.set_ylabel('Altitude [m]')
ax1 = axes[0, 1]
ax1.set_title('Internal Temperature')
ax1.set_xlabel('Temperature [$\degree$C]')
ax1.set_ylabel('Altitude [m]')
ax2 = axes[1, 1]
ax2.set_title('External Temperature')
ax2.set_xlabel('Temperature [$\degree$C]')
ax2.set_ylabel('Altitude [m]')
ax3 = axes[1, 0]
ax3.set_title('Geiger Counters')
ax3.set_xlabel('Count/Time')
ax3.set_ylabel('Altitude [m]')
ax3.legend([Line2D([0], [0], color='red', lw=4),
Line2D([0], [0], color='blue', lw=4)], ['C1', 'C2'])
plt.tight_layout()
map_fig, map_ax = plt.subplots(figsize=(9, 13))
map_ax.set_title("Where my payload at?")
img = mpimg.imread(location_of_base_image)
imgplot = map_ax.imshow(img)
return axes, map_ax, img
def plot_data(data, header_data, axes, map_ax, img):
'''
Plot a single data point for each of the plots defined in 'set_up_plots()'
This will occur each time a comma separated data list is received
Written by Curtis Puetz 2018-07-07
Rewritten by Kimberlee Dube 2019-07-17
:param data: the list of data generated from the downlinked comma separated data list
:return: None
'''
pi_time = datetime.datetime.strptime(data[0], '%Y%m%d_%X.%f')
# isolate the floats and save them in a dictionary (while checking the units of altitude)
data = data[1:] # removes the time datetime value
header_data = header_data[1:] # removes the time datetime value
data_dict = dict(zip(header_data, data))
    if data_dict['Altu'] == "KM":
        alt_factor = 1000
    else:
        alt_factor = 1
    del data_dict['Altu']
    del data_dict['NS']
    del data_dict['EW']
    data_float = [[] for i in range(len(data_dict))]
    for i, dai in enumerate(list(data_dict.values())):
        if dai == "":
            data_float[i] = ""
        else:
            data_float[i] = float(dai)
    data_dict = dict(zip(list(data_dict.keys()), data_float))
    # scale altitude to metres only after 'Alt' has been converted to a float,
    # otherwise the string value would be repeated instead of multiplied
    if not data_dict['Alt'] == "":
        data_dict['Alt'] *= alt_factor
# Change in altitude over time
if not data_dict['Alt'] == "":
axes[0, 0].scatter(pi_time, data_dict['Alt'], color='green')
# Need to manually set the approximate flight start
# and end times for the plot to look nice
axes[0, 0].set_xlim([approx_start_time, approx_end_time])
# Altitude profile of internal temperature
if not data_dict['TC'] == "" and not data_dict['Alt'] == "":
axes[0, 1].scatter(data_dict['TC'], data_dict['Alt'], color='green')
# Altitude profile of external temperature
if not data_dict['temp'] == "" and not data_dict['Alt'] == "":
axes[1, 1].scatter(data_dict['temp'], data_dict['Alt'], color='green')
# Altitude profiles of Geiger counter measurements
    if not data_dict['C1'] == "" and not data_dict['Alt'] == "":
        axes[1, 0].scatter(data_dict['C1'], data_dict['Alt'], color='red', label='C1')
    if not data_dict['C2'] == "" and not data_dict['Alt'] == "":
        axes[1, 0].scatter(data_dict['C2'], data_dict['Alt'], color='blue', label='C2')
# Map of geographic location
if not data_dict['LtDgMn'] == "" and not data_dict['LnDgMn'] == "":
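        # GPS fields arrive as ddmm.mmmm (degrees and decimal minutes): split off the
        # degrees, then divide the remaining minutes by 60 to get decimal degrees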
lat = int(data_dict['LtDgMn']/100) + (data_dict['LtDgMn'] - int(data_dict['LtDgMn']/100)*100)/60
lon = -(int(data_dict['LnDgMn']/100) + (data_dict['LnDgMn'] - int(data_dict['LnDgMn']/100)*100)/60)
# change to sask coords for testing
#lat = 52 + (lat % 1)
#lon = -105 + (lon % 1)
index_y = np.interp(lat, np.linspace(bottom_lat, top_lat, len(img)), np.arange(0, len(img))[::-1])
index_x = np.interp(lon, np.linspace(left_lon, right_lon, len(img[0])), np.arange(0, len(img[0])))
# map_ax.plot(lon, lat, marker='o', color='red', markersize=5,
# transform=ccrs.Geodetic())
map_ax.scatter(index_x, index_y, marker='o', color='red')
plt.pause(0.05)
def read_last_line_in_data_log():
"""
This function will read the last line in the data log file and return it
Written by Daniel Letros, 2018-07-03
    :return: the last line of the data log file as a string
"""
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d")
log_file_path = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs'
log_file_path += os.sep + timestamp
file_name = log_file_path + os.sep + timestamp + "_data.txt"
# file_name = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs/test.txt' # test generated data
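    # Read only the final line: seek backwards from the end of the file until a newline
    # is found; if the file is too short for that to work, fall back to reading all lines.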
try:
with open(file_name, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
content = f.readline().decode()
    except OSError:
with open(file_name, 'rb') as f:
content = f.readlines()[-1].decode()
return content
if __name__ == '__main__':
header_2018 = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'', 'C1', 'C2', 'SC', 'RSSI']
header = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'C1', 'C2', 'GN', 'BBL1', 'IRL1', 'BBL2', 'IRL2',
'BBL3', 'IRL3', 'temp']
plot_pause_for_interactive = 4
axes, map_ax, img = set_up_plots()
plt.ion()
hold = ""
while True:
data = read_last_line_in_data_log()
if data == hold:
plt.pause(plot_pause_for_interactive)
continue
hold = data
data = data[:-2] # remove newline character
print(data)
if data[0] == "P": # first character of header string
header_data = data.split(',')
elif data[0] == '2': # first character of a row of good data (starts with year)
data = data.split(',')
plot_data(data, header, axes, map_ax, img)
| make_map | identifier_name |
real_time_plotting_new.py | '''
file: live_plotting.py
Created by: Curtis Puetz 2018-07-08
Improved by: Kimberlee Dube 2019-07-16
note to users:
1) you must hard code in the location you want to LOAD from your log files within the
read_last_line_in_data_log() function on the line:
log_file_path = r"C:\\Users\puetz\Desktop\Telemtry_logs"
2) the 'plot_pause_for_interactive' variable should be a little bit faster than the rate of
data being written to the log files in serial_communication.py (i.e. 'check_for_data_delay'
variable). It can easily be 1 second. This allows for CPU usage to remain low, since the program
does not check the log file as often.
3) you must be generating data for this program to do anything. So either serial_communication.py
needs to be running and receiving data from the balloon, or generate_dummy_logs.py needs to be
running to generate artificial data. In the latter, a text file needs to be supplied to
generate_dummy_logs.py with reasonable data, and the log_file_paths in both 'generate_dummy_logs.py'
and this program need to be appropriate.
4) the png files used for the longitude latitude maps need to be set to your location (you also
need to generate the constraints of the picture manually)
'''
import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.image as mpimg
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# **** VARIABLES TO DEFINE BEFORE FLIGHT **************
location_of_base_image = r'C:/Users/kimdu/Documents/ph549/basemap.png'
home_lat = 52.4904018
home_lon = -105.719035
# coordinates of background image
left_lon = -107.7
right_lon = -103.7
bottom_lat = 50.7
top_lat = 54.3
approx_start_time = datetime.datetime(2019, 7, 17, 1, 1, 1)
approx_end_time = datetime.datetime(2019, 7, 17, 23, 1, 1)
# ****************************************************
plt.style.use('plotstyle.mplstyle')
def make_map(projection=ccrs.PlateCarree()):
"""
Code from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
""" | gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
def save_map_image(loc):
"""
Code adapted from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
Grab google maps image covering geographic area given by 'extent'
and save image to file for use as background map.
Use this function to generate the basemap image. It is also possible to plot data
directly on the cartopy object (map_ax) which provides higher resolution,
however that requires an internet connection onsite.
:param loc: path defining location to save image
:return: nothing
"""
import cartopy.io.img_tiles as cimgt
extent = [left_lon, right_lon, bottom_lat, top_lat]
request = cimgt.GoogleTiles()
map_fig, map_ax = make_map(projection=request.crs)
map_ax.set_extent(extent)
map_ax.add_image(request, 10)
# put star at launch site
map_ax.plot(home_lon, home_lat, marker='*', color='black', markersize=10,
transform=ccrs.Geodetic())
    map_fig.savefig(loc)
def set_up_plots():
'''
    Set up the axes of the desired plots
Written by Curtis Puetz 2018-07-07
Completely changed by Kimberlee Dube 2019-07-17
:return: None
'''
fig, axes = plt.subplots(2, 2, figsize=(20, 15), num=1,
sharex=False, sharey=False)
ax0 = axes[0, 0]
ax0.set_title('Altitude')
ax0.set_xlabel('Time')
ax0.set_ylabel('Altitude [m]')
ax1 = axes[0, 1]
ax1.set_title('Internal Temperature')
ax1.set_xlabel('Temperature [$\degree$C]')
ax1.set_ylabel('Altitude [m]')
ax2 = axes[1, 1]
ax2.set_title('External Temperature')
ax2.set_xlabel('Temperature [$\degree$C]')
ax2.set_ylabel('Altitude [m]')
ax3 = axes[1, 0]
ax3.set_title('Geiger Counters')
ax3.set_xlabel('Count/Time')
ax3.set_ylabel('Altitude [m]')
ax3.legend([Line2D([0], [0], color='red', lw=4),
Line2D([0], [0], color='blue', lw=4)], ['C1', 'C2'])
plt.tight_layout()
map_fig, map_ax = plt.subplots(figsize=(9, 13))
map_ax.set_title("Where my payload at?")
img = mpimg.imread(location_of_base_image)
imgplot = map_ax.imshow(img)
return axes, map_ax, img
def plot_data(data, header_data, axes, map_ax, img):
'''
Plot a single data point for each of the plots defined in 'set_up_plots()'
This will occur each time a comma separated data list is received
Written by Curtis Puetz 2018-07-07
Rewritten by Kimberlee Dube 2019-07-17
:param data: the list of data generated from the downlinked comma separated data list
:return: None
'''
pi_time = datetime.datetime.strptime(data[0], '%Y%m%d_%X.%f')
# isolate the floats and save them in a dictionary (while checking the units of altitude)
data = data[1:] # removes the time datetime value
header_data = header_data[1:] # removes the time datetime value
data_dict = dict(zip(header_data, data))
    if data_dict['Altu'] == "KM":
        alt_factor = 1000
    else:
        alt_factor = 1
    del data_dict['Altu']
    del data_dict['NS']
    del data_dict['EW']
    data_float = [[] for i in range(len(data_dict))]
    for i, dai in enumerate(list(data_dict.values())):
        if dai == "":
            data_float[i] = ""
        else:
            data_float[i] = float(dai)
    data_dict = dict(zip(list(data_dict.keys()), data_float))
    # scale altitude to metres only after 'Alt' has been converted to a float,
    # otherwise the string value would be repeated instead of multiplied
    if not data_dict['Alt'] == "":
        data_dict['Alt'] *= alt_factor
# Change in altitude over time
if not data_dict['Alt'] == "":
axes[0, 0].scatter(pi_time, data_dict['Alt'], color='green')
# Need to manually set the approximate flight start
# and end times for the plot to look nice
axes[0, 0].set_xlim([approx_start_time, approx_end_time])
# Altitude profile of internal temperature
if not data_dict['TC'] == "" and not data_dict['Alt'] == "":
axes[0, 1].scatter(data_dict['TC'], data_dict['Alt'], color='green')
# Altitude profile of external temperature
if not data_dict['temp'] == "" and not data_dict['Alt'] == "":
axes[1, 1].scatter(data_dict['temp'], data_dict['Alt'], color='green')
# Altitude profiles of Geiger counter measurements
    if not data_dict['C1'] == "" and not data_dict['Alt'] == "":
        axes[1, 0].scatter(data_dict['C1'], data_dict['Alt'], color='red', label='C1')
    if not data_dict['C2'] == "" and not data_dict['Alt'] == "":
        axes[1, 0].scatter(data_dict['C2'], data_dict['Alt'], color='blue', label='C2')
# Map of geographic location
if not data_dict['LtDgMn'] == "" and not data_dict['LnDgMn'] == "":
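        # GPS fields arrive as ddmm.mmmm (degrees and decimal minutes): split off the
        # degrees, then divide the remaining minutes by 60 to get decimal degrees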
lat = int(data_dict['LtDgMn']/100) + (data_dict['LtDgMn'] - int(data_dict['LtDgMn']/100)*100)/60
lon = -(int(data_dict['LnDgMn']/100) + (data_dict['LnDgMn'] - int(data_dict['LnDgMn']/100)*100)/60)
# change to sask coords for testing
#lat = 52 + (lat % 1)
#lon = -105 + (lon % 1)
index_y = np.interp(lat, np.linspace(bottom_lat, top_lat, len(img)), np.arange(0, len(img))[::-1])
index_x = np.interp(lon, np.linspace(left_lon, right_lon, len(img[0])), np.arange(0, len(img[0])))
# map_ax.plot(lon, lat, marker='o', color='red', markersize=5,
# transform=ccrs.Geodetic())
map_ax.scatter(index_x, index_y, marker='o', color='red')
plt.pause(0.05)
def read_last_line_in_data_log():
"""
This function will read the last line in the data log file and return it
Written by Daniel Letros, 2018-07-03
    :return: the last line of the data log file as a string
"""
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d")
log_file_path = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs'
log_file_path += os.sep + timestamp
file_name = log_file_path + os.sep + timestamp + "_data.txt"
# file_name = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs/test.txt' # test generated data
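    # Read only the final line: seek backwards from the end of the file until a newline
    # is found; if the file is too short for that to work, fall back to reading all lines.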
try:
with open(file_name, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
content = f.readline().decode()
    except OSError:
with open(file_name, 'rb') as f:
content = f.readlines()[-1].decode()
return content
if __name__ == '__main__':
header_2018 = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'', 'C1', 'C2', 'SC', 'RSSI']
header = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'C1', 'C2', 'GN', 'BBL1', 'IRL1', 'BBL2', 'IRL2',
'BBL3', 'IRL3', 'temp']
plot_pause_for_interactive = 4
axes, map_ax, img = set_up_plots()
plt.ion()
hold = ""
while True:
data = read_last_line_in_data_log()
if data == hold:
plt.pause(plot_pause_for_interactive)
continue
hold = data
data = data[:-2] # remove newline character
print(data)
if data[0] == "P": # first character of header string
header_data = data.split(',')
elif data[0] == '2': # first character of a row of good data (starts with year)
data = data.split(',')
plot_data(data, header, axes, map_ax, img) | fig, ax = plt.subplots(figsize=(9, 13),
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True) | random_line_split |
real_time_plotting_new.py | '''
file: live_plotting.py
Created by: Curtis Puetz 2018-07-08
Improved by: Kimberlee Dube 2019-07-16
note to users:
1) you must hard code in the location you want to LOAD from your log files within the
read_last_line_in_data_log() function on the line:
log_file_path = r"C:\\Users\puetz\Desktop\Telemtry_logs"
2) the 'plot_pause_for_interactive' variable should be a little bit faster than the rate of
data being written to the log files in serial_communication.py (i.e. 'check_for_data_delay'
variable). It can easily be 1 second. This allows for CPU usage to remain low, since the program
does not check the log file as often.
3) you must be generating data for this program to do anything. So either serial_communication.py
needs to be running and receiving data from the balloon, or generate_dummy_logs.py needs to be
running to generate artificial data. In the latter, a text file needs to be supplied to
generate_dummy_logs.py with reasonable data, and the log_file_paths in both 'generate_dummy_logs.py'
and this program need to be appropriate.
4) the png files used for the longitude latitude maps need to be set to your location (you also
need to generate the constraints of the picture manually)
'''
import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.image as mpimg
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# **** VARIABLES TO DEFINE BEFORE FLIGHT **************
location_of_base_image = r'C:/Users/kimdu/Documents/ph549/basemap.png'
home_lat = 52.4904018
home_lon = -105.719035
# coordinates of background image
left_lon = -107.7
right_lon = -103.7
bottom_lat = 50.7
top_lat = 54.3
approx_start_time = datetime.datetime(2019, 7, 17, 1, 1, 1)
approx_end_time = datetime.datetime(2019, 7, 17, 23, 1, 1)
# ****************************************************
plt.style.use('plotstyle.mplstyle')
def make_map(projection=ccrs.PlateCarree()):
"""
Code from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
"""
fig, ax = plt.subplots(figsize=(9, 13),
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
def save_map_image(loc):
"""
Code adapted from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
Grab google maps image covering geographic area given by 'extent'
and save image to file for use as background map.
Use this function to generate the basemap image. It is also possible to plot data
directly on the cartopy object (map_ax) which provides higher resolution,
however that requires an internet connection onsite.
:param loc: path defining location to save image
:return: nothing
"""
import cartopy.io.img_tiles as cimgt
extent = [left_lon, right_lon, bottom_lat, top_lat]
request = cimgt.GoogleTiles()
map_fig, map_ax = make_map(projection=request.crs)
map_ax.set_extent(extent)
map_ax.add_image(request, 10)
# put star at launch site
map_ax.plot(home_lon, home_lat, marker='*', color='black', markersize=10,
transform=ccrs.Geodetic())
    map_fig.savefig(loc)
def set_up_plots():
'''
    Set up the axes of the desired plots
Written by Curtis Puetz 2018-07-07
Completely changed by Kimberlee Dube 2019-07-17
:return: None
'''
fig, axes = plt.subplots(2, 2, figsize=(20, 15), num=1,
sharex=False, sharey=False)
ax0 = axes[0, 0]
ax0.set_title('Altitude')
ax0.set_xlabel('Time')
ax0.set_ylabel('Altitude [m]')
ax1 = axes[0, 1]
ax1.set_title('Internal Temperature')
ax1.set_xlabel('Temperature [$\degree$C]')
ax1.set_ylabel('Altitude [m]')
ax2 = axes[1, 1]
ax2.set_title('External Temperature')
ax2.set_xlabel('Temperature [$\degree$C]')
ax2.set_ylabel('Altitude [m]')
ax3 = axes[1, 0]
ax3.set_title('Geiger Counters')
ax3.set_xlabel('Count/Time')
ax3.set_ylabel('Altitude [m]')
ax3.legend([Line2D([0], [0], color='red', lw=4),
Line2D([0], [0], color='blue', lw=4)], ['C1', 'C2'])
plt.tight_layout()
map_fig, map_ax = plt.subplots(figsize=(9, 13))
map_ax.set_title("Where my payload at?")
img = mpimg.imread(location_of_base_image)
imgplot = map_ax.imshow(img)
return axes, map_ax, img
def plot_data(data, header_data, axes, map_ax, img):
'''
Plot a single data point for each of the plots defined in 'set_up_plots()'
This will occur each time a comma separated data list is received
Written by Curtis Puetz 2018-07-07
Rewritten by Kimberlee Dube 2019-07-17
:param data: the list of data generated from the downlinked comma separated data list
:return: None
'''
pi_time = datetime.datetime.strptime(data[0], '%Y%m%d_%X.%f')
# isolate the floats and save them in a dictionary (while checking the units of altitude)
data = data[1:] # removes the time datetime value
header_data = header_data[1:] # removes the time datetime value
data_dict = dict(zip(header_data, data))
    if data_dict['Altu'] == "KM":
        alt_factor = 1000
    else:
        alt_factor = 1
    del data_dict['Altu']
    del data_dict['NS']
    del data_dict['EW']
    data_float = [[] for i in range(len(data_dict))]
    for i, dai in enumerate(list(data_dict.values())):
        if dai == "":
            data_float[i] = ""
        else:
            data_float[i] = float(dai)
    data_dict = dict(zip(list(data_dict.keys()), data_float))
    # scale altitude to metres only after 'Alt' has been converted to a float,
    # otherwise the string value would be repeated instead of multiplied
    if not data_dict['Alt'] == "":
        data_dict['Alt'] *= alt_factor
# Change in altitude over time
if not data_dict['Alt'] == "":
axes[0, 0].scatter(pi_time, data_dict['Alt'], color='green')
# Need to manually set the approximate flight start
# and end times for the plot to look nice
axes[0, 0].set_xlim([approx_start_time, approx_end_time])
# Altitude profile of internal temperature
if not data_dict['TC'] == "" and not data_dict['Alt'] == "":
axes[0, 1].scatter(data_dict['TC'], data_dict['Alt'], color='green')
# Altitude profile of external temperature
if not data_dict['temp'] == "" and not data_dict['Alt'] == "":
axes[1, 1].scatter(data_dict['temp'], data_dict['Alt'], color='green')
# Altitude profiles of Geiger counter measurements
if not data_dict['C1'] == "":
axes[1, 0].scatter(data_dict['C1'], data_dict['Alt'], color='red', label='C1')
if not data_dict['C2'] == "":
axes[1, 0].scatter(data_dict['C2'], data_dict['Alt'], color='blue', label='C2')
# Map of geographic location
if not data_dict['LtDgMn'] == "" and not data_dict['LnDgMn'] == "":
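# The GPS fields arrive in NMEA-style ddmm.mmmm form (degrees and decimal minutes
# packed together): dividing by 100 splits off the degrees and the remainder is
# minutes, e.g. 5229.424 -> 52 + 29.424/60 ~= 52.4904 degrees. Longitude is negated
# because the flight takes place in the western hemisphere.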
lat = int(data_dict['LtDgMn']/100) + (data_dict['LtDgMn'] - int(data_dict['LtDgMn']/100)*100)/60
lon = -(int(data_dict['LnDgMn']/100) + (data_dict['LnDgMn'] - int(data_dict['LnDgMn']/100)*100)/60)
# change to sask coords for testing
#lat = 52 + (lat % 1)
#lon = -105 + (lon % 1)
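# np.interp maps the geographic coordinates onto pixel indices of the background
# image; the row index is reversed because image row 0 is the northern (top) edge.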
index_y = np.interp(lat, np.linspace(bottom_lat, top_lat, len(img)), np.arange(0, len(img))[::-1])
index_x = np.interp(lon, np.linspace(left_lon, right_lon, len(img[0])), np.arange(0, len(img[0])))
# map_ax.plot(lon, lat, marker='o', color='red', markersize=5,
# transform=ccrs.Geodetic())
map_ax.scatter(index_x, index_y, marker='o', color='red')
plt.pause(0.05)
def read_last_line_in_data_log():
"""
This function will read the last line in the data log file and return it
Written by Daniel Letros, 2018-07-03
:return: the last line of the log file as a string
"""
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d")
log_file_path = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs'
log_file_path += os.sep + timestamp
file_name = log_file_path + os.sep + timestamp + "_data.txt"
# file_name = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs/test.txt' # test generated data
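# Step backwards from the end of the file until a newline is found, then read the
# final line; this avoids loading the whole log. The fallback below reads the whole
# file if seeking fails (e.g. the file is too short).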
try:
with open(file_name, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
content = f.readline().decode()
except:
with open(file_name, 'rb') as f:
content = f.readlines()[-1].decode()
return content
if __name__ == '__main__':
header_2018 = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'', 'C1', 'C2', 'SC', 'RSSI']
header = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'C1', 'C2', 'GN', 'BBL1', 'IRL1', 'BBL2', 'IRL2',
'BBL3', 'IRL3', 'temp']
plot_pause_for_interactive = 4
axes, map_ax, img = set_up_plots()
plt.ion()
hold = ""
while True:
data = read_last_line_in_data_log()
if data == hold:
plt.pause(plot_pause_for_interactive)
continue
hold = data
data = data[:-2] # remove newline character
print(data)
if data[0] == "P": # first character of header string
header_data = data.split(',')
elif data[0] == '2': # first character of a row of good data (starts with year)
| data = data.split(',')
plot_data(data, header, axes, map_ax, img) | conditional_block |
|
real_time_plotting_new.py | '''
file: live_plotting.py
Created by: Curtis Puetz 2018-07-08
Improved by: Kimberlee Dube 2019-07-16
note to users:
1) you must hard code in the location you want to LOAD from your log files within the
read_last_line_in_data_log() function on the line:
log_file_path = r"C:\\Users\puetz\Desktop\Telemtry_logs"
2) the 'plot_pause_for_interactive' variable should be a little bit faster than the rate of
data being written to the log files in serial_communication.py (i.e. 'check_for_data_delay'
variable). It can easily be 1 second. This allows for CPU usage to remain low, since the program
does not check the log file as often.
3) you must be generating data for this program to do anything. So either serial_communication.py
needs to be running and receiving data from the balloon, or generate_dummy_logs.py needs to be
running to generate artificial data. In the latter, a text file needs to be supplied to
generate_dummy_logs.py with reasonable data, and the log_file_paths in both 'generate_dummy_logs.py'
and this program need to be appropriate.
4) the png files used for the longitude latitude maps needs to be set to your location (you also
need to generate the constrains of the picture manually)
'''
import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.image as mpimg
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# **** VARIABLES TO DEFINE BEFORE FLIGHT **************
location_of_base_image = r'C:/Users/kimdu/Documents/ph549/basemap.png'
home_lat = 52.4904018
home_lon = -105.719035
# coordinates of background image
left_lon = -107.7
right_lon = -103.7
bottom_lat = 50.7
top_lat = 54.3
approx_start_time = datetime.datetime(2019, 7, 17, 1, 1, 1)
approx_end_time = datetime.datetime(2019, 7, 17, 23, 1, 1)
# ****************************************************
plt.style.use('plotstyle.mplstyle')
def make_map(projection=ccrs.PlateCarree()):
"""
Code from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
"""
fig, ax = plt.subplots(figsize=(9, 13),
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
def save_map_image(loc):
"""
Code adapted from https://ocefpaf.github.io/python4oceanographers/blog/2015/06/22/osm/
Grab google maps image covering geographic area given by 'extent'
and save image to file for use as background map.
Use this function to generate the basemap image. It is also possible to plot data
directly on the cartopy object (map_ax) which provides higher resolution,
however that requires an internet connection onsite.
:param loc: path defining location to save image
:return: nothing
"""
import cartopy.io.img_tiles as cimgt
extent = [left_lon, right_lon, bottom_lat, top_lat]
request = cimgt.GoogleTiles()
map_fig, map_ax = make_map(projection=request.crs)
map_ax.set_extent(extent)
map_ax.add_image(request, 10)
# put star at launch site
map_ax.plot(home_lon, home_lat, marker='*', color='black', markersize=10,
transform=ccrs.Geodetic())
map_fig.savefig(loc)
def set_up_plots():
|
def plot_data(data, header_data, axes, map_ax, img):
'''
Plot a single data point for each of the plots defined in 'set_up_plots()'
This will occur each time a comma separated data list is received
Written by Curtis Puetz 2018-07-07
Rewritten by Kimberlee Dube 2019-07-17
:param data: the list of data generated from the downlinked comma separated data list
:return: None
'''
pi_time = datetime.datetime.strptime(data[0], '%Y%m%d_%X.%f')
# isolate the floats and save them in a dictionary (while checking the units of altitude)
data = data[1:] # removes the time datetime value
header_data = header_data[1:] # removes the time datetime value
data_dict = dict(zip(header_data, data))
if data_dict['Altu'] == "KM":
alt_factor = 1000
else:
alt_factor = 1
# the value is still a string at this point, so convert before applying the unit factor
data_dict['Alt'] = str(float(data_dict['Alt']) * alt_factor) if data_dict['Alt'] != "" else ""
del data_dict['Altu']
del data_dict['NS']
del data_dict['EW']
data_float = [[] for i in range(len(data_dict))]
for i, dai in enumerate(list(data_dict.values())):
if dai == "":
data_float[i] = ""
else:
data_float[i] = float(dai)
data_dict = dict(zip(list(data_dict.keys()), data_float))
# Change in altitude over time
if not data_dict['Alt'] == "":
axes[0, 0].scatter(pi_time, data_dict['Alt'], color='green')
# Need to manually set the approximate flight start
# and end times for the plot to look nice
axes[0, 0].set_xlim([approx_start_time, approx_end_time])
# Altitude profile of internal temperature
if not data_dict['TC'] == "" and not data_dict['Alt'] == "":
axes[0, 1].scatter(data_dict['TC'], data_dict['Alt'], color='green')
# Altitude profile of external temperature
if not data_dict['temp'] == "" and not data_dict['Alt'] == "":
axes[1, 1].scatter(data_dict['temp'], data_dict['Alt'], color='green')
# Altitude profiles of Geiger counter measurements
if not data_dict['C1'] == "":
axes[1, 0].scatter(data_dict['C1'], data_dict['Alt'], color='red', label='C1')
if not data_dict['C2'] == "":
axes[1, 0].scatter(data_dict['C2'], data_dict['Alt'], color='blue', label='C2')
# Map of geographic location
if not data_dict['LtDgMn'] == "" and not data_dict['LnDgMn'] == "":
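# The GPS fields arrive in NMEA-style ddmm.mmmm form (degrees and decimal minutes
# packed together): dividing by 100 splits off the degrees and the remainder is
# minutes, e.g. 5229.424 -> 52 + 29.424/60 ~= 52.4904 degrees. Longitude is negated
# because the flight takes place in the western hemisphere.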
lat = int(data_dict['LtDgMn']/100) + (data_dict['LtDgMn'] - int(data_dict['LtDgMn']/100)*100)/60
lon = -(int(data_dict['LnDgMn']/100) + (data_dict['LnDgMn'] - int(data_dict['LnDgMn']/100)*100)/60)
# change to sask coords for testing
#lat = 52 + (lat % 1)
#lon = -105 + (lon % 1)
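# np.interp maps the geographic coordinates onto pixel indices of the background
# image; the row index is reversed because image row 0 is the northern (top) edge.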
index_y = np.interp(lat, np.linspace(bottom_lat, top_lat, len(img)), np.arange(0, len(img))[::-1])
index_x = np.interp(lon, np.linspace(left_lon, right_lon, len(img[0])), np.arange(0, len(img[0])))
# map_ax.plot(lon, lat, marker='o', color='red', markersize=5,
# transform=ccrs.Geodetic())
map_ax.scatter(index_x, index_y, marker='o', color='red')
plt.pause(0.05)
def read_last_line_in_data_log():
"""
This function will read the last line in the data log file and return it
Written by Daniel Letros, 2018-07-03
:return: the last line of the log file as a string
"""
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d")
log_file_path = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs'
log_file_path += os.sep + timestamp
file_name = log_file_path + os.sep + timestamp + "_data.txt"
# file_name = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs/test.txt' # test generated data
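# Step backwards from the end of the file until a newline is found, then read the
# final line; this avoids loading the whole log. The fallback below reads the whole
# file if seeking fails (e.g. the file is too short).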
try:
with open(file_name, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
content = f.readline().decode()
except:
with open(file_name, 'rb') as f:
content = f.readlines()[-1].decode()
return content
if __name__ == '__main__':
header_2018 = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'', 'C1', 'C2', 'SC', 'RSSI']
header = ['PiTS', 'ATSms', 'UTC', 'LtDgMn', 'NS', 'LnDgMn', 'EW',
'Nsat', 'Alt', 'Altu', 'Acxms2', 'Acyms2', 'Aczms2', 'Gyxrs',
'Gyyrs', 'Gyzrs', 'MgxuT', 'MgyuT', 'MgzuT', 'Elxdg', 'Elydg',
'Elzdg', 'LAcxms2', 'LAcyms2', 'LAczms2', 'Gvxms2', 'Gvyms2',
'Gvzms2', 'TC', 'SyCl03', 'GyCl03', 'AcCl03', 'MgCl03',
'C1', 'C2', 'GN', 'BBL1', 'IRL1', 'BBL2', 'IRL2',
'BBL3', 'IRL3', 'temp']
plot_pause_for_interactive = 4
axes, map_ax, img = set_up_plots()
plt.ion()
hold = ""
while True:
data = read_last_line_in_data_log()
if data == hold:
plt.pause(plot_pause_for_interactive)
continue
hold = data
data = data[:-2] # remove newline character
print(data)
if data[0] == "P": # first character of header string
header_data = data.split(',')
elif data[0] == '2': # first character of a row of good data (starts with year)
data = data.split(',')
plot_data(data, header, axes, map_ax, img)
| '''
Set up the axes of the desired plots
Written by Curtis Puetz 2018-07-07
Completely changed by Kimberlee Dube 2019-07-17
:return: None
'''
fig, axes = plt.subplots(2, 2, figsize=(20, 15), num=1,
sharex=False, sharey=False)
ax0 = axes[0, 0]
ax0.set_title('Altitude')
ax0.set_xlabel('Time')
ax0.set_ylabel('Altitude [m]')
ax1 = axes[0, 1]
ax1.set_title('Internal Temperature')
ax1.set_xlabel(r'Temperature [$\degree$C]')
ax1.set_ylabel('Altitude [m]')
ax2 = axes[1, 1]
ax2.set_title('External Temperature')
ax2.set_xlabel(r'Temperature [$\degree$C]')
ax2.set_ylabel('Altitude [m]')
ax3 = axes[1, 0]
ax3.set_title('Geiger Counters')
ax3.set_xlabel('Count/Time')
ax3.set_ylabel('Altitude [m]')
ax3.legend([Line2D([0], [0], color='red', lw=4),
Line2D([0], [0], color='blue', lw=4)], ['C1', 'C2'])
plt.tight_layout()
map_fig, map_ax = plt.subplots(figsize=(9, 13))
map_ax.set_title("Where my payload at?")
img = mpimg.imread(location_of_base_image)
imgplot = map_ax.imshow(img)
return axes, map_ax, img | identifier_body |
main.go | // Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
// Package conversions sits between CS3 type definitions and OCS API Responses
package conversions
import (
"context"
"fmt"
"net/http"
"path"
"time"
"github.com/cs3org/reva/v2/pkg/publicshare"
"github.com/cs3org/reva/v2/pkg/user"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
publicsharemgr "github.com/cs3org/reva/v2/pkg/publicshare/manager/registry"
usermgr "github.com/cs3org/reva/v2/pkg/user/manager/registry"
)
const (
// ShareTypeUser refers to user shares
ShareTypeUser ShareType = 0
// ShareTypePublicLink refers to public link shares
ShareTypePublicLink ShareType = 3
// ShareTypeGroup represents a group share
ShareTypeGroup ShareType = 1
// ShareTypeFederatedCloudShare represents a federated share
ShareTypeFederatedCloudShare ShareType = 6
// ShareTypeSpaceMembershipUser represents an action regarding user type space members
ShareTypeSpaceMembershipUser ShareType = 7
// ShareTypeSpaceMembershipGroup represents an action regarding group type space members
ShareTypeSpaceMembershipGroup ShareType = 8
// ShareWithUserTypeUser represents a normal user
ShareWithUserTypeUser ShareWithUserType = 0
// ShareWithUserTypeGuest represents a guest user
ShareWithUserTypeGuest ShareWithUserType = 1
// The datetime format of ISO8601
_iso8601 = "2006-01-02T15:04:05Z0700"
)
// ResourceType indicates the OCS type of the resource
type ResourceType int
func (rt ResourceType) String() (s string) {
switch rt {
case 0:
s = "invalid"
case 1:
s = "file"
case 2:
s = "folder"
case 3:
s = "reference"
default:
s = "invalid"
}
return
}
// ShareType denotes a type of share
type ShareType int
// ShareWithUserType denotes a type of user
type ShareWithUserType int
// ShareData represents https://doc.owncloud.com/server/developer_manual/core/ocs-share-api.html#response-attributes-1
type ShareData struct {
// TODO int?
ID string `json:"id" xml:"id"`
// The share’s type
ShareType ShareType `json:"share_type" xml:"share_type"`
// The username of the owner of the share.
UIDOwner string `json:"uid_owner" xml:"uid_owner"`
// The display name of the owner of the share.
DisplaynameOwner string `json:"displayname_owner" xml:"displayname_owner"`
// Additional info to identify the share owner, eg. the email or username
AdditionalInfoOwner string `json:"additional_info_owner" xml:"additional_info_owner"`
// The permission attribute set on the file.
// TODO(jfd) change the default to read only
Permissions Permissions `json:"permissions" xml:"permissions"`
// The UNIX timestamp when the share was created.
STime uint64 `json:"stime" xml:"stime"`
// ?
Parent string `json:"parent" xml:"parent"`
// The UNIX timestamp when the share expires.
Expiration string `json:"expiration" xml:"expiration"`
// The public link to the item being shared.
Token string `json:"token" xml:"token"`
// The unique id of the user that owns the file or folder being shared.
UIDFileOwner string `json:"uid_file_owner" xml:"uid_file_owner"`
// The display name of the user that owns the file or folder being shared.
DisplaynameFileOwner string `json:"displayname_file_owner" xml:"displayname_file_owner"`
// Additional info to identify the file owner, eg. the email or username
AdditionalInfoFileOwner string `json:"additional_info_file_owner" xml:"additional_info_file_owner"`
// share state, 0 = accepted, 1 = pending, 2 = declined
State int `json:"state" xml:"state"`
// The path to the shared file or folder.
Path string `json:"path" xml:"path"`
// The type of the object being shared. This can be one of 'file' or 'folder'.
ItemType string `json:"item_type" xml:"item_type"`
// The RFC2045-compliant mimetype of the file.
MimeType string `json:"mimetype" xml:"mimetype"`
// The space ID of the original file location
SpaceID string `json:"space_id" xml:"space_id"`
// The space alias of the original file location
SpaceAlias string `json:"space_alias" xml:"space_alias"`
StorageID string `json:"storage_id" xml:"storage_id"`
Storage uint64 `json:"storage" xml:"storage"`
// The unique node id of the item being shared.
ItemSource string `json:"item_source" xml:"item_source"`
// The unique node id of the item being shared. For legacy reasons item_source and file_source attributes have the same value.
FileSource string `json:"file_source" xml:"file_source"`
// The unique node id of the parent node of the item being shared.
FileParent string `json:"file_parent" xml:"file_parent"`
// The basename of the shared file.
FileTarget string `json:"file_target" xml:"file_target"`
// The uid of the share recipient. This is either
// - a GID (group id) if it is being shared with a group or
// - a UID (user id) if the share is shared with a user.
// - a password for public links
ShareWith string `json:"share_with,omitempty" xml:"share_with,omitempty"`
// The type of user
// - 0 = normal user
// - 1 = guest account
ShareWithUserType ShareWithUserType `json:"share_with_user_type" xml:"share_with_user_type"`
// The display name of the share recipient
ShareWithDisplayname string `json:"share_with_displayname,omitempty" xml:"share_with_displayname,omitempty"`
// Additional info to identify the share recipient, eg. the email or username
ShareWithAdditionalInfo string `json:"share_with_additional_info" xml:"share_with_additional_info"`
// Whether the recipient was notified, by mail, about the share being shared with them.
MailSend int `json:"mail_send" xml:"mail_send"`
// Name of the public share
Name string `json:"name" xml:"name"`
// URL of the public share
URL string `json:"url,omitempty" xml:"url,omitempty"`
// Attributes associated
Attributes string `json:"attributes,omitempty" xml:"attributes,omitempty"`
// Quicklink indicates if the link is the quicklink
Quicklink bool `json:"quicklink,omitempty" xml:"quicklink,omitempty"`
// PasswordProtected indicates whether a public share is password protected
// PasswordProtected bool `json:"password_protected,omitempty" xml:"password_protected,omitempty"`
}
// ShareeData holds share recipient search results
type ShareeData struct {
Exact *ExactMatchesData `json:"exact" xml:"exact"`
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// TokenInfo holds token information
type TokenInfo struct {
// for all callers
Token string `json:"token" xml:"token"`
LinkURL string `json:"link_url" xml:"link_url"`
PasswordProtected bool `json:"password_protected" xml:"password_protected"`
Aliaslink bool `json:"alias_link" xml:"alias_link"`
// if not password protected
ID string `json:"id" xml:"id"`
StorageID string `json:"storage_id" xml:"storage_id"`
SpaceID string `json:"space_id" xml:"space_id"`
OpaqueID string `json:"opaque_id" xml:"opaque_id"`
Path string `json:"path" xml:"path"`
// if native access
SpacePath string `json:"space_path" xml:"space_path"`
SpaceAlias string `json:"space_alias" xml:"space_alias"`
SpaceURL string `json:"space_url" xml:"space_url"`
SpaceType string `json:"space_type" xml:"space_type"`
}
// ExactMatchesData hold exact matches
type ExactMatchesData struct {
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// MatchData describes a single match
type MatchData struct {
Label string `json:"label" xml:"label,omitempty"`
Value *MatchValueData `json:"value" xml:"value"`
}
// MatchValueData holds the type and actual value
type MatchValueData struct {
ShareType int `json:"shareType" xml:"shareType"`
ShareWith string `json:"shareWith" xml:"shareWith"`
ShareWithAdditionalInfo string `json:"shareWithAdditionalInfo" xml:"shareWithAdditionalInfo,omitempty"`
UserType int `json:"userType" xml:"userType"`
}
// CS3Share2ShareData converts a cs3api user share into shareData data model
func CS3Share2ShareData(ctx context.Context, share *collaboration.Share) (*ShareData, error) {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
UIDOwner: LocalUserIDToString(share.GetCreator()),
UIDFileOwner: LocalUserIDToString(share.GetOwner()),
}
if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER {
sd.ShareType = ShareTypeUser
sd.ShareWith = LocalUserIDToString(share.Grantee.GetUserId())
shareType := share.GetGrantee().GetUserId().GetType()
if shareType == userpb.UserType_USER_TYPE_LIGHTWEIGHT || shareType == userpb.UserType_USER_TYPE_GUEST {
sd.ShareWithUserType = ShareWithUserTypeGuest
} else {
sd.ShareWithUserType = ShareWithUserTypeUser
}
} else if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP {
sd.ShareType = ShareTypeGroup
sd.ShareWith = LocalGroupIDToString(share.Grantee.GetGroupId())
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if share.GetPermissions().GetPermissions() != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), false).OCSPermissions()
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
if share.Expiration != nil {
expiration := time.Unix(int64(share.Expiration.Seconds), int64(share.Expiration.Nanos))
sd.Expiration = expiration.Format(_iso8601)
}
return sd, nil
}
// PublicShare2ShareData converts a cs3api public share into shareData data model
func PublicShare2ShareData(share *link.PublicShare, r *http.Request, publicURL string) *ShareData {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
ShareType: ShareTypePublicLink,
Token: share.Token,
Name: share.DisplayName,
MailSend: 0,
URL: publicURL + path.Join("/", "s/"+share.Token),
UIDOwner: LocalUserIDToString(share.Creator),
UIDFileOwner: LocalUserIDToString(share.Owner),
Quicklink: share.Quicklink,
} | sd.ID = share.Id.OpaqueId
}
if s := share.GetPermissions().GetPermissions(); s != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), true).OCSPermissions()
}
if share.Expiration != nil {
sd.Expiration = timestampToExpiration(share.Expiration)
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
// hide password
if share.PasswordProtected {
sd.ShareWith = "***redacted***"
sd.ShareWithDisplayname = "***redacted***"
}
return sd
}
// LocalUserIDToString transforms a cs3api user id into an ocs data model without domain name
// TODO ocs uses user names ... so an additional lookup is needed. see mapUserIds()
func LocalUserIDToString(userID *userpb.UserId) string {
if userID == nil || userID.OpaqueId == "" {
return ""
}
return userID.OpaqueId
}
// LocalGroupIDToString transforms a cs3api group id into an ocs data model without domain name
func LocalGroupIDToString(groupID *grouppb.GroupId) string {
if groupID == nil || groupID.OpaqueId == "" {
return ""
}
return groupID.OpaqueId
}
// GetUserManager returns a connection to a user share manager
func GetUserManager(manager string, m map[string]map[string]interface{}) (user.Manager, error) {
if f, ok := usermgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for user manager", manager)
}
// GetPublicShareManager returns a connection to a public share manager
func GetPublicShareManager(manager string, m map[string]map[string]interface{}) (publicshare.Manager, error) {
if f, ok := publicsharemgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for public shares manager", manager)
}
// timestamp is assumed to be UTC ... just human readable ...
// FIXME and ambiguous / error prone because there is no time zone ...
func timestampToExpiration(t *types.Timestamp) string {
return time.Unix(int64(t.Seconds), int64(t.Nanos)).UTC().Format("2006-01-02 15:04:05")
}
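// A minimal illustration (added for clarity, not part of the original file): Go
// layout strings are spelled with the reference time Mon Jan 2 15:04:05 MST 2006,
// so the minutes position is always "04" and the seconds position "05". This
// hypothetical helper only shows what the two layouts used here produce for a
// fixed UTC time.
func exampleLayouts() (string, string) {
	t := time.Date(2021, time.March, 9, 14, 30, 45, 0, time.UTC)
	human := t.Format("2006-01-02 15:04:05") // "2021-03-09 14:30:45"
	iso := t.Format(_iso8601)                // "2021-03-09T14:30:45Z"
	return human, iso
}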
// ParseTimestamp tries to parse the ocs expiry into a CS3 Timestamp
func ParseTimestamp(timestampString string) (*types.Timestamp, error) {
parsedTime, err := time.Parse("2006-01-02T15:04:05Z0700", timestampString)
if err != nil {
parsedTime, err = time.Parse("2006-01-02", timestampString)
if err == nil {
// the link needs to be valid for the whole day
parsedTime = parsedTime.Add(23*time.Hour + 59*time.Minute + 59*time.Second)
}
}
if err != nil {
return nil, fmt.Errorf("datetime format invalid: %v, %s", timestampString, err.Error())
}
final := parsedTime.UnixNano()
return &types.Timestamp{
Seconds: uint64(final / 1000000000),
Nanos: uint32(final % 1000000000),
}, nil
}
// UserTypeString returns human readable strings for various user types
func UserTypeString(userType userpb.UserType) string {
switch userType {
case userpb.UserType_USER_TYPE_PRIMARY:
return "primary"
case userpb.UserType_USER_TYPE_SECONDARY:
return "secondary"
case userpb.UserType_USER_TYPE_SERVICE:
return "service"
case userpb.UserType_USER_TYPE_APPLICATION:
return "application"
case userpb.UserType_USER_TYPE_GUEST:
return "guest"
case userpb.UserType_USER_TYPE_FEDERATED:
return "federated"
case userpb.UserType_USER_TYPE_LIGHTWEIGHT:
return "lightweight"
}
return "invalid"
} | if share.Id != nil { | random_line_split |
main.go | // Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
// Package conversions sits between CS3 type definitions and OCS API Responses
package conversions
import (
"context"
"fmt"
"net/http"
"path"
"time"
"github.com/cs3org/reva/v2/pkg/publicshare"
"github.com/cs3org/reva/v2/pkg/user"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
publicsharemgr "github.com/cs3org/reva/v2/pkg/publicshare/manager/registry"
usermgr "github.com/cs3org/reva/v2/pkg/user/manager/registry"
)
const (
// ShareTypeUser refers to user shares
ShareTypeUser ShareType = 0
// ShareTypePublicLink refers to public link shares
ShareTypePublicLink ShareType = 3
// ShareTypeGroup represents a group share
ShareTypeGroup ShareType = 1
// ShareTypeFederatedCloudShare represents a federated share
ShareTypeFederatedCloudShare ShareType = 6
// ShareTypeSpaceMembershipUser represents an action regarding user type space members
ShareTypeSpaceMembershipUser ShareType = 7
// ShareTypeSpaceMembershipGroup represents an action regarding group type space members
ShareTypeSpaceMembershipGroup ShareType = 8
// ShareWithUserTypeUser represents a normal user
ShareWithUserTypeUser ShareWithUserType = 0
// ShareWithUserTypeGuest represents a guest user
ShareWithUserTypeGuest ShareWithUserType = 1
// The datetime format of ISO8601
_iso8601 = "2006-01-02T15:04:05Z0700"
)
// ResourceType indicates the OCS type of the resource
type ResourceType int
func (rt ResourceType) String() (s string) {
switch rt {
case 0:
s = "invalid"
case 1:
s = "file"
case 2:
s = "folder"
case 3:
s = "reference"
default:
s = "invalid"
}
return
}
// ShareType denotes a type of share
type ShareType int
// ShareWithUserType denotes a type of user
type ShareWithUserType int
// ShareData represents https://doc.owncloud.com/server/developer_manual/core/ocs-share-api.html#response-attributes-1
type ShareData struct {
// TODO int?
ID string `json:"id" xml:"id"`
// The share’s type
ShareType ShareType `json:"share_type" xml:"share_type"`
// The username of the owner of the share.
UIDOwner string `json:"uid_owner" xml:"uid_owner"`
// The display name of the owner of the share.
DisplaynameOwner string `json:"displayname_owner" xml:"displayname_owner"`
// Additional info to identify the share owner, eg. the email or username
AdditionalInfoOwner string `json:"additional_info_owner" xml:"additional_info_owner"`
// The permission attribute set on the file.
// TODO(jfd) change the default to read only
Permissions Permissions `json:"permissions" xml:"permissions"`
// The UNIX timestamp when the share was created.
STime uint64 `json:"stime" xml:"stime"`
// ?
Parent string `json:"parent" xml:"parent"`
// The UNIX timestamp when the share expires.
Expiration string `json:"expiration" xml:"expiration"`
// The public link to the item being shared.
Token string `json:"token" xml:"token"`
// The unique id of the user that owns the file or folder being shared.
UIDFileOwner string `json:"uid_file_owner" xml:"uid_file_owner"`
// The display name of the user that owns the file or folder being shared.
DisplaynameFileOwner string `json:"displayname_file_owner" xml:"displayname_file_owner"`
// Additional info to identify the file owner, eg. the email or username
AdditionalInfoFileOwner string `json:"additional_info_file_owner" xml:"additional_info_file_owner"`
// share state, 0 = accepted, 1 = pending, 2 = declined
State int `json:"state" xml:"state"`
// The path to the shared file or folder.
Path string `json:"path" xml:"path"`
// The type of the object being shared. This can be one of 'file' or 'folder'.
ItemType string `json:"item_type" xml:"item_type"`
// The RFC2045-compliant mimetype of the file.
MimeType string `json:"mimetype" xml:"mimetype"`
// The space ID of the original file location
SpaceID string `json:"space_id" xml:"space_id"`
// The space alias of the original file location
SpaceAlias string `json:"space_alias" xml:"space_alias"`
StorageID string `json:"storage_id" xml:"storage_id"`
Storage uint64 `json:"storage" xml:"storage"`
// The unique node id of the item being shared.
ItemSource string `json:"item_source" xml:"item_source"`
// The unique node id of the item being shared. For legacy reasons item_source and file_source attributes have the same value.
FileSource string `json:"file_source" xml:"file_source"`
// The unique node id of the parent node of the item being shared.
FileParent string `json:"file_parent" xml:"file_parent"`
// The basename of the shared file.
FileTarget string `json:"file_target" xml:"file_target"`
// The uid of the share recipient. This is either
// - a GID (group id) if it is being shared with a group or
// - a UID (user id) if the share is shared with a user.
// - a password for public links
ShareWith string `json:"share_with,omitempty" xml:"share_with,omitempty"`
// The type of user
// - 0 = normal user
// - 1 = guest account
ShareWithUserType ShareWithUserType `json:"share_with_user_type" xml:"share_with_user_type"`
// The display name of the share recipient
ShareWithDisplayname string `json:"share_with_displayname,omitempty" xml:"share_with_displayname,omitempty"`
// Additional info to identify the share recipient, eg. the email or username
ShareWithAdditionalInfo string `json:"share_with_additional_info" xml:"share_with_additional_info"`
// Whether the recipient was notified, by mail, about the share being shared with them.
MailSend int `json:"mail_send" xml:"mail_send"`
// Name of the public share
Name string `json:"name" xml:"name"`
// URL of the public share
URL string `json:"url,omitempty" xml:"url,omitempty"`
// Attributes associated
Attributes string `json:"attributes,omitempty" xml:"attributes,omitempty"`
// Quicklink indicates if the link is the quicklink
Quicklink bool `json:"quicklink,omitempty" xml:"quicklink,omitempty"`
// PasswordProtected indicates whether a public share is password protected
// PasswordProtected bool `json:"password_protected,omitempty" xml:"password_protected,omitempty"`
}
// ShareeData holds share recipient search results
type ShareeData struct {
Exact *ExactMatchesData `json:"exact" xml:"exact"`
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// TokenInfo holds token information
type TokenInfo struct {
// for all callers
Token string `json:"token" xml:"token"`
LinkURL string `json:"link_url" xml:"link_url"`
PasswordProtected bool `json:"password_protected" xml:"password_protected"`
Aliaslink bool `json:"alias_link" xml:"alias_link"`
// if not password protected
ID string `json:"id" xml:"id"`
StorageID string `json:"storage_id" xml:"storage_id"`
SpaceID string `json:"space_id" xml:"space_id"`
OpaqueID string `json:"opaque_id" xml:"opaque_id"`
Path string `json:"path" xml:"path"`
// if native access
SpacePath string `json:"space_path" xml:"space_path"`
SpaceAlias string `json:"space_alias" xml:"space_alias"`
SpaceURL string `json:"space_url" xml:"space_url"`
SpaceType string `json:"space_type" xml:"space_type"`
}
// ExactMatchesData hold exact matches
type ExactMatchesData struct {
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// MatchData describes a single match
type MatchData struct {
Label string `json:"label" xml:"label,omitempty"`
Value *MatchValueData `json:"value" xml:"value"`
}
// MatchValueData holds the type and actual value
type MatchValueData struct {
ShareType int `json:"shareType" xml:"shareType"`
ShareWith string `json:"shareWith" xml:"shareWith"`
ShareWithAdditionalInfo string `json:"shareWithAdditionalInfo" xml:"shareWithAdditionalInfo,omitempty"`
UserType int `json:"userType" xml:"userType"`
}
// CS3Share2ShareData converts a cs3api user share into shareData data model
func CS3Share2ShareData(ctx context.Context, share *collaboration.Share) (*ShareData, error) {
| // PublicShare2ShareData converts a cs3api public share into shareData data model
func PublicShare2ShareData(share *link.PublicShare, r *http.Request, publicURL string) *ShareData {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
ShareType: ShareTypePublicLink,
Token: share.Token,
Name: share.DisplayName,
MailSend: 0,
URL: publicURL + path.Join("/", "s/"+share.Token),
UIDOwner: LocalUserIDToString(share.Creator),
UIDFileOwner: LocalUserIDToString(share.Owner),
Quicklink: share.Quicklink,
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if s := share.GetPermissions().GetPermissions(); s != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), true).OCSPermissions()
}
if share.Expiration != nil {
sd.Expiration = timestampToExpiration(share.Expiration)
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
// hide password
if share.PasswordProtected {
sd.ShareWith = "***redacted***"
sd.ShareWithDisplayname = "***redacted***"
}
return sd
}
// LocalUserIDToString transforms a cs3api user id into an ocs data model without domain name
// TODO ocs uses user names ... so an additional lookup is needed. see mapUserIds()
func LocalUserIDToString(userID *userpb.UserId) string {
if userID == nil || userID.OpaqueId == "" {
return ""
}
return userID.OpaqueId
}
// LocalGroupIDToString transforms a cs3api group id into an ocs data model without domain name
func LocalGroupIDToString(groupID *grouppb.GroupId) string {
if groupID == nil || groupID.OpaqueId == "" {
return ""
}
return groupID.OpaqueId
}
// GetUserManager returns a connection to a user share manager
func GetUserManager(manager string, m map[string]map[string]interface{}) (user.Manager, error) {
if f, ok := usermgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for user manager", manager)
}
// GetPublicShareManager returns a connection to a public share manager
func GetPublicShareManager(manager string, m map[string]map[string]interface{}) (publicshare.Manager, error) {
if f, ok := publicsharemgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for public shares manager", manager)
}
// timestamp is assumed to be UTC ... just human readable ...
// FIXME and ambiguous / error prone because there is no time zone ...
func timestampToExpiration(t *types.Timestamp) string {
return time.Unix(int64(t.Seconds), int64(t.Nanos)).UTC().Format("2006-01-02 15:04:05")
}
// ParseTimestamp tries to parse the ocs expiry into a CS3 Timestamp
func ParseTimestamp(timestampString string) (*types.Timestamp, error) {
parsedTime, err := time.Parse("2006-01-02T15:04:05Z0700", timestampString)
if err != nil {
parsedTime, err = time.Parse("2006-01-02", timestampString)
if err == nil {
// the link needs to be valid for the whole day
parsedTime = parsedTime.Add(23*time.Hour + 59*time.Minute + 59*time.Second)
}
}
if err != nil {
return nil, fmt.Errorf("datetime format invalid: %v, %s", timestampString, err.Error())
}
final := parsedTime.UnixNano()
return &types.Timestamp{
Seconds: uint64(final / 1000000000),
Nanos: uint32(final % 1000000000),
}, nil
}
// UserTypeString returns human readable strings for various user types
func UserTypeString(userType userpb.UserType) string {
switch userType {
case userpb.UserType_USER_TYPE_PRIMARY:
return "primary"
case userpb.UserType_USER_TYPE_SECONDARY:
return "secondary"
case userpb.UserType_USER_TYPE_SERVICE:
return "service"
case userpb.UserType_USER_TYPE_APPLICATION:
return "application"
case userpb.UserType_USER_TYPE_GUEST:
return "guest"
case userpb.UserType_USER_TYPE_FEDERATED:
return "federated"
case userpb.UserType_USER_TYPE_LIGHTWEIGHT:
return "lightweight"
}
return "invalid"
}
| sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
UIDOwner: LocalUserIDToString(share.GetCreator()),
UIDFileOwner: LocalUserIDToString(share.GetOwner()),
}
if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER {
sd.ShareType = ShareTypeUser
sd.ShareWith = LocalUserIDToString(share.Grantee.GetUserId())
shareType := share.GetGrantee().GetUserId().GetType()
if shareType == userpb.UserType_USER_TYPE_LIGHTWEIGHT || shareType == userpb.UserType_USER_TYPE_GUEST {
sd.ShareWithUserType = ShareWithUserTypeGuest
} else {
sd.ShareWithUserType = ShareWithUserTypeUser
}
} else if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP {
sd.ShareType = ShareTypeGroup
sd.ShareWith = LocalGroupIDToString(share.Grantee.GetGroupId())
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if share.GetPermissions().GetPermissions() != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), false).OCSPermissions()
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
if share.Expiration != nil {
expiration := time.Unix(int64(share.Expiration.Seconds), int64(share.Expiration.Nanos))
sd.Expiration = expiration.Format(_iso8601)
}
return sd, nil
}
| identifier_body |
main.go | // Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
// Package conversions sits between CS3 type definitions and OCS API Responses
package conversions
import (
"context"
"fmt"
"net/http"
"path"
"time"
"github.com/cs3org/reva/v2/pkg/publicshare"
"github.com/cs3org/reva/v2/pkg/user"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
publicsharemgr "github.com/cs3org/reva/v2/pkg/publicshare/manager/registry"
usermgr "github.com/cs3org/reva/v2/pkg/user/manager/registry"
)
const (
// ShareTypeUser refers to user shares
ShareTypeUser ShareType = 0
// ShareTypePublicLink refers to public link shares
ShareTypePublicLink ShareType = 3
// ShareTypeGroup represents a group share
ShareTypeGroup ShareType = 1
// ShareTypeFederatedCloudShare represents a federated share
ShareTypeFederatedCloudShare ShareType = 6
// ShareTypeSpaceMembershipUser represents an action regarding user type space members
ShareTypeSpaceMembershipUser ShareType = 7
// ShareTypeSpaceMembershipGroup represents an action regarding group type space members
ShareTypeSpaceMembershipGroup ShareType = 8
// ShareWithUserTypeUser represents a normal user
ShareWithUserTypeUser ShareWithUserType = 0
// ShareWithUserTypeGuest represents a guest user
ShareWithUserTypeGuest ShareWithUserType = 1
// The datetime format of ISO8601
_iso8601 = "2006-01-02T15:04:05Z0700"
)
// ResourceType indicates the OCS type of the resource
type ResourceType int
func (rt ResourceType) String() (s string) {
switch rt {
case 0:
s = "invalid"
case 1:
s = "file"
case 2:
s = "folder"
case 3:
s = "reference"
default:
s = "invalid"
}
return
}
// ShareType denotes a type of share
type ShareType int
// ShareWithUserType denotes a type of user
type ShareWithUserType int
// ShareData represents https://doc.owncloud.com/server/developer_manual/core/ocs-share-api.html#response-attributes-1
type ShareData struct {
// TODO int?
ID string `json:"id" xml:"id"`
// The share’s type
ShareType ShareType `json:"share_type" xml:"share_type"`
// The username of the owner of the share.
UIDOwner string `json:"uid_owner" xml:"uid_owner"`
// The display name of the owner of the share.
DisplaynameOwner string `json:"displayname_owner" xml:"displayname_owner"`
// Additional info to identify the share owner, eg. the email or username
AdditionalInfoOwner string `json:"additional_info_owner" xml:"additional_info_owner"`
// The permission attribute set on the file.
// TODO(jfd) change the default to read only
Permissions Permissions `json:"permissions" xml:"permissions"`
// The UNIX timestamp when the share was created.
STime uint64 `json:"stime" xml:"stime"`
// ?
Parent string `json:"parent" xml:"parent"`
// The UNIX timestamp when the share expires.
Expiration string `json:"expiration" xml:"expiration"`
// The public link to the item being shared.
Token string `json:"token" xml:"token"`
// The unique id of the user that owns the file or folder being shared.
UIDFileOwner string `json:"uid_file_owner" xml:"uid_file_owner"`
// The display name of the user that owns the file or folder being shared.
DisplaynameFileOwner string `json:"displayname_file_owner" xml:"displayname_file_owner"`
// Additional info to identify the file owner, eg. the email or username
AdditionalInfoFileOwner string `json:"additional_info_file_owner" xml:"additional_info_file_owner"`
// share state, 0 = accepted, 1 = pending, 2 = declined
State int `json:"state" xml:"state"`
// The path to the shared file or folder.
Path string `json:"path" xml:"path"`
// The type of the object being shared. This can be one of 'file' or 'folder'.
ItemType string `json:"item_type" xml:"item_type"`
// The RFC2045-compliant mimetype of the file.
MimeType string `json:"mimetype" xml:"mimetype"`
// The space ID of the original file location
SpaceID string `json:"space_id" xml:"space_id"`
// The space alias of the original file location
SpaceAlias string `json:"space_alias" xml:"space_alias"`
StorageID string `json:"storage_id" xml:"storage_id"`
Storage uint64 `json:"storage" xml:"storage"`
// The unique node id of the item being shared.
ItemSource string `json:"item_source" xml:"item_source"`
// The unique node id of the item being shared. For legacy reasons item_source and file_source attributes have the same value.
FileSource string `json:"file_source" xml:"file_source"`
// The unique node id of the parent node of the item being shared.
FileParent string `json:"file_parent" xml:"file_parent"`
// The basename of the shared file.
FileTarget string `json:"file_target" xml:"file_target"`
// The uid of the share recipient. This is either
// - a GID (group id) if it is being shared with a group or
// - a UID (user id) if the share is shared with a user.
// - a password for public links
ShareWith string `json:"share_with,omitempty" xml:"share_with,omitempty"`
// The type of user
// - 0 = normal user
// - 1 = guest account
ShareWithUserType ShareWithUserType `json:"share_with_user_type" xml:"share_with_user_type"`
// The display name of the share recipient
ShareWithDisplayname string `json:"share_with_displayname,omitempty" xml:"share_with_displayname,omitempty"`
// Additional info to identify the share recipient, eg. the email or username
ShareWithAdditionalInfo string `json:"share_with_additional_info" xml:"share_with_additional_info"`
// Whether the recipient was notified, by mail, about the share being shared with them.
MailSend int `json:"mail_send" xml:"mail_send"`
// Name of the public share
Name string `json:"name" xml:"name"`
// URL of the public share
URL string `json:"url,omitempty" xml:"url,omitempty"`
// Attributes associated
Attributes string `json:"attributes,omitempty" xml:"attributes,omitempty"`
// Quicklink indicates if the link is the quicklink
Quicklink bool `json:"quicklink,omitempty" xml:"quicklink,omitempty"`
// PasswordProtected indicates whether a public share is password protected
// PasswordProtected bool `json:"password_protected,omitempty" xml:"password_protected,omitempty"`
}
// ShareeData holds share recipient search results
type ShareeData struct {
Exact *ExactMatchesData `json:"exact" xml:"exact"`
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// TokenInfo holds token information
type TokenInfo struct {
// for all callers
Token string `json:"token" xml:"token"`
LinkURL string `json:"link_url" xml:"link_url"`
PasswordProtected bool `json:"password_protected" xml:"password_protected"`
Aliaslink bool `json:"alias_link" xml:"alias_link"`
// if not password protected
ID string `json:"id" xml:"id"`
StorageID string `json:"storage_id" xml:"storage_id"`
SpaceID string `json:"space_id" xml:"space_id"`
OpaqueID string `json:"opaque_id" xml:"opaque_id"`
Path string `json:"path" xml:"path"`
// if native access
SpacePath string `json:"space_path" xml:"space_path"`
SpaceAlias string `json:"space_alias" xml:"space_alias"`
SpaceURL string `json:"space_url" xml:"space_url"`
SpaceType string `json:"space_type" xml:"space_type"`
}
// ExactMatchesData hold exact matches
type ExactMatchesData struct {
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// MatchData describes a single match
type MatchData struct {
Label string `json:"label" xml:"label,omitempty"`
Value *MatchValueData `json:"value" xml:"value"`
}
// MatchValueData holds the type and actual value
type MatchValueData struct {
ShareType int `json:"shareType" xml:"shareType"`
ShareWith string `json:"shareWith" xml:"shareWith"`
ShareWithAdditionalInfo string `json:"shareWithAdditionalInfo" xml:"shareWithAdditionalInfo,omitempty"`
UserType int `json:"userType" xml:"userType"`
}
// CS3Share2ShareData converts a cs3api user share into shareData data model
func CS3Share2ShareData(ctx context.Context, share *collaboration.Share) (*ShareData, error) {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
UIDOwner: LocalUserIDToString(share.GetCreator()),
UIDFileOwner: LocalUserIDToString(share.GetOwner()),
}
if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER {
sd.ShareType = ShareTypeUser
sd.ShareWith = LocalUserIDToString(share.Grantee.GetUserId())
shareType := share.GetGrantee().GetUserId().GetType()
if shareType == userpb.UserType_USER_TYPE_LIGHTWEIGHT || shareType == userpb.UserType_USER_TYPE_GUEST {
sd.ShareWithUserType = ShareWithUserTypeGuest
} else {
sd.ShareWithUserType = ShareWithUserTypeUser
}
} else if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP {
sd.ShareType = ShareTypeGroup
sd.ShareWith = LocalGroupIDToString(share.Grantee.GetGroupId())
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if share.GetPermissions().GetPermissions() != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), false).OCSPermissions()
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
if share.Expiration != nil {
expiration := time.Unix(int64(share.Expiration.Seconds), int64(share.Expiration.Nanos))
sd.Expiration = expiration.Format(_iso8601)
}
return sd, nil
}
// PublicShare2ShareData converts a cs3api public share into shareData data model
func PublicShare2ShareData(share *link.PublicShare, r *http.Request, publicURL string) *ShareData {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
ShareType: ShareTypePublicLink,
Token: share.Token,
Name: share.DisplayName,
MailSend: 0,
URL: publicURL + path.Join("/", "s/"+share.Token),
UIDOwner: LocalUserIDToString(share.Creator),
UIDFileOwner: LocalUserIDToString(share.Owner),
Quicklink: share.Quicklink,
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if s := share.GetPermissions().GetPermissions(); s != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), true).OCSPermissions()
}
if share.Expiration != nil {
sd.Expiration = timestampToExpiration(share.Expiration)
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
// hide password
if share.PasswordProtected {
sd.ShareWith = "***redacted***"
sd.ShareWithDisplayname = "***redacted***"
}
return sd
}
// LocalUserIDToString transforms a cs3api user id into an ocs data model without domain name
// TODO ocs uses user names ... so an additional lookup is needed. see mapUserIds()
func LocalUserIDToString(userID *userpb.UserId) string {
if userID == nil || userID.OpaqueId == "" {
return ""
}
return userID.OpaqueId
}
// LocalGroupIDToString transforms a cs3api group id into an ocs data model without domain name
func LocalGroupIDToString(groupID *grouppb.GroupId) string {
if groupID == nil || groupID.OpaqueId == "" {
return ""
}
return groupID.OpaqueId
}
// GetUserManager returns a connection to a user share manager
func Ge | anager string, m map[string]map[string]interface{}) (user.Manager, error) {
if f, ok := usermgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for user manager", manager)
}
// GetPublicShareManager returns a connection to a public share manager
func GetPublicShareManager(manager string, m map[string]map[string]interface{}) (publicshare.Manager, error) {
if f, ok := publicsharemgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for public shares manager", manager)
}
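// A minimal usage sketch (added for clarity, not part of the original file): both
// helpers above follow the same registry pattern, looking up a driver constructor
// by name and handing it only that driver's config section. The driver name and
// config keys below are illustrative examples, not a recommended configuration.
func examplePublicShareManager() (publicshare.Manager, error) {
	config := map[string]map[string]interface{}{
		"json": {"file": "/var/tmp/reva/publicshares.json"},
	}
	return GetPublicShareManager("json", config)
}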
// timestamp is assumed to be UTC ... just human readable ...
// FIXME and ambiguous / error prone because there is no time zone ...
func timestampToExpiration(t *types.Timestamp) string {
return time.Unix(int64(t.Seconds), int64(t.Nanos)).UTC().Format("2006-01-02 15:04:05")
}
// ParseTimestamp tries to parse the ocs expiry into a CS3 Timestamp
func ParseTimestamp(timestampString string) (*types.Timestamp, error) {
parsedTime, err := time.Parse("2006-01-02T15:04:05Z0700", timestampString)
if err != nil {
parsedTime, err = time.Parse("2006-01-02", timestampString)
if err == nil {
// the link needs to be valid for the whole day
parsedTime = parsedTime.Add(23*time.Hour + 59*time.Minute + 59*time.Second)
}
}
if err != nil {
return nil, fmt.Errorf("datetime format invalid: %v, %s", timestampString, err.Error())
}
final := parsedTime.UnixNano()
return &types.Timestamp{
Seconds: uint64(final / 1000000000),
Nanos: uint32(final % 1000000000),
}, nil
}
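// A minimal usage sketch (added for clarity, not part of the original file): the
// helper name and sample values are made up. A full ISO8601 string keeps its time
// of day, while a bare date is pushed to 23:59:59 so the link stays valid for the
// whole day.
func exampleParseTimestamp() {
	if ts, err := ParseTimestamp("2021-03-09T14:30:45Z"); err == nil {
		fmt.Println(ts.Seconds) // 1615300245 -> 2021-03-09 14:30:45 UTC
	}
	if ts, err := ParseTimestamp("2021-03-09"); err == nil {
		fmt.Println(ts.Seconds) // 1615334399 -> 2021-03-09 23:59:59 UTC
	}
}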
// UserTypeString returns human readable strings for various user types
func UserTypeString(userType userpb.UserType) string {
switch userType {
case userpb.UserType_USER_TYPE_PRIMARY:
return "primary"
case userpb.UserType_USER_TYPE_SECONDARY:
return "secondary"
case userpb.UserType_USER_TYPE_SERVICE:
return "service"
case userpb.UserType_USER_TYPE_APPLICATION:
return "application"
case userpb.UserType_USER_TYPE_GUEST:
return "guest"
case userpb.UserType_USER_TYPE_FEDERATED:
return "federated"
case userpb.UserType_USER_TYPE_LIGHTWEIGHT:
return "lightweight"
}
return "invalid"
}
| tUserManager(m | identifier_name |
main.go | // Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
// Package conversions sits between CS3 type definitions and OCS API Responses
package conversions
import (
"context"
"fmt"
"net/http"
"path"
"time"
"github.com/cs3org/reva/v2/pkg/publicshare"
"github.com/cs3org/reva/v2/pkg/user"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
publicsharemgr "github.com/cs3org/reva/v2/pkg/publicshare/manager/registry"
usermgr "github.com/cs3org/reva/v2/pkg/user/manager/registry"
)
const (
// ShareTypeUser refers to user shares
ShareTypeUser ShareType = 0
// ShareTypePublicLink refers to public link shares
ShareTypePublicLink ShareType = 3
// ShareTypeGroup represents a group share
ShareTypeGroup ShareType = 1
// ShareTypeFederatedCloudShare represents a federated share
ShareTypeFederatedCloudShare ShareType = 6
// ShareTypeSpaceMembershipUser represents an action regarding user type space members
ShareTypeSpaceMembershipUser ShareType = 7
// ShareTypeSpaceMembershipGroup represents an action regarding group type space members
ShareTypeSpaceMembershipGroup ShareType = 8
// ShareWithUserTypeUser represents a normal user
ShareWithUserTypeUser ShareWithUserType = 0
// ShareWithUserTypeGuest represents a guest user
ShareWithUserTypeGuest ShareWithUserType = 1
// The datetime format of ISO8601
_iso8601 = "2006-01-02T15:04:05Z0700"
)
// ResourceType indicates the OCS type of the resource
type ResourceType int
func (rt ResourceType) String() (s string) {
switch rt {
case 0:
s = "invalid"
case 1:
s = "file"
case 2:
s = "folder"
case 3:
s = "reference"
default:
s = "invalid"
}
return
}
// ShareType denotes a type of share
type ShareType int
// ShareWithUserType denotes a type of user
type ShareWithUserType int
// ShareData represents https://doc.owncloud.com/server/developer_manual/core/ocs-share-api.html#response-attributes-1
type ShareData struct {
// TODO int?
ID string `json:"id" xml:"id"`
// The share’s type
ShareType ShareType `json:"share_type" xml:"share_type"`
// The username of the owner of the share.
UIDOwner string `json:"uid_owner" xml:"uid_owner"`
// The display name of the owner of the share.
DisplaynameOwner string `json:"displayname_owner" xml:"displayname_owner"`
// Additional info to identify the share owner, eg. the email or username
AdditionalInfoOwner string `json:"additional_info_owner" xml:"additional_info_owner"`
// The permission attribute set on the file.
// TODO(jfd) change the default to read only
Permissions Permissions `json:"permissions" xml:"permissions"`
// The UNIX timestamp when the share was created.
STime uint64 `json:"stime" xml:"stime"`
// ?
Parent string `json:"parent" xml:"parent"`
// The UNIX timestamp when the share expires.
Expiration string `json:"expiration" xml:"expiration"`
// The public link to the item being shared.
Token string `json:"token" xml:"token"`
// The unique id of the user that owns the file or folder being shared.
UIDFileOwner string `json:"uid_file_owner" xml:"uid_file_owner"`
// The display name of the user that owns the file or folder being shared.
DisplaynameFileOwner string `json:"displayname_file_owner" xml:"displayname_file_owner"`
// Additional info to identify the file owner, eg. the email or username
AdditionalInfoFileOwner string `json:"additional_info_file_owner" xml:"additional_info_file_owner"`
// share state, 0 = accepted, 1 = pending, 2 = declined
State int `json:"state" xml:"state"`
// The path to the shared file or folder.
Path string `json:"path" xml:"path"`
// The type of the object being shared. This can be one of 'file' or 'folder'.
ItemType string `json:"item_type" xml:"item_type"`
// The RFC2045-compliant mimetype of the file.
MimeType string `json:"mimetype" xml:"mimetype"`
// The space ID of the original file location
SpaceID string `json:"space_id" xml:"space_id"`
// The space alias of the original file location
SpaceAlias string `json:"space_alias" xml:"space_alias"`
StorageID string `json:"storage_id" xml:"storage_id"`
Storage uint64 `json:"storage" xml:"storage"`
// The unique node id of the item being shared.
ItemSource string `json:"item_source" xml:"item_source"`
// The unique node id of the item being shared. For legacy reasons item_source and file_source attributes have the same value.
FileSource string `json:"file_source" xml:"file_source"`
// The unique node id of the parent node of the item being shared.
FileParent string `json:"file_parent" xml:"file_parent"`
// The basename of the shared file.
FileTarget string `json:"file_target" xml:"file_target"`
// The uid of the share recipient. This is either
// - a GID (group id) if it is being shared with a group or
// - a UID (user id) if the share is shared with a user.
// - a password for public links
ShareWith string `json:"share_with,omitempty" xml:"share_with,omitempty"`
// The type of user
// - 0 = normal user
// - 1 = guest account
ShareWithUserType ShareWithUserType `json:"share_with_user_type" xml:"share_with_user_type"`
// The display name of the share recipient
ShareWithDisplayname string `json:"share_with_displayname,omitempty" xml:"share_with_displayname,omitempty"`
// Additional info to identify the share recipient, eg. the email or username
ShareWithAdditionalInfo string `json:"share_with_additional_info" xml:"share_with_additional_info"`
// Whether the recipient was notified, by mail, about the share being shared with them.
MailSend int `json:"mail_send" xml:"mail_send"`
// Name of the public share
Name string `json:"name" xml:"name"`
// URL of the public share
URL string `json:"url,omitempty" xml:"url,omitempty"`
// Attributes associated
Attributes string `json:"attributes,omitempty" xml:"attributes,omitempty"`
// Quicklink indicates if the link is the quicklink
Quicklink bool `json:"quicklink,omitempty" xml:"quicklink,omitempty"`
// PasswordProtected represents a public share is password protected
// PasswordProtected bool `json:"password_protected,omitempty" xml:"password_protected,omitempty"`
}
// ShareeData holds share recipient search results
type ShareeData struct {
Exact *ExactMatchesData `json:"exact" xml:"exact"`
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// TokenInfo holds token information
type TokenInfo struct {
// for all callers
Token string `json:"token" xml:"token"`
LinkURL string `json:"link_url" xml:"link_url"`
PasswordProtected bool `json:"password_protected" xml:"password_protected"`
Aliaslink bool `json:"alias_link" xml:"alias_link"`
// if not password protected
ID string `json:"id" xml:"id"`
StorageID string `json:"storage_id" xml:"storage_id"`
SpaceID string `json:"space_id" xml:"space_id"`
OpaqueID string `json:"opaque_id" xml:"opaque_id"`
Path string `json:"path" xml:"path"`
// if native access
SpacePath string `json:"space_path" xml:"space_path"`
SpaceAlias string `json:"space_alias" xml:"space_alias"`
SpaceURL string `json:"space_url" xml:"space_url"`
SpaceType string `json:"space_type" xml:"space_type"`
}
// ExactMatchesData hold exact matches
type ExactMatchesData struct {
Users []*MatchData `json:"users" xml:"users>element"`
Groups []*MatchData `json:"groups" xml:"groups>element"`
Remotes []*MatchData `json:"remotes" xml:"remotes>element"`
}
// MatchData describes a single match
type MatchData struct {
Label string `json:"label" xml:"label,omitempty"`
Value *MatchValueData `json:"value" xml:"value"`
}
// MatchValueData holds the type and actual value
type MatchValueData struct {
ShareType int `json:"shareType" xml:"shareType"`
ShareWith string `json:"shareWith" xml:"shareWith"`
ShareWithAdditionalInfo string `json:"shareWithAdditionalInfo" xml:"shareWithAdditionalInfo,omitempty"`
UserType int `json:"userType" xml:"userType"`
}
// CS3Share2ShareData converts a cs3api user share into shareData data model
func CS3Share2ShareData(ctx context.Context, share *collaboration.Share) (*ShareData, error) {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
UIDOwner: LocalUserIDToString(share.GetCreator()),
UIDFileOwner: LocalUserIDToString(share.GetOwner()),
}
if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER {
sd.ShareType = ShareTypeUser
sd.ShareWith = LocalUserIDToString(share.Grantee.GetUserId())
shareType := share.GetGrantee().GetUserId().GetType()
if shareType == userpb.UserType_USER_TYPE_LIGHTWEIGHT || shareType == userpb.UserType_USER_TYPE_GUEST {
sd.ShareWithUserType = ShareWithUserTypeGuest
} else {
sd.ShareWithUserType = ShareWithUserTypeUser
}
} else if share.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP {
sd.ShareType = ShareTypeGroup
sd.ShareWith = LocalGroupIDToString(share.Grantee.GetGroupId())
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if share.GetPermissions().GetPermissions() != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), false).OCSPermissions()
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
if share.Expiration != nil {
expiration := time.Unix(int64(share.Expiration.Seconds), int64(share.Expiration.Nanos))
sd.Expiration = expiration.Format(_iso8601)
}
return sd, nil
}
// PublicShare2ShareData converts a cs3api public share into shareData data model
func PublicShare2ShareData(share *link.PublicShare, r *http.Request, publicURL string) *ShareData {
sd := &ShareData{
// share.permissions are mapped below
// Displaynames are added later
ShareType: ShareTypePublicLink,
Token: share.Token,
Name: share.DisplayName,
MailSend: 0,
URL: publicURL + path.Join("/", "s/"+share.Token),
UIDOwner: LocalUserIDToString(share.Creator),
UIDFileOwner: LocalUserIDToString(share.Owner),
Quicklink: share.Quicklink,
}
if share.Id != nil {
sd.ID = share.Id.OpaqueId
}
if s := share.GetPermissions().GetPermissions(); s != nil {
sd.Permissions = RoleFromResourcePermissions(share.GetPermissions().GetPermissions(), true).OCSPermissions()
}
if share.Expiration != nil {
sd.Expiration = timestampToExpiration(share.Expiration)
}
if share.Ctime != nil {
sd.STime = share.Ctime.Seconds // TODO CS3 api birth time = btime
}
// hide password
if share.PasswordProtected {
| return sd
}
// LocalUserIDToString transforms a cs3api user id into an ocs data model without domain name
// TODO ocs uses user names ... so an additional lookup is needed. see mapUserIds()
func LocalUserIDToString(userID *userpb.UserId) string {
if userID == nil || userID.OpaqueId == "" {
return ""
}
return userID.OpaqueId
}
// LocalGroupIDToString transforms a cs3api group id into an ocs data model without domain name
func LocalGroupIDToString(groupID *grouppb.GroupId) string {
if groupID == nil || groupID.OpaqueId == "" {
return ""
}
return groupID.OpaqueId
}
// GetUserManager returns a connection to a user share manager
func GetUserManager(manager string, m map[string]map[string]interface{}) (user.Manager, error) {
if f, ok := usermgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for user manager", manager)
}
// GetPublicShareManager returns a connection to a public share manager
func GetPublicShareManager(manager string, m map[string]map[string]interface{}) (publicshare.Manager, error) {
if f, ok := publicsharemgr.NewFuncs[manager]; ok {
return f(m[manager])
}
return nil, fmt.Errorf("driver %s not found for public shares manager", manager)
}
// timestamp is assumed to be UTC ... just human readable ...
// FIXME and ambiguous / error prone because there is no time zone ...
func timestampToExpiration(t *types.Timestamp) string {
return time.Unix(int64(t.Seconds), int64(t.Nanos)).UTC().Format("2006-01-02 15:04:05")
}
// ParseTimestamp tries to parse the ocs expiry into a CS3 Timestamp
func ParseTimestamp(timestampString string) (*types.Timestamp, error) {
parsedTime, err := time.Parse("2006-01-02T15:04:05Z0700", timestampString)
if err != nil {
parsedTime, err = time.Parse("2006-01-02", timestampString)
if err == nil {
// the link needs to be valid for the whole day
parsedTime = parsedTime.Add(23*time.Hour + 59*time.Minute + 59*time.Second)
}
}
if err != nil {
return nil, fmt.Errorf("datetime format invalid: %v, %s", timestampString, err.Error())
}
final := parsedTime.UnixNano()
return &types.Timestamp{
Seconds: uint64(final / 1000000000),
Nanos: uint32(final % 1000000000),
}, nil
}
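// As a quick illustration of the two layouts accepted above (a hypothetical usage
// sketch, not part of this package's API): a full ISO 8601 string is taken as-is,
// while a bare date is pushed to 23:59:59 of that day so the link stays valid for
// the whole day.
//
//	ts, _ := ParseTimestamp("2021-07-04T10:30:00Z") // expires at 10:30:00 UTC that day
//	ts, _ = ParseTimestamp("2021-07-04")            // expires at 2021-07-04 23:59:59 UTC
//	_ = ts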
// UserTypeString returns human readable strings for various user types
func UserTypeString(userType userpb.UserType) string {
switch userType {
case userpb.UserType_USER_TYPE_PRIMARY:
return "primary"
case userpb.UserType_USER_TYPE_SECONDARY:
return "secondary"
case userpb.UserType_USER_TYPE_SERVICE:
return "service"
case userpb.UserType_USER_TYPE_APPLICATION:
return "application"
case userpb.UserType_USER_TYPE_GUEST:
return "guest"
case userpb.UserType_USER_TYPE_FEDERATED:
return "federated"
case userpb.UserType_USER_TYPE_LIGHTWEIGHT:
return "lightweight"
}
return "invalid"
}
| sd.ShareWith = "***redacted***"
sd.ShareWithDisplayname = "***redacted***"
}
| conditional_block |
main.rs | extern crate ginseng;
use ginseng::guest::Guest;
use ginseng::guest::Range;
use std::cmp::min;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::vec::Vec;
fn social_welfare(proposed_allocation: &Vec<(&Guest, u64)>) -> u64
{
let mut total_welfare: u64 = 0;
for (guest, allocation) in proposed_allocation
{
total_welfare += guest.mem_unit_price * allocation;
}
total_welfare
}
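// Worked example (hypothetical numbers): for a proposed allocation of
// [(guest_a with mem_unit_price 2, 4 units), (guest_b with mem_unit_price 1, 2 units)]
// the social welfare is 2 * 4 + 1 * 2 = 10.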
// assumes that `guest_list` has already been sorted
// just doles out memory in sorted order such that we
// don't give anyone any more memory than they asked for
// it's still possible that we give people too little memory
// which should be checked after getting the result of this
// function
// I think what I really want to return from this is a vector of allocation amounts
// it'll even take up less space than (reference, allocation) pairs and will be much
// less problematic
fn naive_allocation
(
guest_list: &Vec<&Guest>,
available_memory: u64
)
-> Vec<u64>
{
let mut remaining_memory: u64 = available_memory;
guest_list.iter().map
(
|guest|
{
// if there's no memory left to hand out our job is simple
if remaining_memory == 0
{
0
}
// otherwise get the maximum amount of memory this guest
// wants to pay for and give them that
else
{
// if the last forbidden range goes to infinity we want
//to use the minimum of that forbidden range
let upper_bound =
guest
.forbidden_ranges
.last()
.filter(|range| {range.max == u64::max_value()})
.map(|range| {range.min})
.unwrap_or(u64::max_value());
let mem_to_alloc = min(remaining_memory, upper_bound);
remaining_memory -= mem_to_alloc;
mem_to_alloc
}
}
).collect()
}
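// A minimal usage sketch (guests as constructed in `main` below, already sorted):
// with 6 units available, guest1's unbounded forbidden range starts at 4, so it is
// capped at 4, leaving 2 for guest2. Note that 2 falls inside guest2's forbidden
// range {0, 3} -- exactly the kind of result `invalid_allocations` is meant to catch.
//
//     let caps = naive_allocation(&vec![&guest1, &guest2], 6);
//     assert_eq!(caps, vec![4, 2]);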
// this guy already returns indices instead of references
fn | (proposed_allocations: &Vec<(&Guest, u64)>) -> Vec<(usize, Range)>
{
let mut violations: Vec<(usize, Range)> = Vec::new();
let mut index: usize = 0;
for &(guest, amount) in proposed_allocations.iter()
{
// we want to get the guest's forbidden range with the greatest
// min value that is less than amount. This would probably best
// be done with a binary search, but for now iterative is fine
for range in guest.forbidden_ranges.clone()
{
if range.min < amount && range.max > amount
{
violations.push((index, range));
}
}
index = index + 1;
}
violations
}
fn public_auction_function
(
guest_list: &Vec<&Guest>,
available_memory: u64
)
-> Vec<u64>
{
auction_with_pinned_allocations(guest_list, available_memory, Vec::new())
}
// returns the list of allocations of the provided memory to the list of
// provided guests which results in the maximal social welfare possible without
// changing the provided pinned allocations
fn auction_with_pinned_allocations
(
guest_list: &Vec<&Guest>,
available_memory: u64,
pinned_allocations: &HashMap<u64, u64> // maps guest index -> pinned allocation amount
)
-> Vec<u64>
{
// so I think the idea is we filter the pinned allocations out in an
// enumerate into a filter into a map that discards the index into a
// collect
// then we put them back into the result of the recursive case with an
// enumerate into a flatmap that returns element +
// contiguous-successive-elements. So we just need a mutable `index_correction`
// variable that gets incremented every time we insert an element so we
// can keep track of each elements place in the new list so we can map positions in
// the enumerate to positions in the original list
let my_copy =
guest_list
.iter()
.enumerate()
.filter(|(index, &guest)| !pinned_allocations.contains_key(index))
.map(|(index, &guest)| guest.clone())
.collect();
// let mut my_copy = guest_list.clone();
// TODO: resolve `invalid` and merge the pinned allocations back in before returning
let invalid = invalid_allocations(my_copy.iter().zip(naive_allocation(my_copy, available_memory)));
}
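// A sketch of the re-insertion step described in the comments above, under the
// assumption that `pinned_allocations` is keyed by each guest's index in the original
// list (hence the `usize` keys here, unlike the `u64` keys in the signature above).
// The helper name and signature are illustrative only -- nothing calls it yet.
fn merge_pinned_allocations
(
unpinned: Vec<u64>,
pinned_allocations: &HashMap<usize, u64>,
total_guests: usize
)
-> Vec<u64>
{
let mut unpinned_iter = unpinned.into_iter();
(0..total_guests).map
(
|index|
{
match pinned_allocations.get(&index)
{
// pinned guests keep their fixed amount
Some(&amount) => amount,
// everyone else takes the next computed allocation, in order
None => unpinned_iter.next().unwrap_or(0),
}
}
).collect()
}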
// Given the applicable range for an invalid allocation and the amount of available_memory
// this function returns the one to two allocation options available to replace the
// invalid allocation
fn two_options(applicable_range: Range, available_memory: u64) -> Vec<u64>
{
match applicable_range.max.cmp(&available_memory)
{
Ordering::Less | Ordering::Equal =>
vec![applicable_range.min, applicable_range.max],
Ordering::Greater => vec![applicable_range.min],
}
}
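// For example (hypothetical numbers): with a forbidden range {min: 3, max: 8} and
// 10 units available, the options are [3, 8]; with only 6 units available the max
// cannot be afforded, so the only option is [3].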
//I think I want an immutable list slice that I just progressively slice more off of
// or present different iterators of maybe?
// the payment rule should be implemented in a different function
// so that we can use this function recursively
// I think what we should really return from this is a list of (index, allocation) pairs
// and take as input a list of indices of invalid allocations (we can have a helper function
//without that argument that passes an empty list into this function)
// fn auction<'a>(guest_list: &'a mut Vec<&'a Guest>, available_memory: u64) -> Vec<(&'a Guest, u64)>
// {
// if guest_list.is_empty()
// {
// return Vec::new();
// }
//
// // so first we just try giving as much as
// // possible of the remaining_memory
// // to each guest
// let applicable_range: Range;
// let invalid_guest_number;
// {
// //then we first attempt a naive allocation based solely on the sorting
// //and any upper bounds the guests may have set
// // naive_allocation should maintain the same ordering of guests as guest_list
// // so indices of proposed_allocation can be used to index into guest_list
// let proposed_allocations = naive_allocation(guest_list, available_memory);
//
// let mut invalid = invalid_allocations(&proposed_allocations);
//
// // yup, commenting out this early return got rid of two of the 3 compile errors
// // and the third is pretty obviously a good thing to error out on because it let me know
// //I was adding two guests for case two instead of just one
// // if invalid.is_empty()
// // {
// // return proposed_allocations;
// // }
//
// // so with this first attempt we want to first check and see if we're
// // assigning someone an amount of memory in one of their forbidden ranges
// // and for each case in which someone was allocated an invalid amount, we
// // need to try two cases.
// // so we just need to try removing the first invalid allocation, which means
// // we can just mutate the guest_list instead of cloning every time
// let (_invalid_guest_number, _applicable_range) = invalid.remove(0);
// invalid_guest_number = _invalid_guest_number;
// applicable_range = _applicable_range;
//
// }
// //so we remove the first invalid allocation
// let badly_served_guest = guest_list.remove(invalid_guest_number);
//
// // and then we try the two cases with that guest
//
// // So I think the idea is that we try the minimum and maximum of the
// // forbidden range that the invalid value fell into
//
// //case one is no more than the minimum of the forbidden range
// let allocation_amount_one = applicable_range.min;
//
// let mut case_one_proposal = auction(guest_list, available_memory - allocation_amount_one);
//
// case_one_proposal.push((badly_served_guest, allocation_amount_one));
//
// let case_one_welfare = social_welfare(&case_one_proposal);
//
// //case two is at least as much as the maximum of the forbidden range
// let allocation_amount_two = applicable_range.max;
//
// let (case_two_welfare, case_two_proposal) =
// if allocation_amount_two <= available_memory
// {
// let mut inner_case_two_proposal =
// auction(guest_list, available_memory - allocation_amount_two);
//
// inner_case_two_proposal.push((badly_served_guest, allocation_amount_two));
//
// (social_welfare(&inner_case_two_proposal), inner_case_two_proposal)
// }
// else
// {
// (0, Vec::new())
// };
//
//
//
// //return the one with greater welfare, or if equal, the one that allocates less memory
// match case_one_welfare.cmp(&case_two_welfare)
// {
// Ordering::Less => case_two_proposal,
//
// Ordering::Greater => case_one_proposal,
//
// Ordering::Equal => case_one_proposal,
// }
// }
// fn registerGuest(baseMemory: i64)
// {
//
// }
// fn makeBid(mem_unit_price: f64, guest: Guest)
// {
//
// }
fn main()
{
let guest1 =
Guest
{
mem_unit_price: 2,
current_holdings: 1,
forbidden_ranges:
vec!
[
Range{min: 0, max: 3},
Range{min: 4, max: u64::max_value()}
],
base_memory: 10
};
let guest2 =
Guest
{
mem_unit_price: 1,
current_holdings: 1,
forbidden_ranges:
vec!
[
Range{min: 0, max: 3},
Range{min: 5, max: u64::max_value()}
],
base_memory: 10
};
let mut guest_list = vec![&guest1, &guest2];
guest_list.sort_unstable();
{
for guest in &guest_list
{
println!("{:?}", guest);
}
}
{
let naive = naive_allocation(&guest_list, 6);
println!("The naive allocation is: ", );
{
for (ref guest, allocated) in &naive
{
println!("{:?} gets {:?}", guest, allocated);
}
}
println!("it has a social welfare of {:?}", social_welfare(&naive));
}
let final_allocation = auction(&mut guest_list, 6);
println!("The final allocation is: ", );
{
for (ref guest, allocated) in &final_allocation
{
println!("{:?} gets {:?}", guest, allocated);
}
}
println!("it has a social welfare of {:?}", social_welfare(&final_allocation));
println!("Hello, world!");
}
| invalid_allocations | identifier_name |
main.rs | extern crate ginseng;
use ginseng::guest::Guest;
use ginseng::guest::Range;
use std::cmp::min;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::vec::Vec;
fn social_welfare(proposed_allocation: &Vec<(&Guest, u64)>) -> u64
{
let mut total_welfare: u64 = 0;
for (guest, allocation) in proposed_allocation
{
total_welfare += guest.mem_unit_price * allocation;
}
total_welfare
}
// assumes that `guest_list` has already been sorted
// just doles out memory in sorted order such that we
// don't give anyone any more memory than they asked for
// it's still possible that we give people too little memory
// which should be checked after getting the result of this
// function
// I think what I really want to return from this is a vector of allocation amounts
// it'll even take up less space than (reference, allocation) pairs and will be much
// less problematic
fn naive_allocation
(
guest_list: &Vec<&Guest>,
available_memory: u64
)
-> Vec<u64>
|
// this guy already returns indices instead of references
fn invalid_allocations(proposed_allocations: &Vec<(&Guest, u64)>) -> Vec<(usize, Range)>
{
let mut violations: Vec<(usize, Range)> = Vec::new();
let mut index: usize = 0;
for &(guest, amount) in proposed_allocations.iter()
{
// we want to get the guest's forbidden range with the greatest
// min value that is less than amount. This would probably best
// be done with a binary search, but for now iterative is fine
for range in guest.forbidden_ranges.clone()
{
if range.min < amount && range.max > amount
{
violations.push((index, range));
}
}
index = index + 1;
}
violations
}
fn public_auction_function
(
guest_list: &Vec<&Guest>,
available_memory: u64
)
-> Vec<u64>
{
auction_with_pinned_allocations(guest_list, available_memory, Vec::new())
}
// returns the list of allocations of the provided memory to the list of
// provided guests which results in the maximal social welfare possible without
// changing the provided pinned allocations
fn auction_with_pinned_allocations
(
guest_list: &Vec<&Guest>,
available_memory: u64,
pinned_allocations: &HashMap<u64, u64> //list of (index, allocation) pairs
)
-> Vec<u64>
{
// so I think the idea is we filter the pinned allocations out in an
// enumerate into a filter into a map that discards the index into a
// collect
// then we put them back into the result of the recursive case with an
// enumerate into a flatmap that returns element +
// contiguous-succesive-elements. So we just need a mutable `index_correction`
// variable that gets incremented every time we insert an element so we
// can keep track of each elements place in the new list so we can map positions in
// the enumerate to positions in the original list
let my_copy =
guest_list
.iter()
.enumerate()
.filter(|(index, &guest)| !pinned_allocations.contains_key(index))
.map(|(index, &guest)| guest.clone())
.collect();
// let mut my_copy = guest_list.clone();
// TODO: resolve `invalid` and merge the pinned allocations back in before returning
let invalid = invalid_allocations(my_copy.iter().zip(naive_allocation(my_copy, available_memory)));
}
// Given the applicable range for an invalid allocation and the amount of available_memory
// this function returns the one to two allocation options available to replace the
// invalid allocation
fn two_options(applicable_range: Range, available_memory: u64) -> Vec<u64>
{
match applicable_range.max.cmp(&available_memory)
{
Ordering::Less | Ordering::Equal =>
vec![applicable_range.min, applicable_range.max],
Ordering::Greater => vec![applicable_range.min],
}
}
//I think I want an immutable list slice that I jut progressively slice more off of
// or present different iterators of maybe?
// the payment rule should be implemented in a different function
// so that we can use this function recursively
// I think what we should really return from this is a list of (index, allocation) pairs
// and take as input a list of inidices of invalid allocations (we can have a helper function
//without that argument that passes an empty list into this function)
// fn auction<'a>(guest_list: &'a mut Vec<&'a Guest>, available_memory: u64) -> Vec<(&'a Guest, u64)>
// {
// if guest_list.is_empty()
// {
// return Vec::new();
// }
//
// // so first we try just try giving as much as
// // possible of the remaining_memory
// // to each guest
// let applicable_range: Range;
// let invalid_guest_number;
// {
// //then we first attempt a naive allocation based solely on the sorting
// //and any upper bounds the guests may have set
// // naive_allocation should maintain the same ordering of geusts as guest_list
// // so indices of proposed_allocation can be used to index into guest_list
// let proposed_allocations = naive_allocation(guest_list, available_memory);
//
// let mut invalid = invalid_allocations(&proposed_allocations);
//
// // yup, commenting out this early return got rid of two of the 3 compile errors
// // and the third is pretty obviously a good thing to error out on because it let me know
// //I was adding two guests for case two instead of just one
// // if invalid.is_empty()
// // {
// // return proposed_allocations;
// // }
//
// // so with this first attempt we want to first check and see if we're
// // assigning someone an amount of memory in one of their forbidden ranges
// // and for each case in which someone was allocated an invalid amount, we
// // need to try two cases.
// // so we just need to try removing the first invalid allocation, which means
// // we can just mutate the guest_list instead of cloning every time
// let (_invalid_guest_number, _applicable_range) = invalid.remove(0);
// invalid_guest_number = _invalid_guest_number;
// applicable_range = _applicable_range;
//
// }
// //so we remove the first invalid allcoation
// let badly_served_guest = guest_list.remove(invalid_guest_number);
//
// // and then we try the two cases with that guest
//
// // So I think the idea is that we try the minimum and maximum of the
// // forbidden range that the invalid value fell into
//
// //case one is no more than the minimum of the forbidden range
// let allocation_amount_one = applicable_range.min;
//
// let mut case_one_proposal = auction(guest_list, available_memory - allocation_amount_one);
//
// case_one_proposal.push((badly_served_guest, allocation_amount_one));
//
// let case_one_welfare = social_welfare(&case_one_proposal);
//
// //case two is at least as much as the maximum of the forbidden range
// let allocation_amount_two = applicable_range.max;
//
// let (case_two_welfare, case_two_proposal) =
// if allocation_amount_two <= available_memory
// {
// let mut inner_case_two_proposal =
// auction(guest_list, available_memory - allocation_amount_two);
//
// inner_case_two_proposal.push((badly_served_guest, allocation_amount_two));
//
// (social_welfare(&inner_case_two_proposal), inner_case_two_proposal)
// }
// else
// {
// (0, Vec::new())
// };
//
//
//
// //return the one with greater welfare, or if equal, the one that allocates less memory
// match case_one_welfare.cmp(&case_two_welfare)
// {
// Ordering::Less => case_two_proposal,
//
// Ordering::Greater => case_one_proposal,
//
// Ordering::Equal => case_one_proposal,
// }
// }
// fn registerGuest(baseMemory: i64)
// {
//
// }
// fn makeBid(mem_unit_price: f64, guest: Guest)
// {
//
// }
fn main()
{
let guest1 =
Guest
{
mem_unit_price: 2,
current_holdings: 1,
forbidden_ranges:
vec!
[
Range{min: 0, max: 3},
Range{min: 4, max: u64::max_value()}
],
base_memory: 10
};
let guest2 =
Guest
{
mem_unit_price: 1,
current_holdings: 1,
forbidden_ranges:
vec!
[
Range{min: 0, max: 3},
Range{min: 5, max: u64::max_value()}
],
base_memory: 10
};
let mut guest_list = vec![&guest1, &guest2];
guest_list.sort_unstable();
{
for guest in &guest_list
{
println!("{:?}", guest);
}
}
{
let naive = naive_allocation(&guest_list, 6);
println!("The naive allocation is: ", );
{
for (ref guest, allocated) in &naive
{
println!("{:?} gets {:?}", guest, allocated);
}
}
println!("it has a social welfare of {:?}", social_welfare(&naive));
}
let final_allocation = auction(&mut guest_list, 6);
println!("The final allocation is: ", );
{
for (ref guest, allocated) in &final_allocation
{
println!("{:?} gets {:?}", guest, allocated);
}
}
println!("it has a social welfare of {:?}", social_welfare(&final_allocation));
println!("Hello, world!");
}
| {
let mut remaining_memory: u64 = available_memory;
guest_list.iter().map
(
|guest|
{
// if there's no memory left to hand out our job is simple
if remaining_memory == 0
{
0
}
// otherwise get the maximum amount memory this guest
// wants to pay for and give them that
else
{
// if the last forbidden range goes to infinity we want
//to use the minimum of that forbidden range
let upper_bound =
guest
.forbidden_ranges
.last()
.filter(|range| {range.max == u64::max_value()})
.map(|range| {range.min})
.unwrap_or(u64::max_value());
let mem_to_alloc = min(remaining_memory, upper_bound);
remaining_memory -= mem_to_alloc;
mem_to_alloc
}
}
).collect()
} | identifier_body |
main.rs | extern crate ginseng;
use ginseng::guest::Guest;
use ginseng::guest::Range;
use std::cmp::min;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::vec::Vec;
fn social_welfare(proposed_allocation: &Vec<(&Guest, u64)>) -> u64
{
let mut total_welfare: u64 = 0;
for (guest, allocation) in proposed_allocation
{
total_welfare += guest.mem_unit_price * allocation;
}
total_welfare
}
// assumes that `guest_list` has already been sorted
// just doles out memory in sorted order such that we
// don't give anyone any more memory than they asked for
// it's still possible that we give people too little memory | // which should be checked after getting the result of this
// function
// I think what I really want to return from this is a vector of allocation amounts
// it'll even take up less space than (reference, allocation) pairs and will be much
// less problematic
fn naive_allocation
(
guest_list: &Vec<&Guest>,
available_memory: u64
)
-> Vec<u64>
{
let mut remaining_memory: u64 = available_memory;
guest_list.iter().map
(
|guest|
{
// if there's no memory left to hand out our job is simple
if remaining_memory == 0
{
0
}
// otherwise get the maximum amount memory this guest
// wants to pay for and give them that
else
{
// if the last forbidden range goes to infinity we want
//to use the minimum of that forbidden range
let upper_bound =
guest
.forbidden_ranges
.last()
.filter(|range| {range.max == u64::max_value()})
.map(|range| {range.min})
.unwrap_or(u64::max_value());
let mem_to_alloc = min(remaining_memory, upper_bound);
remaining_memory -= mem_to_alloc;
mem_to_alloc
}
}
).collect()
}
// this guy already returns indices instead of references
fn invalid_allocations(proposed_allocations: &Vec<(&Guest, u64)>) -> Vec<(usize, Range)>
{
let mut violations: Vec<(usize, Range)> = Vec::new();
let mut index: usize = 0;
for &(guest, amount) in proposed_allocations.iter()
{
// we want to get the guest's forbidden range with the greatest
// min value that is less than amount. This would probably best
// be done with a binary search, but for now iterative is fine
for range in guest.forbidden_ranges.clone()
{
if range.min < amount && range.max > amount
{
violations.push((index, range));
}
}
index = index + 1;
}
violations
}
fn public_auction_function
(
guest_list: &Vec<&Guest>,
available_memory: u64
)
-> Vec<u64>
{
auction_with_pinned_allocations(guest_list, available_memory, Vec::new())
}
// returns the list of allocations of the provided memory to the list of
// provided guests which results in the maximal social welfare possible without
// changing the provided pinned allocations
fn auction_with_pinned_allocations
(
guest_list: &Vec<&Guest>,
available_memory: u64,
pinned_allocations: &HashMap<u64, u64> //list of (index, allocation) pairs
)
-> Vec<u64>
{
// so I think the idea is we filter the pinned allocations out in an
// enumerate into a filter into a map that discards the index into a
// collect
// then we put them back into the result of the recursive case with an
// enumerate into a flatmap that returns element +
// contiguous-succesive-elements. So we just need a mutable `index_correction`
// variable that gets incremented every time we insert an element so we
// can keep track of each elements place in the new list so we can map positions in
// the enumerate to positions in the original list
let my_copy =
guest_list
.iter()
.enumerate()
.filter(|(index, &guest)| !pinned_allocations.contains_key(index))
.map(|(index, &guest)| guest.clone())
.collect();
// let mut my_copy = guest_list.clone();
// TODO: resolve `invalid` and merge the pinned allocations back in before returning
let invalid = invalid_allocations(my_copy.iter().zip(naive_allocation(my_copy, available_memory)));
}
// Given the applicable range for an invalid allocation and the amount of available_memory
// this function returns the one to two allocation options available to replace the
// invalid allocation
fn two_options(applicable_range: Range, available_memory: u64) -> Vec<u64>
{
match applicable_range.max.cmp(&available_memory)
{
Ordering::Less | Ordering::Equal =>
vec![applicable_range.min, applicable_range.max],
Ordering::Greater => vec![applicable_range.min],
}
}
//I think I want an immutable list slice that I jut progressively slice more off of
// or present different iterators of maybe?
// the payment rule should be implemented in a different function
// so that we can use this function recursively
// I think what we should really return from this is a list of (index, allocation) pairs
// and take as input a list of inidices of invalid allocations (we can have a helper function
//without that argument that passes an empty list into this function)
// fn auction<'a>(guest_list: &'a mut Vec<&'a Guest>, available_memory: u64) -> Vec<(&'a Guest, u64)>
// {
// if guest_list.is_empty()
// {
// return Vec::new();
// }
//
// // so first we try just try giving as much as
// // possible of the remaining_memory
// // to each guest
// let applicable_range: Range;
// let invalid_guest_number;
// {
// //then we first attempt a naive allocation based solely on the sorting
// //and any upper bounds the guests may have set
// // naive_allocation should maintain the same ordering of geusts as guest_list
// // so indices of proposed_allocation can be used to index into guest_list
// let proposed_allocations = naive_allocation(guest_list, available_memory);
//
// let mut invalid = invalid_allocations(&proposed_allocations);
//
// // yup, commenting out this early return got rid of two of the 3 compile errors
// // and the third is pretty obviously a good thing to error out on because it let me know
// //I was adding two guests for case two instead of just one
// // if invalid.is_empty()
// // {
// // return proposed_allocations;
// // }
//
// // so with this first attempt we want to first check and see if we're
// // assigning someone an amount of memory in one of their forbidden ranges
// // and for each case in which someone was allocated an invalid amount, we
// // need to try two cases.
// // so we just need to try removing the first invalid allocation, which means
// // we can just mutate the guest_list instead of cloning every time
// let (_invalid_guest_number, _applicable_range) = invalid.remove(0);
// invalid_guest_number = _invalid_guest_number;
// applicable_range = _applicable_range;
//
// }
// //so we remove the first invalid allcoation
// let badly_served_guest = guest_list.remove(invalid_guest_number);
//
// // and then we try the two cases with that guest
//
// // So I think the idea is that we try the minimum and maximum of the
// // forbidden range that the invalid value fell into
//
// //case one is no more than the minimum of the forbidden range
// let allocation_amount_one = applicable_range.min;
//
// let mut case_one_proposal = auction(guest_list, available_memory - allocation_amount_one);
//
// case_one_proposal.push((badly_served_guest, allocation_amount_one));
//
// let case_one_welfare = social_welfare(&case_one_proposal);
//
// //case two is at least as much as the maximum of the forbidden range
// let allocation_amount_two = applicable_range.max;
//
// let (case_two_welfare, case_two_proposal) =
// if allocation_amount_two <= available_memory
// {
// let mut inner_case_two_proposal =
// auction(guest_list, available_memory - allocation_amount_two);
//
// inner_case_two_proposal.push((badly_served_guest, allocation_amount_two));
//
// (social_welfare(&inner_case_two_proposal), inner_case_two_proposal)
// }
// else
// {
// (0, Vec::new())
// };
//
//
//
// //return the one with greater welfare, or if equal, the one that allocates less memory
// match case_one_welfare.cmp(&case_two_welfare)
// {
// Ordering::Less => case_two_proposal,
//
// Ordering::Greater => case_one_proposal,
//
// Ordering::Equal => case_one_proposal,
// }
// }
// fn registerGuest(baseMemory: i64)
// {
//
// }
// fn makeBid(mem_unit_price: f64, guest: Guest)
// {
//
// }
fn main()
{
let guest1 =
Guest
{
mem_unit_price: 2,
current_holdings: 1,
forbidden_ranges:
vec!
[
Range{min: 0, max: 3},
Range{min: 4, max: u64::max_value()}
],
base_memory: 10
};
let guest2 =
Guest
{
mem_unit_price: 1,
current_holdings: 1,
forbidden_ranges:
vec!
[
Range{min: 0, max: 3},
Range{min: 5, max: u64::max_value()}
],
base_memory: 10
};
let mut guest_list = vec![&guest1, &guest2];
guest_list.sort_unstable();
{
for guest in &guest_list
{
println!("{:?}", guest);
}
}
{
let naive = naive_allocation(&guest_list, 6);
println!("The naive allocation is: ", );
{
for (ref guest, allocated) in &naive
{
println!("{:?} gets {:?}", guest, allocated);
}
}
println!("it has a social welfare of {:?}", social_welfare(&naive));
}
let final_allocation = auction(&mut guest_list, 6);
println!("The final allocation is: ", );
{
for (ref guest, allocated) in &final_allocation
{
println!("{:?} gets {:?}", guest, allocated);
}
}
println!("it has a social welfare of {:?}", social_welfare(&final_allocation));
println!("Hello, world!");
} | random_line_split |
|
map.rs | use super::BareDoomMap;
use super::geom::{Coord, Point, Rect, Size};
use std::collections::HashMap;
use std::marker::PhantomData;
use std;
// TODO
// map diagnostics
// - error:
// - info: unused vertex
// - info: unused side
// - info: sector with no sides
// - info: thing not in the map (polyobjs excluded)
/// A fully-fledged map, independent (more or less) of any particular underlying format.
// TODO actually i'm not so sure about that! should i, say, have different map impls that use
// different types...
pub struct Map {
/*
lines: Vec<Rc<RefCell<Line>>>,
sides: Vec<Rc<RefCell<Side>>>,
sectors: Vec<Rc<RefCell<Sector>>>,
things: Vec<Rc<RefCell<Thing>>>,
vertices: Vec<Rc<RefCell<Vertex>>>,
*/
lines: Vec<Line>,
sides: Vec<Side>,
sectors: Vec<Sector>,
things: Vec<Thing>,
vertices: Vec<Vertex>,
bbox: Option<Rect>,
}
impl Map {
pub fn new() -> Self {
Map {
lines: Vec::new(),
sides: Vec::new(),
sectors: Vec::new(),
things: Vec::new(),
vertices: Vec::new(),
bbox: None,
}
}
pub fn from_bare(bare_map: &BareDoomMap) -> Self {
let mut map = Map::new();
for bare_sector in bare_map.sectors.iter() {
let sectorh = map.add_sector();
let sector = &mut map.sectors[sectorh.0];
sector.tag = bare_sector.sector_tag as u32;
sector.special = bare_sector.sector_type as u32;
sector.floor_height = bare_sector.floor_height as i32;
sector.ceiling_height = bare_sector.ceiling_height as i32;
}
for bare_vertex in bare_map.vertices.iter() {
map.add_vertex(bare_vertex.x as f64, bare_vertex.y as f64);
}
for bare_side in bare_map.sides.iter() {
let handle = map.add_side((bare_side.sector as usize).into());
let side = map.side_mut(handle);
side.lower_texture = bare_side.lower_texture.into();
side.middle_texture = bare_side.middle_texture.into();
side.upper_texture = bare_side.upper_texture.into();
}
for bare_line in bare_map.lines.iter() {
let handle = map.add_line((bare_line.v0 as usize).into(), (bare_line.v1 as usize).into());
let line = map.line_mut(handle);
line.flags = bare_line.flags as u32;
// FIXME and here's where we start to go awry -- this should use a method. so should
// new side w/ sector
if bare_line.front_sidedef != -1 {
line.front = Some((bare_line.front_sidedef as usize).into());
}
if bare_line.back_sidedef != -1 {
line.back = Some((bare_line.back_sidedef as usize).into());
}
}
for bare_thing in bare_map.things.iter() {
map.things.push(Thing{
point: Point::new(bare_thing.x as Coord, bare_thing.y as Coord),
doomednum: bare_thing.doomednum as u32,
});
}
map
}
fn side_mut(&mut self, handle: Handle<Side>) -> &mut Side {
&mut self.sides[handle.0]
}
fn line_mut(&mut self, handle: Handle<Line>) -> &mut Line {
&mut self.lines[handle.0]
}
fn add_sector(&mut self) -> Handle<Sector> {
self.sectors.push(Sector{ special: 0, tag: 0, floor_height: 0, ceiling_height: 0 });
(self.sectors.len() - 1).into()
}
fn add_side(&mut self, sector: Handle<Sector>) -> Handle<Side> {
self.sides.push(Side{
id: 0,
lower_texture: "".into(),
middle_texture: "".into(),
upper_texture: "".into(),
sector: sector,
});
(self.sides.len() - 1).into()
}
fn add_vertex(&mut self, x: f64, y: f64) {
self.vertices.push(Vertex{ x, y });
//self.vertices.push(vertex);
//return vertex;
}
fn add_line(&mut self, start: Handle<Vertex>, end: Handle<Vertex>) -> Handle<Line> {
self.lines.push(Line{
start,
end,
flags: 0,
special: 0,
front: None,
back: None,
});
(self.lines.len() - 1).into()
}
pub fn iter_lines(&self) -> <Vec<BoundLine> as IntoIterator>::IntoIter {
let bound: Vec<_> = self.lines.iter().map(|a| BoundLine(a, self)).collect();
bound.into_iter()
// return self.lines.iter().map(|a| BoundLine(a, self));
}
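// An alternative sketch that avoids collecting into an intermediate Vec, assuming a
// toolchain with `impl Trait` in return position (the commented-out return above does
// not work as-is because the map closure's type cannot be named in the signature):
//
//     pub fn iter_lines<'a>(&'a self) -> impl Iterator<Item = BoundLine<'a>> + 'a {
//         self.lines.iter().map(move |line| BoundLine(line, self))
//     }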
pub fn iter_sectors(&self) -> std::slice::Iter<Sector> {
self.sectors.iter()
}
pub fn iter_things(&self) -> std::slice::Iter<Thing> {
self.things.iter()
}
pub fn vertex(&self, handle: Handle<Vertex>) -> &Vertex {
&self.vertices[handle.0]
}
pub fn side(&self, handle: Handle<Side>) -> &Side {
&self.sides[handle.0]
}
pub fn sector(&self, handle: Handle<Sector>) -> &Sector {
&self.sectors[handle.0]
}
pub fn bbox(&self) -> Rect {
// TODO ah heck, should include Things too
let points: Vec<_> = self.vertices.iter().map(|v| Point::new(v.x, v.y)).collect();
Rect::from_points(points.iter())
}
pub fn find_player_start(&self) -> Option<Point> {
for thing in &self.things {
if thing.doomednum() == 1 {
return Some(thing.point());
}
}
None
}
pub fn sector_to_polygons(&self, s: usize) -> Vec<Vec<Point>> {
struct Edge<'a> {
line: &'a Line,
side: &'a Side,
facing: Facing,
v0: &'a Vertex,
v1: &'a Vertex,
done: bool,
}
// This is just to convince HashMap to hash on the actual reference, not the underlying
// Vertex value
struct VertexRef<'a>(&'a Vertex);
impl<'a> PartialEq for VertexRef<'a> {
fn eq(&self, other: &VertexRef) -> bool {
(self.0 as *const _) == (other.0 as *const _)
}
}
impl<'a> Eq for VertexRef<'a> {}
impl<'a> std::hash::Hash for VertexRef<'a> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
(self.0 as *const Vertex).hash(state)
}
}
let mut edges = vec![];
let mut vertices_to_edges = HashMap::new();
// TODO linear scan -- would make more sense to turn the entire map into polygons in one go
for line in &self.lines {
let (frontid, backid) = line.side_indices();
// FIXME need to handle self-referencing sectors, but also
if let Some(front) = line.front.map(|h| &self.sides[h.0]) {
if let Some(back) = line.back.map(|h| &self.sides[h.0]) {
if front.sector == back.sector {
continue;
}
}
}
// TODO seems like a good case for a custom iterator
for &(facing, sideid) in [(Facing::Front, frontid), (Facing::Back, backid)].iter() {
if sideid.is_none() {
continue;
}
// TODO this and the vertices lookups might be bogus and crash...
let side = &self.sides[sideid.unwrap().0];
if side.sector.0 == s {
let v0 = &self.vertices[line.start.0];
let v1 = &self.vertices[line.end.0];
let edge = Edge{
line,
side,
facing,
// TODO should these be swapped depending on the line facing?
v0,
v1,
done: false,
};
edges.push(edge);
vertices_to_edges.entry(VertexRef(v0)).or_insert_with(|| Vec::new()).push(edges.len() - 1);
vertices_to_edges.entry(VertexRef(v1)).or_insert_with(|| Vec::new()).push(edges.len() - 1);
}
}
}
// Trace sectors by starting at the first side's first vertex and attempting to walk from
// there
let mut outlines = Vec::new();
let mut seen_vertices = HashMap::new();
while edges.len() > 0 {
let mut next_vertices = vec![];
for edge in edges.iter() {
// TODO having done-ness for both edges and vertices seems weird, idk
if !seen_vertices.contains_key(&VertexRef(edge.v0)) {
next_vertices.push(edge.v0);
break;
}
if !seen_vertices.contains_key(&VertexRef(edge.v1)) {
next_vertices.push(edge.v1);
break;
}
}
if next_vertices.is_empty() {
break;
}
let mut outline = Vec::new();
while next_vertices.len() > 0 {
let vertices = next_vertices;
next_vertices = Vec::new();
for vertex in vertices.iter() {
if seen_vertices.contains_key(&VertexRef(vertex)) {
continue;
}
seen_vertices.insert(VertexRef(vertex), true);
outline.push(Point::new(vertex.x, vertex.y));
// TODO so, problems occur here if:
// - a vertex has more than two edges
// - special case: double-sided edges are OK! but we have to eliminate
// those, WITHOUT ruining entirely self-referencing sectors
// - a vertex has one edge
for e in vertices_to_edges.get(&VertexRef(vertex)).unwrap().iter() {
let edge = &mut edges[*e];
if edge.done {
// TODO actually this seems weird? why would this happen.
continue;
}
edge.done = true;
if !seen_vertices.contains_key(&VertexRef(edge.v0)) {
next_vertices.push(edge.v0);
}
else if !seen_vertices.contains_key(&VertexRef(edge.v1)) {
next_vertices.push(edge.v1);
}
// Only add EXACTLY ONE vertex at a time for now -- so, assuming simple
// polygons! Figure out the rest, uh, later.
break;
}
}
}
if outline.len() > 0 {
outlines.push(outline);
}
}
outlines
}
}
#[derive(Copy, Clone, Debug)]
pub enum Facing {
Front,
Back,
}
pub struct Handle<T>(usize, PhantomData<*const T>);
// These traits are implemented by hand because #derive'd impls only apply when T implements the
// same trait, but we don't actually own a T, so that bound is unnecessary.
impl<T> Clone for Handle<T> {
fn clone(&self) -> Self {
Handle(self.0, PhantomData)
}
}
impl<T> Copy for Handle<T> {}
impl<T> PartialEq for Handle<T> {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<T> Eq for Handle<T> {}
impl<T> std::hash::Hash for Handle<T> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.hash(state)
}
}
impl<T> From<usize> for Handle<T> {
fn from(index: usize) -> Self {
Handle(index, PhantomData)
}
}
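// For comparison, a derived impl would expand to roughly the following, adding a
// `T: Clone` bound that Handle never needs, since it only stores an index and a
// PhantomData (illustrative expansion, not actual generated code):
//
//     impl<T: Clone> Clone for Handle<T> {
//         fn clone(&self) -> Self {
//             Handle(self.0, PhantomData)
//         }
//     }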
trait MapComponent {}
pub struct Thing {
point: Point,
doomednum: u32,
}
impl Thing {
pub fn point(&self) -> Point {
self.point
}
pub fn doomednum(&self) -> u32 {
self.doomednum
}
}
pub struct Line {
start: Handle<Vertex>,
end: Handle<Vertex>,
flags: u32,
special: usize,
//sector_tag -- oops, different in zdoom...
front: Option<Handle<Side>>,
back: Option<Handle<Side>>,
}
impl Line {
pub fn vertex_indices(&self) -> (Handle<Vertex>, Handle<Vertex>) {
(self.start, self.end)
}
pub fn side_indices(&self) -> (Option<Handle<Side>>, Option<Handle<Side>>) {
(self.front, self.back)
}
pub fn has_special(&self) -> bool {
self.special != 0
}
pub fn blocks_player(&self) -> bool {
self.flags & 1 != 0
}
pub fn is_one_sided(&self) -> bool {
self.front.is_some() != self.back.is_some()
}
pub fn is_two_sided(&self) -> bool {
self.front.is_some() && self.back.is_some()
}
}
// A Line that knows what map it came from, so it can look up its actual sides and vertices
#[derive(Clone, Copy)]
pub struct BoundLine<'a>(&'a Line, &'a Map);
impl<'a> BoundLine<'a> {
pub fn start(&self) -> &Vertex {
self.1.vertex(self.0.start)
}
pub fn end(&self) -> &Vertex {
self.1.vertex(self.0.end)
}
pub fn front(&self) -> Option<&Side> {
self.0.front.map(|s| self.1.side(s))
}
pub fn back(&self) -> Option<&Side> {
self.0.back.map(|s| self.1.side(s))
}
// TODO these are all delegates, eugh
pub fn | (&self) -> (Handle<Vertex>, Handle<Vertex>) {
self.0.vertex_indices()
}
pub fn side_indices(&self) -> (Option<Handle<Side>>, Option<Handle<Side>>) {
self.0.side_indices()
}
pub fn has_special(&self) -> bool {
self.0.has_special()
}
pub fn blocks_player(&self) -> bool {
self.0.blocks_player()
}
pub fn is_one_sided(&self) -> bool {
self.0.is_one_sided()
}
pub fn is_two_sided(&self) -> bool {
self.0.is_two_sided()
}
}
pub struct Sector {
tag: u32,
special: u32,
floor_height: i32,
ceiling_height: i32,
}
impl Sector {
pub fn tag(&self) -> u32 {
self.tag
}
pub fn special(&self) -> u32 {
self.special
}
pub fn floor_height(&self) -> i32 {
self.floor_height
}
pub fn ceiling_height(&self) -> i32 {
self.ceiling_height
}
}
pub struct Side {
//map: Rc<Map>,
pub id: u32,
pub upper_texture: String,
pub lower_texture: String,
pub middle_texture: String,
pub sector: Handle<Sector>,
}
#[derive(Clone, Copy)]
pub struct BoundSide<'a>(&'a Side, &'a Map);
impl<'a> BoundSide<'a> {
//pub fn sector(&self) ->
}
pub struct Vertex {
pub x: f64,
pub y: f64,
}
| vertex_indices | identifier_name |
map.rs | use super::BareDoomMap;
use super::geom::{Coord, Point, Rect, Size};
use std::collections::HashMap;
use std::marker::PhantomData;
use std;
// TODO
// map diagnostics
// - error:
// - info: unused vertex
// - info: unused side
// - info: sector with no sides
// - info: thing not in the map (polyobjs excluded)
/// A fully-fledged map, independent (more or less) of any particular underlying format.
// TODO actually i'm not so sure about that! should i, say, have different map impls that use
// different types...
pub struct Map {
/*
lines: Vec<Rc<RefCell<Line>>>,
sides: Vec<Rc<RefCell<Side>>>,
sectors: Vec<Rc<RefCell<Sector>>>,
things: Vec<Rc<RefCell<Thing>>>,
vertices: Vec<Rc<RefCell<Vertex>>>,
*/
lines: Vec<Line>,
sides: Vec<Side>,
sectors: Vec<Sector>,
things: Vec<Thing>,
vertices: Vec<Vertex>,
bbox: Option<Rect>,
}
impl Map {
pub fn new() -> Self {
Map {
lines: Vec::new(),
sides: Vec::new(),
sectors: Vec::new(),
things: Vec::new(),
vertices: Vec::new(),
bbox: None,
}
}
pub fn from_bare(bare_map: &BareDoomMap) -> Self {
let mut map = Map::new();
for bare_sector in bare_map.sectors.iter() {
let sectorh = map.add_sector();
let sector = &mut map.sectors[sectorh.0];
sector.tag = bare_sector.sector_tag as u32;
sector.special = bare_sector.sector_type as u32;
sector.floor_height = bare_sector.floor_height as i32;
sector.ceiling_height = bare_sector.ceiling_height as i32;
}
for bare_vertex in bare_map.vertices.iter() {
map.add_vertex(bare_vertex.x as f64, bare_vertex.y as f64);
}
for bare_side in bare_map.sides.iter() {
let handle = map.add_side((bare_side.sector as usize).into());
let side = map.side_mut(handle);
side.lower_texture = bare_side.lower_texture.into();
side.middle_texture = bare_side.middle_texture.into();
side.upper_texture = bare_side.upper_texture.into();
}
for bare_line in bare_map.lines.iter() {
let handle = map.add_line((bare_line.v0 as usize).into(), (bare_line.v1 as usize).into());
let line = map.line_mut(handle);
line.flags = bare_line.flags as u32;
// FIXME and here's where we start to go awry -- this should use a method. so should
// new side w/ sector
if bare_line.front_sidedef != -1 {
line.front = Some((bare_line.front_sidedef as usize).into());
}
if bare_line.back_sidedef != -1 {
line.back = Some((bare_line.back_sidedef as usize).into());
}
}
for bare_thing in bare_map.things.iter() {
map.things.push(Thing{
point: Point::new(bare_thing.x as Coord, bare_thing.y as Coord),
doomednum: bare_thing.doomednum as u32,
});
}
map
}
fn side_mut(&mut self, handle: Handle<Side>) -> &mut Side {
&mut self.sides[handle.0]
}
fn line_mut(&mut self, handle: Handle<Line>) -> &mut Line {
&mut self.lines[handle.0]
}
fn add_sector(&mut self) -> Handle<Sector> {
self.sectors.push(Sector{ special: 0, tag: 0, floor_height: 0, ceiling_height: 0 });
(self.sectors.len() - 1).into()
}
fn add_side(&mut self, sector: Handle<Sector>) -> Handle<Side> {
self.sides.push(Side{
id: 0,
lower_texture: "".into(),
middle_texture: "".into(),
upper_texture: "".into(),
sector: sector,
});
(self.sides.len() - 1).into()
}
fn add_vertex(&mut self, x: f64, y: f64) {
self.vertices.push(Vertex{ x, y });
//self.vertices.push(vertex);
//return vertex;
}
fn add_line(&mut self, start: Handle<Vertex>, end: Handle<Vertex>) -> Handle<Line> {
self.lines.push(Line{
start,
end,
flags: 0,
special: 0,
front: None,
back: None,
});
(self.lines.len() - 1).into()
}
pub fn iter_lines(&self) -> <Vec<BoundLine> as IntoIterator>::IntoIter {
let bound: Vec<_> = self.lines.iter().map(|a| BoundLine(a, self)).collect();
bound.into_iter()
// return self.lines.iter().map(|a| BoundLine(a, self));
}
pub fn iter_sectors(&self) -> std::slice::Iter<Sector> {
self.sectors.iter()
}
pub fn iter_things(&self) -> std::slice::Iter<Thing> {
self.things.iter()
}
pub fn vertex(&self, handle: Handle<Vertex>) -> &Vertex {
&self.vertices[handle.0]
}
pub fn side(&self, handle: Handle<Side>) -> &Side {
&self.sides[handle.0]
}
pub fn sector(&self, handle: Handle<Sector>) -> &Sector {
&self.sectors[handle.0]
}
pub fn bbox(&self) -> Rect {
// TODO ah heck, should include Things too
let points: Vec<_> = self.vertices.iter().map(|v| Point::new(v.x, v.y)).collect();
Rect::from_points(points.iter())
}
pub fn find_player_start(&self) -> Option<Point> {
for thing in &self.things {
if thing.doomednum() == 1 {
return Some(thing.point());
}
}
None
}
pub fn sector_to_polygons(&self, s: usize) -> Vec<Vec<Point>> {
struct Edge<'a> {
line: &'a Line,
side: &'a Side,
facing: Facing,
v0: &'a Vertex,
v1: &'a Vertex,
done: bool,
}
// This is just to convince HashMap to hash on the actual reference, not the underlying
// Vertex value
struct VertexRef<'a>(&'a Vertex);
impl<'a> PartialEq for VertexRef<'a> {
fn eq(&self, other: &VertexRef) -> bool {
(self.0 as *const _) == (other.0 as *const _)
}
}
impl<'a> Eq for VertexRef<'a> {}
impl<'a> std::hash::Hash for VertexRef<'a> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
(self.0 as *const Vertex).hash(state)
}
}
let mut edges = vec![];
let mut vertices_to_edges = HashMap::new();
// TODO linear scan -- would make more sense to turn the entire map into polygons in one go
for line in &self.lines {
let (frontid, backid) = line.side_indices();
// FIXME need to handle self-referencing sectors, but also
if let Some(front) = line.front.map(|h| &self.sides[h.0]) {
if let Some(back) = line.back.map(|h| &self.sides[h.0]) {
if front.sector == back.sector {
continue;
}
}
}
// TODO seems like a good case for a custom iterator
for &(facing, sideid) in [(Facing::Front, frontid), (Facing::Back, backid)].iter() {
if sideid.is_none() {
continue;
}
// TODO this and the vertices lookups might be bogus and crash...
let side = &self.sides[sideid.unwrap().0];
if side.sector.0 == s {
let v0 = &self.vertices[line.start.0];
let v1 = &self.vertices[line.end.0];
let edge = Edge{
line,
side,
facing,
// TODO should these be swapped depending on the line facing?
v0,
v1,
done: false,
};
edges.push(edge);
vertices_to_edges.entry(VertexRef(v0)).or_insert_with(|| Vec::new()).push(edges.len() - 1);
vertices_to_edges.entry(VertexRef(v1)).or_insert_with(|| Vec::new()).push(edges.len() - 1);
}
}
}
// Trace sectors by starting at the first side's first vertex and attempting to walk from
// there
let mut outlines = Vec::new();
let mut seen_vertices = HashMap::new();
while edges.len() > 0 {
let mut next_vertices = vec![];
for edge in edges.iter() {
// TODO having done-ness for both edges and vertices seems weird, idk
if !seen_vertices.contains_key(&VertexRef(edge.v0)) {
next_vertices.push(edge.v0);
break;
}
if !seen_vertices.contains_key(&VertexRef(edge.v1)) {
next_vertices.push(edge.v1);
break;
}
}
if next_vertices.is_empty() {
break;
}
let mut outline = Vec::new();
            while !next_vertices.is_empty() {
let vertices = next_vertices;
next_vertices = Vec::new();
for vertex in vertices.iter() {
if seen_vertices.contains_key(&VertexRef(vertex)) {
continue;
}
seen_vertices.insert(VertexRef(vertex), true);
outline.push(Point::new(vertex.x, vertex.y));
// TODO so, problems occur here if:
// - a vertex has more than two edges
// - special case: double-sided edges are OK! but we have to eliminate
// those, WITHOUT ruining entirely self-referencing sectors
// - a vertex has one edge
for e in vertices_to_edges.get(&VertexRef(vertex)).unwrap().iter() {
let edge = &mut edges[*e];
if edge.done {
// TODO actually this seems weird? why would this happen.
continue;
}
edge.done = true;
if !seen_vertices.contains_key(&VertexRef(edge.v0)) {
next_vertices.push(edge.v0);
}
else if !seen_vertices.contains_key(&VertexRef(edge.v1)) {
next_vertices.push(edge.v1);
}
// Only add EXACTLY ONE vertex at a time for now -- so, assuming simple
// polygons! Figure out the rest, uh, later.
break;
}
}
}
            if !outline.is_empty() {
outlines.push(outline);
}
}
outlines
}
}
#[derive(Copy, Clone, Debug)]
pub enum Facing {
Front,
Back,
}
pub struct Handle<T>(usize, PhantomData<*const T>);
// These traits are implemented by hand because #derive'd impls only apply when T implements the
// same trait, but we don't actually own a T, so that bound is unnecessary.
impl<T> Clone for Handle<T> {
fn clone(&self) -> Self {
Handle(self.0, PhantomData)
}
}
impl<T> Copy for Handle<T> {}
impl<T> PartialEq for Handle<T> {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<T> Eq for Handle<T> {}
impl<T> std::hash::Hash for Handle<T> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.hash(state) | }
}
impl<T> From<usize> for Handle<T> {
fn from(index: usize) -> Self {
Handle(index, PhantomData)
}
}
trait MapComponent {}
pub struct Thing {
point: Point,
doomednum: u32,
}
impl Thing {
pub fn point(&self) -> Point {
self.point
}
pub fn doomednum(&self) -> u32 {
self.doomednum
}
}
pub struct Line {
start: Handle<Vertex>,
end: Handle<Vertex>,
flags: u32,
special: usize,
//sector_tag -- oops, different in zdoom...
front: Option<Handle<Side>>,
back: Option<Handle<Side>>,
}
impl Line {
pub fn vertex_indices(&self) -> (Handle<Vertex>, Handle<Vertex>) {
(self.start, self.end)
}
pub fn side_indices(&self) -> (Option<Handle<Side>>, Option<Handle<Side>>) {
(self.front, self.back)
}
pub fn has_special(&self) -> bool {
self.special != 0
}
pub fn blocks_player(&self) -> bool {
self.flags & 1 != 0
}
pub fn is_one_sided(&self) -> bool {
self.front.is_some() != self.back.is_some()
}
pub fn is_two_sided(&self) -> bool {
self.front.is_some() && self.back.is_some()
}
}
// A Line that knows what map it came from, so it can look up its actual sides and vertices
#[derive(Clone, Copy)]
pub struct BoundLine<'a>(&'a Line, &'a Map);
impl<'a> BoundLine<'a> {
pub fn start(&self) -> &Vertex {
self.1.vertex(self.0.start)
}
pub fn end(&self) -> &Vertex {
self.1.vertex(self.0.end)
}
pub fn front(&self) -> Option<&Side> {
self.0.front.map(|s| self.1.side(s))
}
pub fn back(&self) -> Option<&Side> {
self.0.back.map(|s| self.1.side(s))
}
// TODO these are all delegates, eugh
pub fn vertex_indices(&self) -> (Handle<Vertex>, Handle<Vertex>) {
self.0.vertex_indices()
}
pub fn side_indices(&self) -> (Option<Handle<Side>>, Option<Handle<Side>>) {
self.0.side_indices()
}
pub fn has_special(&self) -> bool {
self.0.has_special()
}
pub fn blocks_player(&self) -> bool {
self.0.blocks_player()
}
pub fn is_one_sided(&self) -> bool {
self.0.is_one_sided()
}
pub fn is_two_sided(&self) -> bool {
self.0.is_two_sided()
}
}
pub struct Sector {
tag: u32,
special: u32,
floor_height: i32,
ceiling_height: i32,
}
impl Sector {
pub fn tag(&self) -> u32 {
self.tag
}
pub fn special(&self) -> u32 {
self.special
}
pub fn floor_height(&self) -> i32 {
self.floor_height
}
pub fn ceiling_height(&self) -> i32 {
self.ceiling_height
}
}
pub struct Side {
//map: Rc<Map>,
pub id: u32,
pub upper_texture: String,
pub lower_texture: String,
pub middle_texture: String,
pub sector: Handle<Sector>,
}
#[derive(Clone, Copy)]
pub struct BoundSide<'a>(&'a Side, &'a Map);
impl<'a> BoundSide<'a> {
//pub fn sector(&self) ->
}
pub struct Vertex {
pub x: f64,
pub y: f64,
} | random_line_split |
|
map.rs | use super::BareDoomMap;
use super::geom::{Coord, Point, Rect, Size};
use std::collections::HashMap;
use std::marker::PhantomData;
use std;
// TODO
// map diagnostics
// - error:
// - info: unused vertex
// - info: unused side
// - info: sector with no sides
// - info: thing not in the map (polyobjs excluded)
/// A fully-fledged map, independent (more or less) of any particular underlying format.
// TODO actually i'm not so sure about that! should i, say, have different map impls that use
// different types...
pub struct Map {
/*
lines: Vec<Rc<RefCell<Line>>>,
sides: Vec<Rc<RefCell<Side>>>,
sectors: Vec<Rc<RefCell<Sector>>>,
things: Vec<Rc<RefCell<Thing>>>,
vertices: Vec<Rc<RefCell<Vertex>>>,
*/
lines: Vec<Line>,
sides: Vec<Side>,
sectors: Vec<Sector>,
things: Vec<Thing>,
vertices: Vec<Vertex>,
bbox: Option<Rect>,
}
impl Map {
pub fn new() -> Self {
Map {
lines: Vec::new(),
sides: Vec::new(),
sectors: Vec::new(),
things: Vec::new(),
vertices: Vec::new(),
bbox: None,
}
}
pub fn from_bare(bare_map: &BareDoomMap) -> Self {
let mut map = Map::new();
for bare_sector in bare_map.sectors.iter() {
let sectorh = map.add_sector();
let sector = &mut map.sectors[sectorh.0];
sector.tag = bare_sector.sector_tag as u32;
sector.special = bare_sector.sector_type as u32;
sector.floor_height = bare_sector.floor_height as i32;
sector.ceiling_height = bare_sector.ceiling_height as i32;
}
for bare_vertex in bare_map.vertices.iter() {
map.add_vertex(bare_vertex.x as f64, bare_vertex.y as f64);
}
for bare_side in bare_map.sides.iter() {
let handle = map.add_side((bare_side.sector as usize).into());
let side = map.side_mut(handle);
side.lower_texture = bare_side.lower_texture.into();
side.middle_texture = bare_side.middle_texture.into();
side.upper_texture = bare_side.upper_texture.into();
}
for bare_line in bare_map.lines.iter() {
let handle = map.add_line((bare_line.v0 as usize).into(), (bare_line.v1 as usize).into());
let line = map.line_mut(handle);
line.flags = bare_line.flags as u32;
// FIXME and here's where we start to go awry -- this should use a method. so should
// new side w/ sector
if bare_line.front_sidedef != -1 {
line.front = Some((bare_line.front_sidedef as usize).into());
}
if bare_line.back_sidedef != -1 {
line.back = Some((bare_line.back_sidedef as usize).into());
}
}
for bare_thing in bare_map.things.iter() {
map.things.push(Thing{
point: Point::new(bare_thing.x as Coord, bare_thing.y as Coord),
doomednum: bare_thing.doomednum as u32,
});
}
map
}
fn side_mut(&mut self, handle: Handle<Side>) -> &mut Side {
&mut self.sides[handle.0]
}
fn line_mut(&mut self, handle: Handle<Line>) -> &mut Line {
&mut self.lines[handle.0]
}
fn add_sector(&mut self) -> Handle<Sector> {
self.sectors.push(Sector{ special: 0, tag: 0, floor_height: 0, ceiling_height: 0 });
(self.sectors.len() - 1).into()
}
fn add_side(&mut self, sector: Handle<Sector>) -> Handle<Side> {
self.sides.push(Side{
id: 0,
lower_texture: "".into(),
middle_texture: "".into(),
upper_texture: "".into(),
sector: sector,
});
(self.sides.len() - 1).into()
}
fn add_vertex(&mut self, x: f64, y: f64) {
self.vertices.push(Vertex{ x, y });
//self.vertices.push(vertex);
//return vertex;
}
fn add_line(&mut self, start: Handle<Vertex>, end: Handle<Vertex>) -> Handle<Line> {
self.lines.push(Line{
start,
end,
flags: 0,
special: 0,
front: None,
back: None,
});
(self.lines.len() - 1).into()
}
pub fn iter_lines(&self) -> <Vec<BoundLine> as IntoIterator>::IntoIter {
let bound: Vec<_> = self.lines.iter().map(|a| BoundLine(a, self)).collect();
bound.into_iter()
// return self.lines.iter().map(|a| BoundLine(a, self));
}
pub fn iter_sectors(&self) -> std::slice::Iter<Sector> |
pub fn iter_things(&self) -> std::slice::Iter<Thing> {
self.things.iter()
}
pub fn vertex(&self, handle: Handle<Vertex>) -> &Vertex {
&self.vertices[handle.0]
}
pub fn side(&self, handle: Handle<Side>) -> &Side {
&self.sides[handle.0]
}
pub fn sector(&self, handle: Handle<Sector>) -> &Sector {
&self.sectors[handle.0]
}
pub fn bbox(&self) -> Rect {
// TODO ah heck, should include Things too
let points: Vec<_> = self.vertices.iter().map(|v| Point::new(v.x, v.y)).collect();
Rect::from_points(points.iter())
}
pub fn find_player_start(&self) -> Option<Point> {
for thing in &self.things {
if thing.doomednum() == 1 {
return Some(thing.point());
}
}
None
}
pub fn sector_to_polygons(&self, s: usize) -> Vec<Vec<Point>> {
struct Edge<'a> {
line: &'a Line,
side: &'a Side,
facing: Facing,
v0: &'a Vertex,
v1: &'a Vertex,
done: bool,
}
// This is just to convince HashMap to hash on the actual reference, not the underlying
        // Vertex value
struct VertexRef<'a>(&'a Vertex);
impl<'a> PartialEq for VertexRef<'a> {
fn eq(&self, other: &VertexRef) -> bool {
(self.0 as *const _) == (other.0 as *const _)
}
}
impl<'a> Eq for VertexRef<'a> {}
impl<'a> std::hash::Hash for VertexRef<'a> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
(self.0 as *const Vertex).hash(state)
}
}
let mut edges = vec![];
let mut vertices_to_edges = HashMap::new();
// TODO linear scan -- would make more sense to turn the entire map into polygons in one go
for line in &self.lines {
let (frontid, backid) = line.side_indices();
// FIXME need to handle self-referencing sectors, but also
if let Some(front) = line.front.map(|h| &self.sides[h.0]) {
if let Some(back) = line.back.map(|h| &self.sides[h.0]) {
if front.sector == back.sector {
continue;
}
}
}
// TODO seems like a good case for a custom iterator
for &(facing, sideid) in [(Facing::Front, frontid), (Facing::Back, backid)].iter() {
if sideid.is_none() {
continue;
}
// TODO this and the vertices lookups might be bogus and crash...
let side = &self.sides[sideid.unwrap().0];
if side.sector.0 == s {
let v0 = &self.vertices[line.start.0];
let v1 = &self.vertices[line.end.0];
let edge = Edge{
line,
side,
facing,
// TODO should these be swapped depending on the line facing?
v0,
v1,
done: false,
};
edges.push(edge);
                    vertices_to_edges.entry(VertexRef(v0)).or_insert_with(Vec::new).push(edges.len() - 1);
                    vertices_to_edges.entry(VertexRef(v1)).or_insert_with(Vec::new).push(edges.len() - 1);
}
}
}
// Trace sectors by starting at the first side's first vertex and attempting to walk from
// there
let mut outlines = Vec::new();
let mut seen_vertices = HashMap::new();
        while !edges.is_empty() {
let mut next_vertices = vec![];
for edge in edges.iter() {
// TODO having done-ness for both edges and vertices seems weird, idk
if !seen_vertices.contains_key(&VertexRef(edge.v0)) {
next_vertices.push(edge.v0);
break;
}
if !seen_vertices.contains_key(&VertexRef(edge.v1)) {
next_vertices.push(edge.v1);
break;
}
}
if next_vertices.is_empty() {
break;
}
let mut outline = Vec::new();
            while !next_vertices.is_empty() {
let vertices = next_vertices;
next_vertices = Vec::new();
for vertex in vertices.iter() {
if seen_vertices.contains_key(&VertexRef(vertex)) {
continue;
}
seen_vertices.insert(VertexRef(vertex), true);
outline.push(Point::new(vertex.x, vertex.y));
// TODO so, problems occur here if:
// - a vertex has more than two edges
// - special case: double-sided edges are OK! but we have to eliminate
// those, WITHOUT ruining entirely self-referencing sectors
// - a vertex has one edge
for e in vertices_to_edges.get(&VertexRef(vertex)).unwrap().iter() {
let edge = &mut edges[*e];
if edge.done {
// TODO actually this seems weird? why would this happen.
continue;
}
edge.done = true;
if !seen_vertices.contains_key(&VertexRef(edge.v0)) {
next_vertices.push(edge.v0);
}
else if !seen_vertices.contains_key(&VertexRef(edge.v1)) {
next_vertices.push(edge.v1);
}
// Only add EXACTLY ONE vertex at a time for now -- so, assuming simple
// polygons! Figure out the rest, uh, later.
break;
}
}
}
            if !outline.is_empty() {
outlines.push(outline);
}
}
outlines
}
}
#[derive(Copy, Clone, Debug)]
pub enum Facing {
Front,
Back,
}
pub struct Handle<T>(usize, PhantomData<*const T>);
// These traits are implemented by hand because #derive'd impls only apply when T implements the
// same trait, but we don't actually own a T, so that bound is unnecessary.
impl<T> Clone for Handle<T> {
fn clone(&self) -> Self {
Handle(self.0, PhantomData)
}
}
impl<T> Copy for Handle<T> {}
impl<T> PartialEq for Handle<T> {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<T> Eq for Handle<T> {}
impl<T> std::hash::Hash for Handle<T> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.hash(state)
}
}
impl<T> From<usize> for Handle<T> {
fn from(index: usize) -> Self {
Handle(index, PhantomData)
}
}
trait MapComponent {}
pub struct Thing {
point: Point,
doomednum: u32,
}
impl Thing {
pub fn point(&self) -> Point {
self.point
}
pub fn doomednum(&self) -> u32 {
self.doomednum
}
}
pub struct Line {
start: Handle<Vertex>,
end: Handle<Vertex>,
flags: u32,
special: usize,
//sector_tag -- oops, different in zdoom...
front: Option<Handle<Side>>,
back: Option<Handle<Side>>,
}
impl Line {
pub fn vertex_indices(&self) -> (Handle<Vertex>, Handle<Vertex>) {
(self.start, self.end)
}
pub fn side_indices(&self) -> (Option<Handle<Side>>, Option<Handle<Side>>) {
(self.front, self.back)
}
pub fn has_special(&self) -> bool {
self.special != 0
}
pub fn blocks_player(&self) -> bool {
self.flags & 1 != 0
}
pub fn is_one_sided(&self) -> bool {
self.front.is_some() != self.back.is_some()
}
pub fn is_two_sided(&self) -> bool {
self.front.is_some() && self.back.is_some()
}
}
// A Line that knows what map it came from, so it can look up its actual sides and vertices
#[derive(Clone, Copy)]
pub struct BoundLine<'a>(&'a Line, &'a Map);
impl<'a> BoundLine<'a> {
pub fn start(&self) -> &Vertex {
self.1.vertex(self.0.start)
}
pub fn end(&self) -> &Vertex {
self.1.vertex(self.0.end)
}
pub fn front(&self) -> Option<&Side> {
self.0.front.map(|s| self.1.side(s))
}
pub fn back(&self) -> Option<&Side> {
self.0.back.map(|s| self.1.side(s))
}
// TODO these are all delegates, eugh
pub fn vertex_indices(&self) -> (Handle<Vertex>, Handle<Vertex>) {
self.0.vertex_indices()
}
pub fn side_indices(&self) -> (Option<Handle<Side>>, Option<Handle<Side>>) {
self.0.side_indices()
}
pub fn has_special(&self) -> bool {
self.0.has_special()
}
pub fn blocks_player(&self) -> bool {
self.0.blocks_player()
}
pub fn is_one_sided(&self) -> bool {
self.0.is_one_sided()
}
pub fn is_two_sided(&self) -> bool {
self.0.is_two_sided()
}
}
pub struct Sector {
tag: u32,
special: u32,
floor_height: i32,
ceiling_height: i32,
}
impl Sector {
pub fn tag(&self) -> u32 {
self.tag
}
pub fn special(&self) -> u32 {
self.special
}
pub fn floor_height(&self) -> i32 {
self.floor_height
}
pub fn ceiling_height(&self) -> i32 {
self.ceiling_height
}
}
pub struct Side {
//map: Rc<Map>,
pub id: u32,
pub upper_texture: String,
pub lower_texture: String,
pub middle_texture: String,
pub sector: Handle<Sector>,
}
#[derive(Clone, Copy)]
pub struct BoundSide<'a>(&'a Side, &'a Map);
impl<'a> BoundSide<'a> {
//pub fn sector(&self) ->
}
pub struct Vertex {
pub x: f64,
pub y: f64,
}
| {
self.sectors.iter()
} | identifier_body |
models.py | from django.db import models
from django.urls import reverse # To generate URLS by reversing URL patterns
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
# Imports Django's default User system.
from django.contrib.auth.models import User
# Movie rating imports. Docs: https://django-star-ratings.readthedocs.io/en/latest/?badge=latest/
from django.contrib.contenttypes.fields import GenericRelation
from star_ratings.models import Rating
# Amazon AWS S3 import.
from s3direct.fields import S3DirectField
# IMDbPY import. Docs: https://imdbpy.readthedocs.io/en/latest/
import imdb
# Video stats with MoviePy. Docs: https://zulko.github.io/moviepy/index.html
from moviepy.editor import VideoFileClip
# Google Scholar import. Docs: https://scholarly.readthedocs.io/en/latest/?badge=latest
from scholarly import scholarly, ProxyGenerator
# Django taggit import for managing comma-separated tags. Docs: https://django-taggit.readthedocs.io/en/latest/
from taggit.managers import TaggableManager
import sys, re, os, datetime
class Genre(models.Model):
# Model representing a movie genre (e.g. Science Fiction, Non Fiction).
name = models.CharField(
max_length=200,
help_text="Enter a movie genre (e.g. Science Fiction, French Poetry etc.)"
)
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Language(models.Model):
# Model representing a Language (e.g. English, French, Japanese, etc.)
name = models.CharField(max_length=200,
help_text="Enter the movie's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Movie(models.Model):
# Model representing a movie (but not a specific copy of a movie).
title = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
imdb_link = models.CharField('IMDB Link', max_length=100, blank=True, null=True, help_text='For example, here is <a target="_blank" '
'href="https://www.imdb.com/title/tt3322364/">Concussion\'s link</a>.')
# Foreign Key used because movie can only have one director, but directors can have multiple movies
# Director as a string rather than object because it hasn't been declared yet in file.
director = models.ForeignKey('Director', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True, blank=True)
summary = models.TextField(max_length=5000, null=True, blank=True, help_text="Enter a brief description of the movie. This field will \
be overwritten if given a valid IMDB id and left blank.")
# Genre class has already been defined so we can specify the object above.
genre = models.ForeignKey('Genre', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten if given \
a valid IMDB id and left blank.')
tags = TaggableManager(blank=True)
year = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
thumbnail = models.CharField('Thumbnail', max_length=500, blank=True, null=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
file = S3DirectField(dest='videos', blank=True, null=True)
ads = models.CharField('Google VAMP Ads Link', max_length=1000, blank=True, null=True, help_text="""For example, here is a <a target="_blank"
href="https://pubads.g.doubleclick.net/gampad/ads?sz=640x480&iu=/124319096/external/ad_rule_samples&ciu_szs=300x250&ad_rule=1&impl=s&gdfp_req=1&env=vp&output=vmap&unviewed_position_start=1&cust_params=deployment%3Ddevsite%26sample_ar%3Dpremidpost&cmsid=496&vid=short_onecue&correlator=">
Google VAMP example link</a>.""")
duration = models.CharField(max_length=200)
fps = models.CharField(max_length=200)
dimensions = models.CharField(max_length=200)
max_num_find_articles = models.IntegerField('Max number of research articles', default=5, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text="Default number is 5.")
found_articles = models.TextField('Found Research Articles', max_length=5000, null=True, blank=True, help_text="HTML list output of found research \
articles on Google Scholar. Clear the text to find new articles.")
ratings = GenericRelation(Rating, related_query_name='movie-rating')
class Meta:
ordering = ['title', 'director']
def display_genre(self):
# Creates a string for the Genre. This is required to display genre in Admin.
        return self.genre.name if self.genre else ''
display_genre.short_description = 'Genre'
def get_absolute_url(self):
# Returns the url to access a particular movie instance.
return reverse('movie-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.title if self.title else ''
def get_movie_url(self):
return (self.file).replace(" ", "+")
def get_video_stats(self):
#filename = str(settings.BASE_DIR) + self.file.url
clip = VideoFileClip(self.get_movie_url())
duration = str(datetime.timedelta(seconds=round(clip.duration)))
fps = clip.fps
width, height = clip.size
return [duration, fps, (width, height)]
def get_imdb_stats(self):
ia = imdb.IMDb()
reg = re.compile(r'^.*(ch|co|ev|nm|tt)(\d{7}\d*)\/?$')
id_found = reg.match(self.imdb_link)
if id_found:
movie = ia.get_movie(id_found.group(2))
return [movie['year'], movie['directors'][0], movie['genres'][0], movie['title'], movie.get('plot')[0], movie['cover url']]
else:
raise Exception(f"No imdb match found for imdb link: {self.imdb_link}")
def get_research_articles(self, max_num):
# Search string for Google Scholar to look for.
# e.g. "{self.title} {self.director.name}" would equate to "Concussion Peter Landesman" for the movie Concussion.
search_str = f'{self.title} {self.director.name}'
        output = ""
try:
pg = ProxyGenerator()
ip = os.environ['PROXY_IP']
pg.SingleProxy(http = ip, https = ip)
            scholarly.use_proxy(pg)
search_query = scholarly.search_pubs(search_str)
for i in range(0, max_num):
curr = next(search_query)
# For debugging purposes, this is how you pretty print the search query's contents.
#scholarly.pprint(curr)
# Grab the title of the article.
title = curr['bib']['title']
# Begin our formatted html output for each found research article.
output += f"""
<li>
"""
# See if a publication url (i.e. curr['pub_url']) exists. If so, add an external link to it.
if 'pub_url' in curr:
output += f"""
<a target='_blank' href=\"{curr['pub_url']}\">{title}</a>
"""
else:
output += f"""
{title}
"""
output += f"""
<br>
"""
# Writes the abstract (i.e.curr['bib']['abstract']) if it exists.
if 'bib' in curr and 'abstract' in curr['bib']:
output += f"""
<p>{curr['bib']['abstract']}</p>
"""
output += f"""
</li>
"""
except Exception as e:
pass
# Useful for seeing errors in your terminal. Replace pass with the print statement below.
#print(sys.stderr, e)
return output
def save(self, *args, **kwargs):
|
import uuid # Required for unique movie instances
from datetime import date
class Director(models.Model):
# Model representing a director.
name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
class Meta:
ordering = ['name']
def get_absolute_url(self):
# Returns the url to access a particular director instance.
return reverse('director-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.name
from djstripe.models import Customer, Subscription
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
USER_TYPE_CHOICES = (
(1, 'Free - access independent films and media outlets'),
(2, 'Low Subscription - access to Hollywood films'),
(3, 'Premium Subscription - access to A-list movies')
)
user_type = models.IntegerField('Subscription Tier', default=1, choices=USER_TYPE_CHOICES)
# Assigns a Stripe customer and subscription to a User.
customer = models.ForeignKey(Customer, null=True, blank=True, on_delete=models.SET_NULL)
subscription = models.ForeignKey(Subscription, null=True, blank=True, on_delete=models.SET_NULL)
def get_user_type(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type)
def get_user_type_short(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type).split()[0]
class Contact(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
subject = models.CharField(max_length=100)
body = models.TextField()
def __str__(self):
return self.name | super(Movie, self).save(*args, **kwargs)
        # Custom save: re-fetch the saved instance and fill in any blank fields (video stats, IMDb data, research articles).
orig = Movie.objects.get(id=self.id)
fields_to_update = []
try:
specs = self.get_video_stats()
orig.duration = specs[0]
orig.fps = specs[1]
orig.dimensions = specs[2]
fields_to_update.extend(['duration', 'fps', 'dimensions'])
except Exception as e:
pass
try:
imdb_stats = self.get_imdb_stats()
orig.title = imdb_stats[3]
orig.year = imdb_stats[0]
orig.thumbnail = imdb_stats[5]
# Checks if a director name already exists. If not, create and assign to the movie.
director = None
try:
directors = Director.objects.all()
for d in directors:
if (str(d) == str(imdb_stats[1])):
director = d
break
orig.director = director if director is not None else Director.objects.create(name=imdb_stats[1])
except:
orig.director = Director.objects.create(name=imdb_stats[1])
# Checks if a genre name already exists. If not, create and assign to the movie.
genre = None
try:
genres = Genre.objects.all()
for g in genres:
if (str(g) == str(imdb_stats[2])):
genre = g
break
genre = genre if genre is not None else Genre.objects.create(name=imdb_stats[2])
except:
genre = Genre.objects.create(name=imdb_stats[2])
orig.genre = genre
# Updates values only if their fields are left blank by the admin.
if not self.year:
fields_to_update.append('year')
if not self.genre:
fields_to_update.append('genre')
if not self.title:
fields_to_update.append('title')
if not self.director:
fields_to_update.append('director')
if not self.summary:
orig.summary = imdb_stats[4]
fields_to_update.append('summary')
if not self.thumbnail:
fields_to_update.append('thumbnail')
except Exception as e:
pass
# Searches for research articles by using a single proxy for a Google Scholar search query.
if not self.found_articles:
orig.found_articles = orig.get_research_articles(self.max_num_find_articles)
fields_to_update.append('found_articles')
super(Movie, orig).save(update_fields=fields_to_update) | identifier_body |
models.py | from django.db import models
from django.urls import reverse # To generate URLS by reversing URL patterns
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
# Imports Django's default User system.
from django.contrib.auth.models import User
# Movie rating imports. Docs: https://django-star-ratings.readthedocs.io/en/latest/?badge=latest/
from django.contrib.contenttypes.fields import GenericRelation
from star_ratings.models import Rating
# Amazon AWS S3 import.
from s3direct.fields import S3DirectField
# IMDbPY import. Docs: https://imdbpy.readthedocs.io/en/latest/
import imdb
# Video stats with MoviePy. Docs: https://zulko.github.io/moviepy/index.html
from moviepy.editor import VideoFileClip
# Google Scholar import. Docs: https://scholarly.readthedocs.io/en/latest/?badge=latest
from scholarly import scholarly, ProxyGenerator
# Django taggit import for managing comma-separated tags. Docs: https://django-taggit.readthedocs.io/en/latest/
from taggit.managers import TaggableManager
import sys, re, os, datetime
class Genre(models.Model):
# Model representing a movie genre (e.g. Science Fiction, Non Fiction).
name = models.CharField(
max_length=200,
help_text="Enter a movie genre (e.g. Science Fiction, French Poetry etc.)"
)
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Language(models.Model):
# Model representing a Language (e.g. English, French, Japanese, etc.)
name = models.CharField(max_length=200,
help_text="Enter the movie's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Movie(models.Model):
# Model representing a movie (but not a specific copy of a movie).
title = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
imdb_link = models.CharField('IMDB Link', max_length=100, blank=True, null=True, help_text='For example, here is <a target="_blank" '
'href="https://www.imdb.com/title/tt3322364/">Concussion\'s link</a>.')
# Foreign Key used because movie can only have one director, but directors can have multiple movies
# Director as a string rather than object because it hasn't been declared yet in file.
director = models.ForeignKey('Director', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True, blank=True)
summary = models.TextField(max_length=5000, null=True, blank=True, help_text="Enter a brief description of the movie. This field will \
be overwritten if given a valid IMDB id and left blank.")
# Genre class has already been defined so we can specify the object above.
genre = models.ForeignKey('Genre', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten if given \
a valid IMDB id and left blank.')
tags = TaggableManager(blank=True)
year = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
thumbnail = models.CharField('Thumbnail', max_length=500, blank=True, null=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
file = S3DirectField(dest='videos', blank=True, null=True)
ads = models.CharField('Google VAMP Ads Link', max_length=1000, blank=True, null=True, help_text="""For example, here is a <a target="_blank"
href="https://pubads.g.doubleclick.net/gampad/ads?sz=640x480&iu=/124319096/external/ad_rule_samples&ciu_szs=300x250&ad_rule=1&impl=s&gdfp_req=1&env=vp&output=vmap&unviewed_position_start=1&cust_params=deployment%3Ddevsite%26sample_ar%3Dpremidpost&cmsid=496&vid=short_onecue&correlator=">
Google VAMP example link</a>.""")
duration = models.CharField(max_length=200) | max_num_find_articles = models.IntegerField('Max number of research articles', default=5, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text="Default number is 5.")
found_articles = models.TextField('Found Research Articles', max_length=5000, null=True, blank=True, help_text="HTML list output of found research \
articles on Google Scholar. Clear the text to find new articles.")
ratings = GenericRelation(Rating, related_query_name='movie-rating')
class Meta:
ordering = ['title', 'director']
def display_genre(self):
# Creates a string for the Genre. This is required to display genre in Admin.
        return self.genre.name if self.genre else ''
display_genre.short_description = 'Genre'
def get_absolute_url(self):
# Returns the url to access a particular movie instance.
return reverse('movie-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.title if self.title else ''
def get_movie_url(self):
return (self.file).replace(" ", "+")
def get_video_stats(self):
#filename = str(settings.BASE_DIR) + self.file.url
clip = VideoFileClip(self.get_movie_url())
duration = str(datetime.timedelta(seconds=round(clip.duration)))
fps = clip.fps
width, height = clip.size
return [duration, fps, (width, height)]
def get_imdb_stats(self):
ia = imdb.IMDb()
reg = re.compile(r'^.*(ch|co|ev|nm|tt)(\d{7}\d*)\/?$')
id_found = reg.match(self.imdb_link)
if id_found:
movie = ia.get_movie(id_found.group(2))
return [movie['year'], movie['directors'][0], movie['genres'][0], movie['title'], movie.get('plot')[0], movie['cover url']]
else:
raise Exception(f"No imdb match found for imdb link: {self.imdb_link}")
def get_research_articles(self, max_num):
# Search string for Google Scholar to look for.
# e.g. "{self.title} {self.director.name}" would equate to "Concussion Peter Landesman" for the movie Concussion.
search_str = f'{self.title} {self.director.name}'
        output = ""
try:
pg = ProxyGenerator()
ip = os.environ['PROXY_IP']
pg.SingleProxy(http = ip, https = ip)
            scholarly.use_proxy(pg)
search_query = scholarly.search_pubs(search_str)
for i in range(0, max_num):
curr = next(search_query)
# For debugging purposes, this is how you pretty print the search query's contents.
#scholarly.pprint(curr)
# Grab the title of the article.
title = curr['bib']['title']
# Begin our formatted html output for each found research article.
output += f"""
<li>
"""
# See if a publication url (i.e. curr['pub_url']) exists. If so, add an external link to it.
if 'pub_url' in curr:
output += f"""
<a target='_blank' href=\"{curr['pub_url']}\">{title}</a>
"""
else:
output += f"""
{title}
"""
output += f"""
<br>
"""
# Writes the abstract (i.e.curr['bib']['abstract']) if it exists.
if 'bib' in curr and 'abstract' in curr['bib']:
output += f"""
<p>{curr['bib']['abstract']}</p>
"""
output += f"""
</li>
"""
except Exception as e:
pass
# Useful for seeing errors in your terminal. Replace pass with the print statement below.
#print(sys.stderr, e)
return output
def save(self, *args, **kwargs):
super(Movie, self).save(*args, **kwargs)
        # Custom save: re-fetch the saved instance and fill in any blank fields (video stats, IMDb data, research articles).
orig = Movie.objects.get(id=self.id)
fields_to_update = []
try:
specs = self.get_video_stats()
orig.duration = specs[0]
orig.fps = specs[1]
orig.dimensions = specs[2]
fields_to_update.extend(['duration', 'fps', 'dimensions'])
except Exception as e:
pass
try:
imdb_stats = self.get_imdb_stats()
orig.title = imdb_stats[3]
orig.year = imdb_stats[0]
orig.thumbnail = imdb_stats[5]
# Checks if a director name already exists. If not, create and assign to the movie.
director = None
try:
directors = Director.objects.all()
for d in directors:
if (str(d) == str(imdb_stats[1])):
director = d
break
orig.director = director if director is not None else Director.objects.create(name=imdb_stats[1])
except:
orig.director = Director.objects.create(name=imdb_stats[1])
# Checks if a genre name already exists. If not, create and assign to the movie.
genre = None
try:
genres = Genre.objects.all()
for g in genres:
if (str(g) == str(imdb_stats[2])):
genre = g
break
genre = genre if genre is not None else Genre.objects.create(name=imdb_stats[2])
except:
genre = Genre.objects.create(name=imdb_stats[2])
orig.genre = genre
# Updates values only if their fields are left blank by the admin.
if not self.year:
fields_to_update.append('year')
if not self.genre:
fields_to_update.append('genre')
if not self.title:
fields_to_update.append('title')
if not self.director:
fields_to_update.append('director')
if not self.summary:
orig.summary = imdb_stats[4]
fields_to_update.append('summary')
if not self.thumbnail:
fields_to_update.append('thumbnail')
except Exception as e:
pass
# Searches for research articles by using a single proxy for a Google Scholar search query.
if not self.found_articles:
orig.found_articles = orig.get_research_articles(self.max_num_find_articles)
fields_to_update.append('found_articles')
super(Movie, orig).save(update_fields=fields_to_update)
import uuid # Required for unique movie instances
from datetime import date
class Director(models.Model):
# Model representing a director.
name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
class Meta:
ordering = ['name']
def get_absolute_url(self):
# Returns the url to access a particular director instance.
return reverse('director-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.name
from djstripe.models import Customer, Subscription
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
USER_TYPE_CHOICES = (
(1, 'Free - access independent films and media outlets'),
(2, 'Low Subscription - access to Hollywood films'),
(3, 'Premium Subscription - access to A-list movies')
)
user_type = models.IntegerField('Subscription Tier', default=1, choices=USER_TYPE_CHOICES)
# Assigns a Stripe customer and subscription to a User.
customer = models.ForeignKey(Customer, null=True, blank=True, on_delete=models.SET_NULL)
subscription = models.ForeignKey(Subscription, null=True, blank=True, on_delete=models.SET_NULL)
def get_user_type(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type)
def get_user_type_short(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type).split()[0]
class Contact(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
subject = models.CharField(max_length=100)
body = models.TextField()
def __str__(self):
return self.name | fps = models.CharField(max_length=200)
dimensions = models.CharField(max_length=200)
| random_line_split |
models.py | from django.db import models
from django.urls import reverse # To generate URLS by reversing URL patterns
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
# Imports Django's default User system.
from django.contrib.auth.models import User
# Movie rating imports. Docs: https://django-star-ratings.readthedocs.io/en/latest/?badge=latest/
from django.contrib.contenttypes.fields import GenericRelation
from star_ratings.models import Rating
# Amazon AWS S3 import.
from s3direct.fields import S3DirectField
# IMDbPY import. Docs: https://imdbpy.readthedocs.io/en/latest/
import imdb
# Video stats with MoviePy. Docs: https://zulko.github.io/moviepy/index.html
from moviepy.editor import VideoFileClip
# Google Scholar import. Docs: https://scholarly.readthedocs.io/en/latest/?badge=latest
from scholarly import scholarly, ProxyGenerator
# Django taggit import for managing comma-separated tags. Docs: https://django-taggit.readthedocs.io/en/latest/
from taggit.managers import TaggableManager
import sys, re, os, datetime
class Genre(models.Model):
# Model representing a movie genre (e.g. Science Fiction, Non Fiction).
name = models.CharField(
max_length=200,
help_text="Enter a movie genre (e.g. Science Fiction, French Poetry etc.)"
)
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Language(models.Model):
# Model representing a Language (e.g. English, French, Japanese, etc.)
name = models.CharField(max_length=200,
help_text="Enter the movie's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Movie(models.Model):
# Model representing a movie (but not a specific copy of a movie).
title = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
imdb_link = models.CharField('IMDB Link', max_length=100, blank=True, null=True, help_text='For example, here is <a target="_blank" '
'href="https://www.imdb.com/title/tt3322364/">Concussion\'s link</a>.')
# Foreign Key used because movie can only have one director, but directors can have multiple movies
# Director as a string rather than object because it hasn't been declared yet in file.
director = models.ForeignKey('Director', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True, blank=True)
summary = models.TextField(max_length=5000, null=True, blank=True, help_text="Enter a brief description of the movie. This field will \
be overwritten if given a valid IMDB id and left blank.")
# Genre class has already been defined so we can specify the object above.
genre = models.ForeignKey('Genre', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten if given \
a valid IMDB id and left blank.')
tags = TaggableManager(blank=True)
year = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
thumbnail = models.CharField('Thumbnail', max_length=500, blank=True, null=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
file = S3DirectField(dest='videos', blank=True, null=True)
ads = models.CharField('Google VAMP Ads Link', max_length=1000, blank=True, null=True, help_text="""For example, here is a <a target="_blank"
href="https://pubads.g.doubleclick.net/gampad/ads?sz=640x480&iu=/124319096/external/ad_rule_samples&ciu_szs=300x250&ad_rule=1&impl=s&gdfp_req=1&env=vp&output=vmap&unviewed_position_start=1&cust_params=deployment%3Ddevsite%26sample_ar%3Dpremidpost&cmsid=496&vid=short_onecue&correlator=">
Google VAMP example link</a>.""")
duration = models.CharField(max_length=200)
fps = models.CharField(max_length=200)
dimensions = models.CharField(max_length=200)
max_num_find_articles = models.IntegerField('Max number of research articles', default=5, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text="Default number is 5.")
found_articles = models.TextField('Found Research Articles', max_length=5000, null=True, blank=True, help_text="HTML list output of found research \
articles on Google Scholar. Clear the text to find new articles.")
ratings = GenericRelation(Rating, related_query_name='movie-rating')
class Meta:
ordering = ['title', 'director']
def display_genre(self):
# Creates a string for the Genre. This is required to display genre in Admin.
        return self.genre.name if self.genre else ''
display_genre.short_description = 'Genre'
def get_absolute_url(self):
# Returns the url to access a particular movie instance.
return reverse('movie-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.title if self.title else ''
def get_movie_url(self):
return (self.file).replace(" ", "+")
def get_video_stats(self):
#filename = str(settings.BASE_DIR) + self.file.url
clip = VideoFileClip(self.get_movie_url())
duration = str(datetime.timedelta(seconds=round(clip.duration)))
fps = clip.fps
width, height = clip.size
return [duration, fps, (width, height)]
def get_imdb_stats(self):
ia = imdb.IMDb()
reg = re.compile(r'^.*(ch|co|ev|nm|tt)(\d{7}\d*)\/?$')
id_found = reg.match(self.imdb_link)
if id_found:
movie = ia.get_movie(id_found.group(2))
return [movie['year'], movie['directors'][0], movie['genres'][0], movie['title'], movie.get('plot')[0], movie['cover url']]
else:
raise Exception(f"No imdb match found for imdb link: {self.imdb_link}")
def get_research_articles(self, max_num):
# Search string for Google Scholar to look for.
# e.g. "{self.title} {self.director.name}" would equate to "Concussion Peter Landesman" for the movie Concussion.
search_str = f'{self.title} {self.director.name}'
        output = ""
try:
pg = ProxyGenerator()
ip = os.environ['PROXY_IP']
pg.SingleProxy(http = ip, https = ip)
            scholarly.use_proxy(pg)
search_query = scholarly.search_pubs(search_str)
for i in range(0, max_num):
curr = next(search_query)
# For debugging purposes, this is how you pretty print the search query's contents.
#scholarly.pprint(curr)
# Grab the title of the article.
title = curr['bib']['title']
# Begin our formatted html output for each found research article.
output += f"""
<li>
"""
# See if a publication url (i.e. curr['pub_url']) exists. If so, add an external link to it.
if 'pub_url' in curr:
output += f"""
<a target='_blank' href=\"{curr['pub_url']}\">{title}</a>
"""
else:
output += f"""
{title}
"""
output += f"""
<br>
"""
# Writes the abstract (i.e.curr['bib']['abstract']) if it exists.
if 'bib' in curr and 'abstract' in curr['bib']:
output += f"""
<p>{curr['bib']['abstract']}</p>
"""
output += f"""
</li>
"""
except Exception as e:
pass
# Useful for seeing errors in your terminal. Replace pass with the print statement below.
#print(sys.stderr, e)
return output
def | (self, *args, **kwargs):
super(Movie, self).save(*args, **kwargs)
        # Custom save: re-fetch the saved instance and fill in any blank fields (video stats, IMDb data, research articles).
orig = Movie.objects.get(id=self.id)
fields_to_update = []
try:
specs = self.get_video_stats()
orig.duration = specs[0]
orig.fps = specs[1]
orig.dimensions = specs[2]
fields_to_update.extend(['duration', 'fps', 'dimensions'])
except Exception as e:
pass
try:
imdb_stats = self.get_imdb_stats()
orig.title = imdb_stats[3]
orig.year = imdb_stats[0]
orig.thumbnail = imdb_stats[5]
# Checks if a director name already exists. If not, create and assign to the movie.
director = None
try:
directors = Director.objects.all()
for d in directors:
if (str(d) == str(imdb_stats[1])):
director = d
break
orig.director = director if director is not None else Director.objects.create(name=imdb_stats[1])
except:
orig.director = Director.objects.create(name=imdb_stats[1])
# Checks if a genre name already exists. If not, create and assign to the movie.
genre = None
try:
genres = Genre.objects.all()
for g in genres:
if (str(g) == str(imdb_stats[2])):
genre = g
break
genre = genre if genre is not None else Genre.objects.create(name=imdb_stats[2])
except:
genre = Genre.objects.create(name=imdb_stats[2])
orig.genre = genre
# Updates values only if their fields are left blank by the admin.
if not self.year:
fields_to_update.append('year')
if not self.genre:
fields_to_update.append('genre')
if not self.title:
fields_to_update.append('title')
if not self.director:
fields_to_update.append('director')
if not self.summary:
orig.summary = imdb_stats[4]
fields_to_update.append('summary')
if not self.thumbnail:
fields_to_update.append('thumbnail')
except Exception as e:
pass
# Searches for research articles by using a single proxy for a Google Scholar search query.
if not self.found_articles:
orig.found_articles = orig.get_research_articles(self.max_num_find_articles)
fields_to_update.append('found_articles')
super(Movie, orig).save(update_fields=fields_to_update)
import uuid # Required for unique movie instances
from datetime import date
class Director(models.Model):
# Model representing a director.
name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
class Meta:
ordering = ['name']
def get_absolute_url(self):
# Returns the url to access a particular director instance.
return reverse('director-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.name
from djstripe.models import Customer, Subscription
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
USER_TYPE_CHOICES = (
(1, 'Free - access independent films and media outlets'),
(2, 'Low Subscription - access to Hollywood films'),
(3, 'Premium Subscription - access to A-list movies')
)
user_type = models.IntegerField('Subscription Tier', default=1, choices=USER_TYPE_CHOICES)
# Assigns a Stripe customer and subscription to a User.
customer = models.ForeignKey(Customer, null=True, blank=True, on_delete=models.SET_NULL)
subscription = models.ForeignKey(Subscription, null=True, blank=True, on_delete=models.SET_NULL)
def get_user_type(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type)
def get_user_type_short(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type).split()[0]
class Contact(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
subject = models.CharField(max_length=100)
body = models.TextField()
def __str__(self):
return self.name | save | identifier_name |
models.py | from django.db import models
from django.urls import reverse # To generate URLS by reversing URL patterns
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
# Imports Django's default User system.
from django.contrib.auth.models import User
# Movie rating imports. Docs: https://django-star-ratings.readthedocs.io/en/latest/?badge=latest/
from django.contrib.contenttypes.fields import GenericRelation
from star_ratings.models import Rating
# Amazon AWS S3 import.
from s3direct.fields import S3DirectField
# IMDbPY import. Docs: https://imdbpy.readthedocs.io/en/latest/
import imdb
# Video stats with MoviePy. Docs: https://zulko.github.io/moviepy/index.html
from moviepy.editor import VideoFileClip
# Google Scholar import. Docs: https://scholarly.readthedocs.io/en/latest/?badge=latest
from scholarly import scholarly, ProxyGenerator
# Django taggit import for managing comma-separated tags. Docs: https://django-taggit.readthedocs.io/en/latest/
from taggit.managers import TaggableManager
import sys, re, os, datetime
class Genre(models.Model):
# Model representing a movie genre (e.g. Science Fiction, Non Fiction).
name = models.CharField(
max_length=200,
help_text="Enter a movie genre (e.g. Science Fiction, French Poetry etc.)"
)
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Language(models.Model):
# Model representing a Language (e.g. English, French, Japanese, etc.)
name = models.CharField(max_length=200,
help_text="Enter the movie's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
# String for representing the Model object (in Admin site etc.)
return self.name
class Movie(models.Model):
# Model representing a movie (but not a specific copy of a movie).
title = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
imdb_link = models.CharField('IMDB Link', max_length=100, blank=True, null=True, help_text='For example, here is <a target="_blank" '
'href="https://www.imdb.com/title/tt3322364/">Concussion\'s link</a>.')
# Foreign Key used because movie can only have one director, but directors can have multiple movies
# Director as a string rather than object because it hasn't been declared yet in file.
director = models.ForeignKey('Director', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True, blank=True)
summary = models.TextField(max_length=5000, null=True, blank=True, help_text="Enter a brief description of the movie. This field will \
be overwritten if given a valid IMDB id and left blank.")
# Genre class has already been defined so we can specify the object above.
genre = models.ForeignKey('Genre', on_delete=models.SET_NULL, null=True, blank=True, help_text='This field will be overwritten if given \
a valid IMDB id and left blank.')
tags = TaggableManager(blank=True)
year = models.CharField(max_length=200, null=True, blank=True, help_text='This field will be overwritten if given a valid IMDB id and left blank.')
thumbnail = models.CharField('Thumbnail', max_length=500, blank=True, null=True, help_text='This field will be overwritten \
if given a valid IMDB id and left blank.')
file = S3DirectField(dest='videos', blank=True, null=True)
ads = models.CharField('Google VAMP Ads Link', max_length=1000, blank=True, null=True, help_text="""For example, here is a <a target="_blank"
href="https://pubads.g.doubleclick.net/gampad/ads?sz=640x480&iu=/124319096/external/ad_rule_samples&ciu_szs=300x250&ad_rule=1&impl=s&gdfp_req=1&env=vp&output=vmap&unviewed_position_start=1&cust_params=deployment%3Ddevsite%26sample_ar%3Dpremidpost&cmsid=496&vid=short_onecue&correlator=">
Google VAMP example link</a>.""")
duration = models.CharField(max_length=200)
fps = models.CharField(max_length=200)
dimensions = models.CharField(max_length=200)
max_num_find_articles = models.IntegerField('Max number of research articles', default=5, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text="Default number is 5.")
found_articles = models.TextField('Found Research Articles', max_length=5000, null=True, blank=True, help_text="HTML list output of found research \
articles on Google Scholar. Clear the text to find new articles.")
ratings = GenericRelation(Rating, related_query_name='movie-rating')
class Meta:
ordering = ['title', 'director']
def display_genre(self):
# Creates a string for the Genre. This is required to display genre in Admin.
        return self.genre.name if self.genre else ''
display_genre.short_description = 'Genre'
def get_absolute_url(self):
# Returns the url to access a particular movie instance.
return reverse('movie-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.title if self.title else ''
def get_movie_url(self):
return (self.file).replace(" ", "+")
def get_video_stats(self):
#filename = str(settings.BASE_DIR) + self.file.url
clip = VideoFileClip(self.get_movie_url())
duration = str(datetime.timedelta(seconds=round(clip.duration)))
fps = clip.fps
width, height = clip.size
return [duration, fps, (width, height)]
def get_imdb_stats(self):
ia = imdb.IMDb()
reg = re.compile(r'^.*(ch|co|ev|nm|tt)(\d{7}\d*)\/?$')
id_found = reg.match(self.imdb_link)
if id_found:
movie = ia.get_movie(id_found.group(2))
return [movie['year'], movie['directors'][0], movie['genres'][0], movie['title'], movie.get('plot')[0], movie['cover url']]
else:
raise Exception(f"No imdb match found for imdb link: {self.imdb_link}")
def get_research_articles(self, max_num):
# Search string for Google Scholar to look for.
# e.g. "{self.title} {self.director.name}" would equate to "Concussion Peter Landesman" for the movie Concussion.
search_str = f'{self.title} {self.director.name}'
        output = ""
try:
pg = ProxyGenerator()
ip = os.environ['PROXY_IP']
pg.SingleProxy(http = ip, https = ip)
            scholarly.use_proxy(pg)
search_query = scholarly.search_pubs(search_str)
for i in range(0, max_num):
curr = next(search_query)
# For debugging purposes, this is how you pretty print the search query's contents.
#scholarly.pprint(curr)
# Grab the title of the article.
title = curr['bib']['title']
# Begin our formatted html output for each found research article.
output += f"""
<li>
"""
# See if a publication url (i.e. curr['pub_url']) exists. If so, add an external link to it.
if 'pub_url' in curr:
output += f"""
<a target='_blank' href=\"{curr['pub_url']}\">{title}</a>
"""
else:
output += f"""
{title}
"""
output += f"""
<br>
"""
# Writes the abstract (i.e.curr['bib']['abstract']) if it exists.
if 'bib' in curr and 'abstract' in curr['bib']:
output += f"""
<p>{curr['bib']['abstract']}</p>
"""
output += f"""
</li>
"""
except Exception as e:
pass
# Useful for seeing errors in your terminal. Replace pass with the print statement below.
#print(sys.stderr, e)
return output
def save(self, *args, **kwargs):
super(Movie, self).save(*args, **kwargs)
        # Custom save: re-fetch the saved instance and fill in any blank fields (video stats, IMDb data, research articles).
orig = Movie.objects.get(id=self.id)
fields_to_update = []
try:
specs = self.get_video_stats()
orig.duration = specs[0]
orig.fps = specs[1]
orig.dimensions = specs[2]
fields_to_update.extend(['duration', 'fps', 'dimensions'])
except Exception as e:
pass
try:
imdb_stats = self.get_imdb_stats()
orig.title = imdb_stats[3]
orig.year = imdb_stats[0]
orig.thumbnail = imdb_stats[5]
# Checks if a director name already exists. If not, create and assign to the movie.
director = None
try:
directors = Director.objects.all()
for d in directors:
if (str(d) == str(imdb_stats[1])):
director = d
break
orig.director = director if director is not None else Director.objects.create(name=imdb_stats[1])
except:
orig.director = Director.objects.create(name=imdb_stats[1])
# Checks if a genre name already exists. If not, create and assign to the movie.
genre = None
try:
genres = Genre.objects.all()
for g in genres:
if (str(g) == str(imdb_stats[2])):
genre = g
break
genre = genre if genre is not None else Genre.objects.create(name=imdb_stats[2])
except:
genre = Genre.objects.create(name=imdb_stats[2])
orig.genre = genre
# Updates values only if their fields are left blank by the admin.
if not self.year:
fields_to_update.append('year')
if not self.genre:
fields_to_update.append('genre')
if not self.title:
fields_to_update.append('title')
if not self.director:
fields_to_update.append('director')
if not self.summary:
orig.summary = imdb_stats[4]
fields_to_update.append('summary')
if not self.thumbnail:
fields_to_update.append('thumbnail')
except Exception as e:
pass
# Searches for research articles by using a single proxy for a Google Scholar search query.
if not self.found_articles:
|
super(Movie, orig).save(update_fields=fields_to_update)
import uuid # Required for unique movie instances
from datetime import date
class Director(models.Model):
# Model representing a director.
name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
class Meta:
ordering = ['name']
def get_absolute_url(self):
# Returns the url to access a particular director instance.
return reverse('director-detail', args=[str(self.id)])
def __str__(self):
# String for representing the Model object.
return self.name
from djstripe.models import Customer, Subscription
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
USER_TYPE_CHOICES = (
(1, 'Free - access independent films and media outlets'),
(2, 'Low Subscription - access to Hollywood films'),
(3, 'Premium Subscription - access to A-list movies')
)
user_type = models.IntegerField('Subscription Tier', default=1, choices=USER_TYPE_CHOICES)
# Assigns a Stripe customer and subscription to a User.
customer = models.ForeignKey(Customer, null=True, blank=True, on_delete=models.SET_NULL)
subscription = models.ForeignKey(Subscription, null=True, blank=True, on_delete=models.SET_NULL)
def get_user_type(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type)
def get_user_type_short(self):
return dict(self.USER_TYPE_CHOICES).get(self.user_type).split()[0]
class Contact(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
subject = models.CharField(max_length=100)
body = models.TextField()
def __str__(self):
return self.name | orig.found_articles = orig.get_research_articles(self.max_num_find_articles)
fields_to_update.append('found_articles') | conditional_block |
main.py | from dataclasses import dataclass
from scipy.integrate import solve_ivp, simps
from functools import partial
from typing import List, Iterable, Callable, Tuple
from numpy import exp, ndarray, sqrt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from consts import *
import consts
τ = 2 * np.pi
i = complex(0, 1)
# 2020-11-15
"""
One of your goals is to figure out if you can use hydrogen (1d?) WFs as basis functions to create
arbitrary solns to the Schrodinger equation, thereby making chemistry simulation and modeling
much more computationally efficient.
It appears that for Hydrogen atoms, you can use linear combinations of 1D WFs as basis functions
in 2 and 3d by choosing the right coefficients, and the right modifier fn (sin, cos etc) across
θ and φ to apply to 2 and 3d situations.
You need to verify that this is correct and quantify. A challenge is finding accurate 2D orbitals
to compare your results to, and in visualizing and/or quantifying your 3D results to compare
to real results in 3d.
In parallel to verifying this, assume it's right, and try to model a 2-nucleus system. For
example, a H2 molecule. Attempt, in 1D, to find a combination of H atomic orbitals (perhaps
offset in x) that create the H2 molecular orbitals. These orbitals you're attempting to
match can be taken from real data, or by integrating. (May need to break up integration
into three areas, to avoid singularities at each nucleus).
"""
@dataclass
class Hydrogen3d:
"""A Hydrogen 3d superposition"""
# todo: Or is it n = 1, 3, 5...
# coeffs: List[complex] # Positive coefficients: n = 0, 1, 2, 3...
n: int
l: int
m: int
x: np.ndarray
components: List[np.ndarray]
def __init__(self, coeffs: List[complex]):
self.coeffs = coeffs
self.components = []
# n = 1 # todo: Only odd 1d coeffs for now.
n = 0 # todo: Only odd 1d coeffs for now.
for c in self.coeffs:
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
# if n == 1:
if n == 0:
self.x = x
self.components.append(c * ψ)
# n += 2
n += 1
def value(self, r: float, θ: float, φ: float) -> complex:
"""Get a single value."""
result = 0
for comp in self.components:
result += np.interp([r], self.x, comp)[0]
return result
def value_comp(self, x: float, j: int) -> complex:
"""Get a single value, from a specific component."""
return np.interp([x], self.x, self.components[j])[0]
def plot(self, range_: Tuple[float, float] = (-20, 20), shift: float = 0., size: int = 10_000, show: bool = True) -> None:
ψ = np.zeros(len(self.x), dtype=np.complex128)
for ψi in self.components:
ψ += ψi
# todo: DRY with other series'
plt.plot(self.x + shift, ψ.real)
# plt.plot(self.x, ψ.imag)
# plt.xlim(range_[0], range_[1])
plt.xlim(0, range_[1])
if show:
plt.show()
@dataclass
class Pt:
x: float
y: float
z: float
@dataclass
class Vec:
x: float
y: float
z: float
def __add__(self, other: 'Vec') -> 'Vec':
return Vec(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: 'Vec') -> 'Vec':
return Vec(self.x - other.x, self.y - other.y, self.z - other.z)
def scalar_mul(self, val: float) -> 'Vec':
return Vec(val * self.x, val * self.y, val * self.z)
def length(self) -> float:
return sqrt(self.x**2 + self.y**2 + self.z**2)
matplotlib.use("Qt5Agg")
# import seaborn as sns
# import plotly
# import plotly.graph_objects as go
# A global state var
V_prev: Callable = lambda sx: 0
# Looking into matrix mechanics, and Feynman path integral approaches too
# orbitals are characterized (in simple cases) by quantum numbers n, l, and m, corresponding to
# energy, angular momentum, and magnetic (ang momentum vec component)
# spin?
# Do electrons (electrically) interact with themselves?
# Breaking up a numerical problem into a number of solvable analytic ones? E.g. set up
# an arbitrary V as a series of step Vs which have analytic solns
# Free variables: 2? Energy, and ψ_p_0(ψ). Eg we can set ψ to what we wish, find the ψ_p that
# works with it (and the E set), then normalize.
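# Sketch of the "series of step potentials" idea above: approximate an arbitrary
# V(x) by a piecewise-constant function on given bin edges. Illustrative only;
# nothing below calls it.
def step_potential(V: Callable, edges: ndarray) -> Callable:
    """Return a piecewise-constant approximation of V, sampled at bin centers."""
    centers = (edges[:-1] + edges[1:]) / 2
    values = np.array([V(c) for c in centers])
    def V_step(x: float) -> float:
        idx = int(np.clip(np.searchsorted(edges, x) - 1, 0, values.size - 1))
        return float(values[idx])
    return V_step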
PRECISION = 100_000
@dataclass
class Nucleus:
n_prot: float
n_neut: float
sx: float
vx: float
def mass(self):
return self.n_prot * m_p + self.n_neut * m_n
def charge(self):
# Charge from protons only.
return self.n_prot * e
def nuc_pot(nuclei: Iterable[Nucleus], sx: float) -> float:
result = 0
    for nucleus in nuclei:
        # Coulomb potential
        result -= e / abs(nucleus.sx - sx)
return result
def ti_schrod_rhs(
E: float, V: Callable, x: float, y: Tuple[complex, complex]
) -> Tuple[complex, complex]:
"""
d²ψ/dx² = 2m/ħ² * (V(x) - E)ψ
"""
ψ, φ = y
ψ_p = φ
φ_p = 2 * m_e / ħ ** 2 * (V(x) - E) * ψ
return ψ_p, φ_p
def solve(E: float, V: Callable, ψ0: float, ψ_p0: float, x_span: Tuple[float, float]):
"""
Calculate the wave function for electrons in an arbitrary potential, at a single snapshot
in time.
"""
rhs = partial(ti_schrod_rhs, E, V)
return solve_ivp(
rhs, x_span, (ψ0, ψ_p0), t_eval=np.linspace(x_span[0], x_span[1], PRECISION)
)
def h_static(E: float, normalize=True) -> Tuple[ndarray, ndarray]:
ψ0 = 0
ψ_p0 = 0.1
x_span = (-100, 0.0000001)
V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0)])
# V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0), Nucleus(1, 0, 1, 0)])
# Left and right of the x=0 coulomb singularity. Assume odd solution around x=0.
soln_orig = solve(E, V_elec, ψ0, ψ_p0, x_span)
soln_left = soln_orig.y[0]
soln_right = np.flip(soln_left)
soln = np.concatenate([soln_left, -soln_right])
x = np.concatenate([soln_orig.t, np.flip(-soln_orig.t)])
if normalize:
norm = simps(np.conj(soln) * soln, x=x)
return x, soln / norm ** 0.5
return x, soln
def h_static_3d(E: float, normalize=False) -> Tuple[ndarray, ndarray]:
"""We create the radial part of the 3d version from the "radial density" information."""
# todo: Why don't we get a result if we fail to normalize here?
# Normalize the radial part, not the whole thing; this gives us reasonable values,
# without dealing with the asymptote near the origin.
r, ψ = h_static(E, normalize=True)
ψ = sqrt(ψ**2 / r**2)
# Post-process by flipping between 0s, to make up for info lost
# during square root.
ε = 1e-3 # thresh for hit a 0.
ψ_processed = np.copy(ψ)
in_inversion = False
slope_neg_prev = True
for j in range(ψ.size):
if j == 0: # We use slopes; don't mis-index
ψ_processed[j] = ψ[j]
continue
slope_neg = ψ[j] < ψ[j-1]
# Just started or ended an inversion.
if ψ[j] <= ε and slope_neg != slope_neg_prev:
in_inversion = not in_inversion
if in_inversion:
ψ_processed[j] = -ψ[j]
else:
ψ_processed[j] = ψ[j]
slope_neg_prev = slope_neg
if normalize:
norm = simps(np.conj(ψ_processed) * ψ_processed, x=r)
return r, ψ_processed / norm ** 0.5
return r, ψ_processed
def plot_h_static(n: int = 1):
"""This 1d model represents the radial component of the wave function;
ie all of a 2d shell condensed down 2 dimensions to a point."""
# Negative E implies bound state; positive scattering.
# ψ_p0 should be 0 for continuity across the origin.
# E should be a whittaker energy, ie -1/2, -2/9, -1/8, -.08 etc
# Only odd states (n = 1, 3, 5 etc) correspond to 3d H atom.
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
ψ = ψ**2
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.show()
def plot_h_static_3d(n: int = 1):
"""Like H static, but perhaps this is the right model for 3D."""
# todo: Major DRY
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.ylim(-0.02, 0.02)
plt.show()
def check_wf_1d(x: ndarray, ψ: ndarray, E: float) -> ndarray:
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives.
The result is a percent diff.
ψ = -1/2ψ'' / (E-V)
ψ = -1/2ψ'' / (E-1/abs(r))
or, reversed:
ψ'' = -2(E - 1/abs(r))ψ
"""
# todo: Center it up? This approach lags.
# ψ_pp = np.diff(np.diff(ψ))
dx = (x[-1] - x[0]) / x.size
    ψ_pp = np.diff(np.diff(ψ)) / dx**2  # second finite difference scales with dx**2
ψ_pp = np.append(ψ_pp, np.array([0, 0])) # make the lengths match
ψ_pp_ideal = -2 * (E - 1/np.abs(x)) * ψ
# plt.plot(x, ψ)
# plt.plot(x, ψ_pp)
# plt.xlim(0, 10)
# plt.show()
    # For now, assume a single proton in the nucleus, at x=0.
ψ_ideal = -1/2 * ψ_pp / (E - 1/np.abs(x))
# plt.plot(x, ψ_ideal)
# plt.plot(x, ψ)
# plt.xlim(0, 10)
# plt.show()
plt.plot(x, ψ)
# plt.plot(x, ψ_pp_ideal)
plt.xlim(0, 10)
plt.show()
# result = (ψ - ψ_ideal) / ψ_ideal
result = (ψ_pp - ψ_pp_ideal) / ψ_pp_ideal
# plt.plot(x, result)
# plt.xlim(0, 10)
# plt.show()
return result
# def check_wf(ψ: Callable[(float, float), ]):
def check_wf_2d(ψ: ndarray):
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives."""
pass
def run_check():
n = 1
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
print(check_wf_1d(x, ψ, E))
def calc_energy(n: int) -> float:
"""Numerically calculate the energy of a wave function generated
by `h_static`. For n=1, we'd like to see if we can back-calculate E = -1/2."""
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
# Calculate potential between the e- field and the nucleus point by integrating.
# todo: Let's do this manually first, then try to apply a scipy.integrate approach.
dx = 1
result = 0
ψ2 = np.conj(ψ) * ψ
sample_pts = np.arange(x[0], x[-1], dx)
for pt in sample_pts:
k = 1
Q = 1
V = k * Q / x
q = 1
E = V * q * np.interp([pt], x, ψ2)[0]
result += E / dx
return result
def h2_potential(dist: float) -> float:
"""Given a distance, calculate the potential energy between
2 n=1 S orbital hydrogen atoms"""
pass
def h2_potential(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms"""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be attraction is positive potential.
n = 1
E = -2 / (n + 1) ** 2
H = Hydrogen3d([0, 1])
nuc_nuc_V = consts.k * consts.e**2 / x
dx = 2
dv = dx**3
nuc_elec_V = 0
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-10, 10, dx)
sample_pts = []
for j in range(sample_range.size):
for k_ in range(sample_range.size):
for l in range(sample_range.size):
sample_pts.append(Pt(j, k_, l))
# sample_pts = sample_range # 1d
print("num samples: ", len(sample_pts))
for pt in sample_pts:
# We need to integrate over volume, eg by splitting up into
# small cubes.
# Offset the x value by the distance between nuclei.
r = sqrt((pt.x + x)**2 + pt.y**2 + pt.z**2)
ψ_local = H.value(r, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
elec_val = np.conj(ψ_local) * ψ_local
# 2 for both interactions
nuc_elec_V -= 2 * consts.k * consts.e * elec_val / pt.x * dv
elec_elec_V = 0
e_e_factor = len(sample_pts)**2
# todo: You have a problem: WFs past the nuclei aren't attracting/repelling
# todo in the correct direction!
for pt0 in sample_pts:
pass
r0 = sqrt(pt0.x ** 2 + pt0.y ** 2 + pt0.z ** 2)
# We're dealing with S orbitals for now; no need to pass anything beyond
# radius to the `value` method.
ψ_local0 = H.value(r0, 0, 0)
for pt1 in sample_pts:
# todo: We only need to calculate wfs for each pt once!
# todo: Current approach could slow it down
r1 = sqrt(pt1.x**2 + pt1.y**2 + pt1.z**2)
ψ_local1 = H.value(r1, 0, 0)
# These are localized for each pt.
dist = sqrt((pt1.x - pt0.x)**2 + (pt1.y - pt0.y)**2 + (pt1.z - pt0.z)**2)
elec_elec_V += consts.k * consts.e * ψ_local0 * ψ_local1 * dv
print(f"NN: {nuc_nuc_V}, NE: {nuc_elec_V}, EE: {elec_elec_V} Net: {nuc_nuc_V + nuc_elec_V + elec_elec_V}")
    # potential etc from both elecs and the other proton.
def h2_force_pov(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms. In this
function, we view things from the perspective of the proton of one
of the atoms, and calculate everything else relative to it."""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be that towards our POV nucleus is positive;
# repulusion from it is negative.
H = Hydrogen3d([0, 1])
# Calculate proton-proton interaction.
nuc_nuc_V = consts.k * consts.e / x
nuc_nuc_F = Vec(nuc_nuc_V * consts.e / x, 0., 0.)
dx = 0.18
dv = dx**3
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-12.1, 12.1, dx) # Don't let 0 be a pt
# todo: Manually entering the pts we want
    # Because we're dealing with a 3rd power, we need to keep the sample pts minimal. The ones
# near the center should be more finely spaced. (Or perhaps tune the spacing dynamically
# based on changing slopes?)
# sample_range = np.array([-20, -15, -10, -9, -8.5, -8, -7.5, -7, -6.5, -6, -5.5, -5, -])
sample_pts = []
for j in sample_range:
for k_ in sample_range:
for l in sample_range:
sample_pts.append(Vec(j, k_, l))
print("num samples: ", len(sample_pts | lectron interaction, with the electron from both atoms.
# We integrate over 3d space, using cartesian coordinates.
# Calculate proton-electron interaction.
nuc_elec_F = Vec(0., 0., 0.)
for pt in sample_pts:
# We integrate over volume, eg by splitting up into small cubes
# of len dx, and volume dv.
# Dist of elec from own, and other nuc, for this pt.
# The pt is centered on the POV atom. We use these radii
# to calculate WF strength.
r_own = pt.length()
# We're dealing with (spherically-symmetrical) S orbitals; we only
# need to pass radius to the `value` method.
ψ_local_own = H.value(r_own, 0, 0)
r_other = sqrt((pt.x + x) ** 2 + pt.y ** 2 + pt.z ** 2)
ψ_local_other = H.value(r_other, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
# (r_own for both, since we're calcing the pt rel to the POV nuc)) # todo is this right??
# todo: look to r here for the error?
V_own = consts.k * consts.e * np.conj(ψ_local_own) * ψ_local_own / r_own * dv
V_other = consts.k * consts.e * np.conj(ψ_local_other) * ψ_local_other / r_other * dv
# Net elec potential.
V_combined = V_own + V_other
unit_v = pt.scalar_mul(1. / pt.length())
nuc_elec_F += unit_v.scalar_mul(V_combined * -consts.e / r_own)
print(f"NN F: {nuc_nuc_F}, NE F: {nuc_elec_F}, Net F: {nuc_nuc_F + nuc_elec_F}")
if __name__ == "__main__":
n = 1
# print(calc_energy(n))
# Real dist: 74pm = 1.4 bohrs
h2_force_pov(1.4)
# plot_h_static_3d(n)
# plot_h_static(5)
# test_fft()
# run_fft()
# reimann()
# test_taylor()
# test_fourier()
# inv_gauss()
# h2()
# run_check() | ))
print("Sample range: ", sample_range)
# Calculate nucleus-e | conditional_block |
main.py | from dataclasses import dataclass
from scipy.integrate import solve_ivp, simps
|
from numpy import exp, ndarray, sqrt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from consts import *
import consts
τ = 2 * np.pi
i = complex(0, 1)
# 2020-11-15
"""
One of your goals is to figure out if you can use hydrogen (1d?) WFs as basis functions to create
arbitrary solns to the Schrodinger equation, thereby making chemistry simulation and modeling
much more computationally efficient.
It appears that for Hydrogen atoms, you can use linear combinations of 1D WFs as basis functions
in 2 and 3d by choosing the right coefficients, and the right modifier fn (sin, cos etc) across
θ and φ to apply to 2 and 3d situations.
You need to verify that this is correct and quantify. A challenge is finding accurate 2D orbitals
to compare your results to, and in visualizing and/or quantifying your 3D results to compare
to real results in 3d.
In parallel to verifying this, assume it's right, and try to model a 2-nucleus system. For
example, a H2 molecule. Attempt, in 1D, to find a combination of H atomic orbitals (perhaps
offset in x) that create the H2 molecular orbitals. These orbitals you're attempting to
match can be taken from real data, or by integrating. (May need to break up integration
into three areas, to avoid singularities at each nucleus).
"""
@dataclass
class Hydrogen3d:
"""A Hydrogen 3d superposition"""
# todo: Or is it n = 1, 3, 5...
# coeffs: List[complex] # Positive coefficients: n = 0, 1, 2, 3...
n: int
l: int
m: int
x: np.ndarray
components: List[np.ndarray]
def __init__(self, coeffs: List[complex]):
self.coeffs = coeffs
self.components = []
# n = 1 # todo: Only odd 1d coeffs for now.
n = 0 # todo: Only odd 1d coeffs for now.
for c in self.coeffs:
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
# if n == 1:
if n == 0:
self.x = x
self.components.append(c * ψ)
# n += 2
n += 1
def value(self, r: float, θ: float, φ: float) -> complex:
"""Get a single value."""
result = 0
for comp in self.components:
result += np.interp([r], self.x, comp)[0]
return result
def value_comp(self, x: float, j: int) -> complex:
"""Get a single value, from a specific component."""
return np.interp([x], self.x, self.components[j])[0]
def plot(self, range_: Tuple[float, float] = (-20, 20), shift: float = 0., size: int = 10_000, show: bool = True) -> None:
ψ = np.zeros(len(self.x), dtype=np.complex128)
for ψi in self.components:
ψ += ψi
# todo: DRY with other series'
plt.plot(self.x + shift, ψ.real)
# plt.plot(self.x, ψ.imag)
# plt.xlim(range_[0], range_[1])
plt.xlim(0, range_[1])
if show:
plt.show()
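# Illustrative usage sketch for Hydrogen3d (not called anywhere): build a small
# superposition and sample it at a few radii. The coefficients here are arbitrary;
# Hydrogen3d does not renormalize them, so meaningful coefficients are the caller's job.
def demo_hydrogen3d():
    H = Hydrogen3d([0.8, 0.6])
    for r in (0.5, 1.0, 2.0):
        print(f"ψ(r={r}) ≈ {H.value(r, 0, 0)}")
    H.plot(show=True)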
@dataclass
class Pt:
x: float
y: float
z: float
@dataclass
class Vec:
x: float
y: float
z: float
def __add__(self, other: 'Vec') -> 'Vec':
return Vec(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: 'Vec') -> 'Vec':
return Vec(self.x - other.x, self.y - other.y, self.z - other.z)
def scalar_mul(self, val: float) -> 'Vec':
return Vec(val * self.x, val * self.y, val * self.z)
def length(self) -> float:
return sqrt(self.x**2 + self.y**2 + self.z**2)
matplotlib.use("Qt5Agg")
# import seaborn as sns
# import plotly
# import plotly.graph_objects as go
# A global state var
V_prev: Callable = lambda sx: 0
# Looking into matrix mechanics, and Feynman path integral approaches too
# orbitals are characterized (in simple cases) by quantum numbers n, l, and m, corresponding to
# energy, angular momentum, and magnetic (ang momentum vec component)
# spin?
# Do electrons (electrically) interact with themselves?
# Breaking up a numerical problem into a number of solvable analytic ones? E.g. set up
# an arbitrary V as a series of step Vs which have analytic solns
# Free variables: 2? Energy, and ψ_p_0(ψ). Eg we can set ψ to what we wish, find the ψ_p that
# works with it (and the E set), then normalize.
PRECISION = 100_000
@dataclass
class Nucleus:
n_prot: float
n_neut: float
sx: float
vx: float
def mass(self):
return self.n_prot * m_p + self.n_neut * m_n
def charge(self):
# Charge from protons only.
return self.n_prot * e
def nuc_pot(nuclei: Iterable[Nucleus], sx: float) -> float:
result = 0
    for nucleus in nuclei:
        # Coulomb potential
        result -= e / abs(nucleus.sx - sx)
return result
def ti_schrod_rhs(
E: float, V: Callable, x: float, y: Tuple[complex, complex]
) -> Tuple[complex, complex]:
"""
d²ψ/dx² = 2m/ħ² * (V(x) - E)ψ
"""
ψ, φ = y
ψ_p = φ
φ_p = 2 * m_e / ħ ** 2 * (V(x) - E) * ψ
return ψ_p, φ_p
def solve(E: float, V: Callable, ψ0: float, ψ_p0: float, x_span: Tuple[float, float]):
"""
Calculate the wave function for electrons in an arbitrary potential, at a single snapshot
in time.
"""
rhs = partial(ti_schrod_rhs, E, V)
return solve_ivp(
rhs, x_span, (ψ0, ψ_p0), t_eval=np.linspace(x_span[0], x_span[1], PRECISION)
)
def h_static(E: float, normalize=True) -> Tuple[ndarray, ndarray]:
ψ0 = 0
ψ_p0 = 0.1
x_span = (-100, 0.0000001)
V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0)])
# V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0), Nucleus(1, 0, 1, 0)])
# Left and right of the x=0 coulomb singularity. Assume odd solution around x=0.
soln_orig = solve(E, V_elec, ψ0, ψ_p0, x_span)
soln_left = soln_orig.y[0]
soln_right = np.flip(soln_left)
soln = np.concatenate([soln_left, -soln_right])
x = np.concatenate([soln_orig.t, np.flip(-soln_orig.t)])
if normalize:
norm = simps(np.conj(soln) * soln, x=x)
return x, soln / norm ** 0.5
return x, soln
def h_static_3d(E: float, normalize=False) -> Tuple[ndarray, ndarray]:
"""We create the radial part of the 3d version from the "radial density" information."""
# todo: Why don't we get a result if we fail to normalize here?
# Normalize the radial part, not the whole thing; this gives us reasonable values,
# without dealing with the asymptote near the origin.
r, ψ = h_static(E, normalize=True)
ψ = sqrt(ψ**2 / r**2)
# Post-process by flipping between 0s, to make up for info lost
# during square root.
ε = 1e-3 # thresh for hit a 0.
ψ_processed = np.copy(ψ)
in_inversion = False
slope_neg_prev = True
for j in range(ψ.size):
if j == 0: # We use slopes; don't mis-index
ψ_processed[j] = ψ[j]
continue
slope_neg = ψ[j] < ψ[j-1]
# Just started or ended an inversion.
if ψ[j] <= ε and slope_neg != slope_neg_prev:
in_inversion = not in_inversion
if in_inversion:
ψ_processed[j] = -ψ[j]
else:
ψ_processed[j] = ψ[j]
slope_neg_prev = slope_neg
if normalize:
norm = simps(np.conj(ψ_processed) * ψ_processed, x=r)
return r, ψ_processed / norm ** 0.5
return r, ψ_processed
def plot_h_static(n: int = 1):
"""This 1d model represents the radial component of the wave function;
ie all of a 2d shell condensed down 2 dimensions to a point."""
# Negative E implies bound state; positive scattering.
# ψ_p0 should be 0 for continuity across the origin.
# E should be a whittaker energy, ie -1/2, -2/9, -1/8, -.08 etc
# Only odd states (n = 1, 3, 5 etc) correspond to 3d H atom.
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
ψ = ψ**2
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.show()
def plot_h_static_3d(n: int = 1):
"""Like H static, but perhaps this is the right model for 3D."""
# todo: Major DRY
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.ylim(-0.02, 0.02)
plt.show()
def check_wf_1d(x: ndarray, ψ: ndarray, E: float) -> ndarray:
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives.
The result is a percent diff.
ψ = -1/2ψ'' / (E-V)
ψ = -1/2ψ'' / (E-1/abs(r))
or, reversed:
ψ'' = -2(E - 1/abs(r))ψ
"""
# todo: Center it up? This approach lags.
# ψ_pp = np.diff(np.diff(ψ))
dx = (x[-1] - x[0]) / x.size
    ψ_pp = np.diff(np.diff(ψ)) / dx**2  # second finite difference scales with dx**2
ψ_pp = np.append(ψ_pp, np.array([0, 0])) # make the lengths match
ψ_pp_ideal = -2 * (E - 1/np.abs(x)) * ψ
# plt.plot(x, ψ)
# plt.plot(x, ψ_pp)
# plt.xlim(0, 10)
# plt.show()
    # For now, assume a single proton in the nucleus, at x=0.
ψ_ideal = -1/2 * ψ_pp / (E - 1/np.abs(x))
# plt.plot(x, ψ_ideal)
# plt.plot(x, ψ)
# plt.xlim(0, 10)
# plt.show()
plt.plot(x, ψ)
# plt.plot(x, ψ_pp_ideal)
plt.xlim(0, 10)
plt.show()
# result = (ψ - ψ_ideal) / ψ_ideal
result = (ψ_pp - ψ_pp_ideal) / ψ_pp_ideal
# plt.plot(x, result)
# plt.xlim(0, 10)
# plt.show()
return result
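# A centered-difference variant of the curvature estimate used in check_wf_1d above,
# addressing its "center it up" todo. Illustrative sketch; endpoints are left at zero
# and a uniform grid spacing is assumed.
def second_derivative_centered(x: ndarray, ψ: ndarray) -> ndarray:
    dx = (x[-1] - x[0]) / (x.size - 1)
    ψ_pp = np.zeros_like(ψ)
    ψ_pp[1:-1] = (ψ[2:] - 2 * ψ[1:-1] + ψ[:-2]) / dx**2
    return ψ_pp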
# def check_wf(ψ: Callable[(float, float), ]):
def check_wf_2d(ψ: ndarray):
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives."""
pass
def run_check():
n = 1
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
print(check_wf_1d(x, ψ, E))
def calc_energy(n: int) -> float:
"""Numerically calculate the energy of a wave function generated
by `h_static`. For n=1, we'd like to see if we can back-calculate E = -1/2."""
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
# Calculate potential between the e- field and the nucleus point by integrating.
# todo: Let's do this manually first, then try to apply a scipy.integrate approach.
dx = 1
result = 0
ψ2 = np.conj(ψ) * ψ
sample_pts = np.arange(x[0], x[-1], dx)
for pt in sample_pts:
k = 1
Q = 1
V = k * Q / x
q = 1
E = V * q * np.interp([pt], x, ψ2)[0]
result += E / dx
return result
def h2_potential(dist: float) -> float:
"""Given a distance, calculate the potential energy between
2 n=1 S orbital hydrogen atoms"""
pass
def h2_potential(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms"""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be attraction is positive potential.
n = 1
E = -2 / (n + 1) ** 2
H = Hydrogen3d([0, 1])
nuc_nuc_V = consts.k * consts.e**2 / x
dx = 2
dv = dx**3
nuc_elec_V = 0
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-10, 10, dx)
sample_pts = []
for j in range(sample_range.size):
for k_ in range(sample_range.size):
for l in range(sample_range.size):
sample_pts.append(Pt(j, k_, l))
# sample_pts = sample_range # 1d
print("num samples: ", len(sample_pts))
for pt in sample_pts:
# We need to integrate over volume, eg by splitting up into
# small cubes.
# Offset the x value by the distance between nuclei.
r = sqrt((pt.x + x)**2 + pt.y**2 + pt.z**2)
ψ_local = H.value(r, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
elec_val = np.conj(ψ_local) * ψ_local
# 2 for both interactions
nuc_elec_V -= 2 * consts.k * consts.e * elec_val / pt.x * dv
elec_elec_V = 0
e_e_factor = len(sample_pts)**2
# todo: You have a problem: WFs past the nuclei aren't attracting/repelling
# todo in the correct direction!
for pt0 in sample_pts:
pass
r0 = sqrt(pt0.x ** 2 + pt0.y ** 2 + pt0.z ** 2)
# We're dealing with S orbitals for now; no need to pass anything beyond
# radius to the `value` method.
ψ_local0 = H.value(r0, 0, 0)
for pt1 in sample_pts:
# todo: We only need to calculate wfs for each pt once!
# todo: Current approach could slow it down
r1 = sqrt(pt1.x**2 + pt1.y**2 + pt1.z**2)
ψ_local1 = H.value(r1, 0, 0)
# These are localized for each pt.
dist = sqrt((pt1.x - pt0.x)**2 + (pt1.y - pt0.y)**2 + (pt1.z - pt0.z)**2)
elec_elec_V += consts.k * consts.e * ψ_local0 * ψ_local1 * dv
print(f"NN: {nuc_nuc_V}, NE: {nuc_elec_V}, EE: {elec_elec_V} Net: {nuc_nuc_V + nuc_elec_V + elec_elec_V}")
    # potential etc from both elecs and the other proton.
def h2_force_pov(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms. In this
function, we view things from the perspective of the proton of one
of the atoms, and calculate everything else relative to it."""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be that towards our POV nucleus is positive;
# repulusion from it is negative.
H = Hydrogen3d([0, 1])
# Calculate proton-proton interaction.
nuc_nuc_V = consts.k * consts.e / x
nuc_nuc_F = Vec(nuc_nuc_V * consts.e / x, 0., 0.)
dx = 0.18
dv = dx**3
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-12.1, 12.1, dx) # Don't let 0 be a pt
# todo: Manually entering the pts we want
    # Because we're dealing with a 3rd power, we need to keep the sample pts minimal. The ones
# near the center should be more finely spaced. (Or perhaps tune the spacing dynamically
# based on changing slopes?)
# sample_range = np.array([-20, -15, -10, -9, -8.5, -8, -7.5, -7, -6.5, -6, -5.5, -5, -])
sample_pts = []
for j in sample_range:
for k_ in sample_range:
for l in sample_range:
sample_pts.append(Vec(j, k_, l))
print("num samples: ", len(sample_pts))
print("Sample range: ", sample_range)
# Calculate nucleus-electron interaction, with the electron from both atoms.
# We integrate over 3d space, using cartesian coordinates.
# Calculate proton-electron interaction.
nuc_elec_F = Vec(0., 0., 0.)
for pt in sample_pts:
# We integrate over volume, eg by splitting up into small cubes
# of len dx, and volume dv.
# Dist of elec from own, and other nuc, for this pt.
# The pt is centered on the POV atom. We use these radii
# to calculate WF strength.
r_own = pt.length()
# We're dealing with (spherically-symmetrical) S orbitals; we only
# need to pass radius to the `value` method.
ψ_local_own = H.value(r_own, 0, 0)
r_other = sqrt((pt.x + x) ** 2 + pt.y ** 2 + pt.z ** 2)
ψ_local_other = H.value(r_other, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
# (r_own for both, since we're calcing the pt rel to the POV nuc)) # todo is this right??
# todo: look to r here for the error?
V_own = consts.k * consts.e * np.conj(ψ_local_own) * ψ_local_own / r_own * dv
V_other = consts.k * consts.e * np.conj(ψ_local_other) * ψ_local_other / r_other * dv
# Net elec potential.
V_combined = V_own + V_other
unit_v = pt.scalar_mul(1. / pt.length())
nuc_elec_F += unit_v.scalar_mul(V_combined * -consts.e / r_own)
print(f"NN F: {nuc_nuc_F}, NE F: {nuc_elec_F}, Net F: {nuc_nuc_F + nuc_elec_F}")
if __name__ == "__main__":
n = 1
# print(calc_energy(n))
# Real dist: 74pm = 1.4 bohrs
h2_force_pov(1.4)
# plot_h_static_3d(n)
# plot_h_static(5)
# test_fft()
# run_fft()
# reimann()
# test_taylor()
# test_fourier()
# inv_gauss()
# h2()
# run_check() | from functools import partial
from typing import List, Iterable, Callable, Tuple | random_line_split |
main.py | from dataclasses import dataclass
from scipy.integrate import solve_ivp, simps
from functools import partial
from typing import List, Iterable, Callable, Tuple
from numpy import exp, ndarray, sqrt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from consts import *
import consts
τ = 2 * np.pi
i = complex(0, 1)
# 2020-11-15
"""
One of your goals is to figure out if you can use hydrogen (1d?) WFs as basis functions to create
arbitrary solns to the Schrodinger equation, thereby making chemistry simulation and modeling
much more computationally efficient.
It appears that for Hydrogen atoms, you can use linear combinations of 1D WFs as basis functions
in 2 and 3d by choosing the right coefficients, and the right modifier fn (sin, cos etc) across
θ and φ to apply to 2 and 3d situations.
You need to verify that this is correct and quantify. A challenge is finding accurate 2D orbitals
to compare your results to, and in visualizing and/or quantifying your 3D results to compare
to real results in 3d.
In parallel to verifying this, assume it's right, and try to model a 2-nucleus system. For
example, a H2 molecule. Attempt, in 1D, to find a combination of H atomic orbitals (perhaps
offset in x) that create the H2 molecular orbitals. These orbitals you're attempting to
match can be taken from real data, or by integrating. (May need to break up integration
into three areas, to avoid singularities at each nucleus).
"""
@dataclass
class Hydrogen3d:
"""A Hydrogen 3d superposition"""
# todo: Or is it n = 1, 3, 5...
# coeffs: List[complex] # Positive coefficients: n = 0, 1, 2, 3...
n: int
l: int
m: int
x: np.ndarray
components: List[np.ndarray]
def __init__(self, coeffs: List[complex]):
self.coeffs = coeffs
self.components = []
# n = 1 # todo: Only odd 1d coeffs for now.
n = 0 # todo: Only odd 1d coeffs for now.
for c in self.coeffs:
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
# if n == 1:
if n == 0:
self.x = x
self.components.append(c * ψ)
# n += 2
n += 1
def value(self, r: float, θ: float, φ: float) -> complex:
"""Get a single value."""
result = 0
for comp in self.components:
result += np.interp([r], self.x, comp)[0]
return result
def value_comp(self, x: float, j: int) -> complex:
"""Get a single value, from a specific component."""
return np.interp([x], self.x, self.components[j])[0]
def plot(self, range_: Tuple[float, float] = (-20, 20), shift: float = 0., size: int = 10_000, show: bool = True) -> None:
ψ = np.zeros(len(self.x), dtype=np.complex128)
for ψi in self.components:
ψ += ψi
# todo: DRY with other series'
plt.plot(self.x + shift, ψ.real)
# plt.plot(self.x, ψ.imag)
# plt.xlim(range_[0], range_[1])
plt.xlim(0, range_[1])
if show:
plt.show()
@dataclass
class Pt:
x: float
y: float
z: float
@dataclass
class Vec:
x: float
y: float
z: float
def __add__(self, other: 'Vec') -> 'Vec':
return Vec(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: 'Vec') -> 'Vec':
return Vec(self.x - other.x, self.y - other.y, self.z - other.z)
def scalar_mul(self, val: float) -> 'Vec':
return Vec(val * self.x, val * self.y, val * self.z)
def length(self) -> float:
return sqrt(self.x**2 + self.y**2 + self.z**2)
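# Illustrative helper (not used below): Coulomb force on a point charge q at `pt`
# due to a charge e at the origin, using the same unit-vector pattern as h2_force_pov.
# Assumes consts.k and consts.e follow the unit convention used elsewhere in this file.
def coulomb_force_from_origin(pt: Vec, q: float) -> Vec:
    r = pt.length()
    unit_outward = pt.scalar_mul(1.0 / r)
    return unit_outward.scalar_mul(consts.k * q * consts.e / r**2)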
matplotlib.use("Qt5Agg")
# import seaborn as sns
# import plotly
# import plotly.graph_objects as go
# A global state var
V_prev: Callable = lambda sx: 0
# Looking into matrix mechanics, and Feynman path integral approaches too
# orbitals are characterized (in simple cases) by quantum numbers n, l, and m, corresponding to
# energy, angular momentum, and magnetic (ang momentum vec component)
# spin?
# Do electrons (electrically) interact with themselves?
# Breaking up a numerical problem into a number of solvable analytic ones? E.g. set up
# an arbitrary V as a series of step Vs which have analytic solns
# Free variables: 2? Energy, and ψ_p_0(ψ). Eg we can set ψ to what we wish, find the ψ_p that
# works with it (and the E set), then normalize.
PRECISION = 100_000
@dataclass
class Nucleus:
n_prot: float
n_neut: float
sx: float
vx: float
def mass(self):
return self.n_prot * m_p + self.n_neut * m_n
def charge(self):
# Charge from protons only.
return self.n_prot * e
def nuc_pot(nuclei: Iterable[Nucleus], sx: float) -> float:
result = 0
    for nucleus in nuclei:
        # Coulomb potential
        result -= e / abs(nucleus.sx - sx)
return result
def ti_schrod_rhs(
| : Callable, x: float, y: Tuple[complex, complex]
) -> Tuple[complex, complex]:
"""
d²ψ/dx² = 2m/ħ² * (V(x) - E)ψ
"""
ψ, φ = y
ψ_p = φ
φ_p = 2 * m_e / ħ ** 2 * (V(x) - E) * ψ
return ψ_p, φ_p
def solve(E: float, V: Callable, ψ0: float, ψ_p0: float, x_span: Tuple[float, float]):
"""
Calculate the wave function for electrons in an arbitrary potential, at a single snapshot
in time.
"""
rhs = partial(ti_schrod_rhs, E, V)
return solve_ivp(
rhs, x_span, (ψ0, ψ_p0), t_eval=np.linspace(x_span[0], x_span[1], PRECISION)
)
def h_static(E: float, normalize=True) -> Tuple[ndarray, ndarray]:
ψ0 = 0
ψ_p0 = 0.1
x_span = (-100, 0.0000001)
V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0)])
# V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0), Nucleus(1, 0, 1, 0)])
# Left and right of the x=0 coulomb singularity. Assume odd solution around x=0.
soln_orig = solve(E, V_elec, ψ0, ψ_p0, x_span)
soln_left = soln_orig.y[0]
soln_right = np.flip(soln_left)
soln = np.concatenate([soln_left, -soln_right])
x = np.concatenate([soln_orig.t, np.flip(-soln_orig.t)])
if normalize:
norm = simps(np.conj(soln) * soln, x=x)
return x, soln / norm ** 0.5
return x, soln
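# Quick sanity-check sketch for h_static: with normalize=True (the default) the
# returned wave function should integrate to ~1. Illustrative only; not called anywhere.
def check_h_static_norm(n: int = 1) -> float:
    E = -2 / (n + 1) ** 2
    x, ψ = h_static(E)
    return float(simps(np.conj(ψ) * ψ, x=x))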
def h_static_3d(E: float, normalize=False) -> Tuple[ndarray, ndarray]:
"""We create the radial part of the 3d version from the "radial density" information."""
# todo: Why don't we get a result if we fail to normalize here?
# Normalize the radial part, not the whole thing; this gives us reasonable values,
# without dealing with the asymptote near the origin.
r, ψ = h_static(E, normalize=True)
ψ = sqrt(ψ**2 / r**2)
# Post-process by flipping between 0s, to make up for info lost
# during square root.
ε = 1e-3 # thresh for hit a 0.
ψ_processed = np.copy(ψ)
in_inversion = False
slope_neg_prev = True
for j in range(ψ.size):
if j == 0: # We use slopes; don't mis-index
ψ_processed[j] = ψ[j]
continue
slope_neg = ψ[j] < ψ[j-1]
# Just started or ended an inversion.
if ψ[j] <= ε and slope_neg != slope_neg_prev:
in_inversion = not in_inversion
if in_inversion:
ψ_processed[j] = -ψ[j]
else:
ψ_processed[j] = ψ[j]
slope_neg_prev = slope_neg
if normalize:
norm = simps(np.conj(ψ_processed) * ψ_processed, x=r)
return r, ψ_processed / norm ** 0.5
return r, ψ_processed
def plot_h_static(n: int = 1):
"""This 1d model represents the radial component of the wave function;
ie all of a 2d shell condensed down 2 dimensions to a point."""
# Negative E implies bound state; positive scattering.
# ψ_p0 should be 0 for continuity across the origin.
# E should be a whittaker energy, ie -1/2, -2/9, -1/8, -.08 etc
# Only odd states (n = 1, 3, 5 etc) correspond to 3d H atom.
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
ψ = ψ**2
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.show()
def plot_h_static_3d(n: int = 1):
"""Like H static, but perhaps this is the right model for 3D."""
# todo: Major DRY
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.ylim(-0.02, 0.02)
plt.show()
def check_wf_1d(x: ndarray, ψ: ndarray, E: float) -> ndarray:
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives.
The result is a percent diff.
ψ = -1/2ψ'' / (E-V)
ψ = -1/2ψ'' / (E-1/abs(r))
or, reversed:
ψ'' = -2(E - 1/abs(r))ψ
"""
# todo: Center it up? This approach lags.
# ψ_pp = np.diff(np.diff(ψ))
dx = (x[-1] - x[0]) / x.size
    ψ_pp = np.diff(np.diff(ψ)) / dx**2  # second finite difference scales with dx**2
ψ_pp = np.append(ψ_pp, np.array([0, 0])) # make the lengths match
ψ_pp_ideal = -2 * (E - 1/np.abs(x)) * ψ
# plt.plot(x, ψ)
# plt.plot(x, ψ_pp)
# plt.xlim(0, 10)
# plt.show()
    # For now, assume a single proton in the nucleus, at x=0.
ψ_ideal = -1/2 * ψ_pp / (E - 1/np.abs(x))
# plt.plot(x, ψ_ideal)
# plt.plot(x, ψ)
# plt.xlim(0, 10)
# plt.show()
plt.plot(x, ψ)
# plt.plot(x, ψ_pp_ideal)
plt.xlim(0, 10)
plt.show()
# result = (ψ - ψ_ideal) / ψ_ideal
result = (ψ_pp - ψ_pp_ideal) / ψ_pp_ideal
# plt.plot(x, result)
# plt.xlim(0, 10)
# plt.show()
return result
# def check_wf(ψ: Callable[(float, float), ]):
def check_wf_2d(ψ: ndarray):
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives."""
pass
def run_check():
n = 1
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
print(check_wf_1d(x, ψ, E))
def calc_energy(n: int) -> float:
"""Numerically calculate the energy of a wave function generated
by `h_static`. For n=1, we'd like to see if we can back-calculate E = -1/2."""
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
# Calculate potential between the e- field and the nucleus point by integrating.
# todo: Let's do this manually first, then try to apply a scipy.integrate approach.
dx = 1
result = 0
ψ2 = np.conj(ψ) * ψ
sample_pts = np.arange(x[0], x[-1], dx)
for pt in sample_pts:
k = 1
Q = 1
V = k * Q / x
q = 1
E = V * q * np.interp([pt], x, ψ2)[0]
result += E / dx
return result
def h2_potential(dist: float) -> float:
"""Given a distance, calculate the potential energy between
2 n=1 S orbital hydrogen atoms"""
pass
def h2_potential(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms"""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be attraction is positive potential.
n = 1
E = -2 / (n + 1) ** 2
H = Hydrogen3d([0, 1])
nuc_nuc_V = consts.k * consts.e**2 / x
dx = 2
dv = dx**3
nuc_elec_V = 0
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-10, 10, dx)
sample_pts = []
for j in range(sample_range.size):
for k_ in range(sample_range.size):
for l in range(sample_range.size):
sample_pts.append(Pt(j, k_, l))
# sample_pts = sample_range # 1d
print("num samples: ", len(sample_pts))
for pt in sample_pts:
# We need to integrate over volume, eg by splitting up into
# small cubes.
# Offset the x value by the distance between nuclei.
r = sqrt((pt.x + x)**2 + pt.y**2 + pt.z**2)
ψ_local = H.value(r, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
elec_val = np.conj(ψ_local) * ψ_local
# 2 for both interactions
nuc_elec_V -= 2 * consts.k * consts.e * elec_val / pt.x * dv
elec_elec_V = 0
e_e_factor = len(sample_pts)**2
# todo: You have a problem: WFs past the nuclei aren't attracting/repelling
# todo in the correct direction!
for pt0 in sample_pts:
pass
r0 = sqrt(pt0.x ** 2 + pt0.y ** 2 + pt0.z ** 2)
# We're dealing with S orbitals for now; no need to pass anything beyond
# radius to the `value` method.
ψ_local0 = H.value(r0, 0, 0)
for pt1 in sample_pts:
# todo: We only need to calculate wfs for each pt once!
# todo: Current approach could slow it down
r1 = sqrt(pt1.x**2 + pt1.y**2 + pt1.z**2)
ψ_local1 = H.value(r1, 0, 0)
# These are localized for each pt.
dist = sqrt((pt1.x - pt0.x)**2 + (pt1.y - pt0.y)**2 + (pt1.z - pt0.z)**2)
elec_elec_V += consts.k * consts.e * ψ_local0 * ψ_local1 * dv
print(f"NN: {nuc_nuc_V}, NE: {nuc_elec_V}, EE: {elec_elec_V} Net: {nuc_nuc_V + nuc_elec_V + elec_elec_V}")
    # potential etc from both elecs and the other proton.
def h2_force_pov(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms. In this
function, we view things from the perspective of the proton of one
of the atoms, and calculate everything else relative to it."""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be that towards our POV nucleus is positive;
# repulusion from it is negative.
H = Hydrogen3d([0, 1])
# Calculate proton-proton interaction.
nuc_nuc_V = consts.k * consts.e / x
nuc_nuc_F = Vec(nuc_nuc_V * consts.e / x, 0., 0.)
dx = 0.18
dv = dx**3
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-12.1, 12.1, dx) # Don't let 0 be a pt
# todo: Manually entering the pts we want
    # Because we're dealing with a 3rd power, we need to keep the sample pts minimal. The ones
# near the center should be more finely spaced. (Or perhaps tune the spacing dynamically
# based on changing slopes?)
# sample_range = np.array([-20, -15, -10, -9, -8.5, -8, -7.5, -7, -6.5, -6, -5.5, -5, -])
sample_pts = []
for j in sample_range:
for k_ in sample_range:
for l in sample_range:
sample_pts.append(Vec(j, k_, l))
print("num samples: ", len(sample_pts))
print("Sample range: ", sample_range)
# Calculate nucleus-electron interaction, with the electron from both atoms.
# We integrate over 3d space, using cartesian coordinates.
# Calculate proton-electron interaction.
nuc_elec_F = Vec(0., 0., 0.)
for pt in sample_pts:
# We integrate over volume, eg by splitting up into small cubes
# of len dx, and volume dv.
# Dist of elec from own, and other nuc, for this pt.
# The pt is centered on the POV atom. We use these radii
# to calculate WF strength.
r_own = pt.length()
# We're dealing with (spherically-symmetrical) S orbitals; we only
# need to pass radius to the `value` method.
ψ_local_own = H.value(r_own, 0, 0)
r_other = sqrt((pt.x + x) ** 2 + pt.y ** 2 + pt.z ** 2)
ψ_local_other = H.value(r_other, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
# (r_own for both, since we're calcing the pt rel to the POV nuc)) # todo is this right??
# todo: look to r here for the error?
V_own = consts.k * consts.e * np.conj(ψ_local_own) * ψ_local_own / r_own * dv
V_other = consts.k * consts.e * np.conj(ψ_local_other) * ψ_local_other / r_other * dv
# Net elec potential.
V_combined = V_own + V_other
unit_v = pt.scalar_mul(1. / pt.length())
nuc_elec_F += unit_v.scalar_mul(V_combined * -consts.e / r_own)
print(f"NN F: {nuc_nuc_F}, NE F: {nuc_elec_F}, Net F: {nuc_nuc_F + nuc_elec_F}")
if __name__ == "__main__":
n = 1
# print(calc_energy(n))
# Real dist: 74pm = 1.4 bohrs
h2_force_pov(1.4)
# plot_h_static_3d(n)
# plot_h_static(5)
# test_fft()
# run_fft()
# reimann()
# test_taylor()
# test_fourier()
# inv_gauss()
# h2()
# run_check() | E: float, V | identifier_name |
main.py | from dataclasses import dataclass
from scipy.integrate import solve_ivp, simps
from functools import partial
from typing import List, Iterable, Callable, Tuple
from numpy import exp, ndarray, sqrt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from consts import *
import consts
τ = 2 * np.pi
i = complex(0, 1)
# 2020-11-15
"""
One of your goals is to figure out if you can use hydrogen (1d?) WFs as basis functions to create
arbitrary solns to the Schrodinger equation, thereby making chemistry simulation and modeling
much more computationally efficient.
It appears that for Hydrogen atoms, you can use linear combinations of 1D WFs as basis functions
in 2 and 3d by choosing the right coefficients, and the right modifier fn (sin, cos etc) across
θ and φ to apply to 2 and 3d situations.
You need to verify that this is correct and quantify. A challenge is finding accurate 2D orbitals
to compare your results to, and in visualizing and/or quantifying your 3D results to compare
to real results in 3d.
In parallel to verifying this, assume it's right, and try to model a 2-nucleus system. For
example, a H2 molecule. Attempt, in 1D, to find a combination of H atomic orbitals (perhaps
offset in x) that create the H2 molecular orbitals. These orbitals you're attempting to
match can be taken from real data, or by integrating. (May need to break up integration
into three areas, to avoid singularities at each nucleus).
"""
@dataclass
class Hydrogen3d:
"""A Hydrogen 3d superposition"""
# todo: Or is it n = 1, 3, 5...
# coeffs: List[complex] # Positive coefficients: n = 0, 1, 2, 3...
n: int
l: int
m: int
x: np.ndarray
components: List[np.ndarray]
def __init__(self, coeffs: List[complex]):
self.coeffs = coeffs
self.components = []
# n = 1 # todo: Only odd 1d coeffs for now.
n = 0 # todo: Only odd 1d coeffs for now.
for c in self.coeffs:
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
# if n == 1:
if n == 0:
self.x = x
self.components.append(c * ψ)
# n += 2
n += 1
def value(self, r: float, θ: float, φ: float) -> complex:
"""Get a single value."""
result = 0
for comp in self.components:
result += np.interp([r], self.x, comp)[0]
return result
def value_comp(self, x: float, j: int) -> complex:
"""Get a single value, from a specific component."""
return np.interp([x], self.x, self.components[j])[0]
def plot(self, range_: Tuple[float, float] = (-20, 20), shift: float = 0., size: int = 10_000, show: bool = True) -> None:
ψ = np.zeros(len(self.x), dtype=np.complex128)
for ψi in self.components:
ψ += ψi
# todo: DRY with other series'
plt.plot(self.x + shift, ψ.real)
# plt.plot(self.x, ψ.imag)
# plt.xlim(range_[0], range_[1])
plt.xlim(0, range_[1])
if show:
plt.show()
@dataclass
class Pt:
x: float
y: float
z: float
@dataclass
class Vec:
x: float
y: float
z: float
def __add__(self, other: 'Vec') -> 'Vec':
return Vec(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: 'Vec') -> 'Vec':
return Vec(self.x - other.x, self.y - other.y, self.z - other.z)
def scalar_mul(self, val: float) -> 'Vec':
return Vec(val * self.x, val * self.y, val * self.z)
def length(self) -> float:
return sqrt(self.x**2 + self.y**2 + self.z**2)
matplotlib.use("Qt5Agg")
# import seaborn as sns
# import plotly
# import plotly.graph_objects as go
# A global state var
V_prev: Callable = lambda sx: 0
# Looking into matrix mechanics, and Feynman path integral approaches too
# orbitals are characterized (in simple cases) by quantum numbers n, l, and m, corresponding to
# energy, angular momentum, and magnetic (ang momentum vec component)
# spin?
# Do electrons (electrically) interact with themselves?
# Breaking up a numerical problem into a number of solvable analytic ones? E.g. set up
# an arbitrary V as a series of step Vs which have analytic solns
# Free variables: 2? Energy, and ψ_p_0(ψ). Eg we can set ψ to what we wish, find the ψ_p that
# works with it (and the E set), then normalize.
PRECISION = 100_000
@dataclass
class Nucleus:
n_prot: float
n_neut: float
sx: float
vx: float
def mass(self):
return self.n_prot * m_p + self.n_neut * m_n
def charge(self):
# Charge from protons only.
return self.n_prot * e
def nuc_pot(nuclei: Iterable[Nucleus], sx: float) -> float:
result = 0
    for nucleus in nuclei:
        # Coulomb potential
        result -= e / abs(nucleus.sx - sx)
return result
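# Illustrative usage sketch: the potential at sx from two protons separated by
# 1.4 bohr, mirroring the commented-out two-nucleus V_elec line in h_static below.
def two_proton_potential(sx: float, separation: float = 1.4) -> float:
    return nuc_pot([Nucleus(1, 0, 0, 0), Nucleus(1, 0, separation, 0)], sx)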
def ti_schrod_rhs(
E: float, V: Callable, x: float, y: Tuple[complex, complex]
) -> Tuple[complex, complex]:
"""
d²ψ/dx² = 2m/ħ² * (V(x) - E)ψ
"""
ψ, φ = y
ψ_p = φ
φ_p = 2 * m_e / ħ ** 2 * (V(x) - E) * ψ
return ψ_p, φ_p
def solve(E: float, V: Callable, ψ0: float, ψ_p0: float, x_span: Tuple[float, float]):
"""
Calculate the wave function for electrons in an arbitrary potential, at a single snapshot
in time.
"""
rhs = partial(ti_schrod_rhs, E, V)
return solve_ivp(
rhs, x_span, (ψ0, ψ_p0), t_eval=np.linspace(x_span[0], x_span[1], PRECISION)
)
def h_static(E: float, normalize=True) -> Tuple[ndarray, ndarray]:
ψ0 = 0
ψ_p0 = 0.1
x_span = (-100, 0.0000001)
V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0)])
# V_elec = partial(nuc_pot, [Nucleus(1, 0, 0, 0), Nucleus(1, 0, 1, 0)])
# Left and right of the x=0 coulomb singularity. Assume odd solution around x=0.
soln_orig = solve(E, V_elec, ψ0, ψ_p0, x_span)
soln_left = soln_orig.y[0]
soln_right = np.flip(soln_left)
soln = np.concatenate([soln_left, -soln_right])
x = np.concatenate([soln_orig.t, np.flip(-soln_orig.t)])
if normalize:
norm = simps(np.conj(soln) * soln, x=x)
return x, soln / norm ** 0.5
return x, soln
def h_static_3d(E: float, normalize=False) -> Tuple[ndarray, ndarray]:
"""We create the radial part of the 3d version from the "radial density" information."""
# todo: Why don't we get a result if we fail to normalize here?
# Normalize the radial part, not the whole thing; this gives us reasonable values,
# without dealing with the asymptote near the origin.
r, ψ = h_static(E, normalize=True)
ψ = sqrt(ψ**2 / r**2)
# Post-process by flipping between 0s, to make up for info lost
# during square root.
ε = 1e-3 # thresh for hit a 0.
ψ_processed = np.copy(ψ)
in_inversion = False
slope_neg_prev = True
for j in range(ψ.size):
if j == 0: # We use slopes; don't mis-index
ψ_processed[j] = ψ[j]
continue
slope_neg = ψ[j] < ψ[j-1]
# Just started or ended an inversion.
if ψ[j] <= ε and slope_neg != slope_neg_prev:
in_inversion = not in_inversion
if in_inversion:
ψ_processed[j] = -ψ[j]
else:
ψ_processed[j] = ψ[j]
slope_neg_prev = slope_neg
if normalize:
norm = simps(np.conj(ψ_processed) * ψ_processed, x=r)
return r, ψ_processed / norm ** 0.5
return r, ψ_processed
def plot_h_static(n: int = 1):
"""This 1d model represents the radial component of the wave function;
ie all of a 2d shell condensed down 2 dimensions to a point."""
# Negative E implies bound state; positive scattering.
# ψ_p0 should be 0 for continuity across the origin.
# E should be a whittaker energy, ie -1/2, -2/9, -1/8, -.08 etc
# Only odd states (n = 1, 3, 5 etc) correspond to 3d H atom.
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
ψ = ψ**2
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.show()
def plot_h_static_3d(n: int = 1):
"""Like H static, but perhaps this is the right model for 3D."""
# todo: Major DRY
E = -2 / (n + 1) ** 2
x, ψ = h_static_3d(E)
fig, ax = plt.subplots()
ax.plot(x, ψ)
ax.grid(True)
plt.xlim(0, 20)
plt.ylim(-0.02, 0.02)
plt.show()
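# Quick reference sketch: the energies used throughout are E = -2 / (n + 1)**2,
# i.e. -1/2, -2/9, -1/8, ... (per plot_h_static above, only odd n correspond to
# the 3d H atom). Illustrative only.
def whittaker_energies(n_max: int = 6) -> List[float]:
    return [-2 / (n + 1) ** 2 for n in range(1, n_max + 1)]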
def check_wf_1d(x: ndarray, ψ: ndarray, E: float) -> ndarray:
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives.
The result is a percent diff.
ψ = -1/2ψ'' / (E-V)
ψ = -1/2ψ'' / (E-1/abs(r))
or, reversed:
ψ'' = -2(E - 1/abs(r))ψ
"""
# todo: Center it up? This approach lags.
# ψ_pp = np.diff(np.diff(ψ))
dx = (x[-1] - x[0]) / x.size
    ψ_pp = np.diff(np.diff(ψ)) / dx**2  # second finite difference scales with dx**2
ψ_pp = np.append(ψ_pp, np.array([0, 0])) # make the lengths match
ψ_pp_ideal = -2 * (E - 1/np.abs(x)) * ψ
# plt.plot(x, ψ)
# plt.plot(x, ψ_pp)
# plt.xlim(0, 10)
# plt.show()
    # For now, assume a single proton in the nucleus, at x=0.
ψ_ideal = -1/2 * ψ_pp / (E - 1/np.abs(x))
# plt.plot(x, ψ_ideal)
# plt.plot(x, ψ)
# plt.xlim(0, 10)
# plt.show()
plt.plot(x, ψ)
# plt.plot(x, ψ_pp_ideal)
plt.xlim(0, 10)
plt.show()
# result = (ψ - ψ_ideal) / ψ_ideal
result = (ψ_pp - ψ_pp_ideal) / ψ_pp_ideal
# plt.plot(x, result)
# plt.xlim(0, 10)
# plt.show()
return result
# def check_wf(ψ: Callable[(float, float), ]):
def check_wf_2d(ψ: ndarray):
"""Given a wave function as a set of discrete points, (Or a fn?) determine how much
it close it is to the schrodinger equation by analyzing the derivatives."""
pass
def run_check():
n = 1
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
print(check_wf_1d(x, ψ, E))
def calc_energy(n: int) -> float:
"""Numerically calculate the energy of a wave function generated
by `h_static`. For n=1, we'd lik | 2 n=1 S orbital hydrogen atoms"""
pass
def h2_potential(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms"""
# Start with the perspectic of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be attraction is positive potential.
n = 1
E = -2 / (n + 1) ** 2
H = Hydrogen3d([0, 1])
nuc_nuc_V = consts.k * consts.e**2 / x
dx = 2
dv = dx**3
nuc_elec_V = 0
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-10, 10, dx)
sample_pts = []
for j in range(sample_range.size):
for k_ in range(sample_range.size):
for l in range(sample_range.size):
sample_pts.append(Pt(j, k_, l))
# sample_pts = sample_range # 1d
print("num samples: ", len(sample_pts))
for pt in sample_pts:
# We need to integrate over volume, eg by splitting up into
# small cubes.
# Offset the x value by the distance between nuclei.
r = sqrt((pt.x + x)**2 + pt.y**2 + pt.z**2)
ψ_local = H.value(r, 0, 0)
# Divide by the number of sample points: The total answer
# ψ^2 adds up to 1, so this weights each segment evenly.
elec_val = np.conj(ψ_local) * ψ_local
# 2 for both interactions
nuc_elec_V -= 2 * consts.k * consts.e * elec_val / pt.x * dv
elec_elec_V = 0
e_e_factor = len(sample_pts)**2
# todo: You have a problem: WFs past the nuclei aren't attracting/repelling
# todo in the correct direction!
for pt0 in sample_pts:
pass
r0 = sqrt(pt0.x ** 2 + pt0.y ** 2 + pt0.z ** 2)
# We're dealing with S orbitals for now; no need to pass anything beyond
# radius to the `value` method.
ψ_local0 = H.value(r0, 0, 0)
for pt1 in sample_pts:
# todo: We only need to calculate wfs for each pt once!
# todo: Current approach could slow it down
r1 = sqrt(pt1.x**2 + pt1.y**2 + pt1.z**2)
ψ_local1 = H.value(r1, 0, 0)
# These are localized for each pt.
dist = sqrt((pt1.x - pt0.x)**2 + (pt1.y - pt0.y)**2 + (pt1.z - pt0.z)**2)
elec_elec_V += consts.k * consts.e * ψ_local0 * ψ_local1 * dv
print(f"NN: {nuc_nuc_V}, NE: {nuc_elec_V}, EE: {elec_elec_V} Net: {nuc_nuc_V + nuc_elec_V + elec_elec_V}")
# potential etc from both elecs and the other proton.
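# The triple loop above is O(N^3) in grid points per axis and easy to get wrong.
# Below is a vectorized sketch of one consistent reading of the nucleus-electron
# term (hypothetical helper, not part of the original file; assumes the same
# Hydrogen3d API used above, i.e. H.value(r, 0, 0) on a scalar radius): electron
# density taken about the atom that owns the electron, Coulomb denominator the
# distance to the POV nucleus.
def _nuc_elec_potential_vectorized(H, x: float, dx: float = 2.0, extent: float = 10.0) -> float:
    s = np.arange(-extent, extent, dx)
    X, Y, Z = np.meshgrid(s, s, s, indexing="ij")  # grid centered on the non-POV atom
    r_own = np.sqrt(X**2 + Y**2 + Z**2)            # radius about the atom owning the electron
    ψ = np.vectorize(lambda ri: H.value(ri, 0, 0))(r_own)
    density = np.conj(ψ) * ψ                       # |ψ|^2 per cell
    r_pov = np.sqrt((X + x)**2 + Y**2 + Z**2)      # distance to the POV nucleus
    r_pov = np.where(r_pov == 0, dx, r_pov)        # crude guard if a grid point lands on the nucleus
    dv = dx**3
    # Negative, mirroring the `-=` accumulation in the loop above.
    return float(-2 * consts.k * consts.e * np.sum(density / r_pov) * dv)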
def h2_force_pov(x: float) -> float:
"""Calcualte the electric potential between 2 hydrogen atoms. In this
function, we view things from the perspective of the proton of one
of the atoms, and calculate everything else relative to it."""
# Start with the perspective of one atom. Calculate the interaction between
# its nucleus and the other atom's nucleus, and electron.
# Our convention will be that towards our POV nucleus is positive;
# repulsion from it is negative.
H = Hydrogen3d([0, 1])
# Calculate proton-proton interaction.
nuc_nuc_V = consts.k * consts.e / x
nuc_nuc_F = Vec(nuc_nuc_V * consts.e / x, 0., 0.)
dx = 0.18
dv = dx**3
# We'll say the molecules are at the same z and y coordinates,
# but separated on the x axis by input argument `x`.
# Sample point coordinates are centered on the non-POV atom.
# todo: You will gain much precision by taking sample areas
# todo closer together near the nucleus
# `sample_range` applies to all 3 dimensions.
sample_range = np.arange(-12.1, 12.1, dx) # Don't let 0 be a pt
# todo: Manually entering the pts we want
# Because we're dealing with a 3rd power, we need to keep the sample pts minimal. The ones
# near the center should be more finely spaced. (Or perhaps tune the spacing dynamically
# based on changing slopes?)
# sample_range = np.array([-20, -15, -10, -9, -8.5, -8, -7.5, -7, -6.5, -6, -5.5, -5, -])
sample_pts = []
for j in sample_range:
for k_ in sample_range:
for l in sample_range:
sample_pts.append(Vec(j, k_, l))
print("num samples: ", len(sample_pts))
print("Sample range: ", sample_range)
# Calculate nucleus-electron interaction, with the electron from both atoms.
# We integrate over 3d space, using cartesian coordinates.
# Calculate proton-electron interaction.
nuc_elec_F = Vec(0., 0., 0.)
for pt in sample_pts:
# We integrate over volume, eg by splitting up into small cubes
# of len dx, and volume dv.
# Dist of elec from own, and other nuc, for this pt.
# The pt is centered on the POV atom. We use these radii
# to calculate WF strength.
r_own = pt.length()
# We're dealing with (spherically-symmetrical) S orbitals; we only
# need to pass radius to the `value` method.
ψ_local_own = H.value(r_own, 0, 0)
r_other = sqrt((pt.x + x) ** 2 + pt.y ** 2 + pt.z ** 2)
ψ_local_other = H.value(r_other, 0, 0)
# Weight by the volume element dv: ψ^2 integrates to 1 over all space,
# so each cell contributes its proportional share of the electron density.
# (r_own for both, since we're calculating the pt rel to the POV nuc) # todo is this right??
# todo: look to r here for the error?
V_own = consts.k * consts.e * np.conj(ψ_local_own) * ψ_local_own / r_own * dv
V_other = consts.k * consts.e * np.conj(ψ_local_other) * ψ_local_other / r_other * dv
# Net elec potential.
V_combined = V_own + V_other
unit_v = pt.scalar_mul(1. / pt.length())
nuc_elec_F += unit_v.scalar_mul(V_combined * -consts.e / r_own)
print(f"NN F: {nuc_nuc_F}, NE F: {nuc_elec_F}, Net F: {nuc_nuc_F + nuc_elec_F}")
if __name__ == "__main__":
n = 1
# print(calc_energy(n))
# Real dist: 74pm = 1.4 bohrs
h2_force_pov(1.4)
# plot_h_static_3d(n)
# plot_h_static(5)
# test_fft()
# run_fft()
# reimann()
# test_taylor()
# test_fourier()
# inv_gauss()
# h2()
# run_check() | e to see if we can back-calculate E = -1/2."""
E = -2 / (n + 1) ** 2
x, ψ = h_static(E)
# Calculate potential between the e- field and the nucleus point by integrating.
# todo: Let's do this manually first, then try to apply a scipy.integrate approach.
dx = 1
result = 0
ψ2 = np.conj(ψ) * ψ
sample_pts = np.arange(x[0], x[-1], dx)
for pt in sample_pts:
k = 1
Q = 1
V = k * Q / pt  # potential at this sample point (not the whole array x)
q = 1
dE = V * q * np.interp([pt], x, ψ2)[0]
result += dE * dx  # integrate: weight by the step size
return result
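# Rough usage note (an assumption, given the unit conventions in this file):
# with E = -2/(n+1)**2, n=1 corresponds to the bound ground state at E = -1/2,
# so a correct integration here should land calc_energy(1) in that ballpark.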
def h2_potential(dist: float) -> float:
"""Given a distance, calculate the potential energy between
| identifier_body |
syncmgr.rs | //!
//! Manages header synchronization with peers.
//!
use nakamoto_common::bitcoin::consensus::params::Params;
use nakamoto_common::bitcoin::network::constants::ServiceFlags;
use nakamoto_common::bitcoin::network::message_blockdata::Inventory;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::store;
use nakamoto_common::block::time::{Clock, LocalDuration, LocalTime};
use nakamoto_common::block::tree::{BlockReader, BlockTree, Error, ImportResult};
use nakamoto_common::block::{BlockHash, BlockHeader, Height};
use nakamoto_common::collections::{AddressBook, HashMap};
use nakamoto_common::nonempty::NonEmpty;
use super::output::{Disconnect, SetTimer, Wire};
use super::{DisconnectReason, Link, Locators, PeerId, Socket};
/// How long to wait for a request, eg. `getheaders` to be fulfilled.
pub const REQUEST_TIMEOUT: LocalDuration = LocalDuration::from_secs(30);
/// How long before the tip of the chain is considered stale. This takes into account
/// that the block timestamp may have been set sometime in the future.
pub const TIP_STALE_DURATION: LocalDuration = LocalDuration::from_mins(60 * 2);
/// Maximum number of headers sent in a `headers` message.
pub const MAX_MESSAGE_HEADERS: usize = 2000;
/// Maximum number of inventories sent in an `inv` message.
pub const MAX_MESSAGE_INVS: usize = 50000;
/// Idle timeout.
pub const IDLE_TIMEOUT: LocalDuration = LocalDuration::BLOCK_INTERVAL;
/// Services required from peers for header sync.
pub const REQUIRED_SERVICES: ServiceFlags = ServiceFlags::NETWORK;
/// Maximum headers announced in a `headers` message, when unsolicited.
const MAX_UNSOLICITED_HEADERS: usize = 24;
/// How long to wait between checks for longer chains from peers.
const PEER_SAMPLE_INTERVAL: LocalDuration = LocalDuration::from_mins(60);
/// What to do if a timeout for a peer is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum OnTimeout {
/// Disconnect peer on timeout.
Disconnect,
/// Do nothing on timeout.
Ignore,
/// Retry with a different peer on timeout.
Retry(usize),
}
/// State of a sync peer.
#[derive(Debug)]
struct Peer {
height: Height,
preferred: bool,
tip: BlockHash,
link: Link,
last_active: Option<LocalTime>,
last_asked: Option<Locators>,
_socket: Socket,
}
/// Sync manager configuration.
#[derive(Debug)]
pub struct Config {
/// Maximum number of messages in a `headers` message.
pub max_message_headers: usize,
/// How long to wait for a response from a peer.
pub request_timeout: LocalDuration,
/// Consensus parameters.
pub params: Params,
}
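// Illustrative sketch, not part of the original file: a typical `Config` for
// mainnet, reusing the constants defined above. The exact `Network` path and
// the `Params::new` constructor come from the re-exported `bitcoin` crate and
// are assumptions here.
//
// use nakamoto_common::bitcoin::network::constants::Network;
//
// let config = Config {
//     max_message_headers: MAX_MESSAGE_HEADERS,
//     request_timeout: REQUEST_TIMEOUT,
//     params: Params::new(Network::Bitcoin),
// };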
/// The sync manager state.
#[derive(Debug)]
pub struct SyncManager<U, C> {
/// Sync manager configuration.
pub config: Config,
/// Sync-specific peer state.
peers: AddressBook<PeerId, Peer>,
/// Last time our tip was updated.
last_tip_update: Option<LocalTime>,
/// Last time we sampled our peers for their active chain.
last_peer_sample: Option<LocalTime>,
/// Last time we idled.
last_idle: Option<LocalTime>,
/// In-flight requests to peers.
inflight: HashMap<PeerId, GetHeaders>,
/// Upstream protocol channel.
upstream: U,
/// Clock.
clock: C,
}
/// An event emitted by the sync manager.
#[derive(Debug, Clone)]
pub enum Event {
/// A block was added to the main chain.
BlockConnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A block was removed from the main chain.
BlockDisconnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A new block was discovered via a peer.
BlockDiscovered(PeerId, BlockHash),
/// Syncing headers.
Syncing {
/// Current block header height.
current: Height,
/// Best known block header height.
best: Height,
},
/// Synced up to the specified hash and height.
Synced(BlockHash, Height),
/// Potential stale tip detected on the active chain.
StaleTip(LocalTime),
/// Peer misbehaved.
PeerMisbehaved(PeerId),
/// Peer height updated.
PeerHeightUpdated {
/// Best height known.
height: Height,
},
}
impl std::fmt::Display for Event {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Event::PeerMisbehaved(addr) => {
write!(fmt, "{}: Peer misbehaved", addr)
}
Event::PeerHeightUpdated { height } => {
write!(fmt, "Peer height updated to {}", height)
}
Event::Synced(hash, height) => {
write!(
fmt,
"Headers synced up to height {} with hash {}",
height, hash
)
}
Event::Syncing { current, best } => write!(fmt, "Syncing headers {}/{}", current, best),
Event::BlockConnected { height, header } => {
write!(
fmt,
"Block {} connected at height {}",
header.block_hash(),
height
)
}
Event::BlockDisconnected { height, header } => {
write!(
fmt,
"Block {} disconnected at height {}",
header.block_hash(),
height
)
}
Event::BlockDiscovered(from, hash) => {
write!(fmt, "{}: Discovered new block: {}", from, &hash)
}
Event::StaleTip(last_update) => {
write!(
fmt,
"Potential stale tip detected (last update was {})",
last_update
)
}
}
}
}
/// A `getheaders` request sent to a peer.
#[derive(Clone, Debug, PartialEq, Eq)]
struct GetHeaders {
/// Locators hashes.
locators: Locators,
/// Time at which the request was sent.
sent_at: LocalTime,
/// What to do if this request times out.
on_timeout: OnTimeout,
} | pub fn new(config: Config, rng: fastrand::Rng, upstream: U, clock: C) -> Self {
let peers = AddressBook::new(rng.clone());
let last_tip_update = None;
let last_peer_sample = None;
let last_idle = None;
let inflight = HashMap::with_hasher(rng.into());
Self {
peers,
config,
last_tip_update,
last_peer_sample,
last_idle,
inflight,
upstream,
clock,
}
}
/// Initialize the sync manager. Should only be called once.
pub fn initialize<T: BlockReader>(&mut self, tree: &T) {
// TODO: `tip` should return the height.
let (hash, _) = tree.tip();
let height = tree.height();
self.idle(tree);
self.upstream.event(Event::Synced(hash, height));
}
/// Called periodically.
pub fn idle<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
// Nb. The idle timeout is very long: as long as the block interval.
// This shouldn't be a problem, as the sync manager can make progress without it.
if now - self.last_idle.unwrap_or_default() >= IDLE_TIMEOUT {
if !self.sync(tree) {
self.sample_peers(tree);
}
self.last_idle = Some(now);
self.upstream.set_timer(IDLE_TIMEOUT);
}
}
/// Called when a new peer was negotiated.
pub fn peer_negotiated<T: BlockReader>(
&mut self,
socket: Socket,
height: Height,
services: ServiceFlags,
preferred: bool,
link: Link,
tree: &T,
) {
if link.is_outbound() && !services.has(REQUIRED_SERVICES) {
return;
}
if height > self.best_height().unwrap_or_else(|| tree.height()) {
self.upstream.event(Event::PeerHeightUpdated { height });
}
self.register(socket, height, preferred, link);
self.sync(tree);
}
/// Called when a peer disconnected.
pub fn peer_disconnected(&mut self, id: &PeerId) {
self.unregister(id);
}
/// Called when we received a `getheaders` message from a peer.
pub fn received_getheaders<T: BlockReader>(
&mut self,
addr: &PeerId,
(locator_hashes, stop_hash): Locators,
tree: &T,
) {
let max = self.config.max_message_headers;
if self.is_syncing() || max == 0 {
return;
}
let headers = tree.locate_headers(&locator_hashes, stop_hash, max);
if headers.is_empty() {
return;
}
self.upstream.headers(*addr, headers);
}
/// Import blocks into our block tree.
pub fn import_blocks<T: BlockTree, I: Iterator<Item = BlockHeader>>(
&mut self,
blocks: I,
tree: &mut T,
) -> Result<ImportResult, Error> {
match tree.import_blocks(blocks, &self.clock) {
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
let result = ImportResult::TipChanged(
header,
tip,
height,
reverted.clone(),
connected.clone(),
);
for (height, header) in reverted {
self.upstream
.event(Event::BlockDisconnected { height, header });
}
for (height, header) in connected {
self.upstream
.event(Event::BlockConnected { height, header });
}
self.upstream.event(Event::Synced(tip, height));
self.broadcast_tip(&tip, tree);
Ok(result)
}
Ok(result @ ImportResult::TipUnchanged) => Ok(result),
Err(err) => Err(err),
}
}
/// Called when we receive headers from a peer.
pub fn received_headers<T: BlockTree>(
&mut self,
from: &PeerId,
headers: Vec<BlockHeader>,
clock: &impl Clock,
tree: &mut T,
) -> Result<ImportResult, store::Error> {
let request = self.inflight.remove(from);
let headers = if let Some(headers) = NonEmpty::from_vec(headers) {
headers
} else {
return Ok(ImportResult::TipUnchanged);
};
let length = headers.len();
if length > MAX_MESSAGE_HEADERS {
log::debug!("Received more than maximum headers allowed from {}", from);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("too many headers"));
return Ok(ImportResult::TipUnchanged);
}
// When unsolicited, we don't want to process too many headers in case of a DoS.
if length > MAX_UNSOLICITED_HEADERS && request.is_none() {
log::debug!("Received {} unsolicited headers from {}", length, from);
return Ok(ImportResult::TipUnchanged);
}
if let Some(peer) = self.peers.get_mut(from) {
peer.last_active = Some(clock.local_time());
} else {
return Ok(ImportResult::TipUnchanged);
}
log::debug!("[sync] Received {} block header(s) from {}", length, from);
let root = headers.first().block_hash();
let best = headers.last().block_hash();
if tree.contains(&best) {
return Ok(ImportResult::TipUnchanged);
}
match self.import_blocks(headers.into_iter(), tree) {
Ok(ImportResult::TipUnchanged) => {
// Try to find a common ancestor that leads up to the first header in
// the list we received.
let locators = (tree.locator_hashes(tree.height()), root);
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Ignore);
Ok(ImportResult::TipUnchanged)
}
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
// Update peer height.
if let Some(peer) = self.peers.get_mut(from) {
if height > peer.height {
peer.tip = tip;
peer.height = height;
}
}
// Keep track of when we last updated our tip. This is useful to check
// whether our tip is stale.
self.last_tip_update = Some(clock.local_time());
// If we received less than the maximum number of headers, we must be in sync.
// Otherwise, ask for the next batch of headers.
if length < MAX_MESSAGE_HEADERS {
// If these headers were unsolicited, we may already be ready/synced.
// Otherwise, we're finally in sync.
self.broadcast_tip(&tip, tree);
self.sync(tree);
} else {
let locators = (vec![tip], BlockHash::all_zeros());
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Disconnect);
}
Ok(ImportResult::TipChanged(
header, tip, height, reverted, connected,
))
}
Err(err) => self
.handle_error(from, err)
.map(|()| ImportResult::TipUnchanged),
}
}
fn request(
&mut self,
addr: PeerId,
locators: Locators,
timeout: LocalDuration,
on_timeout: OnTimeout,
) {
// Don't request more than once from the same peer.
if self.inflight.contains_key(&addr) {
return;
}
if let Some(peer) = self.peers.get_mut(&addr) {
debug_assert!(peer.last_asked.as_ref() != Some(&locators));
peer.last_asked = Some(locators.clone());
let sent_at = self.clock.local_time();
let req = GetHeaders {
locators,
sent_at,
on_timeout,
};
self.inflight.insert(addr, req.clone());
self.upstream.get_headers(addr, req.locators);
self.upstream.set_timer(timeout);
}
}
/// Called when we received an `inv` message. This will happen if we are out of sync with a
/// peer, and blocks are being announced. Otherwise, we expect to receive a `headers` message.
pub fn received_inv<T: BlockReader>(&mut self, addr: PeerId, inv: Vec<Inventory>, tree: &T) {
// Don't try to fetch headers from `inv` message while syncing. It's not helpful.
if self.is_syncing() {
return;
}
// Ignore and disconnect peers misbehaving.
if inv.len() > MAX_MESSAGE_INVS {
return;
}
let peer = if let Some(peer) = self.peers.get_mut(&addr) {
peer
} else {
return;
};
let mut best_block = None;
for i in &inv {
if let Inventory::Block(hash) = i {
peer.tip = *hash;
// "Headers-first is the primary method of announcement on the network. If a node
// fell back to sending blocks by inv, it's probably for a re-org. The final block
// hash provided should be the highest."
if !tree.is_known(hash) {
self.upstream.event(Event::BlockDiscovered(addr, *hash));
best_block = Some(hash);
}
}
}
if let Some(stop_hash) = best_block {
let locators = (tree.locator_hashes(tree.height()), *stop_hash);
let timeout = self.config.request_timeout;
// Try to find headers leading up to the `inv` entry.
self.request(addr, locators, timeout, OnTimeout::Retry(3));
}
}
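// Retry-flow sketch (descriptive comments only, not original code): the request
// above is tracked in `inflight` with `OnTimeout::Retry(3)`. Each time
// `received_wake` finds it timed out, it samples a *different* candidate peer
// and re-issues the same locators with the counter decremented. Once the
// counter reaches `Retry(0)`, the timing-out peer is disconnected and a full
// `sync` is forced.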
/// Called when we received a tick.
pub fn received_wake<T: BlockReader>(&mut self, tree: &T) {
let local_time = self.clock.local_time();
let timeout = self.config.request_timeout;
let timed_out = self
.inflight
.iter()
.filter_map(|(peer, req)| {
if local_time - req.sent_at >= timeout {
Some((*peer, req.on_timeout, req.clone()))
} else {
None
}
})
.collect::<Vec<_>>();
let mut sync = false;
for (peer, on_timeout, req) in timed_out {
self.inflight.remove(&peer);
match on_timeout {
OnTimeout::Ignore => {
// It's likely that the peer just didn't have the requested header.
}
OnTimeout::Retry(0) | OnTimeout::Disconnect => {
self.upstream
.disconnect(peer, DisconnectReason::PeerTimeout("getheaders"));
sync = true;
}
OnTimeout::Retry(n) => {
if let Some((addr, _)) = self.peers.sample_with(|a, p| {
*a != peer && self.is_request_candidate(a, p, &req.locators.0)
}) {
let addr = *addr;
self.request(addr, req.locators, timeout, OnTimeout::Retry(n - 1));
}
}
}
}
// If some of the requests timed out, force a sync, otherwise just idle.
if sync {
self.sync(tree);
} else {
self.idle(tree);
}
}
/// Get the best known height out of all our peers.
pub fn best_height(&self) -> Option<Height> {
self.peers.iter().map(|(_, p)| p.height).max()
}
/// Are we currently syncing?
pub fn is_syncing(&self) -> bool {
!self.inflight.is_empty()
}
///////////////////////////////////////////////////////////////////////////
fn handle_error(&mut self, from: &PeerId, err: Error) -> Result<(), store::Error> {
match err {
// If this is an error with the underlying store, we have to propagate
// this up, because we can't handle it here.
Error::Store(e) => Err(e),
// If we got a bad block from the peer, we can handle it here.
Error::InvalidBlockPoW
| Error::InvalidBlockTarget(_, _)
| Error::InvalidBlockHash(_, _)
| Error::InvalidBlockHeight(_)
| Error::InvalidBlockTime(_, _) => {
log::debug!("{}: Received invalid headers: {}", from, err);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("invalid headers"));
Ok(())
}
// Harmless errors can be ignored.
Error::DuplicateBlock(_) | Error::BlockMissing(_) => Ok(()),
// TODO: This will be removed.
Error::BlockImportAborted(_, _, _) => Ok(()),
// These shouldn't happen here.
// TODO: Perhaps there's a better way to have this error not show up here.
Error::Interrupted | Error::GenesisMismatch => Ok(()),
}
}
fn record_misbehavior(&mut self, peer: &PeerId) {
self.upstream.event(Event::PeerMisbehaved(*peer));
}
/// Check whether our current tip is stale.
///
/// *Nb. This doesn't check whether we've already requested new blocks.*
fn stale_tip<T: BlockReader>(&self, tree: &T) -> Option<LocalTime> {
let now = self.clock.local_time();
if let Some(last_update) = self.last_tip_update {
if last_update
< now - LocalDuration::from_secs(self.config.params.pow_target_spacing * 3)
{
return Some(last_update);
}
}
// If we don't have the time of the last update, it's probably because we
// are fresh, or restarted our node. In that case we check the last block time
// instead.
let (_, tip) = tree.tip();
let time = LocalTime::from_block_time(tip.time);
if time <= now - TIP_STALE_DURATION {
return Some(time);
}
None
}
/// Register a new peer.
fn register(&mut self, socket: Socket, height: Height, preferred: bool, link: Link) {
let last_active = None;
let last_asked = None;
let tip = BlockHash::all_zeros();
self.peers.insert(
socket.addr,
Peer {
height,
tip,
link,
preferred,
last_active,
last_asked,
_socket: socket,
},
);
}
/// Unregister a peer.
fn unregister(&mut self, id: &PeerId) {
self.inflight.remove(id);
self.peers.remove(id);
}
/// Select a random preferred peer.
fn preferred_peer<T: BlockReader>(&self, locators: &Locators, tree: &T) -> Option<PeerId> {
let peers: Vec<_> = self.peers.shuffled().collect();
let height = tree.height();
let locators = &locators.0;
peers
.iter()
.find(|(a, p)| {
p.preferred && p.height > height && self.is_request_candidate(a, p, locators)
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| p.preferred && self.is_request_candidate(a, p, locators))
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| self.is_request_candidate(a, p, locators))
})
.map(|(a, _)| **a)
}
/// Check whether a peer is a good request candidate for the given locators.
/// This function ensures that we don't ask the same peer twice for the same locators.
fn is_request_candidate(&self, addr: &PeerId, peer: &Peer, locators: &[BlockHash]) -> bool {
!self.inflight.contains_key(addr)
&& peer.link.is_outbound()
&& peer.last_asked.as_ref().map_or(true, |l| l.0 != locators)
}
/// Check whether or not we are in sync with the network.
fn is_synced<T: BlockReader>(&self, tree: &T) -> bool {
if let Some(last_update) = self.stale_tip(tree) {
self.upstream.event(Event::StaleTip(last_update));
return false;
}
let height = tree.height();
// Find the peer with the longest chain and compare our height to it.
if let Some(peer_height) = self.best_height() {
return height >= peer_height;
}
// Assume we're out of sync.
false
}
/// Check if we're currently syncing with these locators.
fn syncing(&self, locators: &Locators) -> bool {
self.inflight.values().any(|r| &r.locators == locators)
}
/// Start syncing if we're out of sync.
/// Returns `true` if we started syncing, and `false` if we were up to date or not able to
/// sync.
fn sync<T: BlockReader>(&mut self, tree: &T) -> bool {
if self.peers.is_empty() {
return false;
}
if self.is_synced(tree) {
let (tip, _) = tree.tip();
let height = tree.height();
// TODO: This event can fire multiple times if `sync` is called while we're already
// in sync.
self.upstream.event(Event::Synced(tip, height));
return false;
}
// ... It looks like we're out of sync ...
let locators = (tree.locator_hashes(tree.height()), BlockHash::all_zeros());
// If we're already fetching these headers, just wait.
if self.syncing(&locators) {
return false;
}
if let Some(addr) = self.preferred_peer(&locators, tree) {
let timeout = self.config.request_timeout;
let current = tree.height();
let best = self.best_height().unwrap_or(current);
if best > current {
self.request(addr, locators, timeout, OnTimeout::Ignore);
self.upstream.event(Event::Syncing { current, best });
return true;
}
}
// TODO: No peer found to sync.. emit event.
false
}
/// Broadcast our best block header to connected peers who don't have it.
fn broadcast_tip<T: BlockReader>(&mut self, hash: &BlockHash, tree: &T) {
if let Some((height, best)) = tree.get_block(hash) {
for (addr, peer) in &*self.peers {
// TODO: Don't broadcast to peer that is currently syncing?
if peer.link == Link::Inbound && height > peer.height {
self.upstream.headers(*addr, vec![*best]);
}
}
}
}
/// Ask all our outbound peers whether they have better block headers.
fn sample_peers<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
if now - self.last_peer_sample.unwrap_or_default() < PEER_SAMPLE_INTERVAL {
return;
}
if self.stale_tip(tree).is_none() {
return;
}
self.last_peer_sample = Some(now);
// If we think we're in sync and we haven't asked other peers in a while, then
// sample their headers just to make sure we're on the right chain.
let locators = tree.locator_hashes(tree.height());
let addrs = self
.peers
.iter()
.filter(|(a, p)| self.is_request_candidate(a, p, &locators))
.map(|(a, _)| *a)
.collect::<Vec<_>>();
for addr in addrs {
self.request(
addr,
(locators.clone(), BlockHash::all_zeros()),
self.config.request_timeout,
OnTimeout::Ignore,
);
}
}
} |
impl<U: SetTimer + Disconnect + Wire<Event>, C: Clock> SyncManager<U, C> {
/// Create a new sync manager. | random_line_split |
syncmgr.rs | //!
//! Manages header synchronization with peers.
//!
use nakamoto_common::bitcoin::consensus::params::Params;
use nakamoto_common::bitcoin::network::constants::ServiceFlags;
use nakamoto_common::bitcoin::network::message_blockdata::Inventory;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::store;
use nakamoto_common::block::time::{Clock, LocalDuration, LocalTime};
use nakamoto_common::block::tree::{BlockReader, BlockTree, Error, ImportResult};
use nakamoto_common::block::{BlockHash, BlockHeader, Height};
use nakamoto_common::collections::{AddressBook, HashMap};
use nakamoto_common::nonempty::NonEmpty;
use super::output::{Disconnect, SetTimer, Wire};
use super::{DisconnectReason, Link, Locators, PeerId, Socket};
/// How long to wait for a request, eg. `getheaders` to be fulfilled.
pub const REQUEST_TIMEOUT: LocalDuration = LocalDuration::from_secs(30);
/// How long before the tip of the chain is considered stale. This takes into account
/// that the block timestamp may have been set sometime in the future.
pub const TIP_STALE_DURATION: LocalDuration = LocalDuration::from_mins(60 * 2);
/// Maximum number of headers sent in a `headers` message.
pub const MAX_MESSAGE_HEADERS: usize = 2000;
/// Maximum number of inventories sent in an `inv` message.
pub const MAX_MESSAGE_INVS: usize = 50000;
/// Idle timeout.
pub const IDLE_TIMEOUT: LocalDuration = LocalDuration::BLOCK_INTERVAL;
/// Services required from peers for header sync.
pub const REQUIRED_SERVICES: ServiceFlags = ServiceFlags::NETWORK;
/// Maximum headers announced in a `headers` message, when unsolicited.
const MAX_UNSOLICITED_HEADERS: usize = 24;
/// How long to wait between checks for longer chains from peers.
const PEER_SAMPLE_INTERVAL: LocalDuration = LocalDuration::from_mins(60);
/// What to do if a timeout for a peer is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum OnTimeout {
/// Disconnect peer on timeout.
Disconnect,
/// Do nothing on timeout.
Ignore,
/// Retry with a different peer on timeout.
Retry(usize),
}
/// State of a sync peer.
#[derive(Debug)]
struct Peer {
height: Height,
preferred: bool,
tip: BlockHash,
link: Link,
last_active: Option<LocalTime>,
last_asked: Option<Locators>,
_socket: Socket,
}
/// Sync manager configuration.
#[derive(Debug)]
pub struct Config {
/// Maximum number of messages in a `headers` message.
pub max_message_headers: usize,
/// How long to wait for a response from a peer.
pub request_timeout: LocalDuration,
/// Consensus parameters.
pub params: Params,
}
/// The sync manager state.
#[derive(Debug)]
pub struct SyncManager<U, C> {
/// Sync manager configuration.
pub config: Config,
/// Sync-specific peer state.
peers: AddressBook<PeerId, Peer>,
/// Last time our tip was updated.
last_tip_update: Option<LocalTime>,
/// Last time we sampled our peers for their active chain.
last_peer_sample: Option<LocalTime>,
/// Last time we idled.
last_idle: Option<LocalTime>,
/// In-flight requests to peers.
inflight: HashMap<PeerId, GetHeaders>,
/// Upstream protocol channel.
upstream: U,
/// Clock.
clock: C,
}
/// An event emitted by the sync manager.
#[derive(Debug, Clone)]
pub enum | {
/// A block was added to the main chain.
BlockConnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A block was removed from the main chain.
BlockDisconnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A new block was discovered via a peer.
BlockDiscovered(PeerId, BlockHash),
/// Syncing headers.
Syncing {
/// Current block header height.
current: Height,
/// Best known block header height.
best: Height,
},
/// Synced up to the specified hash and height.
Synced(BlockHash, Height),
/// Potential stale tip detected on the active chain.
StaleTip(LocalTime),
/// Peer misbehaved.
PeerMisbehaved(PeerId),
/// Peer height updated.
PeerHeightUpdated {
/// Best height known.
height: Height,
},
}
impl std::fmt::Display for Event {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Event::PeerMisbehaved(addr) => {
write!(fmt, "{}: Peer misbehaved", addr)
}
Event::PeerHeightUpdated { height } => {
write!(fmt, "Peer height updated to {}", height)
}
Event::Synced(hash, height) => {
write!(
fmt,
"Headers synced up to height {} with hash {}",
height, hash
)
}
Event::Syncing { current, best } => write!(fmt, "Syncing headers {}/{}", current, best),
Event::BlockConnected { height, header } => {
write!(
fmt,
"Block {} connected at height {}",
header.block_hash(),
height
)
}
Event::BlockDisconnected { height, header } => {
write!(
fmt,
"Block {} disconnected at height {}",
header.block_hash(),
height
)
}
Event::BlockDiscovered(from, hash) => {
write!(fmt, "{}: Discovered new block: {}", from, &hash)
}
Event::StaleTip(last_update) => {
write!(
fmt,
"Potential stale tip detected (last update was {})",
last_update
)
}
}
}
}
/// A `getheaders` request sent to a peer.
#[derive(Clone, Debug, PartialEq, Eq)]
struct GetHeaders {
/// Locators hashes.
locators: Locators,
/// Time at which the request was sent.
sent_at: LocalTime,
/// What to do if this request times out.
on_timeout: OnTimeout,
}
impl<U: SetTimer + Disconnect + Wire<Event>, C: Clock> SyncManager<U, C> {
/// Create a new sync manager.
pub fn new(config: Config, rng: fastrand::Rng, upstream: U, clock: C) -> Self {
let peers = AddressBook::new(rng.clone());
let last_tip_update = None;
let last_peer_sample = None;
let last_idle = None;
let inflight = HashMap::with_hasher(rng.into());
Self {
peers,
config,
last_tip_update,
last_peer_sample,
last_idle,
inflight,
upstream,
clock,
}
}
/// Initialize the sync manager. Should only be called once.
pub fn initialize<T: BlockReader>(&mut self, tree: &T) {
// TODO: `tip` should return the height.
let (hash, _) = tree.tip();
let height = tree.height();
self.idle(tree);
self.upstream.event(Event::Synced(hash, height));
}
/// Called periodically.
pub fn idle<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
// Nb. The idle timeout is very long: as long as the block interval.
// This shouldn't be a problem, as the sync manager can make progress without it.
if now - self.last_idle.unwrap_or_default() >= IDLE_TIMEOUT {
if !self.sync(tree) {
self.sample_peers(tree);
}
self.last_idle = Some(now);
self.upstream.set_timer(IDLE_TIMEOUT);
}
}
/// Called when a new peer was negotiated.
pub fn peer_negotiated<T: BlockReader>(
&mut self,
socket: Socket,
height: Height,
services: ServiceFlags,
preferred: bool,
link: Link,
tree: &T,
) {
if link.is_outbound() && !services.has(REQUIRED_SERVICES) {
return;
}
if height > self.best_height().unwrap_or_else(|| tree.height()) {
self.upstream.event(Event::PeerHeightUpdated { height });
}
self.register(socket, height, preferred, link);
self.sync(tree);
}
/// Called when a peer disconnected.
pub fn peer_disconnected(&mut self, id: &PeerId) {
self.unregister(id);
}
/// Called when we received a `getheaders` message from a peer.
pub fn received_getheaders<T: BlockReader>(
&mut self,
addr: &PeerId,
(locator_hashes, stop_hash): Locators,
tree: &T,
) {
let max = self.config.max_message_headers;
if self.is_syncing() || max == 0 {
return;
}
let headers = tree.locate_headers(&locator_hashes, stop_hash, max);
if headers.is_empty() {
return;
}
self.upstream.headers(*addr, headers);
}
/// Import blocks into our block tree.
pub fn import_blocks<T: BlockTree, I: Iterator<Item = BlockHeader>>(
&mut self,
blocks: I,
tree: &mut T,
) -> Result<ImportResult, Error> {
match tree.import_blocks(blocks, &self.clock) {
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
let result = ImportResult::TipChanged(
header,
tip,
height,
reverted.clone(),
connected.clone(),
);
for (height, header) in reverted {
self.upstream
.event(Event::BlockDisconnected { height, header });
}
for (height, header) in connected {
self.upstream
.event(Event::BlockConnected { height, header });
}
self.upstream.event(Event::Synced(tip, height));
self.broadcast_tip(&tip, tree);
Ok(result)
}
Ok(result @ ImportResult::TipUnchanged) => Ok(result),
Err(err) => Err(err),
}
}
/// Called when we receive headers from a peer.
pub fn received_headers<T: BlockTree>(
&mut self,
from: &PeerId,
headers: Vec<BlockHeader>,
clock: &impl Clock,
tree: &mut T,
) -> Result<ImportResult, store::Error> {
let request = self.inflight.remove(from);
let headers = if let Some(headers) = NonEmpty::from_vec(headers) {
headers
} else {
return Ok(ImportResult::TipUnchanged);
};
let length = headers.len();
if length > MAX_MESSAGE_HEADERS {
log::debug!("Received more than maximum headers allowed from {}", from);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("too many headers"));
return Ok(ImportResult::TipUnchanged);
}
// When unsolicited, we don't want to process too many headers in case of a DoS.
if length > MAX_UNSOLICITED_HEADERS && request.is_none() {
log::debug!("Received {} unsolicited headers from {}", length, from);
return Ok(ImportResult::TipUnchanged);
}
if let Some(peer) = self.peers.get_mut(from) {
peer.last_active = Some(clock.local_time());
} else {
return Ok(ImportResult::TipUnchanged);
}
log::debug!("[sync] Received {} block header(s) from {}", length, from);
let root = headers.first().block_hash();
let best = headers.last().block_hash();
if tree.contains(&best) {
return Ok(ImportResult::TipUnchanged);
}
match self.import_blocks(headers.into_iter(), tree) {
Ok(ImportResult::TipUnchanged) => {
// Try to find a common ancestor that leads up to the first header in
// the list we received.
let locators = (tree.locator_hashes(tree.height()), root);
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Ignore);
Ok(ImportResult::TipUnchanged)
}
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
// Update peer height.
if let Some(peer) = self.peers.get_mut(from) {
if height > peer.height {
peer.tip = tip;
peer.height = height;
}
}
// Keep track of when we last updated our tip. This is useful to check
// whether our tip is stale.
self.last_tip_update = Some(clock.local_time());
// If we received less than the maximum number of headers, we must be in sync.
// Otherwise, ask for the next batch of headers.
if length < MAX_MESSAGE_HEADERS {
// If these headers were unsolicited, we may already be ready/synced.
// Otherwise, we're finally in sync.
self.broadcast_tip(&tip, tree);
self.sync(tree);
} else {
let locators = (vec![tip], BlockHash::all_zeros());
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Disconnect);
}
Ok(ImportResult::TipChanged(
header, tip, height, reverted, connected,
))
}
Err(err) => self
.handle_error(from, err)
.map(|()| ImportResult::TipUnchanged),
}
}
fn request(
&mut self,
addr: PeerId,
locators: Locators,
timeout: LocalDuration,
on_timeout: OnTimeout,
) {
// Don't request more than once from the same peer.
if self.inflight.contains_key(&addr) {
return;
}
if let Some(peer) = self.peers.get_mut(&addr) {
debug_assert!(peer.last_asked.as_ref() != Some(&locators));
peer.last_asked = Some(locators.clone());
let sent_at = self.clock.local_time();
let req = GetHeaders {
locators,
sent_at,
on_timeout,
};
self.inflight.insert(addr, req.clone());
self.upstream.get_headers(addr, req.locators);
self.upstream.set_timer(timeout);
}
}
/// Called when we received an `inv` message. This will happen if we are out of sync with a
/// peer, and blocks are being announced. Otherwise, we expect to receive a `headers` message.
pub fn received_inv<T: BlockReader>(&mut self, addr: PeerId, inv: Vec<Inventory>, tree: &T) {
// Don't try to fetch headers from `inv` message while syncing. It's not helpful.
if self.is_syncing() {
return;
}
// Ignore and disconnect peers misbehaving.
if inv.len() > MAX_MESSAGE_INVS {
return;
}
let peer = if let Some(peer) = self.peers.get_mut(&addr) {
peer
} else {
return;
};
let mut best_block = None;
for i in &inv {
if let Inventory::Block(hash) = i {
peer.tip = *hash;
// "Headers-first is the primary method of announcement on the network. If a node
// fell back to sending blocks by inv, it's probably for a re-org. The final block
// hash provided should be the highest."
if !tree.is_known(hash) {
self.upstream.event(Event::BlockDiscovered(addr, *hash));
best_block = Some(hash);
}
}
}
if let Some(stop_hash) = best_block {
let locators = (tree.locator_hashes(tree.height()), *stop_hash);
let timeout = self.config.request_timeout;
// Try to find headers leading up to the `inv` entry.
self.request(addr, locators, timeout, OnTimeout::Retry(3));
}
}
/// Called when we received a tick.
pub fn received_wake<T: BlockReader>(&mut self, tree: &T) {
let local_time = self.clock.local_time();
let timeout = self.config.request_timeout;
let timed_out = self
.inflight
.iter()
.filter_map(|(peer, req)| {
if local_time - req.sent_at >= timeout {
Some((*peer, req.on_timeout, req.clone()))
} else {
None
}
})
.collect::<Vec<_>>();
let mut sync = false;
for (peer, on_timeout, req) in timed_out {
self.inflight.remove(&peer);
match on_timeout {
OnTimeout::Ignore => {
// It's likely that the peer just didn't have the requested header.
}
OnTimeout::Retry(0) | OnTimeout::Disconnect => {
self.upstream
.disconnect(peer, DisconnectReason::PeerTimeout("getheaders"));
sync = true;
}
OnTimeout::Retry(n) => {
if let Some((addr, _)) = self.peers.sample_with(|a, p| {
*a != peer && self.is_request_candidate(a, p, &req.locators.0)
}) {
let addr = *addr;
self.request(addr, req.locators, timeout, OnTimeout::Retry(n - 1));
}
}
}
}
// If some of the requests timed out, force a sync, otherwise just idle.
if sync {
self.sync(tree);
} else {
self.idle(tree);
}
}
/// Get the best known height out of all our peers.
pub fn best_height(&self) -> Option<Height> {
self.peers.iter().map(|(_, p)| p.height).max()
}
/// Are we currently syncing?
pub fn is_syncing(&self) -> bool {
!self.inflight.is_empty()
}
///////////////////////////////////////////////////////////////////////////
fn handle_error(&mut self, from: &PeerId, err: Error) -> Result<(), store::Error> {
match err {
// If this is an error with the underlying store, we have to propagate
// this up, because we can't handle it here.
Error::Store(e) => Err(e),
// If we got a bad block from the peer, we can handle it here.
Error::InvalidBlockPoW
| Error::InvalidBlockTarget(_, _)
| Error::InvalidBlockHash(_, _)
| Error::InvalidBlockHeight(_)
| Error::InvalidBlockTime(_, _) => {
log::debug!("{}: Received invalid headers: {}", from, err);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("invalid headers"));
Ok(())
}
// Harmless errors can be ignored.
Error::DuplicateBlock(_) | Error::BlockMissing(_) => Ok(()),
// TODO: This will be removed.
Error::BlockImportAborted(_, _, _) => Ok(()),
// These shouldn't happen here.
// TODO: Perhaps there's a better way to have this error not show up here.
Error::Interrupted | Error::GenesisMismatch => Ok(()),
}
}
fn record_misbehavior(&mut self, peer: &PeerId) {
self.upstream.event(Event::PeerMisbehaved(*peer));
}
/// Check whether our current tip is stale.
///
/// *Nb. This doesn't check whether we've already requested new blocks.*
fn stale_tip<T: BlockReader>(&self, tree: &T) -> Option<LocalTime> {
let now = self.clock.local_time();
if let Some(last_update) = self.last_tip_update {
if last_update
< now - LocalDuration::from_secs(self.config.params.pow_target_spacing * 3)
{
return Some(last_update);
}
}
// If we don't have the time of the last update, it's probably because we
// are fresh, or restarted our node. In that case we check the last block time
// instead.
let (_, tip) = tree.tip();
let time = LocalTime::from_block_time(tip.time);
if time <= now - TIP_STALE_DURATION {
return Some(time);
}
None
}
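// Worked example of the thresholds above (descriptive comments only): with
// mainnet consensus params, `pow_target_spacing` is 600 seconds, so the
// recorded-update branch flags the tip as stale after 3 * 600 s = 30 minutes
// without a tip update, while the fallback branch (no recorded update, e.g.
// right after a restart) compares the tip's own timestamp against
// TIP_STALE_DURATION = 2 hours.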
/// Register a new peer.
fn register(&mut self, socket: Socket, height: Height, preferred: bool, link: Link) {
let last_active = None;
let last_asked = None;
let tip = BlockHash::all_zeros();
self.peers.insert(
socket.addr,
Peer {
height,
tip,
link,
preferred,
last_active,
last_asked,
_socket: socket,
},
);
}
/// Unregister a peer.
fn unregister(&mut self, id: &PeerId) {
self.inflight.remove(id);
self.peers.remove(id);
}
/// Select a random preferred peer.
fn preferred_peer<T: BlockReader>(&self, locators: &Locators, tree: &T) -> Option<PeerId> {
let peers: Vec<_> = self.peers.shuffled().collect();
let height = tree.height();
let locators = &locators.0;
peers
.iter()
.find(|(a, p)| {
p.preferred && p.height > height && self.is_request_candidate(a, p, locators)
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| p.preferred && self.is_request_candidate(a, p, locators))
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| self.is_request_candidate(a, p, locators))
})
.map(|(a, _)| **a)
}
/// Check whether a peer is a good request candidate for the given locators.
/// This function ensures that we don't ask the same peer twice for the same locators.
fn is_request_candidate(&self, addr: &PeerId, peer: &Peer, locators: &[BlockHash]) -> bool {
!self.inflight.contains_key(addr)
&& peer.link.is_outbound()
&& peer.last_asked.as_ref().map_or(true, |l| l.0 != locators)
}
/// Check whether or not we are in sync with the network.
fn is_synced<T: BlockReader>(&self, tree: &T) -> bool {
if let Some(last_update) = self.stale_tip(tree) {
self.upstream.event(Event::StaleTip(last_update));
return false;
}
let height = tree.height();
// Find the peer with the longest chain and compare our height to it.
if let Some(peer_height) = self.best_height() {
return height >= peer_height;
}
// Assume we're out of sync.
false
}
/// Check if we're currently syncing with these locators.
fn syncing(&self, locators: &Locators) -> bool {
self.inflight.values().any(|r| &r.locators == locators)
}
/// Start syncing if we're out of sync.
/// Returns `true` if we started syncing, and `false` if we were up to date or not able to
/// sync.
fn sync<T: BlockReader>(&mut self, tree: &T) -> bool {
if self.peers.is_empty() {
return false;
}
if self.is_synced(tree) {
let (tip, _) = tree.tip();
let height = tree.height();
// TODO: This event can fire multiple times if `sync` is called while we're already
// in sync.
self.upstream.event(Event::Synced(tip, height));
return false;
}
// ... It looks like we're out of sync ...
let locators = (tree.locator_hashes(tree.height()), BlockHash::all_zeros());
// If we're already fetching these headers, just wait.
if self.syncing(&locators) {
return false;
}
if let Some(addr) = self.preferred_peer(&locators, tree) {
let timeout = self.config.request_timeout;
let current = tree.height();
let best = self.best_height().unwrap_or(current);
if best > current {
self.request(addr, locators, timeout, OnTimeout::Ignore);
self.upstream.event(Event::Syncing { current, best });
return true;
}
}
// TODO: No peer found to sync.. emit event.
false
}
/// Broadcast our best block header to connected peers who don't have it.
fn broadcast_tip<T: BlockReader>(&mut self, hash: &BlockHash, tree: &T) {
if let Some((height, best)) = tree.get_block(hash) {
for (addr, peer) in &*self.peers {
// TODO: Don't broadcast to peer that is currently syncing?
if peer.link == Link::Inbound && height > peer.height {
self.upstream.headers(*addr, vec![*best]);
}
}
}
}
/// Ask all our outbound peers whether they have better block headers.
fn sample_peers<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
if now - self.last_peer_sample.unwrap_or_default() < PEER_SAMPLE_INTERVAL {
return;
}
if self.stale_tip(tree).is_none() {
return;
}
self.last_peer_sample = Some(now);
// If we think we're in sync and we haven't asked other peers in a while, then
// sample their headers just to make sure we're on the right chain.
let locators = tree.locator_hashes(tree.height());
let addrs = self
.peers
.iter()
.filter(|(a, p)| self.is_request_candidate(a, p, &locators))
.map(|(a, _)| *a)
.collect::<Vec<_>>();
for addr in addrs {
self.request(
addr,
(locators.clone(), BlockHash::all_zeros()),
self.config.request_timeout,
OnTimeout::Ignore,
);
}
}
}
| Event | identifier_name |
syncmgr.rs | //!
//! Manages header synchronization with peers.
//!
use nakamoto_common::bitcoin::consensus::params::Params;
use nakamoto_common::bitcoin::network::constants::ServiceFlags;
use nakamoto_common::bitcoin::network::message_blockdata::Inventory;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::store;
use nakamoto_common::block::time::{Clock, LocalDuration, LocalTime};
use nakamoto_common::block::tree::{BlockReader, BlockTree, Error, ImportResult};
use nakamoto_common::block::{BlockHash, BlockHeader, Height};
use nakamoto_common::collections::{AddressBook, HashMap};
use nakamoto_common::nonempty::NonEmpty;
use super::output::{Disconnect, SetTimer, Wire};
use super::{DisconnectReason, Link, Locators, PeerId, Socket};
/// How long to wait for a request, eg. `getheaders` to be fulfilled.
pub const REQUEST_TIMEOUT: LocalDuration = LocalDuration::from_secs(30);
/// How long before the tip of the chain is considered stale. This takes into account
/// that the block timestamp may have been set sometime in the future.
pub const TIP_STALE_DURATION: LocalDuration = LocalDuration::from_mins(60 * 2);
/// Maximum number of headers sent in a `headers` message.
pub const MAX_MESSAGE_HEADERS: usize = 2000;
/// Maximum number of inventories sent in an `inv` message.
pub const MAX_MESSAGE_INVS: usize = 50000;
/// Idle timeout.
pub const IDLE_TIMEOUT: LocalDuration = LocalDuration::BLOCK_INTERVAL;
/// Services required from peers for header sync.
pub const REQUIRED_SERVICES: ServiceFlags = ServiceFlags::NETWORK;
/// Maximum headers announced in a `headers` message, when unsolicited.
const MAX_UNSOLICITED_HEADERS: usize = 24;
/// How long to wait between checks for longer chains from peers.
const PEER_SAMPLE_INTERVAL: LocalDuration = LocalDuration::from_mins(60);
/// What to do if a timeout for a peer is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum OnTimeout {
/// Disconnect peer on timeout.
Disconnect,
/// Do nothing on timeout.
Ignore,
/// Retry with a different peer on timeout.
Retry(usize),
}
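// How these policies are used later in this file (descriptive comments only):
// `Ignore` backs speculative locator requests where the peer may simply lack
// the header (the common-ancestor probe, `sync`, and peer sampling),
// `Disconnect` backs the follow-up request for the next 2000-header batch
// during active sync, and `Retry(3)` backs requests triggered by block `inv`
// announcements, which are retried against other candidate peers before
// giving up.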
/// State of a sync peer.
#[derive(Debug)]
struct Peer {
height: Height,
preferred: bool,
tip: BlockHash,
link: Link,
last_active: Option<LocalTime>,
last_asked: Option<Locators>,
_socket: Socket,
}
/// Sync manager configuration.
#[derive(Debug)]
pub struct Config {
/// Maximum number of messages in a `headers` message.
pub max_message_headers: usize,
/// How long to wait for a response from a peer.
pub request_timeout: LocalDuration,
/// Consensus parameters.
pub params: Params,
}
/// The sync manager state.
#[derive(Debug)]
pub struct SyncManager<U, C> {
/// Sync manager configuration.
pub config: Config,
/// Sync-specific peer state.
peers: AddressBook<PeerId, Peer>,
/// Last time our tip was updated.
last_tip_update: Option<LocalTime>,
/// Last time we sampled our peers for their active chain.
last_peer_sample: Option<LocalTime>,
/// Last time we idled.
last_idle: Option<LocalTime>,
/// In-flight requests to peers.
inflight: HashMap<PeerId, GetHeaders>,
/// Upstream protocol channel.
upstream: U,
/// Clock.
clock: C,
}
/// An event emitted by the sync manager.
#[derive(Debug, Clone)]
pub enum Event {
/// A block was added to the main chain.
BlockConnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A block was removed from the main chain.
BlockDisconnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A new block was discovered via a peer.
BlockDiscovered(PeerId, BlockHash),
/// Syncing headers.
Syncing {
/// Current block header height.
current: Height,
/// Best known block header height.
best: Height,
},
/// Synced up to the specified hash and height.
Synced(BlockHash, Height),
/// Potential stale tip detected on the active chain.
StaleTip(LocalTime),
/// Peer misbehaved.
PeerMisbehaved(PeerId),
/// Peer height updated.
PeerHeightUpdated {
/// Best height known.
height: Height,
},
}
impl std::fmt::Display for Event {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Event::PeerMisbehaved(addr) => {
write!(fmt, "{}: Peer misbehaved", addr)
}
Event::PeerHeightUpdated { height } => {
write!(fmt, "Peer height updated to {}", height)
}
Event::Synced(hash, height) => {
write!(
fmt,
"Headers synced up to height {} with hash {}",
height, hash
)
}
Event::Syncing { current, best } => write!(fmt, "Syncing headers {}/{}", current, best),
Event::BlockConnected { height, header } => {
write!(
fmt,
"Block {} connected at height {}",
header.block_hash(),
height
)
}
Event::BlockDisconnected { height, header } => {
write!(
fmt,
"Block {} disconnected at height {}",
header.block_hash(),
height
)
}
Event::BlockDiscovered(from, hash) => {
write!(fmt, "{}: Discovered new block: {}", from, &hash)
}
Event::StaleTip(last_update) => {
write!(
fmt,
"Potential stale tip detected (last update was {})",
last_update
)
}
}
}
}
/// A `getheaders` request sent to a peer.
#[derive(Clone, Debug, PartialEq, Eq)]
struct GetHeaders {
/// Locators hashes.
locators: Locators,
/// Time at which the request was sent.
sent_at: LocalTime,
/// What to do if this request times out.
on_timeout: OnTimeout,
}
impl<U: SetTimer + Disconnect + Wire<Event>, C: Clock> SyncManager<U, C> {
/// Create a new sync manager.
pub fn new(config: Config, rng: fastrand::Rng, upstream: U, clock: C) -> Self {
let peers = AddressBook::new(rng.clone());
let last_tip_update = None;
let last_peer_sample = None;
let last_idle = None;
let inflight = HashMap::with_hasher(rng.into());
Self {
peers,
config,
last_tip_update,
last_peer_sample,
last_idle,
inflight,
upstream,
clock,
}
}
/// Initialize the sync manager. Should only be called once.
pub fn initialize<T: BlockReader>(&mut self, tree: &T) {
// TODO: `tip` should return the height.
let (hash, _) = tree.tip();
let height = tree.height();
self.idle(tree);
self.upstream.event(Event::Synced(hash, height));
}
/// Called periodically.
pub fn idle<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
// Nb. The idle timeout is very long: as long as the block interval.
// This shouldn't be a problem, as the sync manager can make progress without it.
if now - self.last_idle.unwrap_or_default() >= IDLE_TIMEOUT {
if !self.sync(tree) {
self.sample_peers(tree);
}
self.last_idle = Some(now);
self.upstream.set_timer(IDLE_TIMEOUT);
}
}
/// Called when a new peer was negotiated.
pub fn peer_negotiated<T: BlockReader>(
&mut self,
socket: Socket,
height: Height,
services: ServiceFlags,
preferred: bool,
link: Link,
tree: &T,
) |
/// Called when a peer disconnected.
pub fn peer_disconnected(&mut self, id: &PeerId) {
self.unregister(id);
}
/// Called when we received a `getheaders` message from a peer.
pub fn received_getheaders<T: BlockReader>(
&mut self,
addr: &PeerId,
(locator_hashes, stop_hash): Locators,
tree: &T,
) {
let max = self.config.max_message_headers;
if self.is_syncing() || max == 0 {
return;
}
let headers = tree.locate_headers(&locator_hashes, stop_hash, max);
if headers.is_empty() {
return;
}
self.upstream.headers(*addr, headers);
}
/// Import blocks into our block tree.
pub fn import_blocks<T: BlockTree, I: Iterator<Item = BlockHeader>>(
&mut self,
blocks: I,
tree: &mut T,
) -> Result<ImportResult, Error> {
match tree.import_blocks(blocks, &self.clock) {
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
let result = ImportResult::TipChanged(
header,
tip,
height,
reverted.clone(),
connected.clone(),
);
for (height, header) in reverted {
self.upstream
.event(Event::BlockDisconnected { height, header });
}
for (height, header) in connected {
self.upstream
.event(Event::BlockConnected { height, header });
}
self.upstream.event(Event::Synced(tip, height));
self.broadcast_tip(&tip, tree);
Ok(result)
}
Ok(result @ ImportResult::TipUnchanged) => Ok(result),
Err(err) => Err(err),
}
}
/// Called when we receive headers from a peer.
pub fn received_headers<T: BlockTree>(
&mut self,
from: &PeerId,
headers: Vec<BlockHeader>,
clock: &impl Clock,
tree: &mut T,
) -> Result<ImportResult, store::Error> {
let request = self.inflight.remove(from);
let headers = if let Some(headers) = NonEmpty::from_vec(headers) {
headers
} else {
return Ok(ImportResult::TipUnchanged);
};
let length = headers.len();
if length > MAX_MESSAGE_HEADERS {
log::debug!("Received more than maximum headers allowed from {}", from);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("too many headers"));
return Ok(ImportResult::TipUnchanged);
}
// When unsolicited, we don't want to process too many headers in case of a DoS.
if length > MAX_UNSOLICITED_HEADERS && request.is_none() {
log::debug!("Received {} unsolicited headers from {}", length, from);
return Ok(ImportResult::TipUnchanged);
}
if let Some(peer) = self.peers.get_mut(from) {
peer.last_active = Some(clock.local_time());
} else {
return Ok(ImportResult::TipUnchanged);
}
log::debug!("[sync] Received {} block header(s) from {}", length, from);
let root = headers.first().block_hash();
let best = headers.last().block_hash();
if tree.contains(&best) {
return Ok(ImportResult::TipUnchanged);
}
match self.import_blocks(headers.into_iter(), tree) {
Ok(ImportResult::TipUnchanged) => {
// Try to find a common ancestor that leads up to the first header in
// the list we received.
let locators = (tree.locator_hashes(tree.height()), root);
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Ignore);
Ok(ImportResult::TipUnchanged)
}
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
// Update peer height.
if let Some(peer) = self.peers.get_mut(from) {
if height > peer.height {
peer.tip = tip;
peer.height = height;
}
}
// Keep track of when we last updated our tip. This is useful to check
// whether our tip is stale.
self.last_tip_update = Some(clock.local_time());
// If we received less than the maximum number of headers, we must be in sync.
// Otherwise, ask for the next batch of headers.
if length < MAX_MESSAGE_HEADERS {
// If these headers were unsolicited, we may already be ready/synced.
// Otherwise, we're finally in sync.
self.broadcast_tip(&tip, tree);
self.sync(tree);
} else {
let locators = (vec![tip], BlockHash::all_zeros());
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Disconnect);
}
Ok(ImportResult::TipChanged(
header, tip, height, reverted, connected,
))
}
Err(err) => self
.handle_error(from, err)
.map(|()| ImportResult::TipUnchanged),
}
}
fn request(
&mut self,
addr: PeerId,
locators: Locators,
timeout: LocalDuration,
on_timeout: OnTimeout,
) {
// Don't request more than once from the same peer.
if self.inflight.contains_key(&addr) {
return;
}
if let Some(peer) = self.peers.get_mut(&addr) {
debug_assert!(peer.last_asked.as_ref() != Some(&locators));
peer.last_asked = Some(locators.clone());
let sent_at = self.clock.local_time();
let req = GetHeaders {
locators,
sent_at,
on_timeout,
};
self.inflight.insert(addr, req.clone());
self.upstream.get_headers(addr, req.locators);
self.upstream.set_timer(timeout);
}
}
/// Called when we received an `inv` message. This will happen if we are out of sync with a
/// peer, and blocks are being announced. Otherwise, we expect to receive a `headers` message.
pub fn received_inv<T: BlockReader>(&mut self, addr: PeerId, inv: Vec<Inventory>, tree: &T) {
// Don't try to fetch headers from `inv` message while syncing. It's not helpful.
if self.is_syncing() {
return;
}
// Ignore and disconnect peers misbehaving.
if inv.len() > MAX_MESSAGE_INVS {
return;
}
let peer = if let Some(peer) = self.peers.get_mut(&addr) {
peer
} else {
return;
};
let mut best_block = None;
for i in &inv {
if let Inventory::Block(hash) = i {
peer.tip = *hash;
// "Headers-first is the primary method of announcement on the network. If a node
// fell back to sending blocks by inv, it's probably for a re-org. The final block
// hash provided should be the highest."
if !tree.is_known(hash) {
self.upstream.event(Event::BlockDiscovered(addr, *hash));
best_block = Some(hash);
}
}
}
if let Some(stop_hash) = best_block {
let locators = (tree.locator_hashes(tree.height()), *stop_hash);
let timeout = self.config.request_timeout;
// Try to find headers leading up to the `inv` entry.
self.request(addr, locators, timeout, OnTimeout::Retry(3));
}
}
/// Called when we received a tick.
pub fn received_wake<T: BlockReader>(&mut self, tree: &T) {
let local_time = self.clock.local_time();
let timeout = self.config.request_timeout;
let timed_out = self
.inflight
.iter()
.filter_map(|(peer, req)| {
if local_time - req.sent_at >= timeout {
Some((*peer, req.on_timeout, req.clone()))
} else {
None
}
})
.collect::<Vec<_>>();
let mut sync = false;
for (peer, on_timeout, req) in timed_out {
self.inflight.remove(&peer);
match on_timeout {
OnTimeout::Ignore => {
// It's likely that the peer just didn't have the requested header.
}
OnTimeout::Retry(0) | OnTimeout::Disconnect => {
self.upstream
.disconnect(peer, DisconnectReason::PeerTimeout("getheaders"));
sync = true;
}
OnTimeout::Retry(n) => {
if let Some((addr, _)) = self.peers.sample_with(|a, p| {
*a != peer && self.is_request_candidate(a, p, &req.locators.0)
}) {
let addr = *addr;
self.request(addr, req.locators, timeout, OnTimeout::Retry(n - 1));
}
}
}
}
// If some of the requests timed out, force a sync, otherwise just idle.
if sync {
self.sync(tree);
} else {
self.idle(tree);
}
}
/// Get the best known height out of all our peers.
pub fn best_height(&self) -> Option<Height> {
self.peers.iter().map(|(_, p)| p.height).max()
}
/// Are we currently syncing?
pub fn is_syncing(&self) -> bool {
!self.inflight.is_empty()
}
///////////////////////////////////////////////////////////////////////////
fn handle_error(&mut self, from: &PeerId, err: Error) -> Result<(), store::Error> {
match err {
// If this is an error with the underlying store, we have to propagate
// this up, because we can't handle it here.
Error::Store(e) => Err(e),
// If we got a bad block from the peer, we can handle it here.
Error::InvalidBlockPoW
| Error::InvalidBlockTarget(_, _)
| Error::InvalidBlockHash(_, _)
| Error::InvalidBlockHeight(_)
| Error::InvalidBlockTime(_, _) => {
log::debug!("{}: Received invalid headers: {}", from, err);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("invalid headers"));
Ok(())
}
// Harmless errors can be ignored.
Error::DuplicateBlock(_) | Error::BlockMissing(_) => Ok(()),
// TODO: This will be removed.
Error::BlockImportAborted(_, _, _) => Ok(()),
// These shouldn't happen here.
// TODO: Perhaps there's a better way to have this error not show up here.
Error::Interrupted | Error::GenesisMismatch => Ok(()),
}
}
fn record_misbehavior(&mut self, peer: &PeerId) {
self.upstream.event(Event::PeerMisbehaved(*peer));
}
/// Check whether our current tip is stale.
///
/// *Nb. This doesn't check whether we've already requested new blocks.*
fn stale_tip<T: BlockReader>(&self, tree: &T) -> Option<LocalTime> {
let now = self.clock.local_time();
if let Some(last_update) = self.last_tip_update {
if last_update
< now - LocalDuration::from_secs(self.config.params.pow_target_spacing * 3)
{
return Some(last_update);
}
}
// If we don't have the time of the last update, it's probably because we
// are fresh, or restarted our node. In that case we check the last block time
// instead.
let (_, tip) = tree.tip();
let time = LocalTime::from_block_time(tip.time);
if time <= now - TIP_STALE_DURATION {
return Some(time);
}
None
}
/// Register a new peer.
fn register(&mut self, socket: Socket, height: Height, preferred: bool, link: Link) {
let last_active = None;
let last_asked = None;
let tip = BlockHash::all_zeros();
self.peers.insert(
socket.addr,
Peer {
height,
tip,
link,
preferred,
last_active,
last_asked,
_socket: socket,
},
);
}
/// Unregister a peer.
fn unregister(&mut self, id: &PeerId) {
self.inflight.remove(id);
self.peers.remove(id);
}
/// Select a random preferred peer.
fn preferred_peer<T: BlockReader>(&self, locators: &Locators, tree: &T) -> Option<PeerId> {
let peers: Vec<_> = self.peers.shuffled().collect();
let height = tree.height();
let locators = &locators.0;
peers
.iter()
.find(|(a, p)| {
p.preferred && p.height > height && self.is_request_candidate(a, p, locators)
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| p.preferred && self.is_request_candidate(a, p, locators))
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| self.is_request_candidate(a, p, locators))
})
.map(|(a, _)| **a)
}
/// Check whether a peer is a good request candidate for the given locators.
/// This function ensures that we don't ask the same peer twice for the same locators.
fn is_request_candidate(&self, addr: &PeerId, peer: &Peer, locators: &[BlockHash]) -> bool {
!self.inflight.contains_key(addr)
&& peer.link.is_outbound()
&& peer.last_asked.as_ref().map_or(true, |l| l.0 != locators)
}
/// Check whether or not we are in sync with the network.
fn is_synced<T: BlockReader>(&self, tree: &T) -> bool {
if let Some(last_update) = self.stale_tip(tree) {
self.upstream.event(Event::StaleTip(last_update));
return false;
}
let height = tree.height();
// Find the peer with the longest chain and compare our height to it.
if let Some(peer_height) = self.best_height() {
return height >= peer_height;
}
// Assume we're out of sync.
false
}
/// Check if we're currently syncing with these locators.
fn syncing(&self, locators: &Locators) -> bool {
self.inflight.values().any(|r| &r.locators == locators)
}
/// Start syncing if we're out of sync.
/// Returns `true` if we started syncing, and `false` if we were up to date or not able to
/// sync.
fn sync<T: BlockReader>(&mut self, tree: &T) -> bool {
if self.peers.is_empty() {
return false;
}
if self.is_synced(tree) {
let (tip, _) = tree.tip();
let height = tree.height();
// TODO: This event can fire multiple times if `sync` is called while we're already
// in sync.
self.upstream.event(Event::Synced(tip, height));
return false;
}
// ... It looks like we're out of sync ...
let locators = (tree.locator_hashes(tree.height()), BlockHash::all_zeros());
// If we're already fetching these headers, just wait.
if self.syncing(&locators) {
return false;
}
if let Some(addr) = self.preferred_peer(&locators, tree) {
let timeout = self.config.request_timeout;
let current = tree.height();
let best = self.best_height().unwrap_or(current);
if best > current {
self.request(addr, locators, timeout, OnTimeout::Ignore);
self.upstream.event(Event::Syncing { current, best });
return true;
}
}
// TODO: No peer found to sync.. emit event.
false
}
/// Broadcast our best block header to connected peers who don't have it.
fn broadcast_tip<T: BlockReader>(&mut self, hash: &BlockHash, tree: &T) {
if let Some((height, best)) = tree.get_block(hash) {
for (addr, peer) in &*self.peers {
// TODO: Don't broadcast to peer that is currently syncing?
if peer.link == Link::Inbound && height > peer.height {
self.upstream.headers(*addr, vec![*best]);
}
}
}
}
/// Ask all our outbound peers whether they have better block headers.
fn sample_peers<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
if now - self.last_peer_sample.unwrap_or_default() < PEER_SAMPLE_INTERVAL {
return;
}
if self.stale_tip(tree).is_none() {
return;
}
self.last_peer_sample = Some(now);
// If we think we're in sync and we haven't asked other peers in a while, then
// sample their headers just to make sure we're on the right chain.
let locators = tree.locator_hashes(tree.height());
let addrs = self
.peers
.iter()
.filter(|(a, p)| self.is_request_candidate(a, p, &locators))
.map(|(a, _)| *a)
.collect::<Vec<_>>();
for addr in addrs {
self.request(
addr,
(locators.clone(), BlockHash::all_zeros()),
self.config.request_timeout,
OnTimeout::Ignore,
);
}
}
}
| {
if link.is_outbound() && !services.has(REQUIRED_SERVICES) {
return;
}
if height > self.best_height().unwrap_or_else(|| tree.height()) {
self.upstream.event(Event::PeerHeightUpdated { height });
}
self.register(socket, height, preferred, link);
self.sync(tree);
} | identifier_body |
syncmgr.rs | //!
//! Manages header synchronization with peers.
//!
use nakamoto_common::bitcoin::consensus::params::Params;
use nakamoto_common::bitcoin::network::constants::ServiceFlags;
use nakamoto_common::bitcoin::network::message_blockdata::Inventory;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::store;
use nakamoto_common::block::time::{Clock, LocalDuration, LocalTime};
use nakamoto_common::block::tree::{BlockReader, BlockTree, Error, ImportResult};
use nakamoto_common::block::{BlockHash, BlockHeader, Height};
use nakamoto_common::collections::{AddressBook, HashMap};
use nakamoto_common::nonempty::NonEmpty;
use super::output::{Disconnect, SetTimer, Wire};
use super::{DisconnectReason, Link, Locators, PeerId, Socket};
/// How long to wait for a request, e.g. `getheaders`, to be fulfilled.
pub const REQUEST_TIMEOUT: LocalDuration = LocalDuration::from_secs(30);
/// How long before the tip of the chain is considered stale. This takes into account
/// that the block timestamp may have been set sometime in the future.
pub const TIP_STALE_DURATION: LocalDuration = LocalDuration::from_mins(60 * 2);
/// Maximum number of headers sent in a `headers` message.
pub const MAX_MESSAGE_HEADERS: usize = 2000;
/// Maximum number of inventories sent in an `inv` message.
pub const MAX_MESSAGE_INVS: usize = 50000;
/// Idle timeout.
pub const IDLE_TIMEOUT: LocalDuration = LocalDuration::BLOCK_INTERVAL;
/// Services required from peers for header sync.
pub const REQUIRED_SERVICES: ServiceFlags = ServiceFlags::NETWORK;
/// Maximum headers announced in a `headers` message, when unsolicited.
const MAX_UNSOLICITED_HEADERS: usize = 24;
/// How long to wait between checks for longer chains from peers.
const PEER_SAMPLE_INTERVAL: LocalDuration = LocalDuration::from_mins(60);
/// What to do if a timeout for a peer is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum OnTimeout {
/// Disconnect peer on timeout.
Disconnect,
/// Do nothing on timeout.
Ignore,
/// Retry with a different peer on timeout.
Retry(usize),
}
/// State of a sync peer.
#[derive(Debug)]
struct Peer {
height: Height,
preferred: bool,
tip: BlockHash,
link: Link,
last_active: Option<LocalTime>,
last_asked: Option<Locators>,
_socket: Socket,
}
/// Sync manager configuration.
#[derive(Debug)]
pub struct Config {
/// Maximum number of messages in a `headers` message.
pub max_message_headers: usize,
/// How long to wait for a response from a peer.
pub request_timeout: LocalDuration,
/// Consensus parameters.
pub params: Params,
}
/// The sync manager state.
#[derive(Debug)]
pub struct SyncManager<U, C> {
/// Sync manager configuration.
pub config: Config,
/// Sync-specific peer state.
peers: AddressBook<PeerId, Peer>,
/// Last time our tip was updated.
last_tip_update: Option<LocalTime>,
/// Last time we sampled our peers for their active chain.
last_peer_sample: Option<LocalTime>,
/// Last time we idled.
last_idle: Option<LocalTime>,
/// In-flight requests to peers.
inflight: HashMap<PeerId, GetHeaders>,
/// Upstream protocol channel.
upstream: U,
/// Clock.
clock: C,
}
/// An event emitted by the sync manager.
#[derive(Debug, Clone)]
pub enum Event {
/// A block was added to the main chain.
BlockConnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A block was removed from the main chain.
BlockDisconnected {
/// Block height.
height: Height,
/// Block header.
header: BlockHeader,
},
/// A new block was discovered via a peer.
BlockDiscovered(PeerId, BlockHash),
/// Syncing headers.
Syncing {
/// Current block header height.
current: Height,
/// Best known block header height.
best: Height,
},
/// Synced up to the specified hash and height.
Synced(BlockHash, Height),
/// Potential stale tip detected on the active chain.
StaleTip(LocalTime),
/// Peer misbehaved.
PeerMisbehaved(PeerId),
/// Peer height updated.
PeerHeightUpdated {
/// Best height known.
height: Height,
},
}
impl std::fmt::Display for Event {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Event::PeerMisbehaved(addr) => {
write!(fmt, "{}: Peer misbehaved", addr)
}
Event::PeerHeightUpdated { height } => {
write!(fmt, "Peer height updated to {}", height)
}
Event::Synced(hash, height) => {
write!(
fmt,
"Headers synced up to height {} with hash {}",
height, hash
)
}
Event::Syncing { current, best } => write!(fmt, "Syncing headers {}/{}", current, best),
Event::BlockConnected { height, header } => {
write!(
fmt,
"Block {} connected at height {}",
header.block_hash(),
height
)
}
Event::BlockDisconnected { height, header } => {
write!(
fmt,
"Block {} disconnected at height {}",
header.block_hash(),
height
)
}
Event::BlockDiscovered(from, hash) => {
write!(fmt, "{}: Discovered new block: {}", from, &hash)
}
Event::StaleTip(last_update) => {
write!(
fmt,
"Potential stale tip detected (last update was {})",
last_update
)
}
}
}
}
/// A `getheaders` request sent to a peer.
#[derive(Clone, Debug, PartialEq, Eq)]
struct GetHeaders {
/// Locators hashes.
locators: Locators,
/// Time at which the request was sent.
sent_at: LocalTime,
/// What to do if this request times out.
on_timeout: OnTimeout,
}
impl<U: SetTimer + Disconnect + Wire<Event>, C: Clock> SyncManager<U, C> {
/// Create a new sync manager.
pub fn new(config: Config, rng: fastrand::Rng, upstream: U, clock: C) -> Self {
let peers = AddressBook::new(rng.clone());
let last_tip_update = None;
let last_peer_sample = None;
let last_idle = None;
let inflight = HashMap::with_hasher(rng.into());
Self {
peers,
config,
last_tip_update,
last_peer_sample,
last_idle,
inflight,
upstream,
clock,
}
}
/// Initialize the sync manager. Should only be called once.
pub fn initialize<T: BlockReader>(&mut self, tree: &T) {
// TODO: `tip` should return the height.
let (hash, _) = tree.tip();
let height = tree.height();
self.idle(tree);
self.upstream.event(Event::Synced(hash, height));
}
/// Called periodically.
pub fn idle<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
// Nb. The idle timeout is very long: as long as the block interval.
// This shouldn't be a problem, as the sync manager can make progress without it.
if now - self.last_idle.unwrap_or_default() >= IDLE_TIMEOUT {
if !self.sync(tree) {
self.sample_peers(tree);
}
self.last_idle = Some(now);
self.upstream.set_timer(IDLE_TIMEOUT);
}
}
/// Called when a new peer was negotiated.
pub fn peer_negotiated<T: BlockReader>(
&mut self,
socket: Socket,
height: Height,
services: ServiceFlags,
preferred: bool,
link: Link,
tree: &T,
) {
if link.is_outbound() && !services.has(REQUIRED_SERVICES) {
return;
}
if height > self.best_height().unwrap_or_else(|| tree.height()) {
self.upstream.event(Event::PeerHeightUpdated { height });
}
self.register(socket, height, preferred, link);
self.sync(tree);
}
/// Called when a peer disconnected.
pub fn peer_disconnected(&mut self, id: &PeerId) {
self.unregister(id);
}
/// Called when we received a `getheaders` message from a peer.
pub fn received_getheaders<T: BlockReader>(
&mut self,
addr: &PeerId,
(locator_hashes, stop_hash): Locators,
tree: &T,
) {
let max = self.config.max_message_headers;
if self.is_syncing() || max == 0 {
return;
}
let headers = tree.locate_headers(&locator_hashes, stop_hash, max);
if headers.is_empty() {
return;
}
self.upstream.headers(*addr, headers);
}
/// Import blocks into our block tree.
pub fn import_blocks<T: BlockTree, I: Iterator<Item = BlockHeader>>(
&mut self,
blocks: I,
tree: &mut T,
) -> Result<ImportResult, Error> {
match tree.import_blocks(blocks, &self.clock) {
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
let result = ImportResult::TipChanged(
header,
tip,
height,
reverted.clone(),
connected.clone(),
);
for (height, header) in reverted {
self.upstream
.event(Event::BlockDisconnected { height, header });
}
for (height, header) in connected {
self.upstream
.event(Event::BlockConnected { height, header });
}
self.upstream.event(Event::Synced(tip, height));
self.broadcast_tip(&tip, tree);
Ok(result)
}
Ok(result @ ImportResult::TipUnchanged) => Ok(result),
Err(err) => Err(err),
}
}
/// Called when we receive headers from a peer.
pub fn received_headers<T: BlockTree>(
&mut self,
from: &PeerId,
headers: Vec<BlockHeader>,
clock: &impl Clock,
tree: &mut T,
) -> Result<ImportResult, store::Error> {
let request = self.inflight.remove(from);
let headers = if let Some(headers) = NonEmpty::from_vec(headers) {
headers
} else {
return Ok(ImportResult::TipUnchanged);
};
let length = headers.len();
if length > MAX_MESSAGE_HEADERS {
log::debug!("Received more than maximum headers allowed from {}", from);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("too many headers"));
return Ok(ImportResult::TipUnchanged);
}
// When unsolicited, we don't want to process too many headers in case of a DoS.
if length > MAX_UNSOLICITED_HEADERS && request.is_none() {
log::debug!("Received {} unsolicited headers from {}", length, from);
return Ok(ImportResult::TipUnchanged);
}
if let Some(peer) = self.peers.get_mut(from) {
peer.last_active = Some(clock.local_time());
} else {
return Ok(ImportResult::TipUnchanged);
}
log::debug!("[sync] Received {} block header(s) from {}", length, from);
let root = headers.first().block_hash();
let best = headers.last().block_hash();
if tree.contains(&best) {
return Ok(ImportResult::TipUnchanged);
}
match self.import_blocks(headers.into_iter(), tree) {
Ok(ImportResult::TipUnchanged) => {
// Try to find a common ancestor that leads up to the first header in
// the list we received.
let locators = (tree.locator_hashes(tree.height()), root);
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Ignore);
Ok(ImportResult::TipUnchanged)
}
Ok(ImportResult::TipChanged(header, tip, height, reverted, connected)) => {
// Update peer height.
if let Some(peer) = self.peers.get_mut(from) {
if height > peer.height {
peer.tip = tip;
peer.height = height;
}
}
// Keep track of when we last updated our tip. This is useful to check
// whether our tip is stale.
self.last_tip_update = Some(clock.local_time());
// If we received less than the maximum number of headers, we must be in sync.
// Otherwise, ask for the next batch of headers.
if length < MAX_MESSAGE_HEADERS {
// If these headers were unsolicited, we may already be ready/synced.
// Otherwise, we're finally in sync.
self.broadcast_tip(&tip, tree);
self.sync(tree);
} else {
let locators = (vec![tip], BlockHash::all_zeros());
let timeout = self.config.request_timeout;
self.request(*from, locators, timeout, OnTimeout::Disconnect);
}
Ok(ImportResult::TipChanged(
header, tip, height, reverted, connected,
))
}
Err(err) => self
.handle_error(from, err)
.map(|()| ImportResult::TipUnchanged),
}
}
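    /// Send a `getheaders` request to `addr` and track it as in-flight.
    /// Does nothing if a request to that peer is already pending.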
fn request(
&mut self,
addr: PeerId,
locators: Locators,
timeout: LocalDuration,
on_timeout: OnTimeout,
) {
// Don't request more than once from the same peer.
if self.inflight.contains_key(&addr) {
return;
}
if let Some(peer) = self.peers.get_mut(&addr) {
debug_assert!(peer.last_asked.as_ref() != Some(&locators));
peer.last_asked = Some(locators.clone());
let sent_at = self.clock.local_time();
let req = GetHeaders {
locators,
sent_at,
on_timeout,
};
self.inflight.insert(addr, req.clone());
self.upstream.get_headers(addr, req.locators);
self.upstream.set_timer(timeout);
}
}
/// Called when we received an `inv` message. This will happen if we are out of sync with a
/// peer, and blocks are being announced. Otherwise, we expect to receive a `headers` message.
pub fn received_inv<T: BlockReader>(&mut self, addr: PeerId, inv: Vec<Inventory>, tree: &T) {
// Don't try to fetch headers from `inv` message while syncing. It's not helpful.
if self.is_syncing() {
return;
}
// Ignore and disconnect peers misbehaving.
if inv.len() > MAX_MESSAGE_INVS {
return;
}
let peer = if let Some(peer) = self.peers.get_mut(&addr) {
peer
} else {
return;
};
let mut best_block = None;
for i in &inv {
if let Inventory::Block(hash) = i {
peer.tip = *hash;
// "Headers-first is the primary method of announcement on the network. If a node
// fell back to sending blocks by inv, it's probably for a re-org. The final block
// hash provided should be the highest."
if !tree.is_known(hash) {
self.upstream.event(Event::BlockDiscovered(addr, *hash));
best_block = Some(hash);
}
}
}
if let Some(stop_hash) = best_block {
let locators = (tree.locator_hashes(tree.height()), *stop_hash);
let timeout = self.config.request_timeout;
// Try to find headers leading up to the `inv` entry.
self.request(addr, locators, timeout, OnTimeout::Retry(3));
}
}
/// Called when we received a tick.
pub fn received_wake<T: BlockReader>(&mut self, tree: &T) {
let local_time = self.clock.local_time();
let timeout = self.config.request_timeout;
let timed_out = self
.inflight
.iter()
.filter_map(|(peer, req)| {
if local_time - req.sent_at >= timeout {
Some((*peer, req.on_timeout, req.clone()))
} else {
None
}
})
.collect::<Vec<_>>();
let mut sync = false;
for (peer, on_timeout, req) in timed_out {
self.inflight.remove(&peer);
match on_timeout {
OnTimeout::Ignore => {
// It's likely that the peer just didn't have the requested header.
}
OnTimeout::Retry(0) | OnTimeout::Disconnect => {
self.upstream
.disconnect(peer, DisconnectReason::PeerTimeout("getheaders"));
sync = true;
}
OnTimeout::Retry(n) => {
if let Some((addr, _)) = self.peers.sample_with(|a, p| {
*a != peer && self.is_request_candidate(a, p, &req.locators.0)
}) {
let addr = *addr;
self.request(addr, req.locators, timeout, OnTimeout::Retry(n - 1));
}
}
}
}
// If some of the requests timed out, force a sync, otherwise just idle.
if sync {
self.sync(tree);
} else {
self.idle(tree);
}
}
/// Get the best known height out of all our peers.
pub fn best_height(&self) -> Option<Height> {
self.peers.iter().map(|(_, p)| p.height).max()
}
/// Are we currently syncing?
pub fn is_syncing(&self) -> bool {
!self.inflight.is_empty()
}
///////////////////////////////////////////////////////////////////////////
fn handle_error(&mut self, from: &PeerId, err: Error) -> Result<(), store::Error> {
match err {
// If this is an error with the underlying store, we have to propagate
// this up, because we can't handle it here.
Error::Store(e) => Err(e),
// If we got a bad block from the peer, we can handle it here.
Error::InvalidBlockPoW
| Error::InvalidBlockTarget(_, _)
| Error::InvalidBlockHash(_, _)
| Error::InvalidBlockHeight(_)
| Error::InvalidBlockTime(_, _) => {
log::debug!("{}: Received invalid headers: {}", from, err);
self.record_misbehavior(from);
self.upstream
.disconnect(*from, DisconnectReason::PeerMisbehaving("invalid headers"));
Ok(())
}
// Harmless errors can be ignored.
Error::DuplicateBlock(_) | Error::BlockMissing(_) => Ok(()),
// TODO: This will be removed.
Error::BlockImportAborted(_, _, _) => Ok(()),
// These shouldn't happen here.
// TODO: Perhaps there's a better way to have this error not show up here.
Error::Interrupted | Error::GenesisMismatch => Ok(()),
}
}
fn record_misbehavior(&mut self, peer: &PeerId) {
self.upstream.event(Event::PeerMisbehaved(*peer));
}
/// Check whether our current tip is stale.
///
/// *Nb. This doesn't check whether we've already requested new blocks.*
fn stale_tip<T: BlockReader>(&self, tree: &T) -> Option<LocalTime> {
let now = self.clock.local_time();
if let Some(last_update) = self.last_tip_update {
if last_update
< now - LocalDuration::from_secs(self.config.params.pow_target_spacing * 3)
{
return Some(last_update);
}
}
// If we don't have the time of the last update, it's probably because we
// are fresh, or restarted our node. In that case we check the last block time
// instead.
let (_, tip) = tree.tip();
let time = LocalTime::from_block_time(tip.time);
if time <= now - TIP_STALE_DURATION |
None
}
/// Register a new peer.
fn register(&mut self, socket: Socket, height: Height, preferred: bool, link: Link) {
let last_active = None;
let last_asked = None;
let tip = BlockHash::all_zeros();
self.peers.insert(
socket.addr,
Peer {
height,
tip,
link,
preferred,
last_active,
last_asked,
_socket: socket,
},
);
}
/// Unregister a peer.
fn unregister(&mut self, id: &PeerId) {
self.inflight.remove(id);
self.peers.remove(id);
}
/// Select a random preferred peer.
fn preferred_peer<T: BlockReader>(&self, locators: &Locators, tree: &T) -> Option<PeerId> {
let peers: Vec<_> = self.peers.shuffled().collect();
let height = tree.height();
let locators = &locators.0;
peers
.iter()
.find(|(a, p)| {
p.preferred && p.height > height && self.is_request_candidate(a, p, locators)
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| p.preferred && self.is_request_candidate(a, p, locators))
})
.or_else(|| {
peers
.iter()
.find(|(a, p)| self.is_request_candidate(a, p, locators))
})
.map(|(a, _)| **a)
}
/// Check whether a peer is a good request candidate for the given locators.
/// This function ensures that we don't ask the same peer twice for the same locators.
fn is_request_candidate(&self, addr: &PeerId, peer: &Peer, locators: &[BlockHash]) -> bool {
!self.inflight.contains_key(addr)
&& peer.link.is_outbound()
&& peer.last_asked.as_ref().map_or(true, |l| l.0 != locators)
}
/// Check whether or not we are in sync with the network.
fn is_synced<T: BlockReader>(&self, tree: &T) -> bool {
if let Some(last_update) = self.stale_tip(tree) {
self.upstream.event(Event::StaleTip(last_update));
return false;
}
let height = tree.height();
// Find the peer with the longest chain and compare our height to it.
if let Some(peer_height) = self.best_height() {
return height >= peer_height;
}
// Assume we're out of sync.
false
}
/// Check if we're currently syncing with these locators.
fn syncing(&self, locators: &Locators) -> bool {
self.inflight.values().any(|r| &r.locators == locators)
}
/// Start syncing if we're out of sync.
/// Returns `true` if we started syncing, and `false` if we were up to date or not able to
/// sync.
fn sync<T: BlockReader>(&mut self, tree: &T) -> bool {
if self.peers.is_empty() {
return false;
}
if self.is_synced(tree) {
let (tip, _) = tree.tip();
let height = tree.height();
// TODO: This event can fire multiple times if `sync` is called while we're already
// in sync.
self.upstream.event(Event::Synced(tip, height));
return false;
}
// ... It looks like we're out of sync ...
let locators = (tree.locator_hashes(tree.height()), BlockHash::all_zeros());
// If we're already fetching these headers, just wait.
if self.syncing(&locators) {
return false;
}
if let Some(addr) = self.preferred_peer(&locators, tree) {
let timeout = self.config.request_timeout;
let current = tree.height();
let best = self.best_height().unwrap_or(current);
if best > current {
self.request(addr, locators, timeout, OnTimeout::Ignore);
self.upstream.event(Event::Syncing { current, best });
return true;
}
}
// TODO: No peer found to sync.. emit event.
false
}
/// Broadcast our best block header to connected peers who don't have it.
fn broadcast_tip<T: BlockReader>(&mut self, hash: &BlockHash, tree: &T) {
if let Some((height, best)) = tree.get_block(hash) {
for (addr, peer) in &*self.peers {
// TODO: Don't broadcast to peer that is currently syncing?
if peer.link == Link::Inbound && height > peer.height {
self.upstream.headers(*addr, vec![*best]);
}
}
}
}
/// Ask all our outbound peers whether they have better block headers.
fn sample_peers<T: BlockReader>(&mut self, tree: &T) {
let now = self.clock.local_time();
if now - self.last_peer_sample.unwrap_or_default() < PEER_SAMPLE_INTERVAL {
return;
}
if self.stale_tip(tree).is_none() {
return;
}
self.last_peer_sample = Some(now);
// If we think we're in sync and we haven't asked other peers in a while, then
// sample their headers just to make sure we're on the right chain.
let locators = tree.locator_hashes(tree.height());
let addrs = self
.peers
.iter()
.filter(|(a, p)| self.is_request_candidate(a, p, &locators))
.map(|(a, _)| *a)
.collect::<Vec<_>>();
for addr in addrs {
self.request(
addr,
(locators.clone(), BlockHash::all_zeros()),
self.config.request_timeout,
OnTimeout::Ignore,
);
}
}
}
| {
return Some(time);
} | conditional_block |
dtm.py | #!/usr/bin/env python3
import sys, resource, os, shutil, re, string, gc, subprocess
import django
import nltk
from multiprocess import Pool
from nltk.stem import SnowballStemmer
from nltk import word_tokenize
from time import time, sleep
from functools import partial
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from scipy.sparse import csr_matrix, find
import numpy as np
from django.utils import timezone
from django.core import management
# Import django stuff
sys.path.append('/home/galm/software/django/tmv/BasicBrowser')
# sys.path.append('/home/max/Desktop/django/BasicBrowser/')
import db as db
from tmv_app.models import *
from scoping.models import Doc, Query
from django.db import connection, transaction
cursor = connection.cursor()
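# Build DocTopic value tuples (doc UT, topic id, score, score scaled by document length, run id)
# for a batch of indices into the sparse (row, col, value) gamma arrays, skipping near-zero scores.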
def f_gamma2(docs,gamma,docsizes,docUTset,topic_ids):
vl = []
for d in docs:
if gamma[2][d] > 0.001:
dt = (
docUTset[gamma[0][d]],
topic_ids[gamma[1][d]],
gamma[2][d],
gamma[2][d] / docsizes[gamma[0][d]],
run_id
)
vl.append(dt)
return vl
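# Strip punctuation and digits, then NLTK-tokenize and keep tokens longer than two characters.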
def tokenize(text):
transtable = {ord(c): None for c in string.punctuation + string.digits}
tokens = nltk.word_tokenize(text.translate(transtable))
tokens = [i for i in tokens if len(i) > 2]
return tokens
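# Get or create a Term for this word, attach it to the current run, and return its primary key.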
def add_features(title):
django.db.connections.close_all()
term, created = Term.objects.get_or_create(title=title)
term.run_id.add(run_id)
django.db.connections.close_all()
return term.pk
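# Callable wrapper around the English Snowball stemmer, used as the CountVectorizer tokenizer.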
class snowball_stemmer(object):
def __init__(self):
self.stemmer = SnowballStemmer("english")
def __call__(self, doc):
return [self.stemmer.stem(t) for t in tokenize(doc)]
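# Strip Elsevier/Wiley copyright boilerplate from the abstracts and collect document lengths,
# UT identifiers, the stopword list and publication years.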
def proc_docs(docs):
stoplist = set(nltk.corpus.stopwords.words("english"))
stoplist.add('elsevier')
stoplist.add('rights')
stoplist.add('reserved')
stoplist.add('john')
stoplist.add('wiley')
stoplist.add('sons')
stoplist.add('copyright')
    abstracts = [re.split(r"\([C-c]\) [1-2][0-9]{3} Elsevier",x.content)[0] for x in docs.iterator()]
    abstracts = [x.split("Published by Elsevier")[0] for x in abstracts]
    abstracts = [x.split("Copyright (C)")[0] for x in abstracts]
    abstracts = [re.split(r"\. \(C\) [1-2][0-9]{3} ",x)[0] for x in abstracts]
docsizes = [len(x) for x in abstracts]
ids = [x.UT for x in docs.iterator()]
PYs = [x.PY for x in docs.iterator()]
return [abstracts, docsizes, ids, stoplist, PYs]
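# Parse a dtm info file of "key value" lines into a dict, casting values to int where possible.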
def readInfo(p):
d = {}
with open(p) as f:
for line in f:
(key, val) = line.strip().split(' ',1)
try:
d[key] = int(val)
except:
d[key] = val
return(d)
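# Load one topic's term log-probabilities over time from the dtm output and save TopicTerm rows
# for scores above a small threshold.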
def | (topic_n,info,topic_ids,vocab_ids,ys):
print(topic_n)
django.db.connections.close_all()
p = "%03d" % (topic_n,)
p = "dtm-output/lda-seq/topic-"+p+"-var-e-log-prob.dat"
tlambda = np.fromfile(p, sep=" ").reshape((info['NUM_TERMS'],info['SEQ_LENGTH']))
for t in range(len(tlambda)):
for py in range(len(tlambda[t])):
score = np.exp(tlambda[t][py])
if score > 0.001:
tt = TopicTerm(
topic_id = topic_ids[topic_n],
term_id = vocab_ids[t],
PY = ys[py],
score = score,
run_id=run_id
)
tt.save()
#db.add_topic_term(topic_n+info['first_topic'], t+info['first_word'], py, score)
django.db.connections.close_all()
#########################################################
## Main function
def main():
try:
qid = int(sys.argv[1])
except:
print("please provide a query ID!")
sys.exit()
#sleep(7200)
Ks = [100,150,200,250]
#Ks = [10,20]
for K in Ks:
#K = 80
n_features=20000
global run_id
run_id = db.init(n_features,1)
stat = RunStats.objects.get(pk=run_id)
stat.method='BD'
        stat.query=Query.objects.get(pk=qid)
        stat.save()
##########################
## create input folder
if (os.path.isdir('dtm-input')):
shutil.rmtree('dtm-input')
os.mkdir('dtm-input')
yrange = list(range(1990,2017))
#yrange = list(range(2010,2012))
#yrange = list(range(1990,1997))
docs = Doc.objects.filter(
query=Query.objects.get(pk=qid),
content__iregex='\w',
relevant=True,
PY__in=yrange
).order_by('PY')
abstracts, docsizes, ids, stoplist, PYs = proc_docs(docs)
#########################
## Get the features now
print("Extracting word features...")
vectorizer = CountVectorizer(max_df=0.95, min_df=10,
max_features=n_features,
ngram_range=(1,1),
tokenizer=snowball_stemmer(),
stop_words=stoplist)
t0 = time()
dtm = vectorizer.fit_transform(abstracts)
print("done in %0.3fs." % (time() - t0))
del abstracts
gc.collect()
# Get the vocab, add it to db
vocab = vectorizer.get_feature_names()
vocab_ids = []
pool = Pool(processes=8)
vocab_ids.append(pool.map(add_features,vocab))
pool.terminate()
del vocab
vocab_ids = vocab_ids[0]
django.db.connections.close_all()
with open('dtm-input/foo-mult.dat','w') as mult:
for d in range(dtm.shape[0]):
words = find(dtm[d])
uwords = len(words[0])
mult.write(str(uwords) + " ")
for w in range(uwords):
index = words[1][w]
count = words[2][w]
mult.write(str(index)+":"+str(count)+" ")
mult.write('\n')
##########################
##put PY stuff in the seq file
ycounts = docs.values('PY').annotate(
count = models.Count('pk')
)
with open('dtm-input/foo-seq.dat','w') as seq:
seq.write(str(len(yrange)))
for y in ycounts:
seq.write('\n')
seq.write(str(y['count']))
##########################
# Run the dtm
subprocess.Popen([
"/home/galm/software/dtm/dtm/main",
"--ntopics={}".format(K),
"--mode=fit",
"--rng_seed=0",
"--initialize_lda=true",
"--corpus_prefix=/home/galm/projects/sustainability/dtm-input/foo",
"--outname=/home/galm/projects/sustainability/dtm-output",
"--top_chain_var=0.005",
"--alpha=0.01",
"--lda_sequence_min_iter=10",
"--lda_sequence_max_iter=20",
"--lda_max_em_iter=20"
]).wait()
##########################
## Upload the dtm results to the db
info = readInfo("dtm-output/lda-seq/info.dat")
topic_ids = db.add_topics(K)
#################################
# TopicTerms
topics = range(info['NUM_TOPICS'])
pool = Pool(processes=8)
pool.map(partial(
dtm_topic,
info=info,
topic_ids=topic_ids,
vocab_ids=vocab_ids,
ys = yrange
),topics)
pool.terminate()
gc.collect()
######################################
# Doctopics
gamma = np.fromfile('dtm-output/lda-seq/gam.dat', dtype=float,sep=" ")
        gamma = gamma.reshape((len(gamma)//info['NUM_TOPICS'],info['NUM_TOPICS']))
gamma = find(csr_matrix(gamma))
glength = len(gamma[0])
chunk_size = 100000
ps = 16
parallel_add = True
all_dts = []
make_t = 0
add_t = 0
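        # Bulk-insert DocTopic rows with a raw SQL executemany for speed.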
def insert_many(values_list):
query='''
INSERT INTO "tmv_app_doctopic"
("doc_id", "topic_id", "score", "scaled_score", "run_id")
VALUES (%s,%s,%s,%s,%s)
'''
cursor = connection.cursor()
cursor.executemany(query,values_list)
for i in range(glength//chunk_size+1):
dts = []
values_list = []
f = i*chunk_size
l = (i+1)*chunk_size
if l > glength:
l = glength
docs = range(f,l)
doc_batches = []
for p in range(ps):
doc_batches.append([x for x in docs if x % ps == p])
pool = Pool(processes=ps)
make_t0 = time()
values_list.append(pool.map(partial(f_gamma2, gamma=gamma,
docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
#dts.append(pool.map(partial(f_gamma, gamma=gamma,
# docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
pool.terminate()
make_t += time() - make_t0
django.db.connections.close_all()
add_t0 = time()
values_list = [item for sublist in values_list for item in sublist]
pool = Pool(processes=ps)
pool.map(insert_many,values_list)
pool.terminate()
add_t += time() - add_t0
gc.collect()
sys.stdout.flush()
stats = RunStats.objects.get(run_id=run_id)
stats.last_update=timezone.now()
stats.save()
management.call_command('update_run',run_id)
if __name__ == '__main__':
t0 = time()
main()
totalTime = time() - t0
tm = int(totalTime//60)
ts = int(totalTime-(tm*60))
print("done! total time: " + str(tm) + " minutes and " + str(ts) + " seconds")
print("a maximum of " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000) + " MB was used")
| dtm_topic | identifier_name |
dtm.py | #!/usr/bin/env python3
import sys, resource, os, shutil, re, string, gc, subprocess
import django
import nltk
from multiprocess import Pool
from nltk.stem import SnowballStemmer
from nltk import word_tokenize
from time import time, sleep
from functools import partial
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from scipy.sparse import csr_matrix, find
import numpy as np
from django.utils import timezone
from django.core import management
# Import django stuff
sys.path.append('/home/galm/software/django/tmv/BasicBrowser')
# sys.path.append('/home/max/Desktop/django/BasicBrowser/')
import db as db
from tmv_app.models import *
from scoping.models import Doc, Query
from django.db import connection, transaction
cursor = connection.cursor()
def f_gamma2(docs,gamma,docsizes,docUTset,topic_ids):
vl = []
for d in docs:
if gamma[2][d] > 0.001:
|
return vl
def tokenize(text):
transtable = {ord(c): None for c in string.punctuation + string.digits}
tokens = nltk.word_tokenize(text.translate(transtable))
tokens = [i for i in tokens if len(i) > 2]
return tokens
def add_features(title):
django.db.connections.close_all()
term, created = Term.objects.get_or_create(title=title)
term.run_id.add(run_id)
django.db.connections.close_all()
return term.pk
class snowball_stemmer(object):
def __init__(self):
self.stemmer = SnowballStemmer("english")
def __call__(self, doc):
return [self.stemmer.stem(t) for t in tokenize(doc)]
def proc_docs(docs):
stoplist = set(nltk.corpus.stopwords.words("english"))
stoplist.add('elsevier')
stoplist.add('rights')
stoplist.add('reserved')
stoplist.add('john')
stoplist.add('wiley')
stoplist.add('sons')
stoplist.add('copyright')
    abstracts = [re.split(r"\([C-c]\) [1-2][0-9]{3} Elsevier",x.content)[0] for x in docs.iterator()]
    abstracts = [x.split("Published by Elsevier")[0] for x in abstracts]
    abstracts = [x.split("Copyright (C)")[0] for x in abstracts]
    abstracts = [re.split(r"\. \(C\) [1-2][0-9]{3} ",x)[0] for x in abstracts]
docsizes = [len(x) for x in abstracts]
ids = [x.UT for x in docs.iterator()]
PYs = [x.PY for x in docs.iterator()]
return [abstracts, docsizes, ids, stoplist, PYs]
def readInfo(p):
d = {}
with open(p) as f:
for line in f:
(key, val) = line.strip().split(' ',1)
try:
d[key] = int(val)
except:
d[key] = val
return(d)
def dtm_topic(topic_n,info,topic_ids,vocab_ids,ys):
print(topic_n)
django.db.connections.close_all()
p = "%03d" % (topic_n,)
p = "dtm-output/lda-seq/topic-"+p+"-var-e-log-prob.dat"
tlambda = np.fromfile(p, sep=" ").reshape((info['NUM_TERMS'],info['SEQ_LENGTH']))
for t in range(len(tlambda)):
for py in range(len(tlambda[t])):
score = np.exp(tlambda[t][py])
if score > 0.001:
tt = TopicTerm(
topic_id = topic_ids[topic_n],
term_id = vocab_ids[t],
PY = ys[py],
score = score,
run_id=run_id
)
tt.save()
#db.add_topic_term(topic_n+info['first_topic'], t+info['first_word'], py, score)
django.db.connections.close_all()
#########################################################
## Main function
def main():
try:
qid = int(sys.argv[1])
except:
print("please provide a query ID!")
sys.exit()
#sleep(7200)
Ks = [100,150,200,250]
#Ks = [10,20]
for K in Ks:
#K = 80
n_features=20000
global run_id
run_id = db.init(n_features,1)
stat = RunStats.objects.get(pk=run_id)
stat.method='BD'
        stat.query=Query.objects.get(pk=qid)
        stat.save()
##########################
## create input folder
if (os.path.isdir('dtm-input')):
shutil.rmtree('dtm-input')
os.mkdir('dtm-input')
yrange = list(range(1990,2017))
#yrange = list(range(2010,2012))
#yrange = list(range(1990,1997))
docs = Doc.objects.filter(
query=Query.objects.get(pk=qid),
content__iregex='\w',
relevant=True,
PY__in=yrange
).order_by('PY')
abstracts, docsizes, ids, stoplist, PYs = proc_docs(docs)
#########################
## Get the features now
print("Extracting word features...")
vectorizer = CountVectorizer(max_df=0.95, min_df=10,
max_features=n_features,
ngram_range=(1,1),
tokenizer=snowball_stemmer(),
stop_words=stoplist)
t0 = time()
dtm = vectorizer.fit_transform(abstracts)
print("done in %0.3fs." % (time() - t0))
del abstracts
gc.collect()
# Get the vocab, add it to db
vocab = vectorizer.get_feature_names()
vocab_ids = []
pool = Pool(processes=8)
vocab_ids.append(pool.map(add_features,vocab))
pool.terminate()
del vocab
vocab_ids = vocab_ids[0]
django.db.connections.close_all()
with open('dtm-input/foo-mult.dat','w') as mult:
for d in range(dtm.shape[0]):
words = find(dtm[d])
uwords = len(words[0])
mult.write(str(uwords) + " ")
for w in range(uwords):
index = words[1][w]
count = words[2][w]
mult.write(str(index)+":"+str(count)+" ")
mult.write('\n')
##########################
##put PY stuff in the seq file
ycounts = docs.values('PY').annotate(
count = models.Count('pk')
)
with open('dtm-input/foo-seq.dat','w') as seq:
seq.write(str(len(yrange)))
for y in ycounts:
seq.write('\n')
seq.write(str(y['count']))
##########################
# Run the dtm
subprocess.Popen([
"/home/galm/software/dtm/dtm/main",
"--ntopics={}".format(K),
"--mode=fit",
"--rng_seed=0",
"--initialize_lda=true",
"--corpus_prefix=/home/galm/projects/sustainability/dtm-input/foo",
"--outname=/home/galm/projects/sustainability/dtm-output",
"--top_chain_var=0.005",
"--alpha=0.01",
"--lda_sequence_min_iter=10",
"--lda_sequence_max_iter=20",
"--lda_max_em_iter=20"
]).wait()
##########################
## Upload the dtm results to the db
info = readInfo("dtm-output/lda-seq/info.dat")
topic_ids = db.add_topics(K)
#################################
# TopicTerms
topics = range(info['NUM_TOPICS'])
pool = Pool(processes=8)
pool.map(partial(
dtm_topic,
info=info,
topic_ids=topic_ids,
vocab_ids=vocab_ids,
ys = yrange
),topics)
pool.terminate()
gc.collect()
######################################
# Doctopics
gamma = np.fromfile('dtm-output/lda-seq/gam.dat', dtype=float,sep=" ")
        gamma = gamma.reshape((len(gamma)//info['NUM_TOPICS'],info['NUM_TOPICS']))
gamma = find(csr_matrix(gamma))
glength = len(gamma[0])
chunk_size = 100000
ps = 16
parallel_add = True
all_dts = []
make_t = 0
add_t = 0
def insert_many(values_list):
query='''
INSERT INTO "tmv_app_doctopic"
("doc_id", "topic_id", "score", "scaled_score", "run_id")
VALUES (%s,%s,%s,%s,%s)
'''
cursor = connection.cursor()
cursor.executemany(query,values_list)
for i in range(glength//chunk_size+1):
dts = []
values_list = []
f = i*chunk_size
l = (i+1)*chunk_size
if l > glength:
l = glength
docs = range(f,l)
doc_batches = []
for p in range(ps):
doc_batches.append([x for x in docs if x % ps == p])
pool = Pool(processes=ps)
make_t0 = time()
values_list.append(pool.map(partial(f_gamma2, gamma=gamma,
docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
#dts.append(pool.map(partial(f_gamma, gamma=gamma,
# docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
pool.terminate()
make_t += time() - make_t0
django.db.connections.close_all()
add_t0 = time()
values_list = [item for sublist in values_list for item in sublist]
pool = Pool(processes=ps)
pool.map(insert_many,values_list)
pool.terminate()
add_t += time() - add_t0
gc.collect()
sys.stdout.flush()
stats = RunStats.objects.get(run_id=run_id)
stats.last_update=timezone.now()
stats.save()
management.call_command('update_run',run_id)
if __name__ == '__main__':
t0 = time()
main()
totalTime = time() - t0
tm = int(totalTime//60)
ts = int(totalTime-(tm*60))
print("done! total time: " + str(tm) + " minutes and " + str(ts) + " seconds")
print("a maximum of " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000) + " MB was used")
| dt = (
docUTset[gamma[0][d]],
topic_ids[gamma[1][d]],
gamma[2][d],
gamma[2][d] / docsizes[gamma[0][d]],
run_id
)
vl.append(dt) | conditional_block |
dtm.py | #!/usr/bin/env python3
import sys, resource, os, shutil, re, string, gc, subprocess
import django
import nltk
from multiprocess import Pool
from nltk.stem import SnowballStemmer
from nltk import word_tokenize
from time import time, sleep
from functools import partial
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from scipy.sparse import csr_matrix, find
import numpy as np
from django.utils import timezone
from django.core import management
# Import django stuff
sys.path.append('/home/galm/software/django/tmv/BasicBrowser')
# sys.path.append('/home/max/Desktop/django/BasicBrowser/')
import db as db
from tmv_app.models import *
from scoping.models import Doc, Query
from django.db import connection, transaction
cursor = connection.cursor()
def f_gamma2(docs,gamma,docsizes,docUTset,topic_ids):
vl = []
for d in docs:
if gamma[2][d] > 0.001:
dt = (
docUTset[gamma[0][d]],
topic_ids[gamma[1][d]],
gamma[2][d],
gamma[2][d] / docsizes[gamma[0][d]],
run_id
)
vl.append(dt)
return vl
def tokenize(text):
transtable = {ord(c): None for c in string.punctuation + string.digits}
tokens = nltk.word_tokenize(text.translate(transtable))
tokens = [i for i in tokens if len(i) > 2]
return tokens
def add_features(title):
django.db.connections.close_all()
term, created = Term.objects.get_or_create(title=title)
term.run_id.add(run_id)
django.db.connections.close_all()
return term.pk
class snowball_stemmer(object):
def __init__(self):
self.stemmer = SnowballStemmer("english")
def __call__(self, doc):
return [self.stemmer.stem(t) for t in tokenize(doc)]
def proc_docs(docs):
stoplist = set(nltk.corpus.stopwords.words("english"))
stoplist.add('elsevier')
stoplist.add('rights')
stoplist.add('reserved')
stoplist.add('john')
stoplist.add('wiley')
stoplist.add('sons')
stoplist.add('copyright')
    abstracts = [re.split(r"\([C-c]\) [1-2][0-9]{3} Elsevier",x.content)[0] for x in docs.iterator()]
    abstracts = [x.split("Published by Elsevier")[0] for x in abstracts]
    abstracts = [x.split("Copyright (C)")[0] for x in abstracts]
    abstracts = [re.split(r"\. \(C\) [1-2][0-9]{3} ",x)[0] for x in abstracts]
docsizes = [len(x) for x in abstracts]
ids = [x.UT for x in docs.iterator()]
PYs = [x.PY for x in docs.iterator()]
return [abstracts, docsizes, ids, stoplist, PYs]
def readInfo(p):
d = {}
with open(p) as f:
for line in f:
(key, val) = line.strip().split(' ',1)
try:
d[key] = int(val)
except:
d[key] = val
return(d)
def dtm_topic(topic_n,info,topic_ids,vocab_ids,ys):
print(topic_n)
django.db.connections.close_all()
p = "%03d" % (topic_n,)
p = "dtm-output/lda-seq/topic-"+p+"-var-e-log-prob.dat"
tlambda = np.fromfile(p, sep=" ").reshape((info['NUM_TERMS'],info['SEQ_LENGTH']))
for t in range(len(tlambda)):
for py in range(len(tlambda[t])):
score = np.exp(tlambda[t][py])
if score > 0.001:
tt = TopicTerm(
topic_id = topic_ids[topic_n],
term_id = vocab_ids[t],
PY = ys[py],
score = score,
run_id=run_id
)
tt.save()
#db.add_topic_term(topic_n+info['first_topic'], t+info['first_word'], py, score)
django.db.connections.close_all()
#########################################################
## Main function
def main():
try:
qid = int(sys.argv[1])
except:
print("please provide a query ID!")
sys.exit()
#sleep(7200)
Ks = [100,150,200,250]
#Ks = [10,20]
for K in Ks:
#K = 80
n_features=20000
global run_id
run_id = db.init(n_features,1)
stat = RunStats.objects.get(pk=run_id)
stat.method='BD'
        stat.query=Query.objects.get(pk=qid)
        stat.save()
##########################
## create input folder
if (os.path.isdir('dtm-input')):
shutil.rmtree('dtm-input')
os.mkdir('dtm-input')
yrange = list(range(1990,2017))
#yrange = list(range(2010,2012))
#yrange = list(range(1990,1997))
docs = Doc.objects.filter(
query=Query.objects.get(pk=qid),
content__iregex='\w',
relevant=True,
PY__in=yrange
).order_by('PY')
abstracts, docsizes, ids, stoplist, PYs = proc_docs(docs)
#########################
## Get the features now
print("Extracting word features...")
vectorizer = CountVectorizer(max_df=0.95, min_df=10,
max_features=n_features,
ngram_range=(1,1),
tokenizer=snowball_stemmer(),
stop_words=stoplist)
t0 = time()
dtm = vectorizer.fit_transform(abstracts)
print("done in %0.3fs." % (time() - t0))
del abstracts
gc.collect()
# Get the vocab, add it to db
vocab = vectorizer.get_feature_names()
vocab_ids = []
pool = Pool(processes=8)
vocab_ids.append(pool.map(add_features,vocab))
pool.terminate()
del vocab
vocab_ids = vocab_ids[0]
django.db.connections.close_all()
with open('dtm-input/foo-mult.dat','w') as mult:
for d in range(dtm.shape[0]):
words = find(dtm[d])
uwords = len(words[0])
mult.write(str(uwords) + " ")
for w in range(uwords):
index = words[1][w]
count = words[2][w]
mult.write(str(index)+":"+str(count)+" ")
mult.write('\n')
##########################
##put PY stuff in the seq file
ycounts = docs.values('PY').annotate(
count = models.Count('pk')
)
with open('dtm-input/foo-seq.dat','w') as seq:
seq.write(str(len(yrange)))
for y in ycounts:
seq.write('\n')
seq.write(str(y['count']))
##########################
# Run the dtm
subprocess.Popen([
"/home/galm/software/dtm/dtm/main",
"--ntopics={}".format(K),
"--mode=fit",
"--rng_seed=0",
"--initialize_lda=true",
"--corpus_prefix=/home/galm/projects/sustainability/dtm-input/foo",
"--outname=/home/galm/projects/sustainability/dtm-output",
"--top_chain_var=0.005",
"--alpha=0.01",
"--lda_sequence_min_iter=10",
"--lda_sequence_max_iter=20",
"--lda_max_em_iter=20"
]).wait()
##########################
## Upload the dtm results to the db
info = readInfo("dtm-output/lda-seq/info.dat")
topic_ids = db.add_topics(K)
#################################
# TopicTerms
topics = range(info['NUM_TOPICS'])
pool = Pool(processes=8)
pool.map(partial(
dtm_topic,
info=info,
topic_ids=topic_ids,
vocab_ids=vocab_ids,
ys = yrange
),topics)
pool.terminate()
gc.collect()
######################################
# Doctopics
gamma = np.fromfile('dtm-output/lda-seq/gam.dat', dtype=float,sep=" ")
        gamma = gamma.reshape((len(gamma)//info['NUM_TOPICS'],info['NUM_TOPICS']))
gamma = find(csr_matrix(gamma))
glength = len(gamma[0])
chunk_size = 100000 | parallel_add = True
all_dts = []
make_t = 0
add_t = 0
def insert_many(values_list):
query='''
INSERT INTO "tmv_app_doctopic"
("doc_id", "topic_id", "score", "scaled_score", "run_id")
VALUES (%s,%s,%s,%s,%s)
'''
cursor = connection.cursor()
cursor.executemany(query,values_list)
for i in range(glength//chunk_size+1):
dts = []
values_list = []
f = i*chunk_size
l = (i+1)*chunk_size
if l > glength:
l = glength
docs = range(f,l)
doc_batches = []
for p in range(ps):
doc_batches.append([x for x in docs if x % ps == p])
pool = Pool(processes=ps)
make_t0 = time()
values_list.append(pool.map(partial(f_gamma2, gamma=gamma,
docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
#dts.append(pool.map(partial(f_gamma, gamma=gamma,
# docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
pool.terminate()
make_t += time() - make_t0
django.db.connections.close_all()
add_t0 = time()
values_list = [item for sublist in values_list for item in sublist]
pool = Pool(processes=ps)
pool.map(insert_many,values_list)
pool.terminate()
add_t += time() - add_t0
gc.collect()
sys.stdout.flush()
stats = RunStats.objects.get(run_id=run_id)
stats.last_update=timezone.now()
stats.save()
management.call_command('update_run',run_id)
if __name__ == '__main__':
t0 = time()
main()
totalTime = time() - t0
tm = int(totalTime//60)
ts = int(totalTime-(tm*60))
print("done! total time: " + str(tm) + " minutes and " + str(ts) + " seconds")
print("a maximum of " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000) + " MB was used") | ps = 16 | random_line_split |
dtm.py | #!/usr/bin/env python3
import sys, resource, os, shutil, re, string, gc, subprocess
import django
import nltk
from multiprocess import Pool
from nltk.stem import SnowballStemmer
from nltk import word_tokenize
from time import time, sleep
from functools import partial
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from scipy.sparse import csr_matrix, find
import numpy as np
from django.utils import timezone
from django.core import management
# Import django stuff
sys.path.append('/home/galm/software/django/tmv/BasicBrowser')
# sys.path.append('/home/max/Desktop/django/BasicBrowser/')
import db as db
from tmv_app.models import *
from scoping.models import Doc, Query
from django.db import connection, transaction
cursor = connection.cursor()
def f_gamma2(docs,gamma,docsizes,docUTset,topic_ids):
vl = []
for d in docs:
if gamma[2][d] > 0.001:
dt = (
docUTset[gamma[0][d]],
topic_ids[gamma[1][d]],
gamma[2][d],
gamma[2][d] / docsizes[gamma[0][d]],
run_id
)
vl.append(dt)
return vl
def tokenize(text):
|
def add_features(title):
django.db.connections.close_all()
term, created = Term.objects.get_or_create(title=title)
term.run_id.add(run_id)
django.db.connections.close_all()
return term.pk
class snowball_stemmer(object):
def __init__(self):
self.stemmer = SnowballStemmer("english")
def __call__(self, doc):
return [self.stemmer.stem(t) for t in tokenize(doc)]
def proc_docs(docs):
stoplist = set(nltk.corpus.stopwords.words("english"))
stoplist.add('elsevier')
stoplist.add('rights')
stoplist.add('reserved')
stoplist.add('john')
stoplist.add('wiley')
stoplist.add('sons')
stoplist.add('copyright')
    abstracts = [re.split(r"\([C-c]\) [1-2][0-9]{3} Elsevier",x.content)[0] for x in docs.iterator()]
    abstracts = [x.split("Published by Elsevier")[0] for x in abstracts]
    abstracts = [x.split("Copyright (C)")[0] for x in abstracts]
    abstracts = [re.split(r"\. \(C\) [1-2][0-9]{3} ",x)[0] for x in abstracts]
docsizes = [len(x) for x in abstracts]
ids = [x.UT for x in docs.iterator()]
PYs = [x.PY for x in docs.iterator()]
return [abstracts, docsizes, ids, stoplist, PYs]
def readInfo(p):
d = {}
with open(p) as f:
for line in f:
(key, val) = line.strip().split(' ',1)
try:
d[key] = int(val)
except:
d[key] = val
return(d)
def dtm_topic(topic_n,info,topic_ids,vocab_ids,ys):
print(topic_n)
django.db.connections.close_all()
p = "%03d" % (topic_n,)
p = "dtm-output/lda-seq/topic-"+p+"-var-e-log-prob.dat"
tlambda = np.fromfile(p, sep=" ").reshape((info['NUM_TERMS'],info['SEQ_LENGTH']))
for t in range(len(tlambda)):
for py in range(len(tlambda[t])):
score = np.exp(tlambda[t][py])
if score > 0.001:
tt = TopicTerm(
topic_id = topic_ids[topic_n],
term_id = vocab_ids[t],
PY = ys[py],
score = score,
run_id=run_id
)
tt.save()
#db.add_topic_term(topic_n+info['first_topic'], t+info['first_word'], py, score)
django.db.connections.close_all()
#########################################################
## Main function
def main():
try:
qid = int(sys.argv[1])
except:
print("please provide a query ID!")
sys.exit()
#sleep(7200)
Ks = [100,150,200,250]
#Ks = [10,20]
for K in Ks:
#K = 80
n_features=20000
global run_id
run_id = db.init(n_features,1)
stat = RunStats.objects.get(pk=run_id)
stat.method='BD'
        stat.query=Query.objects.get(pk=qid)
        stat.save()
##########################
## create input folder
if (os.path.isdir('dtm-input')):
shutil.rmtree('dtm-input')
os.mkdir('dtm-input')
yrange = list(range(1990,2017))
#yrange = list(range(2010,2012))
#yrange = list(range(1990,1997))
docs = Doc.objects.filter(
query=Query.objects.get(pk=qid),
content__iregex='\w',
relevant=True,
PY__in=yrange
).order_by('PY')
abstracts, docsizes, ids, stoplist, PYs = proc_docs(docs)
#########################
## Get the features now
print("Extracting word features...")
vectorizer = CountVectorizer(max_df=0.95, min_df=10,
max_features=n_features,
ngram_range=(1,1),
tokenizer=snowball_stemmer(),
stop_words=stoplist)
t0 = time()
dtm = vectorizer.fit_transform(abstracts)
print("done in %0.3fs." % (time() - t0))
del abstracts
gc.collect()
# Get the vocab, add it to db
vocab = vectorizer.get_feature_names()
vocab_ids = []
pool = Pool(processes=8)
vocab_ids.append(pool.map(add_features,vocab))
pool.terminate()
del vocab
vocab_ids = vocab_ids[0]
django.db.connections.close_all()
with open('dtm-input/foo-mult.dat','w') as mult:
for d in range(dtm.shape[0]):
words = find(dtm[d])
uwords = len(words[0])
mult.write(str(uwords) + " ")
for w in range(uwords):
index = words[1][w]
count = words[2][w]
mult.write(str(index)+":"+str(count)+" ")
mult.write('\n')
##########################
##put PY stuff in the seq file
ycounts = docs.values('PY').annotate(
count = models.Count('pk')
)
with open('dtm-input/foo-seq.dat','w') as seq:
seq.write(str(len(yrange)))
for y in ycounts:
seq.write('\n')
seq.write(str(y['count']))
##########################
# Run the dtm
subprocess.Popen([
"/home/galm/software/dtm/dtm/main",
"--ntopics={}".format(K),
"--mode=fit",
"--rng_seed=0",
"--initialize_lda=true",
"--corpus_prefix=/home/galm/projects/sustainability/dtm-input/foo",
"--outname=/home/galm/projects/sustainability/dtm-output",
"--top_chain_var=0.005",
"--alpha=0.01",
"--lda_sequence_min_iter=10",
"--lda_sequence_max_iter=20",
"--lda_max_em_iter=20"
]).wait()
##########################
## Upload the dtm results to the db
info = readInfo("dtm-output/lda-seq/info.dat")
topic_ids = db.add_topics(K)
#################################
# TopicTerms
topics = range(info['NUM_TOPICS'])
pool = Pool(processes=8)
pool.map(partial(
dtm_topic,
info=info,
topic_ids=topic_ids,
vocab_ids=vocab_ids,
ys = yrange
),topics)
pool.terminate()
gc.collect()
######################################
# Doctopics
gamma = np.fromfile('dtm-output/lda-seq/gam.dat', dtype=float,sep=" ")
gamma = gamma.reshape((len(gamma)//info['NUM_TOPICS'],info['NUM_TOPICS'])) # integer division: reshape needs int dimensions in Python 3
gamma = find(csr_matrix(gamma))
glength = len(gamma[0])
chunk_size = 100000
ps = 16
parallel_add = True
all_dts = []
make_t = 0
add_t = 0
def insert_many(values_list):
query='''
INSERT INTO "tmv_app_doctopic"
("doc_id", "topic_id", "score", "scaled_score", "run_id")
VALUES (%s,%s,%s,%s,%s)
'''
cursor = connection.cursor()
cursor.executemany(query,values_list)
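# Usage sketch (added, hypothetical values): insert_many([(doc_id, topic_id, 0.42, 0.1, run_id), ...])
# runs the parameterised INSERT once per tuple via cursor.executemany; connections are closed
# before each Pool is created so that every worker process opens its own database connection.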
for i in range(glength//chunk_size+1):
dts = []
values_list = []
f = i*chunk_size
l = (i+1)*chunk_size
if l > glength:
l = glength
docs = range(f,l)
doc_batches = []
for p in range(ps):
doc_batches.append([x for x in docs if x % ps == p])
pool = Pool(processes=ps)
make_t0 = time()
values_list.append(pool.map(partial(f_gamma2, gamma=gamma,
docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
#dts.append(pool.map(partial(f_gamma, gamma=gamma,
# docsizes=docsizes,docUTset=ids,topic_ids=topic_ids),doc_batches))
pool.terminate()
make_t += time() - make_t0
django.db.connections.close_all()
add_t0 = time()
values_list = [item for sublist in values_list for item in sublist]
pool = Pool(processes=ps)
pool.map(insert_many,values_list)
pool.terminate()
add_t += time() - add_t0
gc.collect()
sys.stdout.flush()
stats = RunStats.objects.get(run_id=run_id)
stats.last_update=timezone.now()
stats.save()
management.call_command('update_run',run_id)
if __name__ == '__main__':
t0 = time()
main()
totalTime = time() - t0
tm = int(totalTime//60)
ts = int(totalTime-(tm*60))
print("done! total time: " + str(tm) + " minutes and " + str(ts) + " seconds")
print("a maximum of " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000) + " MB was used")
| transtable = {ord(c): None for c in string.punctuation + string.digits}
tokens = nltk.word_tokenize(text.translate(transtable))
tokens = [i for i in tokens if len(i) > 2]
return tokens | identifier_body |
Section3.py |
# coding: utf-8
# In[ ]:
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:,[2,3]]
y = iris.target
print("Class labels:",np.unique(y))
print(iris)
# In[ ]:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 1,stratify = y)
#stratify >> stratified sampling (keeps the class-label proportions the same in the train and test splits) ... clever
print("label counts in y:",np.bincount(y))
print("label counts in y_train:",np.bincount(y_train))
print("label counts in y_test:",np.bincount(y_test))
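# In[ ]:
# Added illustration: np.bincount simply counts how often each integer label occurs,
# which is why it is used above to verify that the stratified split keeps the class ratios.
import numpy as np
print(np.bincount(np.array([0, 0, 1, 2, 2, 2]))) # -> [2 1 3]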
# In[ ]:
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(X_train)
X_train_std = ss.transform(X_train)
X_test_std = ss.transform(X_test)
# print(X_train_std,X_test_std)
#fit and transform are completely separate methods
from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter = 40,eta0 = 0.1,random_state = 1)
ppn.fit(X_train_std,y_train)
# In[ ]:
y_pred = ppn.predict(X_test_std)
print("miss_classified:%d" % (y_test != y_pred).sum())
# numbers embedded with % -- apparently known as sprintf-style formatting
# In[ ]:
from sklearn.metrics import accuracy_score
print("Accuracy: %.2f" % accuracy_score(y_test,y_pred))
print("Accuracy: %.2f" % ppn.score(X_test_std,y_test))
get_ipython().run_line_magic('whos', '')
# In[ ]:
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X,y,classifier,test_idx = None,resolution = 0.02):
markers = ("s","x","o","^","v")
colors = ("red","blue","lightgreen","gray","cyan")
cmap = ListedColormap(colors[:len(np.unique(y))])
x1_min,x1_max = X[:,0].min() - 1 ,X[:,0].max() + 1
x2_min,x2_max = X[:,1].min() - 1 ,X[:,1].max() + 1
xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),np.arange(x2_min,x2_max,resolution))
Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1,xx2,Z,alpha = 0.3 ,cmap = cmap)
plt.xlim(xx1.min(),xx1.max())
plt.ylim(xx2.min(),xx2.max())
for idx ,cl in enumerate(np.unique(y)):
plt.scatter(x = X[y == cl,0] , y = X[y == cl,1] ,alpha = 0.8 , c = colors[idx] ,marker = markers[idx] , label = cl, edgecolor = "black" )
if test_idx:
X_test,y_test = X[test_idx,:],y[test_idx]
plt.scatter(X_test[:,0],X_test[:,1],c = "",edgecolor = "black", | np.hstack((y_train,y_test))
plot_decision_regions(X = X_combined_std,y = y_combined,classifier = ppn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
import math
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
z = np.arange(-7,7,0.1)
phi_z = sigmoid(z)
plt.plot(z,phi_z)
plt.axvline(0.0,color = "k") #axvline >> draws a vertical line
plt.ylim(-0.1,1.1)
plt.xlabel("z")
plt.ylabel("$\phi$") # writes the phi symbol
plt.yticks([0.0,0.5,1.0])
ax = plt.gca() #gca >> graphic of axis
ax.yaxis.grid(True)
plt.tight_layout()
plt.show()
# In[ ]:
#for the case where the class labels are 0 or 1
def cost_1(z):
return -np.log(sigmoid(z))
def cost_0(z):
return -np.log(1-sigmoid(z))
z = np.arange(-10,10,0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z,c1,label = "J(w) y = 1")
c0 = [cost_0(x) for x in z]
plt.plot(phi_z,c0,label = "J(w) y = -1")
plt.ylim(0.0,5.1)
plt.xlim([0,1])
plt.xlabel("$\phi$(z)")
plt.ylabel("J(w)")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
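# In[ ]:
# Note (added): the two curves above are the pieces of the logistic negative log-likelihood
# J(w) = -y*log(phi(z)) - (1 - y)*log(1 - phi(z)),
# which reduces to cost_1(z) when y = 1 and to cost_0(z) when y = 0; the same expression is
# used as the cost inside LogisticRegressionGD.fit below.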
# In[ ]:
class LogisticRegressionGD(object):
def __init__(self,eta = 0.05,n_iter = 100,random_state = 1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self,X,y):
rgen = np.random.RandomState(self.random_state)
self.w = rgen.normal(loc = 0.0,scale = 0.01,size =1+ X.shape[1])
self.gosagun = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w[1:] += self.eta * X.T.dot(errors)
self.w[0] += self.eta * errors.sum() #errors.sum() totals the errors for the bias update (+= modifies w in place)
cost = -y.dot(np.log(output)) - (1-y).dot(np.log(1-output))
self.gosagun.append(cost)
return self
def net_input(self,X):
return np.dot(X,self.w[1:]) + self.w[0]
def activation(self,z):
return 1.0 / (1.0 + np.exp(-np.clip(z,-250,250))) #np.clip limits z to the range [-250, 250] to avoid overflow in exp
def predict(self,X):
return np.where(self.activation(self.net_input(X)) >= 0.5,1,-1)
# In[ ]:
X_train_01_subset = X_train[ (y_train == 0) | (y_train == 1) ]
y_train_01_subset = y_train[ (y_train == 0) | (y_train == 1) ]
lrgd = LogisticRegressionGD(eta = 0.05,n_iter = 1000,random_state = 1)
lrgd.fit(X_train_01_subset,y_train_01_subset)
plot_decision_regions(X_train_01_subset,y_train_01_subset,classifier = lrgd)
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C = 100.0,random_state = 1)
lr.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = lr,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
lr.predict_proba(X_test_std) #probability of 1st,2nd,3rd
# In[ ]:
lr.predict_proba(X_test_std[:10,:]).argmax(axis = 1)
# In[ ]:
lr.predict(X_test_std[:10,:])
# In[ ]:
weights ,params = [],[]
for c in np.arange(-5,5):
lr = LogisticRegression(C = 10.0 ** c,random_state = 1)
lr.fit(X_train_std,y_train)
weights.append(lr.coef_[1])
params.append(10.0 ** c)
weights = np.array(weights)
plt.plot(params,weights[:,0],label = "petal length")
plt.plot(params,weights[:,1],label = "petal width")
plt.ylabel("weight coefficient")
plt.xlabel("c")
plt.xscale("log")
plt.show()
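# In[ ]:
# Note (added): in scikit-learn's LogisticRegression, C is the inverse regularisation
# strength (C = 1/lambda for the L2 penalty), so the weight coefficients plotted above
# shrink towards zero as C decreases.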
# In[ ]:
from sklearn.svm import SVC
svm = SVC(kernel = "linear" , C=1.0 ,random_state = 1)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import SGDClassifier
ppn = SGDClassifier(loss = "perceptron")
lr = SGDClassifier(loss = "log")
svm = SGDClassifier(loss = "hinge")
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1)
X_xor = np.random.randn(200,2)#random.randn takes the shape of the ndarray to generate
y_xor = np.logical_xor(X_xor[:,0] > 0 ,X_xor[:,1] > 0)#assign True or False via the logical XOR of the two conditions
y_xor = np.where(y_xor,1,-1) #assign True >> 1, False >> -1
plt.scatter(X_xor[y_xor == 1,0],X_xor[y_xor ==1,1] ,c = "b", marker = "x",label = "1")
plt.scatter(X_xor[y_xor == -1,0],X_xor[y_xor == -1,1],c = "r",marker = "s",label = "-1")
plt.xlim([-3,3])
plt.ylim([-3,3])
plt.legend(loc = "best")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1 ,gamma = 0.10,C = 10.0)
svm.fit(X_xor,y_xor)
plot_decision_regions(X_xor,y_xor,classifier = svm)
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1,gamma = 0.2 ,C = 1.0)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf" ,random_state = 1,gamma = 100 ,C = 1.0)#rbf >> Radial Basis Function kernel (Gaussian kernel)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,149))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
#decision Tree
#information gain impurity measures: 1. Gini impurity 2. entropy 3. classification error
import matplotlib.pyplot as plt
import numpy as np
def gini(p):
return (p)*(1 - (p)) + (1 - p) * (1 - (1 - p))
def entropy(p):#entropy
return - p * np.log2(p) - (1 - p) * np.log2((1 - p))
def error(p):#classification error
return 1 - np.max([p,1 - p])
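# Added sanity check: at p = 0.5 the definitions above reach their maxima for a binary split,
# gini(0.5) = 2*0.5*0.5 = 0.5, entropy(0.5) = 1.0 bit and error(0.5) = 0.5.
print(gini(0.5), entropy(0.5), error(0.5)) # -> 0.5 1.0 0.5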
x = np.arange(0.0 ,1.0 ,0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
functions = [ent,sc_ent,gini(x),err]
names = ["entropy","scaled_entropy","Gini Impurity","misclassification error"]
linestyles = ["-","-","--","-."]
colors = ["black","red","green","cyan"]
for i , lab ,ls, c in zip(functions,names,linestyles,colors):
line = ax.plot(x,i,label = lab ,linestyle = ls, color = c, lw =2)
ax.legend(loc = "upper center",bbox_to_anchor = (0.5,1.15),ncol = 5 ,fancybox = True, shadow = False)
ax.axhline(y = 0.5 ,linewidth = 1 ,color = "k", linestyle = "--")
ax.axhline(y = 1.0 ,linewidth = 1, color = "k",linestyle = "--")
plt.xlabel("p")
plt.ylabel("Impurity index")
plt.show()
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion = "gini",max_depth = 4 ,random_state = 1)
tree.fit(X_train,y_train)
X_combined = np.vstack((X_train,X_test))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X_combined,y_combined,classifier = tree,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz
dot_data = export_graphviz(tree,filled = True,rounded = True ,class_names = ["Setosa","Versicolor","Virginica"],feature_names = ["petal length","petal width"],out_file = None)
graph = graph_from_dot_data(dot_data)
graph.write_png("tree.png")
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion = "gini",n_estimators = 25,random_state = 1,n_jobs = 2)
#n_estimators >> number of decision trees in the forest
forest.fit(X_train,y_train)
plot_decision_regions(X_combined,y_combined,classifier = forest,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
#minkowski >> Euclidean distance when p = 2
knn = KNeighborsClassifier(n_neighbors = 5,p = 2,metric = "minkowski") #instantiation was missing; n_neighbors/p are typical values, assumed here
knn.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = knn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
| alpha = 1,marker = "o",s = 100, label = "test_set")
# In[ ]:
X_combined_std = np.vstack((X_train_std,X_test_std))
y_combined = | conditional_block |
Section3.py |
# coding: utf-8
# In[ ]:
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:,[2,3]]
y = iris.target
print("Class labels:",np.unique(y))
print(iris)
# In[ ]:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 1,stratify = y)
#stratify >> 層かプリンティング(クラスラベルの比率を一定にする) …賢い
print("label counts in y:",np.bincount(y))
print("label counts in y_train:",np.bincount(y_train))
print("label counts in y_test:",np.bincount(y_test))
# In[ ]:
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(X_train)
X_train_std = ss.transform(X_train)
X_test_std = ss.transform(X_test)
# print(X_train_std,X_test_std)
#fit と transform は全く別のメソッド
from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter = 40,eta0 = 0.1,random_state = 1)
ppn.fit(X_train_std,y_train)
# In[ ]:
y_pred = ppn.predict(X_test_std)
print("miss_classified:%d" % (y_test != y_pred).sum())
# %で埋め込み数字 sprintf というらしい
# In[ ]:
from sklearn.metrics import accuracy_score
print("Accuracy: %.2f" % accuracy_score(y_test,y_pred))
print("Accuracy: %.2f" % ppn.score(X_test_std,y_test))
get_ipython().run_line_magic('whos', '')
# In[ ]:
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X,y,classifier,test_idx = None,resolution = 0.02):
markers = ("s","x","o","^","v")
colors = ("red","blue","lightgreen","gray","cyan")
cmap = ListedColormap(colors[:len(np.unique(y))])
x1_min,x1_max = X[:,0].min() - 1 ,X[:,0].max() + 1
x2_min,x2_max = X[:,1].min() - 1 ,X[:,1].max() + 1
xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),np.arange(x2_min,x2_max,resolution))
Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1,xx2,Z,alpha = 0.3 ,cmap = cmap)
plt.xlim(xx1.min(),xx1.max())
plt.ylim(xx2.min(),xx2.max())
for idx ,cl in enumerate(np.unique(y)):
plt.scatter(x = X[y == cl,0] , y = X[y == cl,1] ,alpha = 0.8 , c = colors[idx] ,marker = markers[idx] , label = cl, edgecolor = "black" )
if test_idx:
X_test,y_test = X[test_idx,:],y[test_idx]
plt.scatter(X_test[:,0],X_test[:,1],c = "",edgecolor = "black",alpha = 1,marker = "o",s = 100, label = "test_set")
# In[ ]:
X_combined_std = np.vstack((X_train_std,X_test_std))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X = X_combined_std,y = y_combined,classifier = ppn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
import math
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
z = np.arange(-7,7,0.1)
phi_z = sigmoid(z)
plt.plot(z,phi_z)
plt.axvline(0.0,color = "k") #axvline >> 垂直線
plt.ylim(-0.1,1.1)
plt.xlabel("z")
plt.ylabel("$\phi$") # phiの記号の記述
plt.yticks([0.0,0.5,1.0])
ax = plt.gca() #gca >> graphic of axis
ax.yaxis.grid(True)
plt.tight_layout()
plt.show()
# In[ ]:
#class label が 0 or 1 の場合
def cost_1(z):
return -np.log(sigmoid(z))
def cost_0(z):
return -np.log(1-sigmoid(z))
z = np.arange(-10,10,0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) f | ,c1,label = "J(w) y = 1")
c0 = [cost_0(x) for x in z]
plt.plot(phi_z,c0,label = "J(w) y = -1")
plt.ylim(0.0,5.1)
plt.xlim([0,1])
plt.xlabel("$\phi$(z)")
plt.ylabel("J(w)")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
class LogisticRegressionGD(object):
def __init__(self,eta = 0.05,n_iter = 100,random_state = 1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self,X,y):
rgen = np.random.RandomState(self.random_state)
self.w = rgen.normal(loc = 0.0,scale = 0.01,size =1+ X.shape[1])
self.gosagun = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w[1:] += self.eta * X.T.dot(errors)
self.w[0] += self.eta * errors.sum() #.sum() >> 破壊的メソッド
cost = -y.dot(np.log(output)) - (1-y).dot(np.log(1-output))
self.gosagun.append(cost)
return self
def net_input(self,X):
return np.dot(X,self.w[1:]) + self.w[0]
def activation(self,z):
return 1.0 / (1.0 + np.exp(-np.clip(z,-250,250))) #z >250 の時 z -250 に変換する
def predict(self,X):
return np.where(self.activation(self.net_input(X)) >= 0.5,1,-1)
# In[ ]:
X_train_01_subset = X_train[ (y_train == 0) | (y_train == 1) ]
y_train_01_subset = y_train[ (y_train == 0) | (y_train == 1) ]
lrgd = LogisticRegressionGD(eta = 0.05,n_iter = 1000,random_state = 1)
lrgd.fit(X_train_01_subset,y_train_01_subset)
plot_decision_regions(X_train_01_subset,y_train_01_subset,classifier = lrgd)
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C = 100.0,random_state = 1)
lr.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = lr,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
lr.predict_proba(X_test_std) #probability of 1st,2nd,3rd
# In[ ]:
lr.predict_proba(X_test_std[:10,:]).argmax(axis = 1)
# In[ ]:
lr.predict(X_test_std[:10,:])
# In[ ]:
weights ,params = [],[]
for c in np.arange(-5,5):
lr = LogisticRegression(C = 10.0 ** c,random_state = 1)
lr.fit(X_train_std,y_train)
weights.append(lr.coef_[1])
params.append(10.0 ** c)
weights = np.array(weights)
plt.plot(params,weights[:,0],label = "petal length")
plt.plot(params,weights[:,1],label = "petal width")
plt.ylabel("weight coefficient")
plt.xlabel("c")
plt.xscale("log")
plt.show()
# In[ ]:
from sklearn.svm import SVC
svm = SVC(kernel = "linear" , C=1.0 ,random_state = 1)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import SGDClassifier
ppn = SGDClassifier(loss = "perceptron")
lr = SGDClassifier(loss = "log")
svm = SGDClassifier(loss = "hinge")
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1)
X_xor = np.random.randn(200,2)#random.randn(nand.arrayの形)
y_xor = np.logical_xor(X_xor[:,0] > 0 ,X_xor[:,1] > 0)#論理和でTrue or False を割り当てる
y_xor = np.where(y_xor,1,-1) #True >> 1 ,False >> 1の割り当て
plt.scatter(X_xor[y_xor == 1,0],X_xor[y_xor ==1,1] ,c = "b", marker = "x",label = "1")
plt.scatter(X_xor[y_xor == -1,0],X_xor[y_xor == -1,1],c = "r",marker = "s",label = "-1")
plt.xlim([-3,3])
plt.ylim([-3,3])
plt.legend(loc = "best")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1 ,gamma = 0.10,C = 10.0)
svm.fit(X_xor,y_xor)
plot_decision_regions(X_xor,y_xor,classifier = svm)
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1,gamma = 0.2 ,C = 1.0)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal lenght")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf" ,random_state = 1,gamma = 100 ,C = 1.0)#rbf >> Radial Basis Function 動型基底カーネル(ガウスカーネル)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,149))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
#decision Tree
#informatin gain 1.Gini impurity 2.entropy 3.classification error
import matplotlib.pyplot as plt
import numpy as np
def gini(p):
return (p)*(1 - (p)) + (1 - p) * (1 - (1 - p))
def entropy(p):#エントロピー
return - p * np.log2(p) - (1 - p) * np.log2((1 - p))
def error(p):#分類誤差
return 1 - np.max([p,1 - p])
x = np.arange(0.0 ,1.0 ,0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
functions = [ent,sc_ent,gini(x),err]
names = ["entropy","scaled_entropy","Gini Impurity","misclassification error"]
linestyles = ["-","-","--","-."]
colors = ["black","red","green","cyan"]
for i , lab ,ls, c in zip(functions,names,linestyles,colors):
line = ax.plot(x,i,label = lab ,linestyle = ls, color = c, lw =2)
ax.legend(loc = "upper center",bbox_to_anchor = (0.5,1.15),ncol = 5 ,fancybox = True, shadow = False)
ax.axhline(y = 0.5 ,linewidth = 1 ,color = "k", linestyle = "--")
ax.axhline(y = 1.0 ,linewidth = 1, color = "k",linestyle = "--")
plt.xlabel("p")
plt.ylabel("Inpurity index")
plt.show()
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion = "gini",max_depth = 4 ,random_state = 1)
tree.fit(X_train,y_train)
X_combined = np.vstack((X_train,X_test))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X_combined,y_combined,classifier = tree,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz
dot_data = export_graphviz(tree,filled = True,rounded = True ,class_names = ["Setosa","Versicolor","Virginica"],feature_names = ["petal length","petal width"],out_file = None)
graph = graph_from_dot_data(dot_data)
graph.write_png("tree.png")
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion = "gini",n_estimators = 25,random_state = 1,n_jobs = 2)
#n_estimators >> 決定木の数
forest.fit(X_train,y_train)
plot_decision_regions(X_combined,y_combined,classifier = forest,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
#minkowski >> Euclidean distance when p = 2
knn = KNeighborsClassifier(n_neighbors = 5,p = 2,metric = "minkowski") #instantiation was missing; n_neighbors/p are typical values, assumed here
knn.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = knn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
| or x in z]
plt.plot(phi_z | identifier_body |
Section3.py | # coding: utf-8
# In[ ]:
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:,[2,3]]
y = iris.target
print("Class labels:",np.unique(y))
print(iris)
# In[ ]:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 1,stratify = y)
#stratify >> 層かプリンティング(クラスラベルの比率を一定にする) …賢い
print("label counts in y:",np.bincount(y))
print("label counts in y_train:",np.bincount(y_train))
print("label counts in y_test:",np.bincount(y_test))
# In[ ]:
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(X_train)
X_train_std = ss.transform(X_train)
X_test_std = ss.transform(X_test)
# print(X_train_std,X_test_std)
#fit と transform は全く別のメソッド
from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter = 40,eta0 = 0.1,random_state = 1)
ppn.fit(X_train_std,y_train)
# In[ ]:
y_pred = ppn.predict(X_test_std)
print("miss_classified:%d" % (y_test != y_pred).sum())
# %で埋め込み数字 sprintf というらしい
# In[ ]:
from sklearn.metrics import accuracy_score
print("Accuracy: %.2f" % accuracy_score(y_test,y_pred))
print("Accuracy: %.2f" % ppn.score(X_test_std,y_test))
get_ipython().run_line_magic('whos', '')
# In[ ]:
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X,y,classifier,test_idx = None,resolution = 0.02):
markers = ("s","x","o","^","v")
colors = ("red","blue","lightgreen","gray","cyan")
cmap = ListedColormap(colors[:len(np.unique(y))])
x1_min,x1_max = X[:,0].min() - 1 ,X[:,0].max() + 1
x2_min,x2_max = X[:,1].min() - 1 ,X[:,1].max() + 1
xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),np.arange(x2_min,x2_max,resolution))
Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1,xx2,Z,alpha = 0.3 ,cmap = cmap)
plt.xlim(xx1.min(),xx1.max())
plt.ylim(xx2.min(),xx2.max())
for idx ,cl in enumerate(np.unique(y)):
plt.scatter(x = X[y == cl,0] , y = X[y == cl,1] ,alpha = 0.8 , c = colors[idx] ,marker = markers[idx] , label = cl, edgecolor = "black" )
if test_idx:
X_test,y_test = X[test_idx,:],y[test_idx]
plt.scatter(X_test[:,0],X_test[:,1],c = "",edgecolor = "black",alpha = 1,marker = "o",s = 100, label = "test_set")
# In[ ]:
X_combined_std = np.vstack((X_train_std,X_test_std))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X = X_combined_std,y = y_combined,classifier = ppn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
import math
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
z = np.arange(-7,7,0.1)
phi_z = sigmoid(z)
plt.plot(z,phi_z)
plt.axvline(0.0,color = "k") #axvline >> 垂直線
plt.ylim(-0.1,1.1)
plt.xlabel("z")
plt.ylabel("$\phi$") # phiの記号の記述
plt.yticks([0.0,0.5,1.0])
ax = plt.gca() #gca >> graphic of axis
ax.yaxis.grid(True)
plt.tight_layout()
plt.show()
# In[ ]:
#class label が 0 or 1 の場合
def cost_1(z):
return -np.log(sigmoid(z))
def cost_0(z):
return -np.log(1-sigmoid(z))
z = np.arange(-10,10,0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z,c1,label = "J(w) y = 1")
c0 = [cost_0(x) for x in z]
plt.plot(phi_z,c0,label = "J(w) y = -1")
plt.ylim(0.0,5.1)
plt.xlim([0,1])
plt.xlabel("$\phi$(z)")
plt.ylabel("J(w)")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
class LogisticRegressionGD(object):
def __init__(self,eta = 0.05,n_iter = 100,random_state = 1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self,X,y):
rgen = np.random.RandomState(self.random_state)
self.w = rgen.normal(loc = 0.0,scale = 0.01,size =1+ X.shape[1])
self.gosagun = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w[1:] += self.eta * X.T.dot(errors)
self.w[0] += self.eta * errors.sum() #.sum() >> 破壊的メソッド
cost = -y.dot(np.log(output)) - (1-y).dot(np.log(1-output))
self.gosagun.append(cost)
return self
def net_input(self,X):
return np.dot(X,self.w[1:]) + self.w[0]
def activation(self,z):
return 1.0 / (1.0 + np.exp(-np.clip(z,-250,250))) #z >250 の時 z -250 に変換する
def predict(self,X):
return np.where(self.activation(self.net_input(X)) >= 0.5,1,-1)
# In[ ]:
X_train_01_subset = X_train[ (y_train == 0) | (y_train == 1) ]
y_train_01_subset = y_train[ (y_train == 0) | (y_train == 1) ]
lrgd = LogisticRegressionGD(eta = 0.05,n_iter = 1000,random_state = 1)
lrgd.fit(X_train_01_subset,y_train_01_subset)
plot_decision_regions(X_train_01_subset,y_train_01_subset,classifier = lrgd)
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C = 100.0,random_state = 1)
lr.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = lr,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
lr.predict_proba(X_test_std) #probability of 1st,2nd,3rd
# In[ ]:
lr.predict_proba(X_test_std[:10,:]).argmax(axis = 1)
# In[ ]:
lr.predict(X_test_std[:10,:])
# In[ ]:
weights ,params = [],[]
for c in np.arange(-5,5):
lr = LogisticRegression(C = 10.0 ** c,random_state = 1)
lr.fit(X_train_std,y_train)
weights.append(lr.coef_[1])
params.append(10.0 ** c)
weights = np.array(weights)
plt.plot(params,weights[:,0],label = "petal length")
plt.plot(params,weights[:,1],label = "petal width")
plt.ylabel("weight coefficient")
plt.xlabel("c")
plt.xscale("log")
plt.show()
# In[ ]:
from sklearn.svm import SVC
svm = SVC(kernel = "linear" , C=1.0 ,random_state = 1)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import SGDClassifier
ppn = SGDClassifier(loss = "perceptron")
lr = SGDClassifier(loss = "log")
svm = SGDClassifier(loss = "hinge")
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1)
X_xor = np.random.randn(200,2)#random.randn(nand.arrayの形)
y_xor = np.logical_xor(X_xor[:,0] > 0 ,X_xor[:,1] > 0)#論理和でTrue or False を割り当てる
y_xor = np.where(y_xor,1,-1) #True >> 1 ,False >> 1の割り当て
plt.scatter(X_xor[y_xor == 1,0],X_xor[y_xor ==1,1] ,c = "b", marker = "x",label = "1")
plt.scatter(X_xor[y_xor == -1,0],X_xor[y_xor == -1,1],c = "r",marker = "s",label = "-1")
plt.xlim([-3,3])
plt.ylim([-3,3])
plt.legend(loc = "best")
|
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1 ,gamma = 0.10,C = 10.0)
svm.fit(X_xor,y_xor)
plot_decision_regions(X_xor,y_xor,classifier = svm)
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1,gamma = 0.2 ,C = 1.0)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal lenght")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf" ,random_state = 1,gamma = 100 ,C = 1.0)#rbf >> Radial Basis Function 動型基底カーネル(ガウスカーネル)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,149))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
#decision Tree
#informatin gain 1.Gini impurity 2.entropy 3.classification error
import matplotlib.pyplot as plt
import numpy as np
def gini(p):
return (p)*(1 - (p)) + (1 - p) * (1 - (1 - p))
def entropy(p):#エントロピー
return - p * np.log2(p) - (1 - p) * np.log2((1 - p))
def error(p):#分類誤差
return 1 - np.max([p,1 - p])
x = np.arange(0.0 ,1.0 ,0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
functions = [ent,sc_ent,gini(x),err]
names = ["entropy","scaled_entropy","Gini Impurity","misclassification error"]
linestyles = ["-","-","--","-."]
colors = ["black","red","green","cyan"]
for i , lab ,ls, c in zip(functions,names,linestyles,colors):
line = ax.plot(x,i,label = lab ,linestyle = ls, color = c, lw =2)
ax.legend(loc = "upper center",bbox_to_anchor = (0.5,1.15),ncol = 5 ,fancybox = True, shadow = False)
ax.axhline(y = 0.5 ,linewidth = 1 ,color = "k", linestyle = "--")
ax.axhline(y = 1.0 ,linewidth = 1, color = "k",linestyle = "--")
plt.xlabel("p")
plt.ylabel("Inpurity index")
plt.show()
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion = "gini",max_depth = 4 ,random_state = 1)
tree.fit(X_train,y_train)
X_combined = np.vstack((X_train,X_test))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X_combined,y_combined,classifier = tree,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz
dot_data = export_graphviz(tree,filled = True,rounded = True ,class_names = ["Setosa","Versicolor","Virginica"],feature_names = ["petal length","petal width"],out_file = None)
graph = graph_from_dot_data(dot_data)
graph.write_png("tree.png")
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion = "gini",n_estimators = 25,random_state = 1,n_jobs = 2)
#n_estimators >> 決定木の数
forest.fit(X_train,y_train)
plot_decision_regions(X_combined,y_combined,classifier = forest,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
#minkowski >> Euclidean distance when p = 2
knn = KNeighborsClassifier(n_neighbors = 5,p = 2,metric = "minkowski") #instantiation was missing; n_neighbors/p are typical values, assumed here
knn.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = knn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show() | plt.tight_layout()
plt.show()
| random_line_split |
Section3.py |
# coding: utf-8
# In[ ]:
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:,[2,3]]
y = iris.target
print("Class labels:",np.unique(y))
print(iris)
# In[ ]:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 1,stratify = y)
#stratify >> 層かプリンティング(クラスラベルの比率を一定にする) …賢い
print("label counts in y:",np.bincount(y))
print("label counts in y_train:",np.bincount(y_train))
print("label counts in y_test:",np.bincount(y_test))
# In[ ]:
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(X_train)
X_train_std = ss.transform(X_train)
X_test_std = ss.transform(X_test)
# print(X_train_std,X_test_std)
#fit と transform は全く別のメソッド
from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter = 40,eta0 = 0.1,random_state = 1)
ppn.fit(X_train_std,y_train)
# In[ ]:
y_pred = ppn.predict(X_test_std)
print("miss_classified:%d" % (y_test != y_pred).sum())
# %で埋め込み数字 sprintf というらしい
# In[ ]:
from sklearn.metrics import accuracy_score
print("Accuracy: %.2f" % accuracy_score(y_test,y_pred))
print("Accuracy: %.2f" % ppn.score(X_test_std,y_test))
get_ipython().run_line_magic('whos', '')
# In[ ]:
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X,y,classifier,test_idx = None,resolution = 0.02):
markers = ("s","x","o","^","v")
colors = ("red","blue","lightgreen","gray","cyan")
cmap = ListedColormap(colors[:len(np.unique(y))])
x1_min,x1_max = X[:,0].min() - 1 ,X[:,0].max() + 1
x2_min,x2_max = X[:,1].min() - 1 ,X[:,1].max() + 1
xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),np.arange(x2_min,x2_max,resolution))
Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1,xx2,Z,alpha = 0.3 ,cmap = cmap)
plt.xlim(xx1.min(),xx1.max())
plt.ylim(xx2.min(),xx2.max())
for idx ,cl in enumerate(np.unique(y)):
plt.scatter(x = X[y == cl,0] , y = X[y == cl,1] ,alpha = 0.8 , c = colors[idx] ,marker = markers[idx] , label = cl, edgecolor = "black" )
if test_idx:
X_test,y_test = X[test_idx,:],y[test_idx]
plt.scatter(X_test[:,0],X_test[:,1],c = "",edgecolor = "black",alpha = 1,marker = "o",s = 100, label = "test_set")
# In[ ]:
X_combined_std = np.vstack((X_train_std,X_test_std))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X = X_combined_std,y = y_combined,classifier = ppn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
import math
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
z = np.arange(-7,7,0.1)
phi_z = sigmoid(z)
plt.plot(z,phi_z)
plt.axvline(0.0,color = "k") #axvline >> 垂直線
plt.ylim(-0.1,1.1)
plt.xlabel("z")
plt.ylabel("$\phi$") # phiの記号の記述
plt.yticks([0.0,0.5,1.0])
ax = plt.gca() #gca >> graphic of axis
ax.yaxis.grid(True)
plt.tight_layout()
plt.show()
# In[ ]:
#class label が 0 or 1 の場合
def cost_1(z):
return -np.log(sigmoid(z))
def cost_0(z):
return -np.log(1-sigmoid(z))
z = np.arange(-10,10,0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z,c1,label = "J(w) y = 1")
c0 = [cost_0(x) for x in z]
plt.plot(phi_z,c0,label = "J(w) y = -1")
plt.ylim(0.0,5.1)
plt.xlim([0,1])
plt.xlabel("$\phi$(z)")
plt.ylabel("J(w)")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
class LogisticRegressionGD(object):
def __init__(self,eta = 0.05,n_iter = 100,random_state = 1):
self.eta = eta
self.n_iter = n | random_state = random_state
def fit(self,X,y):
rgen = np.random.RandomState(self.random_state)
self.w = rgen.normal(loc = 0.0,scale = 0.01,size =1+ X.shape[1])
self.gosagun = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w[1:] += self.eta * X.T.dot(errors)
self.w[0] += self.eta * errors.sum() #.sum() >> 破壊的メソッド
cost = -y.dot(np.log(output)) - (1-y).dot(np.log(1-output))
self.gosagun.append(cost)
return self
def net_input(self,X):
return np.dot(X,self.w[1:]) + self.w[0]
def activation(self,z):
return 1.0 / (1.0 + np.exp(-np.clip(z,-250,250))) #z >250 の時 z -250 に変換する
def predict(self,X):
return np.where(self.activation(self.net_input(X)) >= 0.5,1,-1)
# In[ ]:
X_train_01_subset = X_train[ (y_train == 0) | (y_train == 1) ]
y_train_01_subset = y_train[ (y_train == 0) | (y_train == 1) ]
lrgd = LogisticRegressionGD(eta = 0.05,n_iter = 1000,random_state = 1)
lrgd.fit(X_train_01_subset,y_train_01_subset)
plot_decision_regions(X_train_01_subset,y_train_01_subset,classifier = lrgd)
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C = 100.0,random_state = 1)
lr.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = lr,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
lr.predict_proba(X_test_std) #probability of 1st,2nd,3rd
# In[ ]:
lr.predict_proba(X_test_std[:10,:]).argmax(axis = 1)
# In[ ]:
lr.predict(X_test_std[:10,:])
# In[ ]:
weights ,params = [],[]
for c in np.arange(-5,5):
lr = LogisticRegression(C = 10.0 ** c,random_state = 1)
lr.fit(X_train_std,y_train)
weights.append(lr.coef_[1])
params.append(10.0 ** c)
weights = np.array(weights)
plt.plot(params,weights[:,0],label = "petal length")
plt.plot(params,weights[:,1],label = "petal width")
plt.ylabel("weight coefficient")
plt.xlabel("c")
plt.xscale("log")
plt.show()
# In[ ]:
from sklearn.svm import SVC
svm = SVC(kernel = "linear" , C=1.0 ,random_state = 1)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.linear_model import SGDClassifier
ppn = SGDClassifier(loss = "perceptron")
lr = SGDClassifier(loss = "log")
svm = SGDClassifier(loss = "hinge")
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1)
X_xor = np.random.randn(200,2)#random.randn(nand.arrayの形)
y_xor = np.logical_xor(X_xor[:,0] > 0 ,X_xor[:,1] > 0)#論理和でTrue or False を割り当てる
y_xor = np.where(y_xor,1,-1) #True >> 1 ,False >> 1の割り当て
plt.scatter(X_xor[y_xor == 1,0],X_xor[y_xor ==1,1] ,c = "b", marker = "x",label = "1")
plt.scatter(X_xor[y_xor == -1,0],X_xor[y_xor == -1,1],c = "r",marker = "s",label = "-1")
plt.xlim([-3,3])
plt.ylim([-3,3])
plt.legend(loc = "best")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1 ,gamma = 0.10,C = 10.0)
svm.fit(X_xor,y_xor)
plot_decision_regions(X_xor,y_xor,classifier = svm)
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf",random_state = 1,gamma = 0.2 ,C = 1.0)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,150))
plt.xlabel("petal lenght")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
svm = SVC(kernel = "rbf" ,random_state = 1,gamma = 100 ,C = 1.0)#rbf >> Radial Basis Function 動型基底カーネル(ガウスカーネル)
svm.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = svm,test_idx = range(105,149))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.show()
# In[ ]:
#decision Tree
#informatin gain 1.Gini impurity 2.entropy 3.classification error
import matplotlib.pyplot as plt
import numpy as np
def gini(p):
return (p)*(1 - (p)) + (1 - p) * (1 - (1 - p))
def entropy(p):#エントロピー
return - p * np.log2(p) - (1 - p) * np.log2((1 - p))
def error(p):#分類誤差
return 1 - np.max([p,1 - p])
x = np.arange(0.0 ,1.0 ,0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
functions = [ent,sc_ent,gini(x),err]
names = ["entropy","scaled_entropy","Gini Impurity","misclassification error"]
linestyles = ["-","-","--","-."]
colors = ["black","red","green","cyan"]
for i , lab ,ls, c in zip(functions,names,linestyles,colors):
line = ax.plot(x,i,label = lab ,linestyle = ls, color = c, lw =2)
ax.legend(loc = "upper center",bbox_to_anchor = (0.5,1.15),ncol = 5 ,fancybox = True, shadow = False)
ax.axhline(y = 0.5 ,linewidth = 1 ,color = "k", linestyle = "--")
ax.axhline(y = 1.0 ,linewidth = 1, color = "k",linestyle = "--")
plt.xlabel("p")
plt.ylabel("Inpurity index")
plt.show()
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion = "gini",max_depth = 4 ,random_state = 1)
tree.fit(X_train,y_train)
X_combined = np.vstack((X_train,X_test))
y_combined = np.hstack((y_train,y_test))
plot_decision_regions(X_combined,y_combined,classifier = tree,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz
dot_data = export_graphviz(tree,filled = True,rounded = True ,class_names = ["Setosa","Versicolor","Virginica"],feature_names = ["petal length","petal width"],out_file = None)
graph = graph_from_dot_data(dot_data)
graph.write_png("tree.png")
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion = "gini",n_estimators = 25,random_state = 1,n_jobs = 2)
#n_estimators >> 決定木の数
forest.fit(X_train,y_train)
plot_decision_regions(X_combined,y_combined,classifier = forest,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.tight_layout()
plt.show()
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
#minkowski >> Euclidean distance when p = 2
knn = KNeighborsClassifier(n_neighbors = 5,p = 2,metric = "minkowski") #instantiation was missing; n_neighbors/p are typical values, assumed here
knn.fit(X_train_std,y_train)
plot_decision_regions(X_combined_std,y_combined,classifier = knn,test_idx = range(105,150))
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc = "upper left")
plt.tight_layout()
plt.show()
| _iter
self. | identifier_name |
pipeline_cutouts.py | # Python 3.6. Written by Alex Clarke
# Breakup a large fits image into smaller ones, with overlap, and save to disk.
# Sourecfinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import multiprocessing
import itertools
import bdsf
import glob
import pickle
import os
import montage_wrapper as montage #assumed wrapper providing the mGetHdr/reproject calls used below
from matplotlib.pyplot import cm
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
from memory_profiler import profile
# list of functions
# load/save pickle objects
# save_cutout
# do_image_chopping
# make_image_cubes
# do_sourcefinding
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
#Loading/saving python data objects
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def update_header_from_cutout2D(hdu, cutout):
# update data
newdata = np.zeros((1,1,cutout.data.shape[0], cutout.data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = cutout.data
hdu.data = newdata
# update header cards returned from cutout2D wcs:
hdu.header.set('CRVAL1', cutout.wcs.wcs.crval[0])
hdu.header.set('CRVAL2', cutout.wcs.wcs.crval[1])
hdu.header.set('CRPIX1', cutout.wcs.wcs.crpix[0])
hdu.header.set('CRPIX2', cutout.wcs.wcs.crpix[1])
hdu.header.set('CDELT1', cutout.wcs.wcs.cdelt[0])
hdu.header.set('CDELT2', cutout.wcs.wcs.cdelt[1])
hdu.header.set('NAXIS1', cutout.wcs.pixel_shape[0])
hdu.header.set('NAXIS2', cutout.wcs.pixel_shape[1])
return hdu
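# Usage sketch (added): given an astropy Cutout2D made from the 2D image plane, e.g.
# hdu = update_header_from_cutout2D(hdu, cutout), the HDU gets the cutout pixels back with the
# degenerate Stokes/frequency axes re-added and the CRPIX/CRVAL/CDELT/NAXIS cards updated to
# the cutout WCS, so the result can be written out directly with hdu.writeto(...).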
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_primarybeam_correction(pbname, imagename):
print(' Preparing to apply the primary beam correction to {0}'.format(imagename))
hdu = fits.open(imagename)[0]
pb = fits.open(pbname)[0]
wcs = WCS(pb.header)
# cutout pb field of view to match image field of view
x_size = hdu.header['NAXIS1']
x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one
size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing
print(' Cutting out image FOV from primary beam image...')
cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)
# Update the FITS header with the cutout WCS by hand using my own function
# don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.
#pb.header.update(cutout.wcs.to_header()) #
pb = update_header_from_cutout2D(pb, cutout)
# write updated fits file to disk
pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
# regrid PB image cutout to match pixel scale of the image FOV
print(' Regridding image...')
# get header of image to match PB to
montage.mGetHdr(imagename, 'hdu_tmp.hdr')
# regrid pb image (270 pixels) to size of ref image (32k pixels)
montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)
os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk
# update montage output to float32
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')
newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = pb[0].data
pb[0].data = newdata # naxis will automatically update to 4 in the header
# fix nans introduced in primary beam by montage at edges and write to new file
print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions')
mask = np.isnan(pb[0].data)
pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])
pb.flush()
pb.close()
# apply primary beam correction
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]
hdu.data = hdu.data / pb.data
hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)
print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') )
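# Note (added): the correction simply divides the image by the regridded beam response, so
# pixels far from the pointing centre (beam response < 1) are scaled up and the noise rises
# towards the edges of the corrected map.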
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_image_chopping(input_image, split_into):
hdu = fits.open(input_image)[0]
wcs = WCS(hdu.header)
# currently hard coded to only accept square images
im_width = hdu.header['NAXIS1'] # get image width
print(' Input fits image dimensions: {0}'.format(im_width))
print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))
# get centre positions for each new fits image. assuming x=y. divide image width by split_into*2
positions = np.array(range(1,(split_into*2),2))*(im_width/(split_into*2))
# round to integer as in pixel coordinates. this approximation shouldn't matter since we include a buffer later
positions = positions.astype(int) # keep as original
positions_x = positions # make copy to append to in loop
positions_y = positions # make copy to append to in loop
# Make a 2D array of all centre positions. length = split_into**2.
for i in range(split_into-1):
# stack x coords repeating split_into times.
positions_x = np.hstack(( positions_x, positions )) # e.g. [ x1, x2, x3, x4, x1, x2, x3, x4, repeat split_into times]
# stack y coords, but np.roll shifts array indices by 1 to get different combinations
positions_y = np.hstack(( positions_y, np.roll(positions,i+1) )) # e.g. [ (y1, y2, y3, y4), (y2, y3, y4, y1), (y3, y4, y1, y2), ... ]
# create 2D array with coordinates: [ [x1,y1], [x2,y2], [x3,y3]... ]
position_coords_inpixels = np.array([positions_x,positions_y]).T
# create buffer of 5% so images overlap. This can be small... only needs to account for image edge cutting through
size = (im_width/split_into) * 1.05 # e.g. 4000 pixel image becomes 4200. sifting to remove duplicates later
# size array needs to be same shape as position_coords_inpixels
size_inpixels = np.array([[size,size]]*(split_into**2)).astype(int)
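# Worked example (added, assuming the 32768-pixel-wide input and split_into = 3 used below):
# positions = [5461, 16384, 27306], giving a 3x3 grid of cutout centres, and each cutout is
# int(32768/3 * 1.05) = 11468 pixels on a side before trimming at the image edges.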
# loop over images to be cut out
plt.figure() # plot original image and overlay cutout boundaries at the end.
data = hdu.data[0,0,:,:].copy() # display copy; 'data' was used below without being defined
data[data<1e-7]=1e-7 # min pixel brightness to display
data[data>1e-5]=1e-5 # max pixel brightness to display
plt.imshow(data, origin='lower')
colourlist=iter(cm.rainbow(np.linspace(0,1,split_into**2))) # each cutout a different colour
for i in range(split_into**2):
pri | # get cutout file names, must be in same order so they are matched correctly
images_560 = sorted(glob.glob('560*_cutout.fits'))
images_1400 = sorted(glob.glob('1400*_cutout.fits'))
# loop over image cutouts to make cube for each of them
for file560, file1400, i in zip(images_560, images_1400, range(len(images_560))):
print(' Making cube {0} of {1}'.format(i, len(images_560)-1))
hdu560 = fits.open(file560)[0]
hdu1400 = fits.open(file1400)[0]
# make cube from the input files along freq axis
cube = np.zeros((2,hdu560.data.shape[0],hdu560.data.shape[1]))
cube[0,:,:] = hdu560.data[0,0,:,:] # add 560 Mhz data
cube[1,:,:] = hdu1400.data[0,0,:,:] # add 1400 Mhz data
hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)
# update frequency info in the header. It puts 560MHz as ch0, but incorrectly assigns the interval to the next freq channel
hdu_new.header.set('CDELT3', 840000000) # 1400 MHz - 560 MHz = 840 MHz.
hdu_new.writeto('cube_cutout_'+str(i)+'.fits')
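# Note (added): each cube written above has shape (2, ny, nx) with channel 0 = 560 MHz and
# channel 1 = 1400 MHz, and CDELT3 is forced to 840 MHz so the header frequency step matches
# the gap between the two bands.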
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_sourcefinding(imagename, si=True):
# get beam info manually. SKA image seems to cause PyBDSF issues finding this info.
f = fits.open(imagename)
beam_maj = f[0].header['BMAJ']
beam_min = f[0].header['BMIN']
#beam_pa = f[0].header['BPA'] # not in SKA fits header, but we know it's circular
beam_pa = 0
f.close()
# using some sensible and thorough hyper-parameters. PSF_vary and adaptive_rms_box is more computationally intensive, but needed.
if si==True:
img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, \
collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])
if si==False:
img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)
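# Usage sketch (added, hypothetical file name): do_sourcefinding('560mhz1000hours_0_cutout.fits', si=False)
# runs PyBDSF with the fixed circular beam taken from the header; because output_opts/output_all
# are set above, the full set of catalogues and rms/residual maps is written alongside the input image.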
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
if __name__ == '__main__':
# Applying primary beam correction
do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')
do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')
# divide x and y axes by split_into. This gives split_into**2 output images.
# a 3 by 3 grid allows pybdsf to run efficiently (fails on the 4GB 32k x 32k pixel image) whilst avoiding cutting through the centre of the image
split_into = 3
# load image to get properties
input_image_560 = '560mhz1000hours.fits'
input_image_1400 = '1400mhz1000hours.fits'
# cut up images and save to disk
do_image_chopping(input_image_560, split_into)
do_image_chopping(input_image_1400, split_into)
# make image cube of the frequencies per cutout and save to disk, so pybdsf can use spectral index mode
# currently not working since don't need this part at the moment.
make_image_cubes()
# sourcefinding on individual frequency bands
imagenames = glob.glob('*_cutout.fits')
for image in imagenames:
do_sourcefinding(image)
# sourcefinding on cube to get spectral indcies (si=True)
# currently not working since need to chop images to same field of view before making cubes.
# use code from pipeline.py if needed?
#imagenames = sorted(glob.glob('cube_cutout_*.fits'))
#for image in imagenames:
# do_sourcefinding(image, si=True)
#
| nt(' Cutting out image {0} of {1}'.format(i+1, split_into**2))
cutout = Cutout2D(hdu.data[0,0,:,:], position=tuple(position_coords_inpixels[i]), size=tuple(size_inpixels[i]), mode='trim', wcs=wcs.celestial, copy=True)
cutout.plot_on_original(color=next(colourlist))
# Update the FITS header with the cutout WCS by hand using my own function
hdu = update_header_from_cutout2D(hdu, cutout)
hdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
print(' Saving cutout arrangement as {0}'.format(input_image+'_cutout_annotation.png'))
plt.savefig(input_image+'_cutout_annotation.png')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
# make image cube for pybdsf spectral index mode, looping over all cutouts
def make_image_cubes_for_cutouts():
| conditional_block |
pipeline_cutouts.py | # Python 3.6. Written by Alex Clarke
# Breakup a large fits image into smaller ones, with overlap, and save to disk.
# Sourecfinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import multiprocessing
import itertools
import bdsf
import glob
import pickle
import os
import montage_wrapper as montage #assumed wrapper providing the mGetHdr/reproject calls used below
from matplotlib.pyplot import cm
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
from memory_profiler import profile
# list of functions
# load/save pickle objects
# save_cutout
# do_image_chopping
# make_image_cubes
# do_sourcefinding
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
#Loading/saving python data objects
def sav | j, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def update_header_from_cutout2D(hdu, cutout):
# update data
newdata = np.zeros((1,1,cutout.data.shape[0], cutout.data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = cutout.data
hdu.data = newdata
# update header cards returned from cutout2D wcs:
hdu.header.set('CRVAL1', cutout.wcs.wcs.crval[0])
hdu.header.set('CRVAL2', cutout.wcs.wcs.crval[1])
hdu.header.set('CRPIX1', cutout.wcs.wcs.crpix[0])
hdu.header.set('CRPIX2', cutout.wcs.wcs.crpix[1])
hdu.header.set('CDELT1', cutout.wcs.wcs.cdelt[0])
hdu.header.set('CDELT2', cutout.wcs.wcs.cdelt[1])
hdu.header.set('NAXIS1', cutout.wcs.pixel_shape[0])
hdu.header.set('NAXIS2', cutout.wcs.pixel_shape[1])
return hdu
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_primarybeam_correction(pbname, imagename):
print(' Preparing to apply the primary beam correction to {0}'.format(imagename))
hdu = fits.open(imagename)[0]
pb = fits.open(pbname)[0]
wcs = WCS(pb.header)
# cutout pb field of view to match image field of view
x_size = hdu.header['NAXIS1']
x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one
size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing
print(' Cutting out image FOV from primary beam image...')
cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)
# Update the FITS header with the cutout WCS by hand using my own function
# don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.
#pb.header.update(cutout.wcs.to_header()) #
pb = update_header_from_cutout2D(pb, cutout)
# write updated fits file to disk
pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
# regrid PB image cutout to match pixel scale of the image FOV
print(' Regridding image...')
# get header of image to match PB to
montage.mGetHdr(imagename, 'hdu_tmp.hdr')
# regrid pb image (270 pixels) to size of ref image (32k pixels)
montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)
os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk
# update montage output to float32
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')
newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = pb[0].data
pb[0].data = newdata # naxis will automatically update to 4 in the header
# fix nans introduced in primary beam by montage at edges and write to new file
print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions')
mask = np.isnan(pb[0].data)
pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])
pb.flush()
pb.close()
# apply primary beam correction
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]
hdu.data = hdu.data / pb.data
hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)
print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') )
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
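# Illustrative sketch (not part of the pipeline): the NaN patching used above, shown on a tiny
# 1D array. np.interp over the flattened indices fills each NaN from its nearest valid neighbours.
# demo_fill_nans is a hypothetical helper used only for this example.
def demo_fill_nans():
    a = np.array([1.0, np.nan, np.nan, 4.0], dtype=np.float32)
    mask = np.isnan(a)
    a[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), a[~mask])
    return a # -> [1., 2., 3., 4.]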
def do_image_chopping(input_image, split_into):
hdu = fits.open(input_image)[0]
wcs = WCS(hdu.header)
# currently hard coded to only accept square images
im_width = hdu.header['NAXIS1'] # get image width
print(' Input fits image dimensions: {0}'.format(im_width))
print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))
# get centre positions for each new fits image. assuming x=y. divide image width by split_into*2
positions = np.array(range(1,(split_into*2),2))*(im_width/(split_into*2))
# round to integer as in pixel coordinates. this approximation shouldn't matter since we include a buffer later
positions = positions.astype(int) # keep as original
positions_x = positions # make copy to append to in loop
positions_y = positions # make copy to append to in loop
# Make a 2D array of all centre positions. length = split_into**2.
for i in range(split_into-1):
# stack x coords repeating split_into times.
positions_x = np.hstack(( positions_x, positions )) # e.g. [ x1, x2, x3, x4, x1, x2, x3, x4, repeat split_into times]
# stack y coords, but np.roll shifts array indices by 1 to get different combinations
positions_y = np.hstack(( positions_y, np.roll(positions,i+1) )) # e.g. [ (y1, y2, y3, y4), (y2, y3, y4, y1), (y3, y4, y1, y2), ... ]
# create 2D array with coordinates: [ [x1,y1], [x2,y2], [x3,y3]... ]
position_coords_inpixels = np.array([positions_x,positions_y]).T
# create buffer of 5% so images overlap. This can be small... only needs to account for image edge cutting through
size = (im_width/split_into) * 1.05 # e.g. 4000 pixel image becomes 4200. sifting to remove duplicates later
# size array needs to be same shape as position_coords_inpixels
size_inpixels = np.array([[size,size]]*(split_into**2)).astype(int)
# loop over images to be cut out
    plt.figure() # plot original image and overlay cutout boundaries at the end.
    image_data = hdu.data[0,0,:,:] # keep a reference to the full image; hdu.data is replaced per cutout below
    data = image_data.copy() # clipped copy used only for display
    data[data<1e-7]=1e-7 # min pixel brightness to display
    data[data>1e-5]=1e-5 # max pixel brightness to display
    plt.imshow(data, origin='lower')
colourlist=iter(cm.rainbow(np.linspace(0,1,split_into**2))) # each cutout a different colour
for i in range(split_into**2):
print(' Cutting out image {0} of {1}'.format(i+1, split_into**2))
        cutout = Cutout2D(image_data, position=tuple(position_coords_inpixels[i]), size=tuple(size_inpixels[i]), mode='trim', wcs=wcs.celestial, copy=True)
cutout.plot_on_original(color=next(colourlist))
# Update the FITS header with the cutout WCS by hand using my own function
hdu = update_header_from_cutout2D(hdu, cutout)
hdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
    print(' Saving cutout arrangement as {0}'.format(input_image+'_cutout_annotation.png'))
plt.savefig(input_image+'_cutout_annotation.png')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
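# Illustrative sketch (not part of the pipeline): the cutout-centre grid built in do_image_chopping,
# on a toy 9-pixel-wide image split 3x3. Centres land at 1.5, 4.5, 7.5 -> 1, 4, 7 after the int cast,
# and the hstack/np.roll loop pairs them into all 9 (x, y) combinations.
# demo_cutout_centres is a hypothetical helper used only for this example.
def demo_cutout_centres(im_width=9, split_into=3):
    positions = (np.array(range(1, split_into*2, 2)) * (im_width / (split_into*2))).astype(int)
    positions_x, positions_y = positions, positions
    for i in range(split_into-1):
        positions_x = np.hstack((positions_x, positions))
        positions_y = np.hstack((positions_y, np.roll(positions, i+1)))
    return np.array([positions_x, positions_y]).T # shape (split_into**2, 2)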
# make image cube for pybdsf spectral index mode, looping over all cutouts
def make_image_cubes_for_cutouts():
# get cutout file names, must be in same order so they are matched correctly
images_560 = sorted(glob.glob('560*_cutout.fits'))
images_1400 = sorted(glob.glob('1400*_cutout.fits'))
# loop over image cutouts to make cube for each of them
for file560, file1400, i in zip(images_560, images_1400, range(len(images_560))):
print(' Making cube {0} of {1}'.format(i, len(images_560)-1))
hdu560 = fits.open(file560)[0]
hdu1400 = fits.open(file1400)[0]
# make cube from the input files along freq axis
        cube = np.zeros((2,hdu560.data.shape[2],hdu560.data.shape[3])) # cutout data is stored as (1,1,ny,nx), so use the image axes
cube[0,:,:] = hdu560.data[0,0,:,:] # add 560 Mhz data
cube[1,:,:] = hdu1400.data[0,0,:,:] # add 1400 Mhz data
hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)
# update frequency info in the header. It puts 560MHz as ch0, but incorrectly assigns the interval to the next freq channel
hdu_new.header.set('CDELT3', 840000000) # 1400 MHz - 560 MHz = 840 MHz.
hdu_new.writeto('cube_cutout_'+str(i)+'.fits')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
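# Illustrative sketch (not part of the pipeline): the frequency axis a reader of the cube header
# would reconstruct, assuming CRVAL3 carries the 560 MHz reference frequency (an assumption,
# not checked here). demo_cube_frequencies is a hypothetical helper used only for this example.
def demo_cube_frequencies(crval3=560e6, cdelt3=840e6, nchan=2):
    return [crval3 + i*cdelt3 for i in range(nchan)] # -> [560 MHz, 1400 MHz]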
def do_sourcefinding(imagename, si=True):
# get beam info manually. SKA image seems to cause PyBDSF issues finding this info.
f = fits.open(imagename)
beam_maj = f[0].header['BMAJ']
beam_min = f[0].header['BMIN']
#beam_pa = f[0].header['BPA'] # not in SKA fits header, but we know it's circular
beam_pa = 0
f.close()
# using some sensible and thorough hyper-parameters. PSF_vary and adaptive_rms_box is more computationally intensive, but needed.
if si==True:
img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, \
collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])
if si==False:
img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
if __name__ == '__main__':
# Applying primary beam correction
do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')
do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')
# divide x and y axes by split_into. This gives split_into**2 output images.
# a 3 by 3 grid allows pybdsf to run efficiently (fails on the 4GB 32k x 32k pixel image) whilst avoiding cutting through the centre of the image
split_into = 3
# load image to get properties
input_image_560 = '560mhz1000hours.fits'
input_image_1400 = '1400mhz1000hours.fits'
# cut up images and save to disk
do_image_chopping(input_image_560, split_into)
do_image_chopping(input_image_1400, split_into)
# make image cube of the frequencies per cutout and save to disk, so pybdsf can use spectral index mode
# currently not working since don't need this part at the moment.
    make_image_cubes_for_cutouts()
# sourcefinding on individual frequency bands
imagenames = glob.glob('*_cutout.fits')
for image in imagenames:
do_sourcefinding(image)
# sourcefinding on cube to get spectral indcies (si=True)
# currently not working since need to chop images to same field of view before making cubes.
# use code from pipeline.py if needed?
#imagenames = sorted(glob.glob('cube_cutout_*.fits'))
#for image in imagenames:
# do_sourcefinding(image, si=True)
#
| e_obj(ob | identifier_name |
pipeline_cutouts.py | # Python 3.6. Written by Alex Clarke
# Break up a large fits image into smaller ones, with overlap, and save to disk.
# Sourcefinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import multiprocessing
import itertools
import bdsf
import glob
import pickle
import montage_wrapper as montage # assumed source of the mGetHdr/reproject calls used below
from matplotlib.pyplot import cm
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from memory_profiler import profile
# list of functions
# load/save pickle objects
# save_cutout
# do_image_chopping
# make_image_cubes
# do_sourcefinding
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
#Loading/saving python data objects
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def update_header_from_cutout2D(hdu, cutout):
# update data
newdata = np.zeros((1,1,cutout.data.shape[0], cutout.data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = cutout.data
hdu.data = newdata
# update header cards returned from cutout2D wcs:
hdu.header.set('CRVAL1', cutout.wcs.wcs.crval[0])
hdu.header.set('CRVAL2', cutout.wcs.wcs.crval[1])
hdu.header.set('CRPIX1', cutout.wcs.wcs.crpix[0])
hdu.header.set('CRPIX2', cutout.wcs.wcs.crpix[1])
hdu.header.set('CDELT1', cutout.wcs.wcs.cdelt[0])
hdu.header.set('CDELT2', cutout.wcs.wcs.cdelt[1])
hdu.header.set('NAXIS1', cutout.wcs.pixel_shape[0])
hdu.header.set('NAXIS2', cutout.wcs.pixel_shape[1])
return hdu
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_primarybeam_correction(pbname, imagename):
print(' Preparing to apply the primary beam correction to {0}'.format(imagename))
hdu = fits.open(imagename)[0]
pb = fits.open(pbname)[0]
wcs = WCS(pb.header)
# cutout pb field of view to match image field of view
x_size = hdu.header['NAXIS1']
x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one
size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing
print(' Cutting out image FOV from primary beam image...')
cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)
# Update the FITS header with the cutout WCS by hand using my own function
# don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.
#pb.header.update(cutout.wcs.to_header()) #
pb = update_header_from_cutout2D(pb, cutout)
# write updated fits file to disk
pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
# regrid PB image cutout to match pixel scale of the image FOV
print(' Regridding image...')
# get header of image to match PB to
montage.mGetHdr(imagename, 'hdu_tmp.hdr')
# regrid pb image (270 pixels) to size of ref image (32k pixels)
montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)
os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk
# update montage output to float32
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')
newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = pb[0].data
pb[0].data = newdata # naxis will automatically update to 4 in the header
# fix nans introduced in primary beam by montage at edges and write to new file
print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions')
mask = np.isnan(pb[0].data)
pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])
pb.flush()
pb.close()
# apply primary beam correction
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]
hdu.data = hdu.data / pb.data
hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)
print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') )
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_image_chopping(input_image, split_into):
hdu |
def do_sourcefinding(imagename, si=True):
# get beam info manually. SKA image seems to cause PyBDSF issues finding this info.
f = fits.open(imagename)
beam_maj = f[0].header['BMAJ']
beam_min = f[0].header['BMIN']
#beam_pa = f[0].header['BPA'] # not in SKA fits header, but we know it's circular
beam_pa = 0
f.close()
# using some sensible and thorough hyper-parameters. PSF_vary and adaptive_rms_box is more computationally intensive, but needed.
if si==True:
img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, \
collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])
if si==False:
img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
if __name__ == '__main__':
# Applying primary beam correction
do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')
do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')
# divide x and y axes by split_into. This gives split_into**2 output images.
# a 3 by 3 grid allows pybdsf to run efficiently (fails on the 4GB 32k x 32k pixel image) whilst avoiding cutting through the centre of the image
split_into = 3
# load image to get properties
input_image_560 = '560mhz1000hours.fits'
input_image_1400 = '1400mhz1000hours.fits'
# cut up images and save to disk
do_image_chopping(input_image_560, split_into)
do_image_chopping(input_image_1400, split_into)
# make image cube of the frequencies per cutout and save to disk, so pybdsf can use spectral index mode
# currently not working since don't need this part at the moment.
    make_image_cubes_for_cutouts()
# sourcefinding on individual frequency bands
imagenames = glob.glob('*_cutout.fits')
for image in imagenames:
do_sourcefinding(image)
# sourcefinding on cube to get spectral indcies (si=True)
# currently not working since need to chop images to same field of view before making cubes.
# use code from pipeline.py if needed?
#imagenames = sorted(glob.glob('cube_cutout_*.fits'))
#for image in imagenames:
# do_sourcefinding(image, si=True)
#
| = fits.open(input_image)[0]
wcs = WCS(hdu.header)
# currently hard coded to only accept square images
im_width = hdu.header['NAXIS1'] # get image width
print(' Input fits image dimensions: {0}'.format(im_width))
print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))
# get centre positions for each new fits image. assuming x=y. divide image width by split_into*2
positions = np.array(range(1,(split_into*2),2))*(im_width/(split_into*2))
# round to integer as in pixel coordinates. this approximation shouldn't matter since we include a buffer later
positions = positions.astype(int) # keep as original
positions_x = positions # make copy to append to in loop
positions_y = positions # make copy to append to in loop
# Make a 2D array of all centre positions. length = split_into**2.
for i in range(split_into-1):
# stack x coords repeating split_into times.
positions_x = np.hstack(( positions_x, positions )) # e.g. [ x1, x2, x3, x4, x1, x2, x3, x4, repeat split_into times]
# stack y coords, but np.roll shifts array indices by 1 to get different combinations
positions_y = np.hstack(( positions_y, np.roll(positions,i+1) )) # e.g. [ (y1, y2, y3, y4), (y2, y3, y4, y1), (y3, y4, y1, y2), ... ]
# create 2D array with coordinates: [ [x1,y1], [x2,y2], [x3,y3]... ]
position_coords_inpixels = np.array([positions_x,positions_y]).T
# create buffer of 5% so images overlap. This can be small... only needs to account for image edge cutting through
size = (im_width/split_into) * 1.05 # e.g. 4000 pixel image becomes 4200. sifting to remove duplicates later
# size array needs to be same shape as position_coords_inpixels
size_inpixels = np.array([[size,size]]*(split_into**2)).astype(int)
# loop over images to be cut out
    plt.figure() # plot original image and overlay cutout boundaries at the end.
    image_data = hdu.data[0,0,:,:] # keep a reference to the full image; hdu.data is replaced per cutout below
    data = image_data.copy() # clipped copy used only for display
    data[data<1e-7]=1e-7 # min pixel brightness to display
    data[data>1e-5]=1e-5 # max pixel brightness to display
    plt.imshow(data, origin='lower')
colourlist=iter(cm.rainbow(np.linspace(0,1,split_into**2))) # each cutout a different colour
for i in range(split_into**2):
print(' Cutting out image {0} of {1}'.format(i+1, split_into**2))
        cutout = Cutout2D(image_data, position=tuple(position_coords_inpixels[i]), size=tuple(size_inpixels[i]), mode='trim', wcs=wcs.celestial, copy=True)
cutout.plot_on_original(color=next(colourlist))
# Update the FITS header with the cutout WCS by hand using my own function
hdu = update_header_from_cutout2D(hdu, cutout)
hdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
    print(' Saving cutout arrangement as {0}'.format(input_image+'_cutout_annotation.png'))
plt.savefig(input_image+'_cutout_annotation.png')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
# make image cube for pybdsf spectral index mode, looping over all cutouts
def make_image_cubes_for_cutouts():
# get cutout file names, must be in same order so they are matched correctly
images_560 = sorted(glob.glob('560*_cutout.fits'))
images_1400 = sorted(glob.glob('1400*_cutout.fits'))
# loop over image cutouts to make cube for each of them
for file560, file1400, i in zip(images_560, images_1400, range(len(images_560))):
print(' Making cube {0} of {1}'.format(i, len(images_560)-1))
hdu560 = fits.open(file560)[0]
hdu1400 = fits.open(file1400)[0]
# make cube from the input files along freq axis
        cube = np.zeros((2,hdu560.data.shape[2],hdu560.data.shape[3])) # cutout data is stored as (1,1,ny,nx), so use the image axes
cube[0,:,:] = hdu560.data[0,0,:,:] # add 560 Mhz data
cube[1,:,:] = hdu1400.data[0,0,:,:] # add 1400 Mhz data
hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)
# update frequency info in the header. It puts 560MHz as ch0, but incorrectly assigns the interval to the next freq channel
hdu_new.header.set('CDELT3', 840000000) # 1400 MHz - 560 MHz = 840 MHz.
hdu_new.writeto('cube_cutout_'+str(i)+'.fits')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
| identifier_body |
pipeline_cutouts.py | # Python 3.6. Written by Alex Clarke
# Break up a large fits image into smaller ones, with overlap, and save to disk.
# Sourcefinding is run on each cutout, and catalogues are sifted to remove duplicates from the overlap.
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import multiprocessing
import itertools
import bdsf
import glob
import pickle
import montage_wrapper as montage # assumed source of the mGetHdr/reproject calls used below
from matplotlib.pyplot import cm
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from memory_profiler import profile
# list of functions
# load/save pickle objects
# save_cutout
# do_image_chopping
# make_image_cubes
# do_sourcefinding
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
#Loading/saving python data objects
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def update_header_from_cutout2D(hdu, cutout):
# update data
newdata = np.zeros((1,1,cutout.data.shape[0], cutout.data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = cutout.data
hdu.data = newdata
# update header cards returned from cutout2D wcs:
hdu.header.set('CRVAL1', cutout.wcs.wcs.crval[0])
hdu.header.set('CRVAL2', cutout.wcs.wcs.crval[1])
hdu.header.set('CRPIX1', cutout.wcs.wcs.crpix[0])
hdu.header.set('CRPIX2', cutout.wcs.wcs.crpix[1])
hdu.header.set('CDELT1', cutout.wcs.wcs.cdelt[0])
hdu.header.set('CDELT2', cutout.wcs.wcs.cdelt[1])
hdu.header.set('NAXIS1', cutout.wcs.pixel_shape[0])
hdu.header.set('NAXIS2', cutout.wcs.pixel_shape[1])
return hdu
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_primarybeam_correction(pbname, imagename):
print(' Preparing to apply the primary beam correction to {0}'.format(imagename))
hdu = fits.open(imagename)[0]
pb = fits.open(pbname)[0]
wcs = WCS(pb.header)
# cutout pb field of view to match image field of view
x_size = hdu.header['NAXIS1']
x_pixel_deg = hdu.header['CDELT2'] # CDELT1 is negative, so take positive one
size = (x_size*x_pixel_deg*u.degree, x_size*x_pixel_deg*u.degree) # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
position = SkyCoord(pb.header['CRVAL1']*u.degree, pb.header['CRVAL2']*u.degree) # RA and DEC of beam PB pointing
print(' Cutting out image FOV from primary beam image...')
cutout = Cutout2D(pb.data[0,0,:,:], position=position, size=size, mode='trim', wcs=wcs.celestial, copy=True)
# Update the FITS header with the cutout WCS by hand using my own function
# don't use cutout.wcs.to_header() because it doesn't account for the freq and stokes axes. is only compatible with 2D fits images.
#pb.header.update(cutout.wcs.to_header()) #
pb = update_header_from_cutout2D(pb, cutout)
# write updated fits file to disk
pb.writeto(pbname[:-5]+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
# regrid PB image cutout to match pixel scale of the image FOV
print(' Regridding image...')
# get header of image to match PB to
montage.mGetHdr(imagename, 'hdu_tmp.hdr')
# regrid pb image (270 pixels) to size of ref image (32k pixels)
montage.reproject(in_images=pbname[:-5]+'_cutout.fits', out_images=pbname[:-5]+'_cutout_regrid.fits', header='hdu_tmp.hdr', exact_size=True)
os.remove('hdu_tmp.hdr') # get rid of header text file saved to disk
# update montage output to float32
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits', mode='update')
newdata = np.zeros((1,1,pb[0].data.shape[0], pb[0].data.shape[1]), dtype=np.float32)
newdata[0,0,:,:] = pb[0].data
pb[0].data = newdata # naxis will automatically update to 4 in the header
# fix nans introduced in primary beam by montage at edges and write to new file
print(' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions')
mask = np.isnan(pb[0].data)
pb[0].data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), pb[0].data[~mask])
pb.flush()
pb.close()
# apply primary beam correction
pb = fits.open(pbname[:-5]+'_cutout_regrid.fits')[0]
hdu.data = hdu.data / pb.data
hdu.writeto(imagename[:-5]+'_PBCOR.fits', overwrite=True)
print(' Primary beam correction applied to {0}'.format(imagename[:-5]+'_PBCOR.fits') ) |
def do_image_chopping(input_image, split_into):
hdu = fits.open(input_image)[0]
wcs = WCS(hdu.header)
# currently hard coded to only accept square images
im_width = hdu.header['NAXIS1'] # get image width
print(' Input fits image dimensions: {0}'.format(im_width))
print(' Cutting into {0} images of dimensions {1}'.format(split_into**2, im_width/split_into))
# get centre positions for each new fits image. assuming x=y. divide image width by split_into*2
positions = np.array(range(1,(split_into*2),2))*(im_width/(split_into*2))
# round to integer as in pixel coordinates. this approximation shouldn't matter since we include a buffer later
positions = positions.astype(int) # keep as original
positions_x = positions # make copy to append to in loop
positions_y = positions # make copy to append to in loop
# Make a 2D array of all centre positions. length = split_into**2.
for i in range(split_into-1):
# stack x coords repeating split_into times.
positions_x = np.hstack(( positions_x, positions )) # e.g. [ x1, x2, x3, x4, x1, x2, x3, x4, repeat split_into times]
# stack y coords, but np.roll shifts array indices by 1 to get different combinations
positions_y = np.hstack(( positions_y, np.roll(positions,i+1) )) # e.g. [ (y1, y2, y3, y4), (y2, y3, y4, y1), (y3, y4, y1, y2), ... ]
# create 2D array with coordinates: [ [x1,y1], [x2,y2], [x3,y3]... ]
position_coords_inpixels = np.array([positions_x,positions_y]).T
# create buffer of 5% so images overlap. This can be small... only needs to account for image edge cutting through
size = (im_width/split_into) * 1.05 # e.g. 4000 pixel image becomes 4200. sifting to remove duplicates later
# size array needs to be same shape as position_coords_inpixels
size_inpixels = np.array([[size,size]]*(split_into**2)).astype(int)
# loop over images to be cut out
    plt.figure() # plot original image and overlay cutout boundaries at the end.
    image_data = hdu.data[0,0,:,:] # keep a reference to the full image; hdu.data is replaced per cutout below
    data = image_data.copy() # clipped copy used only for display
    data[data<1e-7]=1e-7 # min pixel brightness to display
    data[data>1e-5]=1e-5 # max pixel brightness to display
    plt.imshow(data, origin='lower')
colourlist=iter(cm.rainbow(np.linspace(0,1,split_into**2))) # each cutout a different colour
for i in range(split_into**2):
print(' Cutting out image {0} of {1}'.format(i+1, split_into**2))
        cutout = Cutout2D(image_data, position=tuple(position_coords_inpixels[i]), size=tuple(size_inpixels[i]), mode='trim', wcs=wcs.celestial, copy=True)
cutout.plot_on_original(color=next(colourlist))
# Update the FITS header with the cutout WCS by hand using my own function
hdu = update_header_from_cutout2D(hdu, cutout)
hdu.writeto(input_image[:-5]+'_'+str(i)+'_cutout.fits', overwrite=True) # Write the cutout to a new FITS file
    print(' Saving cutout arrangement as {0}'.format(input_image+'_cutout_annotation.png'))
plt.savefig(input_image+'_cutout_annotation.png')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
# make image cube for pybdsf spectral index mode, looping over all cutouts
def make_image_cubes_for_cutouts():
# get cutout file names, must be in same order so they are matched correctly
images_560 = sorted(glob.glob('560*_cutout.fits'))
images_1400 = sorted(glob.glob('1400*_cutout.fits'))
# loop over image cutouts to make cube for each of them
for file560, file1400, i in zip(images_560, images_1400, range(len(images_560))):
print(' Making cube {0} of {1}'.format(i, len(images_560)-1))
hdu560 = fits.open(file560)[0]
hdu1400 = fits.open(file1400)[0]
# make cube from the input files along freq axis
        cube = np.zeros((2,hdu560.data.shape[2],hdu560.data.shape[3])) # cutout data is stored as (1,1,ny,nx), so use the image axes
cube[0,:,:] = hdu560.data[0,0,:,:] # add 560 Mhz data
cube[1,:,:] = hdu1400.data[0,0,:,:] # add 1400 Mhz data
hdu_new = fits.PrimaryHDU(data=cube, header=hdu560.header)
# update frequency info in the header. It puts 560MHz as ch0, but incorrectly assigns the interval to the next freq channel
hdu_new.header.set('CDELT3', 840000000) # 1400 MHz - 560 MHz = 840 MHz.
hdu_new.writeto('cube_cutout_'+str(i)+'.fits')
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
def do_sourcefinding(imagename, si=True):
# get beam info manually. SKA image seems to cause PyBDSF issues finding this info.
f = fits.open(imagename)
beam_maj = f[0].header['BMAJ']
beam_min = f[0].header['BMIN']
#beam_pa = f[0].header['BPA'] # not in SKA fits header, but we know it's circular
beam_pa = 0
f.close()
# using some sensible and thorough hyper-parameters. PSF_vary and adaptive_rms_box is more computationally intensive, but needed.
if si==True:
img = bdsf.process_image(imagename, adaptive_rms_box=False, spectralindex_do=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, \
collapse_mode='average', collapse_wt='unity', frequency_sp=[560e6, 1400e6])
if si==False:
img = bdsf.process_image(imagename, adaptive_rms_box=True, advanced_opts=True,\
atrous_do=False, output_opts=True, output_all=True, opdir_overwrite='append', beam=(beam_maj, beam_min, beam_pa),\
blank_limit=None, thresh='hard', thresh_isl=4.0, thresh_pix=5.0, psf_snrtop=0.30)
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------
if __name__ == '__main__':
# Applying primary beam correction
do_primarybeam_correction('560mhz_primarybeam.fits', '560mhz1000hours.fits')
do_primarybeam_correction('1400mhz_primarybeam.fits', '1400mhz1000hours.fits')
# divide x and y axes by split_into. This gives split_into**2 output images.
# a 3 by 3 grid allows pybdsf to run efficiently (fails on the 4GB 32k x 32k pixel image) whilst avoiding cutting through the centre of the image
split_into = 3
# load image to get properties
input_image_560 = '560mhz1000hours.fits'
input_image_1400 = '1400mhz1000hours.fits'
# cut up images and save to disk
do_image_chopping(input_image_560, split_into)
do_image_chopping(input_image_1400, split_into)
# make image cube of the frequencies per cutout and save to disk, so pybdsf can use spectral index mode
# currently not working since don't need this part at the moment.
    make_image_cubes_for_cutouts()
# sourcefinding on individual frequency bands
imagenames = glob.glob('*_cutout.fits')
for image in imagenames:
do_sourcefinding(image)
# sourcefinding on cube to get spectral indcies (si=True)
# currently not working since need to chop images to same field of view before making cubes.
# use code from pipeline.py if needed?
#imagenames = sorted(glob.glob('cube_cutout_*.fits'))
#for image in imagenames:
# do_sourcefinding(image, si=True)
# |
# ------ ------ ------ ------ ------ ------ ------ ------ ------ ------ | random_line_split |
web.go | package rreader
import (
"fmt"
//"github.com/stretchrcom/goweb/goweb"
"encoding/json"
"github.com/ziutek/mymysql/mysql"
"net/http"
"strconv"
"strings"
"time"
)
type HomeView struct {
Feeds []FeedViewItem
}
type FeedViewItem struct {
Id int
Title string
Link string
Description string
LastUpdate int64
Group string
Unread int
Active int
}
type ChannelView struct {
Items []ChannelViewItem
}
type ChannelViewItem struct {
Id int
Title string
Published string
Updated int64
Link string
Author string
FeedTitle string
FeedId int
IsRead int
Labels string
}
type ErrorResponse struct {
ErrStr string
}
func respondError( w http.ResponseWriter, errStr string ){
w.WriteHeader( 500 ) //Internal server error
fmt.Fprint(w, errStr )
}
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.FormValue("update") != "" {
UpdateFeeds(60)
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
var userId uint32 = 1
rows, _, err := conn.Query("SELECT `id`,`title`,`link`,`description`,`last_update`,`user_id`,`group`,`unread`,`active` FROM home_view WHERE user_id=%d", userId)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]FeedViewItem, len(rows))
for id, row := range rows {
feeds[id] = FeedViewItem{row.Int(0), row.Str(1), row.Str(2), row.Str(3), row.Int64(4), row.Str(6), row.Int(7), row.Int(8)}
}
b, err := json.Marshal(HomeView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(HomeView{feeds})
}
func getFeedsQueryFromForm(r *http.Request) (string, error) {
group := r.FormValue("group")
feed := r.FormValue("feed")
starred := r.FormValue("starred")
if group != "" {
return fmt.Sprintf("`group`='%s'", gConn.Escape(group)), nil
} else if feed != "" {
feedId, err := strconv.ParseInt(feed, 10, 64)
if err != nil {
return "", err
}
return fmt.Sprintf("`feedid`=%d", feedId), nil
} else if starred != "" {
return fmt.Sprintf("`label`='star'"), nil
}
return "", nil
}
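// Illustrative sketch (not part of the handlers): how the WHERE fragment returned above is
// combined with the per-user clause in serveFeedItems. buildEntrySearch is a hypothetical
// helper used only for this example.
func buildEntrySearch(userId uint32, extra string) string {
	q := fmt.Sprintf("userid=%d", userId)
	if extra != "" {
		q = fmt.Sprintf("%s AND %s", q, extra) // e.g. "userid=1 AND `group`='news'"
	}
	return q
}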
func serveFeedItems(w http.ResponseWriter, r *http.Request) {
var userId uint32 = 1
searchQuery := fmt.Sprintf("userid=%d", userId)
start, err := strconv.ParseInt(r.FormValue("start"), 10, 64)
if err != nil {
start = 0
}
extraSearch, err := getFeedsQueryFromForm(r)
if err != nil {
respondError( w, err.Error() )
return
}
if extraSearch != "" {
searchQuery = fmt.Sprintf("%s AND %s", searchQuery, extraSearch )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `id`,`title`,`published`,`updated`,`link`,`author`,`feedtitle`,`feedid`,`is_read`,`label` FROM `entrylist` WHERE %s ORDER BY `updated` DESC LIMIT %d,100", searchQuery, start)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]ChannelViewItem, len(rows))
for id, row := range rows {
feeds[id] = ChannelViewItem{row.Int(0), row.Str(1), row.Str(2), row.Int64(3), row.Str(4), row.Str(5), row.Str(6), row.Int(7), row.Int(8), row.Str(9) }
}
b, err := json.Marshal(ChannelView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(ChannelView{feeds})
}
type FeedEntryModel struct {
Content string
}
func serveGetItem(w http.ResponseWriter, r *http.Request) |
func serveUpdateItemLabels(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
entryId, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
labels := conn.Escape( r.FormValue("labels") )
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
getQuery := func() string {
if labels == "" {
return fmt.Sprintf("DELETE FROM `user_entry_label` WHERE `user_id`=%d AND `feed_entry_id`=%d", userId, entryId )
}
return fmt.Sprintf("INSERT IGNORE INTO `user_entry_label`(`user_id`,`feed_entry_id`,`label`) VALUES (%d,%d,'%s')", userId, entryId, labels )
}
_, _, err = conn.Query(getQuery())
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func updatePriorities(transaction mysql.Transaction, userId int, newPriorities map[string]GroupInfo) error {
for feedIdStr, priority := range newPriorities {
feedId, err := strconv.ParseInt(feedIdStr, 10, 64)
if err != nil {
return err
}
_, _, err = transaction.Query("UPDATE `user_feed` SET `priority`=%d, `group`='%s' WHERE feed_id=%d AND user_id=%d",
priority.Priority,
gConn.Escape(priority.Group),
feedId,
userId)
if err != nil {
return err
}
}
return nil
}
type GroupInfo struct {
Priority int
Group string
}
func serveUpdateOrder(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
userId := 1
decoder := json.NewDecoder(r.Body)
var newPriorities map[string]GroupInfo = make(map[string]GroupInfo)
err := decoder.Decode(&newPriorities)
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
transaction, err := conn.Begin()
if err != nil {
respondError( w, err.Error() )
return
}
err = updatePriorities(transaction, userId, newPriorities)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
}
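// Illustrative sketch (not part of the handlers): the JSON body /updateOrder expects is a map
// from feed id (as a string) to its new priority and group, e.g.
//   {"12": {"Priority": 0, "Group": "news"}, "15": {"Priority": 1, "Group": "news"}}
// examplePriorities is a hypothetical helper used only for this example.
func examplePriorities() map[string]GroupInfo {
	return map[string]GroupInfo{
		"12": {Priority: 0, Group: "news"},
		"15": {Priority: 1, Group: "news"},
	}
}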
func serveMarkRead(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
decoder := json.NewDecoder(r.Body)
var readFeedsIds []int
err := decoder.Decode(&readFeedsIds)
if err != nil {
respondError( w, err.Error() )
return
}
if len(readFeedsIds) == 0 {
respondError( w, "The array of feeds to mark as read is empty." )
return
}
readFeeds := []string {}
for _, id := range(readFeedsIds) {
readFeeds = append(readFeeds, strconv.Itoa( id ) )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
_, _, err = conn.Query("UPDATE `user_feed` SET `newest_read`=NOW(), `unread_items`=0 WHERE user_id=%d AND feed_id IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
_, _, err = conn.Query("DELETE `user_feed_readitems` FROM `user_feed_readitems` INNER JOIN `feed_entry` ON `user_feed_readitems`.`entry_id` = `feed_entry`.`id` WHERE user_feed_readitems.`user_id`=%d AND `feed_entry`.`feed_id` IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func serveAddFeed(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
conn := GetConnection().Clone()
var request struct {
Uri string
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&request)
if err != nil {
respondError( w, err.Error() )
return
}
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
	transaction, err := conn.Begin() // use the cloned connection opened above, as in serveUpdateOrder
if err != nil {
respondError( w, err.Error() )
return
}
	rows, _, err := transaction.Query("SELECT `id` FROM `feed` WHERE `feedURL`='%s'", conn.Escape(request.Uri))
var feedId uint64 = 0
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
if len(rows) > 0 {
feedId = rows[0].Uint64(0)
} else {
feedId, err = AddFeed(transaction, request.Uri)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
}
_, _, err = transaction.Query("INSERT INTO `user_feed`(`user_id`,`feed_id`,`newest_read`,`unread_items`,`active`) VALUES (%d, %d, %d,0,0)",
userId,
feedId,
time.Now().Unix() )
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
fmt.Fprint(w, "{\"success\":1}")
}
func StartWebserver() {
http.HandleFunc("/home", serveHome )
http.HandleFunc("/feed", serveFeedItems )
http.HandleFunc("/item", serveGetItem )
http.HandleFunc("/updateLabels", serveUpdateItemLabels )
http.HandleFunc("/updateOrder", serveUpdateOrder )
http.HandleFunc("/markRead", serveMarkRead )
http.HandleFunc("/add", serveAddFeed )
http.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("web"))) )
http.ListenAndServe(":8080", nil)
}
| {
id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `content` FROM `feed_entry` WHERE id=%d", id)
if err != nil {
respondError( w, err.Error() )
return
}
if len(rows) == 0 {
fmt.Fprint(w, "{\"error\": \"Could not find entry\"}")
return
}
row := rows[0]
b, err := json.Marshal(FeedEntryModel{row.Str(0)})
if err != nil {
respondError( w, err.Error() )
return
}
	//TODO: The update and replace statements should be in a transaction
	//TODO: Only insert into user_feed_readitems when the item is newer than user_feed.newest_read
_, _, err = conn.QueryFirst("REPLACE INTO `user_feed_readitems`(user_id,entry_id) VALUES (%d,%d)", userId, id)
if err != nil {
respondError( w, err.Error() )
return
}
/*
_, _, err = GetConnection().QueryFirst("UPDATE user_feed SET unread_items=GREATEST(unread_items-1,0) WHERE user_id=%d AND feed_id=%d", userId, feedId)
if err != nil {
panic(err)
}
*/
	//TODO: Extremely inefficient; make a better method
_, _, err = conn.QueryFirst("CALL update_unread()")
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
} | identifier_body |
web.go | package rreader
import (
"fmt"
//"github.com/stretchrcom/goweb/goweb"
"encoding/json"
"github.com/ziutek/mymysql/mysql"
"net/http"
"strconv"
"strings"
"time"
)
type HomeView struct {
Feeds []FeedViewItem
}
type FeedViewItem struct {
Id int
Title string
Link string
Description string
LastUpdate int64
Group string
Unread int
Active int
}
type ChannelView struct {
Items []ChannelViewItem
}
type ChannelViewItem struct {
Id int
Title string
Published string
Updated int64
Link string
Author string
FeedTitle string
FeedId int
IsRead int
Labels string
}
type ErrorResponse struct {
ErrStr string
}
func respondError( w http.ResponseWriter, errStr string ){
w.WriteHeader( 500 ) //Internal server error
fmt.Fprint(w, errStr )
}
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.FormValue("update") != "" {
UpdateFeeds(60)
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
var userId uint32 = 1
rows, _, err := conn.Query("SELECT `id`,`title`,`link`,`description`,`last_update`,`user_id`,`group`,`unread`,`active` FROM home_view WHERE user_id=%d", userId)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]FeedViewItem, len(rows))
for id, row := range rows {
feeds[id] = FeedViewItem{row.Int(0), row.Str(1), row.Str(2), row.Str(3), row.Int64(4), row.Str(6), row.Int(7), row.Int(8)}
}
b, err := json.Marshal(HomeView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(HomeView{feeds})
}
func getFeedsQueryFromForm(r *http.Request) (string, error) {
group := r.FormValue("group")
feed := r.FormValue("feed")
starred := r.FormValue("starred")
if group != "" {
return fmt.Sprintf("`group`='%s'", gConn.Escape(group)), nil
} else if feed != "" {
feedId, err := strconv.ParseInt(feed, 10, 64)
if err != nil {
return "", err
}
return fmt.Sprintf("`feedid`=%d", feedId), nil
} else if starred != "" {
return fmt.Sprintf("`label`='star'"), nil
}
return "", nil
}
func serveFeedItems(w http.ResponseWriter, r *http.Request) {
var userId uint32 = 1
searchQuery := fmt.Sprintf("userid=%d", userId)
start, err := strconv.ParseInt(r.FormValue("start"), 10, 64)
if err != nil {
start = 0
}
extraSearch, err := getFeedsQueryFromForm(r)
if err != nil {
respondError( w, err.Error() )
return
}
if extraSearch != "" {
searchQuery = fmt.Sprintf("%s AND %s", searchQuery, extraSearch )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `id`,`title`,`published`,`updated`,`link`,`author`,`feedtitle`,`feedid`,`is_read`,`label` FROM `entrylist` WHERE %s ORDER BY `updated` DESC LIMIT %d,100", searchQuery, start)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]ChannelViewItem, len(rows))
for id, row := range rows {
feeds[id] = ChannelViewItem{row.Int(0), row.Str(1), row.Str(2), row.Int64(3), row.Str(4), row.Str(5), row.Str(6), row.Int(7), row.Int(8), row.Str(9) }
}
b, err := json.Marshal(ChannelView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(ChannelView{feeds})
}
type FeedEntryModel struct {
Content string
}
func serveGetItem(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `content` FROM `feed_entry` WHERE id=%d", id)
if err != nil {
respondError( w, err.Error() )
return
}
if len(rows) == 0 {
fmt.Fprint(w, "{\"error\": \"Could not find entry\"}")
return
}
row := rows[0]
b, err := json.Marshal(FeedEntryModel{row.Str(0)})
if err != nil {
respondError( w, err.Error() )
return
}
	//TODO: The update and replace statements should be in a transaction
	//TODO: Only insert into user_feed_readitems when the item is newer than user_feed.newest_read
_, _, err = conn.QueryFirst("REPLACE INTO `user_feed_readitems`(user_id,entry_id) VALUES (%d,%d)", userId, id)
if err != nil |
/*
_, _, err = GetConnection().QueryFirst("UPDATE user_feed SET unread_items=GREATEST(unread_items-1,0) WHERE user_id=%d AND feed_id=%d", userId, feedId)
if err != nil {
panic(err)
}
*/
	//TODO: Extremely inefficient; make a better method
_, _, err = conn.QueryFirst("CALL update_unread()")
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
}
func serveUpdateItemLabels(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
entryId, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
labels := conn.Escape( r.FormValue("labels") )
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
getQuery := func() string {
if labels == "" {
return fmt.Sprintf("DELETE FROM `user_entry_label` WHERE `user_id`=%d AND `feed_entry_id`=%d", userId, entryId )
}
return fmt.Sprintf("INSERT IGNORE INTO `user_entry_label`(`user_id`,`feed_entry_id`,`label`) VALUES (%d,%d,'%s')", userId, entryId, labels )
}
_, _, err = conn.Query(getQuery())
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func updatePriorities(transaction mysql.Transaction, userId int, newPriorities map[string]GroupInfo) error {
for feedIdStr, priority := range newPriorities {
feedId, err := strconv.ParseInt(feedIdStr, 10, 64)
if err != nil {
return err
}
_, _, err = transaction.Query("UPDATE `user_feed` SET `priority`=%d, `group`='%s' WHERE feed_id=%d AND user_id=%d",
priority.Priority,
gConn.Escape(priority.Group),
feedId,
userId)
if err != nil {
return err
}
}
return nil
}
type GroupInfo struct {
Priority int
Group string
}
func serveUpdateOrder(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
userId := 1
decoder := json.NewDecoder(r.Body)
var newPriorities map[string]GroupInfo = make(map[string]GroupInfo)
err := decoder.Decode(&newPriorities)
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
transaction, err := conn.Begin()
if err != nil {
respondError( w, err.Error() )
return
}
err = updatePriorities(transaction, userId, newPriorities)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
}
func serveMarkRead(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
decoder := json.NewDecoder(r.Body)
var readFeedsIds []int
err := decoder.Decode(&readFeedsIds)
if err != nil {
respondError( w, err.Error() )
return
}
if len(readFeedsIds) == 0 {
respondError( w, "The array of feeds to mark as read is empty." )
return
}
readFeeds := []string {}
for _, id := range(readFeedsIds) {
readFeeds = append(readFeeds, strconv.Itoa( id ) )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
_, _, err = conn.Query("UPDATE `user_feed` SET `newest_read`=NOW(), `unread_items`=0 WHERE user_id=%d AND feed_id IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
_, _, err = conn.Query("DELETE `user_feed_readitems` FROM `user_feed_readitems` INNER JOIN `feed_entry` ON `user_feed_readitems`.`entry_id` = `feed_entry`.`id` WHERE user_feed_readitems.`user_id`=%d AND `feed_entry`.`feed_id` IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func serveAddFeed(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
conn := GetConnection().Clone()
var request struct {
Uri string
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&request)
if err != nil {
respondError( w, err.Error() )
return
}
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
	transaction, err := conn.Begin() // use the cloned connection opened above, as in serveUpdateOrder
if err != nil {
respondError( w, err.Error() )
return
}
	rows, _, err := transaction.Query("SELECT `id` FROM `feed` WHERE `feedURL`='%s'", conn.Escape(request.Uri))
var feedId uint64 = 0
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
if len(rows) > 0 {
feedId = rows[0].Uint64(0)
} else {
feedId, err = AddFeed(transaction, request.Uri)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
}
_, _, err = transaction.Query("INSERT INTO `user_feed`(`user_id`,`feed_id`,`newest_read`,`unread_items`,`active`) VALUES (%d, %d, %d,0,0)",
userId,
feedId,
time.Now().Unix() )
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
fmt.Fprint(w, "{\"success\":1}")
}
func StartWebserver() {
http.HandleFunc("/home", serveHome )
http.HandleFunc("/feed", serveFeedItems )
http.HandleFunc("/item", serveGetItem )
http.HandleFunc("/updateLabels", serveUpdateItemLabels )
http.HandleFunc("/updateOrder", serveUpdateOrder )
http.HandleFunc("/markRead", serveMarkRead )
http.HandleFunc("/add", serveAddFeed )
http.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("web"))) )
http.ListenAndServe(":8080", nil)
}
| {
respondError( w, err.Error() )
return
} | conditional_block |
web.go | package rreader
import (
"fmt"
//"github.com/stretchrcom/goweb/goweb"
"encoding/json"
"github.com/ziutek/mymysql/mysql"
"net/http"
"strconv"
"strings"
"time"
)
type HomeView struct {
Feeds []FeedViewItem
}
type FeedViewItem struct {
Id int
Title string
Link string
Description string
LastUpdate int64
Group string
Unread int
Active int
}
type ChannelView struct {
Items []ChannelViewItem
}
type ChannelViewItem struct {
Id int
Title string
Published string
Updated int64
Link string
Author string
FeedTitle string
FeedId int
IsRead int
Labels string
}
type ErrorResponse struct {
ErrStr string
}
func respondError( w http.ResponseWriter, errStr string ){
w.WriteHeader( 500 ) //Internal server error
fmt.Fprint(w, errStr )
}
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.FormValue("update") != "" {
UpdateFeeds(60)
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
var userId uint32 = 1
rows, _, err := conn.Query("SELECT `id`,`title`,`link`,`description`,`last_update`,`user_id`,`group`,`unread`,`active` FROM home_view WHERE user_id=%d", userId)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]FeedViewItem, len(rows))
for id, row := range rows {
feeds[id] = FeedViewItem{row.Int(0), row.Str(1), row.Str(2), row.Str(3), row.Int64(4), row.Str(6), row.Int(7), row.Int(8)}
}
b, err := json.Marshal(HomeView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(HomeView{feeds})
}
func getFeedsQueryFromForm(r *http.Request) (string, error) {
group := r.FormValue("group")
feed := r.FormValue("feed")
starred := r.FormValue("starred")
if group != "" {
return fmt.Sprintf("`group`='%s'", gConn.Escape(group)), nil
} else if feed != "" {
feedId, err := strconv.ParseInt(feed, 10, 64)
if err != nil {
return "", err
}
return fmt.Sprintf("`feedid`=%d", feedId), nil
} else if starred != "" {
return fmt.Sprintf("`label`='star'"), nil
}
return "", nil
}
func | (w http.ResponseWriter, r *http.Request) {
var userId uint32 = 1
searchQuery := fmt.Sprintf("userid=%d", userId)
start, err := strconv.ParseInt(r.FormValue("start"), 10, 64)
if err != nil {
start = 0
}
extraSearch, err := getFeedsQueryFromForm(r)
if err != nil {
respondError( w, err.Error() )
return
}
if extraSearch != "" {
searchQuery = fmt.Sprintf("%s AND %s", searchQuery, extraSearch )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `id`,`title`,`published`,`updated`,`link`,`author`,`feedtitle`,`feedid`,`is_read`,`label` FROM `entrylist` WHERE %s ORDER BY `updated` DESC LIMIT %d,100", searchQuery, start)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]ChannelViewItem, len(rows))
for id, row := range rows {
feeds[id] = ChannelViewItem{row.Int(0), row.Str(1), row.Str(2), row.Int64(3), row.Str(4), row.Str(5), row.Str(6), row.Int(7), row.Int(8), row.Str(9) }
}
b, err := json.Marshal(ChannelView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(ChannelView{feeds})
}
type FeedEntryModel struct {
Content string
}
func serveGetItem(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `content` FROM `feed_entry` WHERE id=%d", id)
if err != nil {
respondError( w, err.Error() )
return
}
if len(rows) == 0 {
fmt.Fprint(w, "{\"error\": \"Could not find entry\"}")
return
}
row := rows[0]
b, err := json.Marshal(FeedEntryModel{row.Str(0)})
if err != nil {
respondError( w, err.Error() )
return
}
	//TODO: The update and replace statements should be in a transaction
	//TODO: Only insert into user_feed_readitems when the item is newer than user_feed.newest_read
_, _, err = conn.QueryFirst("REPLACE INTO `user_feed_readitems`(user_id,entry_id) VALUES (%d,%d)", userId, id)
if err != nil {
respondError( w, err.Error() )
return
}
/*
_, _, err = GetConnection().QueryFirst("UPDATE user_feed SET unread_items=GREATEST(unread_items-1,0) WHERE user_id=%d AND feed_id=%d", userId, feedId)
if err != nil {
panic(err)
}
*/
	//TODO: Extremely inefficient; make a better method
_, _, err = conn.QueryFirst("CALL update_unread()")
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
}
func serveUpdateItemLabels(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
entryId, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
labels := conn.Escape( r.FormValue("labels") )
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
getQuery := func() string {
if labels == "" {
return fmt.Sprintf("DELETE FROM `user_entry_label` WHERE `user_id`=%d AND `feed_entry_id`=%d", userId, entryId )
}
return fmt.Sprintf("INSERT IGNORE INTO `user_entry_label`(`user_id`,`feed_entry_id`,`label`) VALUES (%d,%d,'%s')", userId, entryId, labels )
}
_, _, err = conn.Query(getQuery())
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func updatePriorities(transaction mysql.Transaction, userId int, newPriorities map[string]GroupInfo) error {
for feedIdStr, priority := range newPriorities {
feedId, err := strconv.ParseInt(feedIdStr, 10, 64)
if err != nil {
return err
}
_, _, err = transaction.Query("UPDATE `user_feed` SET `priority`=%d, `group`='%s' WHERE feed_id=%d AND user_id=%d",
priority.Priority,
gConn.Escape(priority.Group),
feedId,
userId)
if err != nil {
return err
}
}
return nil
}
type GroupInfo struct {
Priority int
Group string
}
func serveUpdateOrder(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
userId := 1
decoder := json.NewDecoder(r.Body)
var newPriorities map[string]GroupInfo = make(map[string]GroupInfo)
err := decoder.Decode(&newPriorities)
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
transaction, err := conn.Begin()
if err != nil {
respondError( w, err.Error() )
return
}
err = updatePriorities(transaction, userId, newPriorities)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
}
func serveMarkRead(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
decoder := json.NewDecoder(r.Body)
var readFeedsIds []int
err := decoder.Decode(&readFeedsIds)
if err != nil {
respondError( w, err.Error() )
return
}
if len(readFeedsIds) == 0 {
respondError( w, "The array of feeds to mark as read is empty." )
return
}
readFeeds := []string {}
for _, id := range(readFeedsIds) {
readFeeds = append(readFeeds, strconv.Itoa( id ) )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
_, _, err = conn.Query("UPDATE `user_feed` SET `newest_read`=NOW(), `unread_items`=0 WHERE user_id=%d AND feed_id IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
_, _, err = conn.Query("DELETE `user_feed_readitems` FROM `user_feed_readitems` INNER JOIN `feed_entry` ON `user_feed_readitems`.`entry_id` = `feed_entry`.`id` WHERE user_feed_readitems.`user_id`=%d AND `feed_entry`.`feed_id` IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
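// Example request body for the /markRead handler above (ids are illustrative):
// a JSON array of feed ids to mark as read, e.g. [3,17,42]; an empty array is
// rejected with an error before any query runs.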
func serveAddFeed(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
conn := GetConnection().Clone()
var request struct {
Uri string
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&request)
if err != nil {
respondError( w, err.Error() )
return
}
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
transaction, err := gConn.Begin()
if err != nil {
respondError( w, err.Error() )
return
}
rows, _, err := transaction.Query("SELECT `id` FROM `feed` WHERE `feedURL`='%s'", request.Uri)
var feedId uint64 = 0
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
if len(rows) > 0 {
feedId = rows[0].Uint64(0)
} else {
feedId, err = AddFeed(transaction, request.Uri)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
}
_, _, err = transaction.Query("INSERT INTO `user_feed`(`user_id`,`feed_id`,`newest_read`,`unread_items`,`active`) VALUES (%d, %d, %d,0,0)",
userId,
feedId,
time.Now().Unix() )
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
fmt.Fprint(w, "{\"success\":1}")
}
func StartWebserver() {
http.HandleFunc("/home", serveHome )
http.HandleFunc("/feed", serveFeedItems )
http.HandleFunc("/item", serveGetItem )
http.HandleFunc("/updateLabels", serveUpdateItemLabels )
http.HandleFunc("/updateOrder", serveUpdateOrder )
http.HandleFunc("/markRead", serveMarkRead )
http.HandleFunc("/add", serveAddFeed )
http.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("web"))) )
http.ListenAndServe(":8080", nil)
}
| serveFeedItems | identifier_name |
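The TODO comments in serveGetItem above note that the REPLACE into user_feed_readitems and the unread-count refresh should run atomically. A minimal sketch of that change, kept as an editorial illustration rather than part of the original source, reuses the mymysql transaction calls already present in this file (conn.Begin, Query, Rollback, Commit):

// Hypothetical rework of the read-tracking block in serveGetItem: both
// statements run in one transaction, so a failed unread refresh does not
// leave user_feed_readitems out of sync with user_feed.
transaction, err := conn.Begin()
if err != nil {
	respondError(w, err.Error())
	return
}
_, _, err = transaction.Query("REPLACE INTO `user_feed_readitems`(user_id,entry_id) VALUES (%d,%d)", userId, id)
if err == nil {
	_, _, err = transaction.Query("CALL update_unread()")
}
if err != nil {
	transaction.Rollback()
	respondError(w, err.Error())
	return
}
transaction.Commit()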
web.go | package rreader
import (
"fmt"
//"github.com/stretchrcom/goweb/goweb"
"encoding/json"
"github.com/ziutek/mymysql/mysql"
"net/http"
"strconv"
"strings"
"time"
)
type HomeView struct {
Feeds []FeedViewItem
}
type FeedViewItem struct {
Id int
Title string
Link string
Description string
LastUpdate int64
Group string
Unread int
Active int
}
type ChannelView struct {
Items []ChannelViewItem
}
type ChannelViewItem struct {
Id int
Title string
Published string
Updated int64
Link string
Author string
FeedTitle string
FeedId int
IsRead int
Labels string
}
type ErrorResponse struct {
ErrStr string
}
func respondError( w http.ResponseWriter, errStr string ){
w.WriteHeader( 500 ) //Internal server error
fmt.Fprint(w, errStr )
}
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.FormValue("update") != "" {
UpdateFeeds(60)
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
var userId uint32 = 1
rows, _, err := conn.Query("SELECT `id`,`title`,`link`,`description`,`last_update`,`user_id`,`group`,`unread`,`active` FROM home_view WHERE user_id=%d", userId)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]FeedViewItem, len(rows))
for id, row := range rows {
feeds[id] = FeedViewItem{row.Int(0), row.Str(1), row.Str(2), row.Str(3), row.Int64(4), row.Str(6), row.Int(7), row.Int(8)}
}
b, err := json.Marshal(HomeView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(HomeView{feeds})
}
func getFeedsQueryFromForm(r *http.Request) (string, error) {
group := r.FormValue("group")
feed := r.FormValue("feed")
starred := r.FormValue("starred")
if group != "" {
return fmt.Sprintf("`group`='%s'", gConn.Escape(group)), nil
} else if feed != "" {
feedId, err := strconv.ParseInt(feed, 10, 64)
if err != nil {
return "", err
}
return fmt.Sprintf("`feedid`=%d", feedId), nil
} else if starred != "" {
return fmt.Sprintf("`label`='star'"), nil
}
return "", nil
}
func serveFeedItems(w http.ResponseWriter, r *http.Request) {
var userId uint32 = 1
searchQuery := fmt.Sprintf("userid=%d", userId)
start, err := strconv.ParseInt(r.FormValue("start"), 10, 64)
if err != nil {
start = 0
}
extraSearch, err := getFeedsQueryFromForm(r)
if err != nil {
respondError( w, err.Error() )
return
}
if extraSearch != "" {
searchQuery = fmt.Sprintf("%s AND %s", searchQuery, extraSearch )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `id`,`title`,`published`,`updated`,`link`,`author`,`feedtitle`,`feedid`,`is_read`,`label` FROM `entrylist` WHERE %s ORDER BY `updated` DESC LIMIT %d,100", searchQuery, start)
if err != nil {
respondError( w, err.Error() )
return
}
feeds := make([]ChannelViewItem, len(rows))
for id, row := range rows {
feeds[id] = ChannelViewItem{row.Int(0), row.Str(1), row.Str(2), row.Int64(3), row.Str(4), row.Str(5), row.Str(6), row.Int(7), row.Int(8), row.Str(9) }
}
b, err := json.Marshal(ChannelView{feeds})
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
//c.Format = goweb.JSON_FORMAT
//c.RespondWithData(ChannelView{feeds})
}
type FeedEntryModel struct {
Content string
}
func serveGetItem(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
rows, _, err := conn.Query("SELECT `content` FROM `feed_entry` WHERE id=%d", id)
if err != nil {
respondError( w, err.Error() )
return
}
if len(rows) == 0 {
fmt.Fprint(w, "{\"error\": \"Could not find entry\"}")
return
}
row := rows[0]
b, err := json.Marshal(FeedEntryModel{row.Str(0)})
if err != nil {
respondError( w, err.Error() )
return
}
//TODO: The update and replace statements should be in a transaction
//TODO: Only insert into user_feed_readitems when the item is newer than user_feed.newest_read
_, _, err = conn.QueryFirst("REPLACE INTO `user_feed_readitems`(user_id,entry_id) VALUES (%d,%d)", userId, id)
if err != nil {
respondError( w, err.Error() )
return
}
/*
_, _, err = GetConnection().QueryFirst("UPDATE user_feed SET unread_items=GREATEST(unread_items-1,0) WHERE user_id=%d AND feed_id=%d", userId, feedId)
if err != nil {
panic(err)
}
*/ | respondError( w, err.Error() )
return
}
fmt.Fprint(w, string(b))
}
func serveUpdateItemLabels(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
entryId, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
labels := conn.Escape( r.FormValue("labels") )
userId := 1
if err != nil {
respondError( w, err.Error() )
return
}
getQuery := func() string {
if labels == "" {
return fmt.Sprintf("DELETE FROM `user_entry_label` WHERE `user_id`=%d AND `feed_entry_id`=%d", userId, entryId )
}
return fmt.Sprintf("INSERT IGNORE INTO `user_entry_label`(`user_id`,`feed_entry_id`,`label`) VALUES (%d,%d,'%s')", userId, entryId, labels )
}
_, _, err = conn.Query(getQuery())
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func updatePriorities(transaction mysql.Transaction, userId int, newPriorities map[string]GroupInfo) error {
for feedIdStr, priority := range newPriorities {
feedId, err := strconv.ParseInt(feedIdStr, 10, 64)
if err != nil {
return err
}
_, _, err = transaction.Query("UPDATE `user_feed` SET `priority`=%d, `group`='%s' WHERE feed_id=%d AND user_id=%d",
priority.Priority,
gConn.Escape(priority.Group),
feedId,
userId)
if err != nil {
return err
}
}
return nil
}
type GroupInfo struct {
Priority int
Group string
}
func serveUpdateOrder(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
userId := 1
decoder := json.NewDecoder(r.Body)
var newPriorities map[string]GroupInfo = make(map[string]GroupInfo)
err := decoder.Decode(&newPriorities)
if err != nil {
respondError( w, err.Error() )
return
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
transaction, err := conn.Begin()
if err != nil {
respondError( w, err.Error() )
return
}
err = updatePriorities(transaction, userId, newPriorities)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
}
func serveMarkRead(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
decoder := json.NewDecoder(r.Body)
var readFeedsIds []int
err := decoder.Decode(&readFeedsIds)
if err != nil {
respondError( w, err.Error() )
return
}
if len(readFeedsIds) == 0 {
respondError( w, "The array of feeds to mark as read is empty." )
return
}
readFeeds := []string {}
for _, id := range(readFeedsIds) {
readFeeds = append(readFeeds, strconv.Itoa( id ) )
}
conn := GetConnection().Clone()
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
_, _, err = conn.Query("UPDATE `user_feed` SET `newest_read`=NOW(), `unread_items`=0 WHERE user_id=%d AND feed_id IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
_, _, err = conn.Query("DELETE `user_feed_readitems` FROM `user_feed_readitems` INNER JOIN `feed_entry` ON `user_feed_readitems`.`entry_id` = `feed_entry`.`id` WHERE user_feed_readitems.`user_id`=%d AND `feed_entry`.`feed_id` IN (%s)",
userId,
strings.Join( readFeeds, "," ) )
if err != nil {
respondError( w, err.Error() )
return
}
fmt.Fprint(w, "{\"success\":1}")
}
func serveAddFeed(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
w.WriteHeader(400)
fmt.Fprint(w, "Not a post request.")
return
}
var userId uint32 = 1
conn := GetConnection().Clone()
var request struct {
Uri string
}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&request)
if err != nil {
respondError( w, err.Error() )
return
}
if err := conn.Connect(); err != nil {
respondError( w, err.Error() )
return
}
defer conn.Close()
transaction, err := gConn.Begin()
if err != nil {
respondError( w, err.Error() )
return
}
rows, _, err := transaction.Query("SELECT `id` FROM `feed` WHERE `feedURL`='%s'", request.Uri)
var feedId uint64 = 0
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
if len(rows) > 0 {
feedId = rows[0].Uint64(0)
} else {
feedId, err = AddFeed(transaction, request.Uri)
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
}
_, _, err = transaction.Query("INSERT INTO `user_feed`(`user_id`,`feed_id`,`newest_read`,`unread_items`,`active`) VALUES (%d, %d, %d,0,0)",
userId,
feedId,
time.Now().Unix() )
if err != nil {
transaction.Rollback()
respondError( w, err.Error() )
return
}
transaction.Commit()
fmt.Fprint(w, "{\"success\":1}")
}
func StartWebserver() {
http.HandleFunc("/home", serveHome )
http.HandleFunc("/feed", serveFeedItems )
http.HandleFunc("/item", serveGetItem )
http.HandleFunc("/updateLabels", serveUpdateItemLabels )
http.HandleFunc("/updateOrder", serveUpdateOrder )
http.HandleFunc("/markRead", serveMarkRead )
http.HandleFunc("/add", serveAddFeed )
http.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("web"))) )
http.ListenAndServe(":8080", nil)
} | //TODO: Extremely inefficient; use a better method
_, _, err = conn.QueryFirst("CALL update_unread()")
if err != nil { | random_line_split |
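A related note on serveAddFeed above: request.Uri is spliced into the feed lookup without escaping, unlike the labels handler which runs user input through conn.Escape. A hedged sketch of the safer variant (editorial illustration only, reusing the Escape helper already present in this codebase):

// Hypothetical hardening of the feed lookup in serveAddFeed: escape the
// user-supplied URI before it is interpolated into the query string.
uri := conn.Escape(request.Uri)
rows, _, err := transaction.Query("SELECT `id` FROM `feed` WHERE `feedURL`='%s'", uri)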
sandbox.ts | import invariant from 'tiny-invariant';
import { datasetParser, isElement, evaluate, createSequence } from 'yuzu-utils';
import { IComponentConstructable } from 'yuzu/types';
import { Component } from 'yuzu';
import { createContext, IContext } from './context';
export type entrySelectorFn = (sbx: Sandbox<any>) => boolean | HTMLElement[];
export type sandboxComponentOptions = [
IComponentConstructable<Component<any, any>>,
Record<string, any>,
];
export interface ISandboxRegistryEntry {
component: IComponentConstructable<Component<any, any>>;
selector: string | entrySelectorFn;
[key: string]: any;
}
export interface ISandboxOptions {
components?: (
| IComponentConstructable<Component<any, any>>
| sandboxComponentOptions
)[];
root: HTMLElement | string;
context: IContext<any>;
id: string;
}
const nextSbUid = createSequence();
const nextChildUid = createSequence();
/**
* A sandbox can be used to initialize a set of components based on an element's innerHTML.
*
* Let's say we have the following component:
*
* ```js
* class Counter extends Component {
* static root = '.Counter';
*
* // other stuff here ...
* }
* ```
*
* We can register the component inside a sandbox like this:
*
* ```js
* const sandbox = new Sandbox({
* components: [Counter],
* id: 'main', // optional
* });
*
* sandbox.mount('#main');
* ```
*
* In this way the sandbox will attach itself to the element matching `#main` and will traverse its children
* looking for every `.Counter` element and attaching an instance of the Counter component to it.
*
* To prevent a component from being initialized (for example, when you want to initialize it at a later moment)
* just add a `data-skip` attribute to its root element.
*
* @class
* @param {object} config
* @param {Component[]|[Component, object][]} [config.components] Array of component constructors or arrays of [ComponentConstructor, options]
* @param {HTMLElement|string} [config.root=document.body] Root element of the sandbox. Either a DOM element or a CSS selector
* @param {string} [config.id] ID of the sandbox
* @property {string} $id Sandbox internal id
* @property {HTMLElement} $el Sandbox root DOM element
* @property {Context} $ctx Internal [context](/packages/yuzu-application/api/context). Used to share data across child instances
* @property {object[]} $registry Registered components storage
* @property {Map} $instances Running instances storage
* @returns {Sandbox}
*/
export class Sandbox<S = {}> extends Component<S, ISandboxOptions> {
public static SB_DATA_ATTR = 'data-yuzu-sb';
public defaultOptions(): ISandboxOptions {
return {
components: [],
context: createContext(),
id: '',
root: document.body,
};
}
public $id: string;
public $ctx?: IContext;
public $registry: ISandboxRegistryEntry[] = [];
public $instances = new Map<
string | entrySelectorFn,
Component<any, any>[]
>();
/**
* Creates a sandbox instance.
*
* @constructor
*/
public constructor(options: Partial<ISandboxOptions> = {}) {
super(options);
const { components = [], id } = this.options;
this.$id = id || nextSbUid('_sbx-');
components.forEach((config) => {
if (!Array.isArray(config)) {
if (config.root) {
this.register({ component: config, selector: config.root });
}
if (process.env.NODE_ENV !== 'production') {
!config.root &&
this.$warn(
`Skipping component ${config.displayName ||
config.name} because static "root" selector is missing`,
);
}
} else {
const [component, params = {}] = config;
const selector = component.root || params.selector;
if (selector) {
this.register({ component, selector, ...params });
}
if (process.env.NODE_ENV !== 'production') {
!selector &&
this.$warn(
`Skipping component ${component.displayName ||
component.name} because a static "root" selector is missing and no "selector" param is passed-in`,
);
}
}
});
return this;
}
/**
* ```js
* register(params)
* ```
*
* Registers a new component into the sandbox. The registered components
* will be traversed on `.mount()`, initializing every matching component.
*
* @param {object} params Every property other than `component` and `selector` will be used as component option
* @param {Component} params.component Component constructor
* @param {string} params.selector Child component root CSS selector
* @example
* sandbox.register({
* component: Counter,
* selector: '.Counter',
* theme: 'dark' // <-- instance options
* });
*/
public register<C extends Component<any, any>>(params: {
component: IComponentConstructable<C>;
selector: string | entrySelectorFn;
[key: string]: any;
}): void {
invariant(
Component.isComponent(params.component),
'Missing or invalid `component` property',
);
invariant(
typeof params.selector === 'string' ||
typeof params.selector === 'function',
'Missing `selector` property',
);
this.$registry.push(params);
}
/**
* ```js
* start([data])
* ```
*
* **DEPRECATED!** Use `sandbox.mount(root)` instead.
*
* Starts the sandbox with an optional context.
*
* The store will be available inside each component at `this.$context`.
*
* @deprecated
* @param {object} [data] Optional context data object to be injected into the child components.
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
* @example
* sandbox.start();
*
* // with context data
* sandbox.start({ globalTheme: 'dark' });
*/
public start(data = {}): this {
Object.defineProperty(this, '$legacyStart', { value: true });
if (process.env.NODE_ENV !== 'production') {
this.$warn(`Sandbox.start is deprecated. Use the "mount" method instead`);
}
this.mount(this.options.root);
this.setup();
this.$ctx && this.$ctx.update(data);
this.discover();
return this;
}
/**
* ```js
* mount([el], [state])
* ```
*
* Enhances `Component.mount()` by firing the child components discovery logic.
* By default will use `document.body` as mount element.
*
* @param {string|Element} el Component's root element
* @param {object|null} [state={}] Initial state
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
*/
public mount(el: string | Element, state: Partial<S> | null = {}): this {
super.mount(el, state);
this.$el.setAttribute(Sandbox.SB_DATA_ATTR, '');
if (!this.hasOwnProperty('$legacyStart')) {
this.setup();
this.discover();
}
return this;
}
/**
* Sets up the sandbox context passed in the options.
*
* @ignore
*/
public setup(): void {
this.$ctx = this.options.context;
this.$ctx.inject(this);
}
/**
* Initializes the sandbox child components.
*
* @ignore
* @returns {Promise}
*/
public discover(): Promise<void> {
invariant(isElement(this.$el), '"this.$el" is not a DOM element');
this.emit('beforeStart');
const sbSelector = `[${Sandbox.SB_DATA_ATTR}]`;
const ret = this.$registry.map(
async ({ component: ComponentConstructor, selector, ...options }) => {
if (this.$instances.has(selector)) {
this.$warn(
`Component ${ComponentConstructor} already initialized on ${selector}`,
);
return;
}
const targets = this.resolveSelector(selector);
let instances: Promise<Component<any, any>>[] | undefined;
if (targets === true) {
instances = [this.createInstance(ComponentConstructor, options)];
} else if (Array.isArray(targets)) {
const { $el } = this;
instances = targets
.filter((el) => {
return (
isElement(el) &&
!el.dataset.skip &&
!el.closest('[data-skip]') &&
el.closest(sbSelector) === $el
);
})
.map((el) => {
return this.createInstance(ComponentConstructor, options, el);
});
}
if (instances) {
this.$instances.set(selector, await Promise.all(instances));
}
return true;
},
);
return Promise.all(ret).then(() => {
this.emit('start');
});
}
/**
* Resolves a configured component selector to a list of DOM nodes or a boolean (for detached components)
*
* @ignore
* @param {string|function} selector Selector string or function.
* @returns {HTMLElement[]|boolean}
*/
public resolveSelector(
selector: string | entrySelectorFn,
): HTMLElement[] | boolean {
let targets = evaluate(selector, this);
if (typeof targets === 'string') {
targets = this.findNodes(targets) as HTMLElement[];
}
return targets;
}
/**
* Creates a component instance.
* Reads inline component options from the passed-in root DOM element.
*
* @ignore
* @param {object} options instance options
* @param {HTMLElement} [el] Root element
* @returns {Component}
*/
public createInstance<C extends Component<any, any>>(
ComponentConstructor: IComponentConstructable<C>,
options: Record<string, any>,
el?: HTMLElement,
): Promise<C> {
const inlineOptions = el ? datasetParser(el) : {};
return this.setRef({
id: nextChildUid(this.$id + '-c.'),
...options,
...inlineOptions,
component: ComponentConstructor,
el,
});
}
/**
* ```js
* stop()
* ```
*
* **DEPRECATED!** Use `sandbox.destroy()` instead.
*
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.stop();
*/
public async stop(): Promise<void> {
if (process.env.NODE_ENV !== 'production') |
return this.destroy();
}
/**
* ```js
* destroy()
* ```
*
* Enhances `Component.destroy()`.
* Stops every running component, clears sandbox events and destroys the instance.
*
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.destroy();
*/
public async destroy(): Promise<void> {
this.emit('beforeStop');
await this.beforeDestroy();
this.removeListeners();
try {
if (this.$el) {
this.$el.removeAttribute(Sandbox.SB_DATA_ATTR);
}
await this.destroyRefs();
this.$active = false;
} catch (e) {
this.emit('error', e);
return Promise.reject(e);
}
this.$instances.clear();
this.emit('stop');
this.clear();
return super.destroy();
}
/**
* Removes events and associated store
*
* @ignore
*/
public clear(): void {
this.$ctx = undefined; // release the context
this.off('beforeStart');
this.off('start');
this.off('error');
this.off('beforeStop');
this.off('stop');
}
}
| {
this.$warn(
`Sandbox.stop is deprecated. Use the "destroy" method instead`,
);
} | conditional_block |
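A short usage sketch tying together the constructor, register()/mount() flow and the data-skip behaviour documented in the Sandbox JSDoc above. The Gallery component, the #app selector and the 'yuzu-application' import path are illustrative assumptions, not part of the original source:

import { Component } from 'yuzu';
import { Sandbox } from 'yuzu-application';

// Illustrative component: any Component subclass with a static `root` selector works.
class Gallery extends Component {
  public static root = '.Gallery';
}

const sandbox = new Sandbox({
  components: [[Gallery, { theme: 'dark' }]], // extra keys become instance options
  id: 'main',
});

// Attaches to #app and mounts a Gallery on every `.Gallery` element
// that is not inside a `[data-skip]` subtree.
sandbox.mount('#app');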
sandbox.ts | import invariant from 'tiny-invariant';
import { datasetParser, isElement, evaluate, createSequence } from 'yuzu-utils';
import { IComponentConstructable } from 'yuzu/types';
import { Component } from 'yuzu';
import { createContext, IContext } from './context';
export type entrySelectorFn = (sbx: Sandbox<any>) => boolean | HTMLElement[];
export type sandboxComponentOptions = [
IComponentConstructable<Component<any, any>>,
Record<string, any>,
];
export interface ISandboxRegistryEntry {
component: IComponentConstructable<Component<any, any>>;
selector: string | entrySelectorFn;
[key: string]: any;
}
export interface ISandboxOptions {
components?: (
| IComponentConstructable<Component<any, any>>
| sandboxComponentOptions
)[];
root: HTMLElement | string;
context: IContext<any>;
id: string;
}
const nextSbUid = createSequence();
const nextChildUid = createSequence();
/**
* A sandbox can be used to initialize a set of components based on an element's innerHTML.
*
* Let's say we have the following component:
*
* ```js
* class Counter extends Component {
* static root = '.Counter';
*
* // other stuff here ...
* }
* ```
*
* We can register the component inside a sandbox like this:
*
* ```js
* const sandbox = new Sandbox({
* components: [Counter],
* id: 'main', // optional
* });
*
* sandbox.mount('#main');
* ```
*
* In this way the sandbox will attach itself to the element matching `#main` and will traverse its children
* looking for every `.Counter` element and attaching an instance of the Counter component to it.
*
* To prevent a component from being initialized (for example, when you want to initialize it at a later moment)
* just add a `data-skip` attribute to its root element.
*
* @class
* @param {object} config
* @param {Component[]|[Component, object][]} [config.components] Array of component constructors or arrays of [ComponentConstructor, options]
* @param {HTMLElement|string} [config.root=document.body] Root element of the sandbox. Either a DOM element or a CSS selector
* @param {string} [config.id] ID of the sandbox
* @property {string} $id Sandbox internal id
* @property {HTMLElement} $el Sandbox root DOM element
* @property {Context} $ctx Internal [context](/packages/yuzu-application/api/context). Used to share data across child instances
* @property {object[]} $registry Registered components storage
* @property {Map} $instances Running instances storage
* @returns {Sandbox}
*/
export class Sandbox<S = {}> extends Component<S, ISandboxOptions> {
public static SB_DATA_ATTR = 'data-yuzu-sb';
public defaultOptions(): ISandboxOptions {
return {
components: [],
context: createContext(),
id: '',
root: document.body,
};
}
public $id: string;
public $ctx?: IContext;
public $registry: ISandboxRegistryEntry[] = [];
public $instances = new Map<
string | entrySelectorFn,
Component<any, any>[]
>();
/**
* Creates a sandbox instance.
*
* @constructor
*/
public constructor(options: Partial<ISandboxOptions> = {}) {
super(options);
const { components = [], id } = this.options;
this.$id = id || nextSbUid('_sbx-');
components.forEach((config) => {
if (!Array.isArray(config)) {
if (config.root) {
this.register({ component: config, selector: config.root });
}
if (process.env.NODE_ENV !== 'production') {
!config.root &&
this.$warn(
`Skipping component ${config.displayName ||
config.name} because static "root" selector is missing`,
);
}
} else {
const [component, params = {}] = config;
const selector = component.root || params.selector;
if (selector) {
this.register({ component, selector, ...params });
}
if (process.env.NODE_ENV !== 'production') {
!selector &&
this.$warn(
`Skipping component ${component.displayName ||
component.name} because a static "root" selector is missing and no "selector" param is passed-in`,
);
}
}
});
return this;
}
/**
* ```js
* register(params)
* ```
*
* Registers a new component into the sandbox. The registered components
* will be traversed on `.mount()`, initializing every matching component.
*
* @param {object} params Every property other than `component` and `selector` will be used as component option
* @param {Component} params.component Component constructor
* @param {string} params.selector Child component root CSS selector
* @example
* sandbox.register({
* component: Counter,
* selector: '.Counter',
* theme: 'dark' // <-- instance options
* });
*/
public register<C extends Component<any, any>>(params: {
component: IComponentConstructable<C>;
selector: string | entrySelectorFn;
[key: string]: any;
}): void {
invariant(
Component.isComponent(params.component),
'Missing or invalid `component` property',
);
invariant(
typeof params.selector === 'string' ||
typeof params.selector === 'function',
'Missing `selector` property',
);
this.$registry.push(params);
}
/**
* ```js
* start([data])
* ```
*
* **DEPRECATED!** Use `sandbox.mount(root)` instead.
*
* Starts the sandbox with an optional context.
*
* The store will be available inside each component at `this.$context`.
*
* @deprecated
* @param {object} [data] Optional context data object to be injected into the child components.
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
* @example
* sandbox.start();
*
* // with context data
* sandbox.start({ globalTheme: 'dark' });
*/
public start(data = {}): this {
Object.defineProperty(this, '$legacyStart', { value: true });
if (process.env.NODE_ENV !== 'production') {
this.$warn(`Sandbox.start is deprecated. Use the "mount" method instead`);
}
this.mount(this.options.root);
this.setup();
this.$ctx && this.$ctx.update(data);
this.discover();
return this;
}
/**
* ```js
* mount([el], [state])
* ```
*
* Enhances `Component.mount()` by firing the child components discovery logic.
* By default will use `document.body` as mount element.
*
* @param {string|Element} el Component's root element
* @param {object|null} [state={}] Initial state
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
*/
public mount(el: string | Element, state: Partial<S> | null = {}): this {
super.mount(el, state);
this.$el.setAttribute(Sandbox.SB_DATA_ATTR, '');
if (!this.hasOwnProperty('$legacyStart')) {
this.setup();
this.discover();
}
return this;
}
/**
* Sets up the sandbox context passed in the options.
*
* @ignore
*/
public setup(): void {
this.$ctx = this.options.context;
this.$ctx.inject(this);
}
/**
* Initializes the sandbox child components.
*
* @ignore
* @returns {Promise}
*/
public discover(): Promise<void> {
invariant(isElement(this.$el), '"this.$el" is not a DOM element');
this.emit('beforeStart');
const sbSelector = `[${Sandbox.SB_DATA_ATTR}]`;
const ret = this.$registry.map(
async ({ component: ComponentConstructor, selector, ...options }) => {
if (this.$instances.has(selector)) {
this.$warn(
`Component ${ComponentConstructor} already initialized on ${selector}`,
);
return;
}
const targets = this.resolveSelector(selector);
let instances: Promise<Component<any, any>>[] | undefined;
if (targets === true) {
instances = [this.createInstance(ComponentConstructor, options)];
} else if (Array.isArray(targets)) {
const { $el } = this;
instances = targets
.filter((el) => {
return (
isElement(el) &&
!el.dataset.skip &&
!el.closest('[data-skip]') &&
el.closest(sbSelector) === $el
);
})
.map((el) => {
return this.createInstance(ComponentConstructor, options, el);
});
}
if (instances) {
this.$instances.set(selector, await Promise.all(instances));
}
return true;
},
);
return Promise.all(ret).then(() => {
this.emit('start');
});
}
/**
* Resolves a configured component selector to a list of DOM nodes or a boolean (for detached components)
*
* @ignore
* @param {string|function} selector Selector string or function.
* @returns {HTMLElement[]|boolean}
*/
public resolveSelector(
selector: string | entrySelectorFn,
): HTMLElement[] | boolean {
let targets = evaluate(selector, this);
if (typeof targets === 'string') {
targets = this.findNodes(targets) as HTMLElement[];
}
return targets;
}
/**
* Creates a component instance.
* Reads inline component options from the passed-in root DOM element.
*
* @ignore
* @param {object} options instance options
* @param {HTMLElement} [el] Root element
* @returns {Component}
*/
public createInstance<C extends Component<any, any>>(
ComponentConstructor: IComponentConstructable<C>,
options: Record<string, any>,
el?: HTMLElement,
): Promise<C> {
const inlineOptions = el ? datasetParser(el) : {};
return this.setRef({
id: nextChildUid(this.$id + '-c.'),
...options,
...inlineOptions,
component: ComponentConstructor,
el,
});
}
/**
* ```js
* stop()
* ```
*
* **DEPRECATED!** Use `sandbox.destroy()` instead.
*
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.stop();
*/
public async stop(): Promise<void> {
if (process.env.NODE_ENV !== 'production') {
this.$warn(
`Sandbox.stop is deprecated. Use the "destroy" method instead`,
);
}
return this.destroy();
}
/**
* ```js
* destroy()
* ```
*
* Enhances `Component.destroy()`.
* Stops every running component, clears sandbox events and destroys the instance.
*
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.destroy();
*/
public async destroy(): Promise<void> {
this.emit('beforeStop');
await this.beforeDestroy();
this.removeListeners();
try {
if (this.$el) {
this.$el.removeAttribute(Sandbox.SB_DATA_ATTR);
}
await this.destroyRefs();
this.$active = false;
} catch (e) {
this.emit('error', e);
return Promise.reject(e);
}
this.$instances.clear();
this.emit('stop');
this.clear();
return super.destroy();
}
/**
* Removes events and associated store
*
* @ignore
*/
public clear(): void {
this.$ctx = undefined; // release the context
this.off('beforeStart');
this.off('start'); | this.off('beforeStop');
this.off('stop');
}
} | this.off('error'); | random_line_split |
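A companion sketch for the context option: setup() above injects options.context into the sandbox, and the deprecated start(data) path pushes data into it via update(), which child components then read as this.$context. Everything below is an editorial illustration; createContext and Sandbox are assumed to be exported from 'yuzu-application', and Gallery is the illustrative component from the previous sketch:

import { Sandbox, createContext } from 'yuzu-application';

const context = createContext();
// Set shared data before mounting so children see it at this.$context
// (mirrors what the deprecated sandbox.start({ globalTheme: 'dark' }) did).
context.update({ globalTheme: 'dark' });

const sandbox = new Sandbox({
  components: [Gallery],
  context, // injected in setup() and shared with every child instance
});

sandbox.mount('#app');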