| code (string, 42 to 43.2k chars) | apis (list) | extract_api (string, 115 to 61.9k chars) |
|---|---|---|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy
def read_data(fpath):
iris = pd.read_csv(fpath)
iris.loc[iris['species'] == 'virginica', 'species'] = 0
iris.loc[iris['species'] == 'versicolor', 'species'] = 1
iris.loc[iris['sp... | [
"evaluate.accuracy"
] | [((159, 177), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (170, 177), True, 'import pandas as pd\n'), ((519, 587), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y[:, 0]', 's': '(40)', 'cmap': 'plt.cm.Spectral'}), '(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral... |
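Each `extract_api` cell is a Python-literal list of tuples, truncated by the preview. Judging from the complete first tuple above, the fields appear to be: the call's character span, the fully qualified API name, the name as written in the code, the parsed positional and keyword arguments, the argument source text, the argument-list span, a boolean that seems to flag aliased imports, and the import statement. A complete entry can be decoded with `ast.literal_eval`; a minimal sketch, reusing that first tuple verbatim (a fuller extraction sketch follows the last record):

```python
# Minimal sketch: decoding one extract_api entry with the standard library.
# The field names below are inferred from this preview, not taken from any
# dataset documentation.
import ast

raw = ("[((159, 177), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), "
       "'(fpath)\\n', (170, 177), True, 'import pandas as pd\\n')]")

(record,) = ast.literal_eval(raw)
call_span, api, written, (args, kwargs), arg_src, arg_span, aliased, imp = record
print(api, written, call_span, args, kwargs, imp.strip())
# -> pandas.read_csv pd.read_csv (159, 177) ['fpath'] {} import pandas as pd
```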
import datetime
import os
import copy
import json
import numpy as np
from pytz import timezone
from gamified_squad import GamifiedSquad
from agent import CustomAgent
import generic
import evaluate
SAVE_CHECKPOINT = 100000
def train():
time_1 = datetime.datetime.now()
config = generic.load_config()
env =... | [
"evaluate.evaluate"
] | [((252, 275), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (273, 275), False, 'import datetime\n'), ((289, 310), 'generic.load_config', 'generic.load_config', ([], {}), '()\n', (308, 310), False, 'import generic\n'), ((321, 342), 'gamified_squad.GamifiedSquad', 'GamifiedSquad', (['config'], {}), ... |
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import shutil
import time
import torch
from utils import util
from evaluate import MultiWozEvaluator
from model.model import Model
parser = argparse.ArgumentParser(descriptio... | [
"evaluate.MultiWozEvaluator"
] | [((286, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""S2S"""'}), "(description='S2S')\n", (309, 328), False, 'import argparse\n'), ((1679, 1707), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1696, 1707), False, 'import torch\n'), ((1718, 1762), ... |
import argparse
import os
import torch
import yaml
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.model import get_vocoder, get_param_num
from utils.tools import to_device, log, synth_one_sample
from model im... | [
"evaluate.evaluate"
] | [((740, 825), 'dataset.Dataset', 'Dataset', (['"""train.txt"""', 'preprocess_config', 'train_config'], {'sort': '(True)', 'drop_last': '(True)'}), "('train.txt', preprocess_config, train_config, sort=True, drop_last=True\n )\n", (747, 825), False, 'from dataset import Dataset\n'), ((1141, 1245), 'torch.utils.data.Da... |
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
from skimage.segmentation.random_walker_segmentation import random_walker
from tqdm import tqdm
import torchio
import torch
def compute_predictions(image_path, mask_path, gt_path, save_path, nr_modalities, class_labels, resize... | [
"evaluate.evaluate"
] | [((432, 463), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (452, 463), False, 'from i3Deep import utils\n'), ((1785, 1827), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (1793, 1827), False, 'from ... |
import random
import os
import sys
from models.bert import BERT_Model
from models.bilstm_crf_ import BiLSTM_CRF_Model
from data import build_corpus
from config import ModelPathConfig,ResultPathConfig
from datetime import datetime
from utils import extend_map_bert,save_model,load_model,extend_map,add_label_for_lstmcrf
... | [
"evaluate.unitstopd",
"evaluate.evaluate_single_label",
"evaluate.evaluate_entity_label"
] | [((504, 540), 'os.path.exists', 'os.path.exists', (['ModelPathConfig.bert'], {}), '(ModelPathConfig.bert)\n', (518, 540), False, 'import os\n'), ((609, 630), 'data.build_corpus', 'build_corpus', (['"""train"""'], {}), "('train')\n", (621, 630), False, 'from data import build_corpus\n'), ((670, 690), 'data.build_corpus'... |
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader... | [
"evaluate.validate_chairs",
"evaluate.validate_kitti",
"evaluate.validate_sintel"
] | [((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((14695, 14726), 'datasets.fetch_dataloader', 'datasets.fetch_dataloader', (['args'], {}), '(args)\n', (14720, 14726), False, 'import datasets\n'), ((17465, 17490), 'argparse.ArgumentParser', 'argpars... |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from evaluate import evaluate
from utils import get_data, tf_melspectogram
from shallow_nn import shallow_nn
from deep_nn import deep_nn
from sh... | [
"evaluate.evaluate"
] | [((461, 569), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epochs"""', '(100)', '"""Number of mini-batches to train on. (default: %(default)d)"""'], {}), "('epochs', 100,\n 'Number of mini-batches to train on. (default: %(default)d)')\n", (488, 569), True, 'import tensorflow as tf\n'),... |
from implicit_neural_networks import IMLP
import torch
import torch.optim as optim
import numpy as np
from evaluate import evaluate_model
from datetime import datetime
from loss_utils import get_gradient_loss, get_rigidity_loss, \
get_optical_flow_loss, get_optical_flow_alpha_loss
from unwrap_utils import get_tupl... | [
"evaluate.evaluate_model"
] | [((659, 683), 'numpy.int64', 'np.int64', (["config['resx']"], {}), "(config['resx'])\n", (667, 683), True, 'import numpy as np\n'), ((695, 719), 'numpy.int64', 'np.int64', (["config['resy']"], {}), "(config['resy'])\n", (703, 719), True, 'import numpy as np\n'), ((893, 927), 'numpy.int64', 'np.int64', (["config['evalua... |
'''Train CIFAR10/100 with PyTorch using standard Contrastive Learning. This script tunes the L2 reg weight of the
final classifier.'''
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import math
import os
import argparse
from models import *
from configs import get_datasets
from evaluate impor... | [
"evaluate.encode_feature_averaging",
"evaluate.train_clf"
] | [((368, 447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Final evaluation with feature averaging."""'}), "(description='Final evaluation with feature averaging.')\n", (391, 447), False, 'import argparse\n'), ((1181, 1208), 'os.path.isdir', 'os.path.isdir', (['"""checkpoint"""'], {}),... |
import os, sys
root_path = os.path.realpath(__file__).split('/evaluate/multipose_coco_eval.py')[0]
os.chdir(root_path)
sys.path.append(root_path)
from network.posenet import poseNet
from evaluate.tester import Tester
backbone = 'resnet101'
# Set Training parameters
params = Tester.TestParams()
params.subnet_name = '... | [
"evaluate.tester.Tester",
"evaluate.tester.Tester.TestParams"
] | [((99, 118), 'os.chdir', 'os.chdir', (['root_path'], {}), '(root_path)\n', (107, 118), False, 'import os, sys\n'), ((119, 145), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (134, 145), False, 'import os, sys\n'), ((278, 297), 'evaluate.tester.Tester.TestParams', 'Tester.TestParams', ([], ... |
import torch
from torch import nn
from Fashion_Mnist import load_data_fashion_mnist
from evaluate import Accumulator, accurate_num, evaluate_accuracy
net = nn.Sequential(nn.Flatten(),
nn.Linear(784,512), nn.BatchNorm1d(512),
nn.ReLU(),
nn.Linear(512, ... | [
"evaluate.Accumulator",
"evaluate.evaluate_accuracy",
"evaluate.accurate_num"
] | [((874, 895), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (893, 895), False, 'from torch import nn\n'), ((988, 1023), 'Fashion_Mnist.load_data_fashion_mnist', 'load_data_fashion_mnist', (['batch_size'], {}), '(batch_size)\n', (1011, 1023), False, 'from Fashion_Mnist import load_data_fashion_mn... |
import argparse, os
import matplotlib
matplotlib.use('Agg')
import torch
from evaluate import evaluate_synthesis, evaluate_projection
import numpy as np
from synth.synthesize import create_synth
from utils.data import get_external_sounds
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=st... | [
"evaluate.evaluate_projection",
"evaluate.evaluate_synthesis"
] | [((38, 59), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (52, 59), False, 'import matplotlib\n'), ((247, 272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (270, 272), False, 'import argparse, os\n'), ((2635, 2688), 'torch.load', 'torch.load', (['args.model_path'], {'... |
import evaluate
import pandas as pd
import sys
import glob
sys.path.append('../gopher')
import utils
import numpy as np
import json
def get_runs(glob_pattern):
bin_run = {}
for run_dir in glob.glob(glob_pattern):
config = utils.get_config(run_dir)
if config['loss_fn']['value'] == 'poisson':
... | [
"evaluate.change_resolution",
"evaluate.get_performance"
] | [((59, 87), 'sys.path.append', 'sys.path.append', (['"""../gopher"""'], {}), "('../gopher')\n", (74, 87), False, 'import sys\n'), ((415, 446), 'utils.make_dir', 'utils.make_dir', (['"""inter_results"""'], {}), "('inter_results')\n", (429, 446), False, 'import utils\n'), ((481, 550), 'utils.collect_whole_testset', 'util... |
import logging
import numpy as np
import torch
from torch import nn
from anchor_based import anchor_helper
from anchor_based.dsnet import DSNet
from anchor_based.losses import calc_cls_loss, calc_loc_loss
from evaluate import evaluate
from helpers import data_helper, vsumm_helper, bbox_helper
logger = logging.getLog... | [
"evaluate.evaluate"
] | [((306, 325), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (323, 325), False, 'import logging\n'), ((644, 798), 'anchor_based.dsnet.DSNet', 'DSNet', ([], {'base_model': 'args.base_model', 'num_feature': 'args.num_feature', 'num_hidden': 'args.num_hidden', 'anchor_scales': 'args.anchor_scales', 'num_head'... |
import chess
import chess.pgn
from evaluate import Evaluator
from weightsHandler import WeightsHandler
import config
def main():
# Open PGN file with games database
gamesFile = open(config.GAMES_FILE_NAME)
# Initialize counter
gamesCounter = 0
# Initialize move selection module
# moveSelector... | [
"evaluate.Evaluator"
] | [((409, 475), 'evaluate.Evaluator', 'Evaluator', (['config.MAX_ITER_MTD', 'config.MAX_DEPTH', 'config.MAX_SCORE'], {}), '(config.MAX_ITER_MTD, config.MAX_DEPTH, config.MAX_SCORE)\n', (418, 475), False, 'from evaluate import Evaluator\n'), ((563, 591), 'weightsHandler.WeightsHandler', 'WeightsHandler', (['"""weights.py"... |
import evaluate
from formulas import jaccard, ochiai, tarantula, ample, wong1, wong2, wong3, op1, op2, gp_list, gpif, gpasgn, gpcall, gpseq
import math
import sys
def compare_formula(spectra_list, f1, f2):
f1_list = list(map(lambda sp : f1(sp[0], sp[1], sp[2], sp[3]), spectra_list))
f2_list = list(map(lambda s... | [
"evaluate.spectra_list"
] | [((1864, 1898), 'evaluate.spectra_list', 'evaluate.spectra_list', (['[data_path]'], {}), '([data_path])\n', (1885, 1898), False, 'import evaluate\n'), ((2784, 2818), 'evaluate.spectra_list', 'evaluate.spectra_list', (['[data_path]'], {}), '([data_path])\n', (2805, 2818), False, 'import evaluate\n')] |
# -*- coding: utf-8 -*-
# @Time : 2018/11/4 15:34
# @Author : QuietWoods
# @FileName: eval_full_model.py
# @Software: PyCharm
""" Evaluate the baselines ont ROUGE/METEOR"""
import argparse
import json
import os
from os.path import join, exists
from evaluate import eval_meteor, eval_rouge
try:
_DATA_DIR = os.... | [
"evaluate.eval_meteor",
"evaluate.eval_rouge"
] | [((515, 530), 'os.path.exists', 'exists', (['ref_dir'], {}), '(ref_dir)\n', (521, 530), False, 'from os.path import join, exists\n'), ((839, 931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate the output files for the RL full models"""'}), "(description=\n 'Evaluate the outp... |
import argparse
import os
import torch
import yaml
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.model import get_model, get_vocoder, get_param_num
from utils.tools import get_configs_of, to_device, log, synth_one_sampl... | [
"evaluate.evaluate"
] | [((641, 726), 'dataset.Dataset', 'Dataset', (['"""train.txt"""', 'preprocess_config', 'train_config'], {'sort': '(True)', 'drop_last': '(True)'}), "('train.txt', preprocess_config, train_config, sort=True, drop_last=True\n )\n", (648, 726), False, 'from dataset import Dataset\n'), ((930, 1034), 'torch.utils.data.Dat... |
import random
import numpy as np
import os
import logging
import torch
from utilities import get_device, current_utc_time
import pandas as pd
from imp import reload
from data_loader import get_loader, prepare_dataset
from transformers import AdamW, get_linear_schedule_with_warmup
from models import get_model
... | [
"evaluate.evaluate_model"
] | [((439, 454), 'imp.reload', 'reload', (['logging'], {}), '(logging)\n', (445, 454), False, 'from imp import reload\n'), ((583, 621), 'pandas.read_excel', 'pd.read_excel', (['"""./data/P1-Golden.xlsx"""'], {}), "('./data/P1-Golden.xlsx')\n", (596, 621), True, 'import pandas as pd\n'), ((704, 722), 'utilities.current_utc... |
from pathlib import Path
import random
from fire import Fire
from munch import Munch
import torch
import numpy as np
from config import config, debug_options
from dataset import get_iterator
from utils import wait_for_key, suppress_stdout
from train import train
from evaluate import evaluate
from infer import infer
... | [
"evaluate.evaluate"
] | [((1658, 1690), 'random.seed', 'random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (1669, 1690), False, 'import random\n'), ((1695, 1730), 'numpy.random.seed', 'np.random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (1709, 1730), True, 'import numpy as np\n'), ((1735, 1773), 'to... |
import subprocess as sp
import datetime
import os
from math import isclose
import sys
import pytest
import json
import argparse
sys.path.append("../../../DeepSpeedExamples/BingBertSquad")
import evaluate as eval
squad_dir = "/data/BingBertSquad"
base_dir = "../../../DeepSpeedExamples/BingBertSquad"
script_file_name ... | [
"evaluate.evaluate"
] | [((129, 188), 'sys.path.append', 'sys.path.append', (['"""../../../DeepSpeedExamples/BingBertSquad"""'], {}), "('../../../DeepSpeedExamples/BingBertSquad')\n", (144, 188), False, 'import sys\n'), ((1098, 1138), 'os.path.join', 'os.path.join', (['tmpdir', '"""temp_config.json"""'], {}), "(tmpdir, 'temp_config.json')\n",... |
"""
This tests how asymmetric window sizes affect the evaluation results on the development set.
"""
from config import base
import evaluate as e
config = base.get_config()
config['test_filepath'] = 'resources/test/teddev/data-with-doc.csv'
window_sizes = [(4, 0), (4, 1), (4, 2), (4, 3), (3, 4), (2, 4), (1, 4), (0,... | [
"evaluate.load_data",
"evaluate.evaluate"
] | [((158, 175), 'config.base.get_config', 'base.get_config', ([], {}), '()\n', (173, 175), False, 'from config import base\n'), ((556, 574), 'evaluate.evaluate', 'e.evaluate', (['config'], {}), '(config)\n', (566, 574), True, 'import evaluate as e\n'), ((591, 627), 'evaluate.load_data', 'e.load_data', (["config['test_fil... |
import os
from matplotlib.colors import from_levels_and_colors
import numpy as np
import argparse
from config import config
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu_num']
from Models import Resnet3DBuilder, resnet3d_model, densenet3d_model
from Models.training imp... | [
"evaluate.evaluate"
] | [((782, 803), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (801, 803), True, 'from keras import backend as K\n'), ((2403, 2439), 'os.path.join', 'os.path.join', (['projectRoot', '"""MSA.npy"""'], {}), "(projectRoot, 'MSA.npy')\n", (2415, 2439), False, 'import os\n'), ((2463, 2499), 'os.pa... |
import yaml
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from transformers.models.bert.tokenization_bert import BertTokenizer
from train_util import create_masks, create_train_data, seed_everything
from dataset import ChatDataSet, SampledDataLoader
from torch.opti... | [
"evaluate.evaluate"
] | [((2101, 2132), 'train_util.seed_everything', 'seed_everything', (["config['seed']"], {}), "(config['seed'])\n", (2116, 2132), False, 'from train_util import create_masks, create_train_data, seed_everything\n'), ((2145, 2201), 'transformers.models.bert.tokenization_bert.BertTokenizer.from_pretrained', 'BertTokenizer.fr... |
from typing import List
from numpy import ndarray
import matplotlib.pyplot as plt
import threading
# import tensorflow as tf
from PIL import Image, ImageTk
import PySimpleGUI as sg
from config import *
import evaluate
import utility
# Global variables
input_values = [] # Input values for evaluator (ex. image paths)... | [
"evaluate.Evaluator"
] | [((473, 490), 'threading.Event', 'threading.Event', ([], {}), '()\n', (488, 490), False, 'import threading\n'), ((534, 556), 'PySimpleGUI.theme', 'sg.theme', (['WINDOW_THEME'], {}), '(WINDOW_THEME)\n', (542, 556), True, 'import PySimpleGUI as sg\n'), ((572, 644), 'PySimpleGUI.Image', 'sg.Image', ([], {'size': '(900, 90... |
import argparse, torch, gc, os, random, json
from data import MyDataset, load_data, my_collate_fn, device, word2sememe, label_multihot, word2POS, word2chara, word2Cn, my_collate_fn_test
from model import Encoder
from tqdm import tqdm
from evaluate import evaluate, evaluate_test
import numpy as np
def main(epoch... | [
"evaluate.evaluate_test",
"evaluate.evaluate"
] | [((695, 808), 'data.load_data', 'load_data', (['hownet_file', 'sememe_file', 'word_index_file', 'word_vector_file', 'dictionary_file', 'word_cilinClass_file'], {}), '(hownet_file, sememe_file, word_index_file, word_vector_file,\n dictionary_file, word_cilinClass_file)\n', (704, 808), False, 'from data import MyDatas... |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from... | [
"evaluate.evaluate"
] | [((402, 429), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(-1)'}), '(dim=-1)\n', (421, 429), True, 'import torch.nn as nn\n'), ((801, 827), 'os.path.exists', 'os.path.exists', (['stats_path'], {}), '(stats_path)\n', (815, 827), False, 'import os\n'), ((2770, 2795), 'os.path.exists', 'os.path.exist... |
#!/usr/bin/env python
# <NAME> (<EMAIL>)
# April 2018
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, '..'))
from datasets import dataset
from datetime import datetime
from generate_outputs import *
from network impor... | [
"evaluate.evaluate"
] | [((181, 209), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""'], {}), "(BASE_DIR, '..')\n", (193, 209), False, 'import os, sys\n'), ((3418, 3442), 'tensorflow.train.Saver', 'tf.train.Saver', (['var_list'], {}), '(var_list)\n', (3432, 3442), True, 'import tensorflow as tf\n'), ((3455, 3498), 'tensorflow.train.ge... |
# Lint as: python3
"""Pipeline for training and evaluating a loudness predictor."""
import os
from typing import List
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from data_processing import get_datasets, get_testdata
from evaluate import evaluate, write_predi... | [
"evaluate.evaluate",
"evaluate.write_predictions"
] | [((392, 415), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (413, 415), True, 'import tensorflow.compat.v2 as tf\n'), ((438, 513), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""mode"""', '"""train"""', '"""Whether to train, test, or predict."""'], {}), "('mode', 'train... |
import keras.backend as K
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import FileStorageObserver
from utils.util import prepare_dataset, split_data
from train import train
from evaluate import evaluate
import tensorflow as tf
import GPy
import GPyOpt
... | [
"evaluate.evaluate"
] | [((402, 426), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (424, 426), True, 'import tensorflow as tf\n'), ((548, 578), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (572, 578), True, 'import keras.backend as K\n'), ((968, 1034), 'fu... |
from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import logging
from symbol.symbol_factory import get_symbol
def evaluate_net(net, path_imgre... | [
"evaluate.eval_metric.VOC07MApMetric",
"evaluate.eval_metric.MApMetric"
] | [((403, 411), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (409, 411), True, 'import mxnet as mx\n'), ((1895, 1916), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1914, 1916), False, 'import logging\n'), ((1930, 1949), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1947, 1949), False, 'impo... |
from evaluate import get_env, get_state_action_size, evaluate
from policy import NeuroevoPolicy
from evolutionary_methods import ga, ga_transfer_learning
from argparse import ArgumentParser
import numpy as np
import logging
import sys
use_tqdm = False
if "tqdm" in sys.modules:
use_tqdm = True
from tqdm import t... | [
"evaluate.get_state_action_size",
"evaluate.evaluate",
"evaluate.get_env"
] | [((375, 395), 'policy.NeuroevoPolicy', 'NeuroevoPolicy', (['s', 'a'], {}), '(s, a)\n', (389, 395), False, 'from policy import NeuroevoPolicy\n'), ((432, 461), 'evaluate.evaluate', 'evaluate', (['env', 'params', 'policy'], {}), '(env, params, policy)\n', (440, 461), False, 'from evaluate import get_env, get_state_action... |
#!/usr/bin/env python2
# Copyright (C) <2019> Intel Corporation
# SPDX-License-Identifier: MIT
# Author: <NAME>
import sys
import argparse
import os
import copy
import math
import pickle
import matplotlib.pyplot as plt
import evaluate
def update_results(path, gt_dict, seqs):
result_items = ['ate', 'ate_rmse', 'a... | [
"evaluate.evaluate",
"evaluate.parse_input"
] | [((471, 487), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (481, 487), False, 'import os\n'), ((4206, 4231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4229, 4231), False, 'import argparse\n'), ((2373, 2402), 'evaluate.parse_input', 'evaluate.parse_input', (['gt_file'], {}), '(g... |
import os
import json
from exp import ex
from args import get_args
from train import _train
from utils import wait_for_key, count_parameters
from evaluate import _evaluate
from infer import _infer
from vis_tsne import _tsne, _silhouette
from distance import _distance
from extract_keyword import extract_and_save_all
f... | [
"evaluate._evaluate"
] | [((615, 641), 'ckpt.get_model_ckpt', 'get_model_ckpt', (['model_name'], {}), '(model_name)\n', (629, 641), False, 'from ckpt import get_model_ckpt\n'), ((687, 717), 'logger.get_logger', 'get_logger', ([], {'log_file': 'no_logger'}), '(log_file=no_logger)\n', (697, 717), False, 'from logger import get_logger\n'), ((799,... |
import torch
import torch.nn as nn
from tqdm import tqdm
import numpy as np
import logging
from evaluate import eval_func, re_rank
from evaluate import euclidean_dist
from utils import AvgerageMeter
import os.path as osp
import os
from model import convert_model
from optim import make_optimizer, WarmupMultiStepLR
tr... | [
"evaluate.euclidean_dist"
] | [((782, 797), 'utils.AvgerageMeter', 'AvgerageMeter', ([], {}), '()\n', (795, 797), False, 'from utils import AvgerageMeter\n'), ((821, 836), 'utils.AvgerageMeter', 'AvgerageMeter', ([], {}), '()\n', (834, 836), False, 'from utils import AvgerageMeter\n'), ((916, 956), 'logging.getLogger', 'logging.getLogger', (['"""re... |
#libraries for transformers training (text)
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from transformers import AutoConfig, AutoTokenizer
from arguments import args
from dataset import TextDataset
from evaluate import e... | [
"evaluate.evaluate"
] | [((1506, 1540), 'tqdm.trange', 'trange', (['args.num_eps'], {'desc': '"""Epoch"""'}), "(args.num_eps, desc='Epoch')\n", (1512, 1540), False, 'from tqdm import tqdm, trange\n'), ((2817, 2838), 'torchaudio.load', 'torchaudio.load', (['path'], {}), '(path)\n', (2832, 2838), False, 'import torchaudio\n'), ((2856, 2923), 't... |
import unittest
import torch
from collections import OrderedDict
from evaluate.coco_eval import run_eval
from lib.network.rtpose_vgg import get_model, use_vgg
from lib.network.openpose import OpenPose_Model, use_vgg
from torch import load
with torch.autograd.no_grad():
weight_name = './network/weight/best_pose.pth... | [
"evaluate.coco_eval.run_eval"
] | [((245, 269), 'torch.autograd.no_grad', 'torch.autograd.no_grad', ([], {}), '()\n', (267, 269), False, 'import torch\n'), ((339, 362), 'torch.load', 'torch.load', (['weight_name'], {}), '(weight_name)\n', (349, 362), False, 'import torch\n'), ((389, 402), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (400... |
import argparse
from args import init_parser, post_processing
import numpy as np
from envs import make_env
# find the carla module
import os
import math
import random
import time
import torch
import shutil
parser = argparse.ArgumentParser(description='SPC')
init_parser(parser) # See `args.py` for default arguments... | [
"evaluate.evaluate_policy"
] | [((219, 261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SPC"""'}), "(description='SPC')\n", (242, 261), False, 'import argparse\n'), ((262, 281), 'args.init_parser', 'init_parser', (['parser'], {}), '(parser)\n', (273, 281), False, 'from args import init_parser, post_processing\n'),... |
from evaluate import Evaluation
from collections import Counter
import itertools
#create a class with all the characteristics of the hand
class PokerHand:
def __init__(
self,
hand,
first_card=None,
second_card=None,
third_card=None,
fourth_card=None,
fifth_card=None,
):
self.hand = hand
self.... | [
"evaluate.Evaluation"
] | [((2339, 2355), 'collections.Counter', 'Counter', (['values1'], {}), '(values1)\n', (2346, 2355), False, 'from collections import Counter\n'), ((2441, 2457), 'collections.Counter', 'Counter', (['values2'], {}), '(values2)\n', (2448, 2457), False, 'from collections import Counter\n'), ((1053, 1075), 'evaluate.Evaluation... |
import numpy as np
import random
import itertools as it
import pygame as pg
# Local import
import env
import agentsEnv as ag
import reward
import policyGradient as PG
from model import GeneratePolicyNet
from evaluate import Evaluate
from visualize import draw
def main():
# action space
actionSpace = [[10, 0... | [
"evaluate.Evaluate"
] | [((546, 593), 'agentsEnv.CheckBoundaryAndAdjust', 'ag.CheckBoundaryAndAdjust', (['xBoundary', 'yBoundary'], {}), '(xBoundary, yBoundary)\n', (571, 593), True, 'import agentsEnv as ag\n'), ((623, 643), 'numpy.array', 'np.array', (['[180, 180]'], {}), '([180, 180])\n', (631, 643), True, 'import numpy as np\n'), ((671, 69... |
import os
import logging
import tempfile
import argparse
import numpy as np
import torch
import torchmodels
import torch.optim as op
import torch.utils.data as td
import utils
import models
import models.jlu
import dataset
import evaluate
import inference
from . import embeds
MODES = ["word", "label", "intent"]
par... | [
"evaluate.evaluate"
] | [((326, 376), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'fromfile_prefix_chars': '"""@"""'}), "(fromfile_prefix_chars='@')\n", (349, 376), False, 'import argparse\n'), ((498, 541), 'utils.add_logging_arguments', 'utils.add_logging_arguments', (['group', '"""train"""'], {}), "(group, 'train')\n", (525,... |
import tensorflow as tf
from PIL import Image
import numpy as np
import time
from tqdm import tqdm
from nltk.translate import bleu
from nltk.translate.bleu_score import sentence_bleu
from evaluate import evaluate, bleu_score
from model import Attention, CNN_Encoder, RNN_Decoder
#from loss import loss_function
from pre... | [
"evaluate.evaluate"
] | [((728, 752), 'data_download.data_download', 'data_download', ([], {'data': '(True)'}), '(data=True)\n', (741, 752), False, 'from data_download import data_download\n'), ((807, 846), 'preprocessing.datalimit', 'datalimit', (['limit', 'annotation_file', 'PATH'], {}), '(limit, annotation_file, PATH)\n', (816, 846), False... |
'''train the model'''
import argparse
import logging
import os
import glob
from model.data_loader import *
from model.x2net import x2Net
from model.x3net import x3Net
from model.x4net import x4Net
from model.loss_fn import loss_fn
from model.metrics import metrics
from evaluate import evaluate
import utils
import nu... | [
"evaluate.evaluate"
] | [((403, 428), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (426, 428), False, 'import argparse\n'), ((1673, 1695), 'utils.RunningAverage', 'utils.RunningAverage', ([], {}), '()\n', (1693, 1695), False, 'import utils\n'), ((3073, 3123), 'logging.info', 'logging.info', (["('- Train metrics: ' +... |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
sys.path.append("..")
mpl.use('tkagg') # issues with Big Sur
import matplotlib.pyplot as plt
from strategy.williams_R import williamsR
from backtest import Backtest
from evaluate import SharpeRatio, MaxDrawdown,... | [
"evaluate.MaxDrawdown",
"evaluate.CAGR",
"evaluate.SharpeRatio"
] | [((107, 128), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (122, 128), False, 'import sys\n'), ((129, 145), 'matplotlib.use', 'mpl.use', (['"""tkagg"""'], {}), "('tkagg')\n", (136, 145), True, 'import matplotlib as mpl\n'), ((344, 471), 'pandas.read_csv', 'pd.read_csv', (['"""../../database/mic... |
import tensorflow as tf
import utilities
import visualize
import evaluate
if __name__ == '__main__':
train_dataset, test_dataset, encoder = utilities.load_data()
model = tf.keras.Sequential([
encoder,
tf.keras.layers.Embedding(
input_dim=len(encoder.get_vocabulary()),
... | [
"evaluate.predict"
] | [((146, 167), 'utilities.load_data', 'utilities.load_data', ([], {}), '()\n', (165, 167), False, 'import utilities\n'), ((911, 966), 'visualize.display_results', 'visualize.display_results', (['test_loss', 'test_acc', 'history'], {}), '(test_loss, test_acc, history)\n', (936, 966), False, 'import visualize\n'), ((972, ... |
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utilities import (crea... | [
"evaluate.Evaluator",
"evaluate.StatisticsContainer"
] | [((40, 77), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../utils"""'], {}), "(sys.path[0], '../utils')\n", (52, 77), False, 'import os\n'), ((1793, 1819), 'utilities.get_labels', 'get_labels', (['taxonomy_level'], {}), '(taxonomy_level)\n', (1803, 1819), False, 'from utilities import create_folder, get_filenam... |
#!/usr/bin/env python
from __future__ import print_function, division
from evaluate import Evaluation
import math
import heapq
import logging
def logger():
return logging.getLogger(__name__)
def expectimax(game, placements, feature_weights, beam = 1, return_all = False):
eval = Evaluation(game, f... | [
"evaluate.Evaluation"
] | [((178, 205), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (195, 205), False, 'import logging\n'), ((302, 335), 'evaluate.Evaluation', 'Evaluation', (['game', 'feature_weights'], {}), '(game, feature_weights)\n', (312, 335), False, 'from evaluate import Evaluation\n'), ((1336, 1383), 'h... |
import os
import sys
import time
import json
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, sampler
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import cv2
import math
from argument import get_args
from backbone import darknet53
from dataset import BOP_D... | [
"evaluate.evaluate"
] | [((781, 801), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (798, 801), False, 'import torch\n'), ((802, 819), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (816, 819), True, 'import numpy as np\n'), ((1800, 1815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1813, 1815), Fa... |
from data import dev_data, sample_num
from optimizer import *
from feature import Features, FeatureVector
from model import write_predictions
from evaluate import evaluate
def train(
data,
feature_names,
tagset,
epochs,
optimizer,
score_func=perceptron_score,
step_size=1,
):
"""
Tr... | [
"evaluate.evaluate"
] | [((786, 803), 'feature.FeatureVector', 'FeatureVector', (['{}'], {}), '({})\n', (799, 803), False, 'from feature import Features, FeatureVector\n'), ((1337, 1402), 'evaluate.evaluate', 'evaluate', (['dev_data', 'parameters', 'feature_names', 'tagset', 'score_func'], {}), '(dev_data, parameters, feature_names, tagset, s... |
from typing import Iterable, List
import pysam
from evaluate.classification import (
Classification,
RecallClassification,
PrecisionClassification,
)
class Classifier:
def __init__(self, sam: Iterable[pysam.AlignedSegment] = None, name: str = ""):
if sam is None:
sam = []
... | [
"evaluate.classification.RecallClassification",
"evaluate.classification.Classification",
"evaluate.classification.PrecisionClassification"
] | [((682, 704), 'evaluate.classification.Classification', 'Classification', (['record'], {}), '(record)\n', (696, 704), False, 'from evaluate.classification import Classification, RecallClassification, PrecisionClassification\n'), ((801, 829), 'evaluate.classification.RecallClassification', 'RecallClassification', (['rec... |
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import torch.nn.utils.prune as prune
import numpy as np
import gin
import sys
sys.path.append('../src')
from models import LeNetFC, LeNetConv, Conv2
from train import train
from evaluate import test
from prepare_data import load_mnis... | [
"evaluate.test"
] | [((164, 189), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (179, 189), False, 'import sys\n'), ((1900, 1943), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (1912, 1943), False, 'import torch\n'), ((2072, 2115), 'torch.opt... |
""" Evaluate the baselines on ROUGE"""
import json
import os
from os.path import join, exists
import argparse
from evaluate import eval_rouge
def main(dec_dir, ref_dir):
dec_pattern = r'test-(\d+).txt'
ref_pattern = 'test-#ID#.txt'
output = eval_rouge(dec_pattern, dec_dir, ref_pattern, ref_dir)
metric ... | [
"evaluate.eval_rouge"
] | [((254, 308), 'evaluate.eval_rouge', 'eval_rouge', (['dec_pattern', 'dec_dir', 'ref_pattern', 'ref_dir'], {}), '(dec_pattern, dec_dir, ref_pattern, ref_dir)\n', (264, 308), False, 'from evaluate import eval_rouge\n'), ((479, 547), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate s... |
import sys
import numpy as np
from os.path import join
from copy import deepcopy
import torch
from torch.nn.functional import softmax
from torch.nn.utils import clip_grad_norm_
from transformers import BertTokenizer, BertForQuestionAnswering
from utils import AdamW
from data import get_dataloader
from evaluate import... | [
"evaluate.metric_max_over_ground_truths"
] | [((417, 435), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (431, 435), True, 'import numpy as np\n'), ((436, 457), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (453, 457), False, 'import torch\n'), ((476, 544), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_... |
'''
* @author Waileinsamkeit
* @email <EMAIL>
* @create date 2020-08-07 15:52:06
* @modify date 2020-08-07 15:52:06
'''
import random
import pandas as pd
import time
import os
from tabulate import tabulate
from utils import extend_map,add_label_for_lstmcrf,save_model,flatten_lists,load_model
from models.hmm impo... | [
"evaluate.unitstopd",
"evaluate.evaluate_single_label",
"evaluate.evaluate_entity_label"
] | [((701, 726), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (724, 726), False, 'import torch\n'), ((767, 778), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (776, 778), False, 'import os\n'), ((732, 756), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(1)'], {}), '(1)\n', (753, 756), False... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
last mod 7/2/18
usage for new detector:
first disable metrics check
min_sensor_prob to <<0
use simple scoreToProb
use the plots to figure out a good scoreToProb function
then you can run metrics check
current avg precisions:
.5 iou -- .855, .783, .774... | [
"evaluate.soMetricIoU",
"evaluate.MetricAvgPrec"
] | [((2937, 2952), 'evaluate.MetricAvgPrec', 'MetricAvgPrec', ([], {}), '()\n', (2950, 2952), False, 'from evaluate import MetricAvgPrec, soMetricIoU\n'), ((5510, 5539), 'numpy.percentile', 'np.percentile', (['allscores', '(0.5)'], {}), '(allscores, 0.5)\n', (5523, 5539), True, 'import numpy as np\n'), ((5554, 5584), 'num... |
import torch
import time
import numpy as np
from torch import nn, optim
import torch.utils.data as data
import torch.nn.utils.rnn as rnn_utils
from data_process import MyDataset
from evaluate import valid_evaluate, test_evaluate
def train_epoch(model, train_data, loss_weights, optimizer, epoch, config, padded_quotes,... | [
"evaluate.test_evaluate",
"evaluate.valid_evaluate"
] | [((368, 379), 'time.time', 'time.time', ([], {}), '()\n', (377, 379), False, 'import time\n'), ((480, 605), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'collate_fn': 'train_data.my_collate', 'batch_size': 'config.batch_size', 'num_workers': '(0)', 'shuffle': '(True)'}), '(train_data, collate_fn=... |
import torch
from torch.utils import data
from utils import config
from network.model import Model
from network import dataset
from evaluate import evaluate
from sklearn.metrics import recall_score
from focal_loss.focal_loss import FocalLoss
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
def ... | [
"evaluate.evaluate"
] | [((495, 561), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'comment': 'f"""LR_{config.LR}_BATCH_{config.BATCH_SIZE}"""'}), "(comment=f'LR_{config.LR}_BATCH_{config.BATCH_SIZE}')\n", (508, 561), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((609, 636), 'focal_loss.focal_loss.FocalLos... |
import utils
import logging
import torch
import torch.nn as nn
from optimization import BertAdam
from tqdm import trange
from evaluate import evaluate
from dataloader import get_dataloader
from utils.logger import save_result_to_csv_and_json
def train(model, config):
    # Prepare optimizer (don't understand this part yet; leave it for now)
param_optim... | [
"evaluate.evaluate"
] | [((1377, 1408), 'dataloader.get_dataloader', 'get_dataloader', (['config', '"""train"""'], {}), "(config, 'train')\n", (1391, 1408), False, 'from dataloader import get_dataloader\n'), ((1435, 1465), 'dataloader.get_dataloader', 'get_dataloader', (['config', '"""test"""'], {}), "(config, 'test')\n", (1449, 1465), False,... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from transformers import WEIGHTS_NAME, CONFIG_NAME
from config... | [
"evaluate.Eval"
] | [((5095, 5103), 'config.Config', 'Config', ([], {}), '()\n', (5101, 5103), False, 'from config import Config\n'), ((5390, 5428), 'utils.SemEvalDataLoader', 'SemEvalDataLoader', (['rel2id', 'user_config'], {}), '(rel2id, user_config)\n', (5407, 5428), False, 'from utils import RelationLoader, SemEvalDataLoader\n'), ((67... |
from __future__ import print_function
from __future__ import division
import threading
import os
import argparse
import time
import tensorflow as tf
import itertools
from atari_env import make_env, S_DIM, A_DIM
from net import Net
from worker import Worker
from utils import print_params_nums
from evaluate import Eval... | [
"evaluate.Evaluate"
] | [((561, 578), 'itertools.count', 'itertools.count', ([], {}), '()\n', (576, 578), False, 'import itertools\n'), ((612, 645), 'net.Net', 'Net', (['S_DIM', 'A_DIM', '"""global"""', 'args'], {}), "(S_DIM, A_DIM, 'global', args)\n", (615, 645), False, 'from net import Net\n'), ((985, 1014), 'tensorflow.train.Saver', 'tf.tr... |
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import copy
import argparse
import logging
import gc
import datetime
import pprint
from collections import OrderedDict, defaultdict
from functools import partial
from torch.optim... | [
"evaluate.evaluate_precision_recall_f1",
"evaluate.evaluate_accuracy"
] | [((5760, 5772), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5770, 5772), False, 'import gc\n'), ((9457, 9496), 'numpy.array', 'np.array', (["results['prediction']['pred']"], {}), "(results['prediction']['pred'])\n", (9465, 9496), True, 'import numpy as np\n'), ((9616, 9662), 'numpy.array', 'np.array', (["results['da... |
#!/usr/bin/python
import sys,os
import argparse
import glob
from generate_windows import Generate_windows
from evaluate import Evaluate
from scanTranscriptome_forward import Scan_Forward
from scanTranscriptome_reverse import Scan_Backward
from postprocess import Postprocess
#
from multiprocessing import Pool
import da... | [
"evaluate.Evaluate"
] | [((354, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (377, 379), False, 'import argparse\n'), ((3810, 3833), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3831, 3833), False, 'import datetime\n'), ((3953, 3976), 'datetime.datetime.now', 'datetime.datetime.now', ([... |
#!/usr/bin/env python2
from __future__ import print_function
import math
import os
import random
import sys
from joblib import Parallel, delayed
import numpy as np
from tqdm import tqdm
import evaluate
import bo_target as bo_target_module
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_string('prog_file', '', 't... | [
"evaluate.get_parser"
] | [((280, 334), 'gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""prog_file"""', '""""""', '"""the prog file"""'], {}), "('prog_file', '', 'the prog file')\n", (300, 334), False, 'import gflags\n'), ((335, 430), 'gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""gold_prog_file"""', '""""""', '"""the file for list ... |
# Project hiatus
# main script with a parser for the model
# 12/10/2020
# <NAME>
# loading required packages
import os
import argparse
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
# for manual visualisation
from rasterio.plot import show
# putting the right work directory
... | [
"evaluate.evaluate_model"
] | [((320, 383), 'os.chdir', 'os.chdir', (['"""/home/adminlocal/Bureau/GIT/hiatus_change_detection"""'], {}), "('/home/adminlocal/Bureau/GIT/hiatus_change_detection')\n", (328, 383), False, 'import os\n'), ((502, 535), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (525, 535)... |
from pathlib import Path
import random
from fire import Fire
from munch import Munch
import torch
import numpy as np
from config import config, debug_options
from dataloader.dataset_multichoice import get_iterator
from utils import wait_for_key, suppress_stdout
from train import train
from evaluate import evaluate, ... | [
"evaluate.qa_similarity",
"evaluate.evaluate"
] | [((3035, 3067), 'random.seed', 'random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (3046, 3067), False, 'import random\n'), ((3072, 3107), 'numpy.random.seed', 'np.random.seed', (["args['random_seed']"], {}), "(args['random_seed'])\n", (3086, 3107), True, 'import numpy as np\n'), ((3112, 3150), 'to... |
import torch
import torch.nn as nn
from helper import *
from rnn import VanillaRNN
from gru import GruRNN
from evaluate import evaluate
def train(model, model_optimizer, inp, target):
hidden = model.init_hidden()
model.zero_grad()
loss = 0
for c in range(chunk_len):
output, hidden = model(inp[... | [
"evaluate.evaluate"
] | [((821, 869), 'rnn.VanillaRNN', 'VanillaRNN', (['input_size', 'hidden_size', 'output_size'], {}), '(input_size, hidden_size, output_size)\n', (831, 869), False, 'from rnn import VanillaRNN\n'), ((886, 930), 'gru.GruRNN', 'GruRNN', (['input_size', 'hidden_size', 'output_size'], {}), '(input_size, hidden_size, output_siz... |
import os
import torch
import shutil
import numpy as np
from utils import get_yaml_value, get_id, get_model_list
from evaluate import evaluate
from Preprocessing import Create_Testing_Datasets
from torchvision import models
from NetVLAD.netvlad import NetVLAD, EmbedNet
encoder = models.resnet18(pretrained=True)
base_... | [
"evaluate.evaluate"
] | [((282, 314), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (297, 314), False, 'from torchvision import models\n'), ((328, 475), 'torch.nn.Sequential', 'torch.nn.Sequential', (['encoder.conv1', 'encoder.bn1', 'encoder.relu', 'encoder.maxpool', 'encoder.layer1',... |
import pickle
import random
import pyro
import torch
from pyro.contrib.examples.util import print_and_log
from pyro.infer import SVI, JitTrace_ELBO, JitTraceEnum_ELBO, Trace_ELBO, TraceEnum_ELBO, config_enumerate
from pyro.optim import ClippedAdam
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
... | [
"evaluate.evaluate"
] | [((704, 729), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (727, 729), False, 'import torch\n'), ((1298, 1315), 'models.Generator', 'Generator', (['config'], {}), '(config)\n', (1307, 1315), False, 'from models import Generator\n'), ((1435, 1459), 'pyro.optim.ClippedAdam', 'ClippedAdam', (['a... |
from intervaltree import IntervalTree, Interval
import math
from evaluate.classification import Classification
from typing import TextIO, Type, Optional
import pysam
from evaluate.filter import Filter
class Masker(Filter):
def __init__(self, tree: IntervalTree = None):
if tree is None:
tree = ... | [
"evaluate.classification.Classification"
] | [((540, 554), 'intervaltree.IntervalTree', 'IntervalTree', ([], {}), '()\n', (552, 554), False, 'from intervaltree import IntervalTree, Interval\n'), ((955, 977), 'evaluate.classification.Classification', 'Classification', (['record'], {}), '(record)\n', (969, 977), False, 'from evaluate.classification import Classific... |
import h5py
from keras.models import load_model
from plot_history import plot_history
from evaluate import evaluate
import click
@click.command()
@click.argument('name')
def main(name):
print('Plotting statistics for Architecture:', name)
print('Loading history...')
h = h5py.File('history_{}.h5'.format(na... | [
"evaluate.evaluate"
] | [((132, 147), 'click.command', 'click.command', ([], {}), '()\n', (145, 147), False, 'import click\n'), ((149, 171), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (163, 171), False, 'import click\n'), ((557, 635), 'plot_history.plot_history', 'plot_history', (["h['loss']", "h['val_loss']", "h[... |
import tensorflow as tf
from .util import get_next_batch, get_batches
import numpy as np
import sys
sys.path.append('../')
from evaluate import Metrics
class BiLSTM(object):
def __init__(self, vocab_size, tag_size, batch_size = 64, lr = 0.001, iteration = 20, hidden_size = 128, embedding_size = 128):
... | [
"evaluate.Metrics"
] | [((104, 126), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (119, 126), False, 'import sys\n'), ((787, 853), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""input_x"""'}), "(dtype=tf.int32, shape=[None, None], name='input_x')\n", (801... |
import logging
import os
import sys
from datetime import datetime
import torch
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from utils import save_config_file, accuracy, save_checkpoint
from evaluate import Evaluator
root = logging.getLogger()
handler = logging.StreamHandler(sys.stdo... | [
"evaluate.Evaluator"
] | [((260, 279), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (277, 279), False, 'import logging\n'), ((290, 323), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (311, 323), False, 'import logging\n'), ((336, 409), 'logging.Formatter', 'logging.Formatter', (['"""%(... |
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from dataset import HANDataset
import torch
import torch.nn as nn
import time
import numpy as np
from config import Config
from tqdm import tqdm
import os
from pathlib import Path
from evaluate import evaluate
import... | [
"evaluate.evaluate"
] | [((1965, 2005), 'dataset.HANDataset', 'HANDataset', (['"""data/train/news_parsed.tsv"""'], {}), "('data/train/news_parsed.tsv')\n", (1975, 2005), False, 'from dataset import HANDataset\n'), ((2159, 2211), 'torch.utils.data.random_split', 'random_split', (['dataset', '(train_size, validation_size)'], {}), '(dataset, (tr... |
import numpy as np
import tensorflow as tf
from keras import initializers
from keras.models import Model
from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras import backend as K
from evaluate imp... | [
"evaluate.evaluate_model"
] | [((437, 484), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run DMF."""'}), "(description='Run DMF.')\n", (460, 484), False, 'import argparse\n'), ((2159, 2210), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""', 'name': '"""user_input"""'}), "(shape=(1,), dty... |
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
# from imcut.pycut import ImageGraphCut
from imcut.pycut import ImageGraphCut
from tqdm import tqdm
import copy
def compute_predictions(image_path, mask_path, gt_path, save_path, version, nr_modalities, class_labels, resize=Tr... | [
"evaluate.evaluate"
] | [((420, 451), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (440, 451), False, 'from i3Deep import utils\n'), ((3690, 3732), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (3698, 3732), False, 'from ... |
from .lstm import BiLSTM
import tensorflow as tf
from tensorflow.contrib import crf
from .util import get_batches
import sys
sys.path.append('../')
from evaluate import Metrics
class BiLSTM_CRF(object):
def __init__(self, vocab_size, tag_size, batch_size = 64, lr = 0.001, iteration = 30, hidden_size = 1... | [
"evaluate.Metrics"
] | [((130, 152), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (145, 152), False, 'import sys\n'), ((356, 380), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (378, 380), True, 'import tensorflow as tf\n'), ((1171, 1183), 'tensorflow.Session', 'tf.Session', ([], {})... |
import cv2
import torch
from network.rtpose_vgg import get_model, use_vgg
from evaluate.coco_eval import get_multiplier, get_outputs, handle_paf_and_heat
from openpose_utils import get_pose
from network.post import decode_pose
import argparse
import os
from pathlib import Path
parser = argparse.ArgumentParser(descript... | [
"evaluate.coco_eval.handle_paf_and_heat",
"evaluate.coco_eval.get_outputs",
"evaluate.coco_eval.get_multiplier"
] | [((288, 341), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Poses"""'}), "(description='Generate Poses')\n", (311, 341), False, 'import argparse\n'), ((514, 535), 'pathlib.Path', 'Path', (['args.train_path'], {}), '(args.train_path)\n', (518, 535), False, 'from pathlib import P... |
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
from evaluate import evaluate
from utils.utils import *
def train(model, train_data_loader, dev_data_loader, saver, total_epoch, lr, log_path, start_epoch=0):
f_log = open(log_path, 'w')
criterion = nn.CrossEntropyLoss()
op... | [
"evaluate.evaluate"
] | [((292, 313), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (311, 313), True, 'import torch.nn as nn\n'), ((1605, 1637), 'evaluate.evaluate', 'evaluate', (['model', 'dev_data_loader'], {}), '(model, dev_data_loader)\n', (1613, 1637), False, 'from evaluate import evaluate\n'), ((1091, 1111), 'tor... |
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvisio... | [
"evaluate.validate_chairs",
"evaluate.validate_kitti",
"evaluate.validate_sintel"
] | [((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((4252, 4290), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4272, 4290), True, 'import torch.nn as nn\n'), ((6681, 6719), 'torch.nn... |
import torch
import os
import numpy as np
import tqdm
import matplotlib.pyplot as plt
from PIL import Image
from dataset.NAIC_dataset import get_loaders, get_baseline_loader
from config import get_config
from models.model import build_model, get_model
from models.sync_bn.batchnorm import convert_model
from evaluate imp... | [
"evaluate.euclidean_dist",
"evaluate.cos_dist",
"evaluate.re_rank"
] | [((5154, 5166), 'config.get_config', 'get_config', ([], {}), '()\n', (5164, 5166), False, 'from config import get_config\n'), ((5257, 5299), 'os.path.join', 'os.path.join', (['config.dataset_root', '"""初赛训练集"""'], {}), "(config.dataset_root, '初赛训练集')\n", (5269, 5299), False, 'import os\n'), ((5369, 5562), 'dataset.NAIC... |
#pipenv run
import argparse
import os
import math
import json
from datetime import datetime
from models import models
from db import db, Result
from uuid import uuid4, UUID
from keras import backend as K
import numpy as np
import evaluate
from data_gen import data
from config import config
def test_model(model, tr... | [
"evaluate.get_labels",
"evaluate.get_results",
"evaluate.transform_binary_probabilities"
] | [((765, 804), 'evaluate.get_results', 'evaluate.get_results', (['model', 'validation'], {}), '(model, validation)\n', (785, 804), False, 'import evaluate\n'), ((876, 909), 'evaluate.get_results', 'evaluate.get_results', (['model', 'test'], {}), '(model, test)\n', (896, 909), False, 'import evaluate\n'), ((1786, 1829), ... |
"""Train the model"""
import argparse
import datetime
import os
import megengine as mge
# mge.core.set_option("async_level", 0)
from megengine.optimizer import Adam, MultiStepLR, LRScheduler
from megengine.autodiff import GradManager
import megengine.distributed as dist
from tqdm import tqdm
import dataset.data_loa... | [
"evaluate.evaluate"
] | [((542, 567), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (565, 567), False, 'import argparse\n'), ((1046, 1061), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1059, 1061), True, 'import megengine.distributed as dist\n'), ((2349, 2364), 'megengine.distributed.get_rank... |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright 2020 The HuggingFace Team. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Uses some code from
# https://github.com/huggingface/transformers/blob/master/examples/seq2seq/finetune_trainer.py
import argparse
import configpars... | [
"evaluate.get_avg_results",
"evaluate.evaluate",
"evaluate.print_results"
] | [((806, 831), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (829, 831), False, 'import torch\n'), ((890, 915), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (913, 915), False, 'import argparse\n'), ((2213, 2260), 'configparser.ConfigParser', 'configparser.ConfigParser... |
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter
from PIL import Image
import torch
from tqdm import tqdm
import os
from pathlib import Path
openpose_dir = Path('../pytorch_Realtime_Multi-Person_Pose_Estimation/')
import sys
sys.path.append(s... | [
"evaluate.coco_eval.get_outputs",
"evaluate.coco_eval.get_multiplier"
] | [((230, 287), 'pathlib.Path', 'Path', (['"""../pytorch_Realtime_Multi-Person_Pose_Estimation/"""'], {}), "('../pytorch_Realtime_Multi-Person_Pose_Estimation/')\n", (234, 287), False, 'from pathlib import Path\n'), ((897, 937), 'matplotlib.animation.FFMpegWriter', 'FFMpegWriter', ([], {'fps': 'fps', 'metadata': 'metadat... |
"""Trains off-policy algorithms, such as QMIX and IQL."""
import json
import os
import random
import sys
import time
sys.path.append('../env/')
import numpy as np
import tensorflow as tf
import alg_iql
import alg_qmix
import env_wrapper
import evaluate
import replay_buffer
def train_function(config):
config_... | [
"evaluate.test"
] | [((119, 145), 'sys.path.append', 'sys.path.append', (['"""../env/"""'], {}), "('../env/')\n", (134, 145), False, 'import sys\n'), ((444, 464), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (458, 464), True, 'import numpy as np\n'), ((469, 486), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\... |
'''Module for boilerplate code around the training loop.
Defines an abstract base class `Model` for Bayesian word embedding models, a
function `train()` that runs the training loop, and a function `add_cli_args()`
that adds command line arguments to control the training loop (e.g., the number
of training steps and the... | [
"evaluate.Evaluator",
"evaluate.add_cli_args"
] | [((1296, 1337), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'args.rng_seed'}), '(seed=args.rng_seed)\n', (1317, 1337), True, 'import numpy as np\n'), ((4431, 4480), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['args.output', 'session.graph'], {}), '(args.output, session.graph)\n', ... |
import argparse
import json
import numpy as np
import os
import torch
from datetime import datetime
from pathlib import Path
from sklearn import metrics
from evaluate import run_model
from loader import load_data
from model import TripleMRNet
def train(rundir, task, backbone, epochs, learning_rate, use_gpu,
... | [
"evaluate.run_model"
] | [((380, 404), 'loader.load_data', 'load_data', (['task', 'use_gpu'], {}), '(task, use_gpu)\n', (389, 404), False, 'from loader import load_data\n'), ((422, 452), 'model.TripleMRNet', 'TripleMRNet', ([], {'backbone': 'backbone'}), '(backbone=backbone)\n', (433, 452), False, 'from model import TripleMRNet\n'), ((489, 509... |
"""Runner for flow/utils/leaderboard/evaluate.py/evaluate_policy."""
from solution import BENCHMARK, get_actions, get_states
from evaluate import evaluate_policy
# Evaluate the solution
mean, stdev = evaluate_policy(
benchmark=BENCHMARK, _get_actions=get_actions, _get_states=get_states)
# Print results
print(mean... | [
"evaluate.evaluate_policy"
] | [((202, 293), 'evaluate.evaluate_policy', 'evaluate_policy', ([], {'benchmark': 'BENCHMARK', '_get_actions': 'get_actions', '_get_states': 'get_states'}), '(benchmark=BENCHMARK, _get_actions=get_actions, _get_states=\n get_states)\n', (217, 293), False, 'from evaluate import evaluate_policy\n')] |
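This is the one row whose code and extraction both survive untruncated, so it is a useful reference for the `extract_api` format: each tuple carries the call's character span, its dotted name, its keyword arguments, the matched source slice, and the import it resolves to. How the dataset actually computes these tuples is not documented here; the sketch below is one plausible reconstruction using Python's `ast` module, and its offsets apply only to the embedded `SOURCE` string.

```python
import ast

# Toy source mirroring the row above; offsets refer to THIS string,
# not to the dataset's original file.
SOURCE = '''from evaluate import evaluate_policy
mean, stdev = evaluate_policy(benchmark="B", _get_actions=None, _get_states=None)
'''

# Character offset at which each source line starts, so an AST
# (lineno, col_offset) pair can be converted into a flat character span.
line_starts = [0]
for line in SOURCE.splitlines(keepends=True):
    line_starts.append(line_starts[-1] + len(line))

def dotted_name(node):
    """Rebuild a dotted call target such as 'evaluate.evaluate_policy'."""
    if isinstance(node, ast.Name):
        return node.id
    if isinstance(node, ast.Attribute):
        return f"{dotted_name(node.value)}.{node.attr}"
    return ""

for node in ast.walk(ast.parse(SOURCE)):
    if isinstance(node, ast.Call):
        start = line_starts[node.func.lineno - 1] + node.func.col_offset
        name = dotted_name(node.func)
        kwargs = [kw.arg for kw in node.keywords]
        print((start, start + len(name), name, kwargs))
# -> (52, 67, 'evaluate_policy', ['benchmark', '_get_actions', '_get_states'])
```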
from pathlib import Path
import numpy as np
import scipy
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler... | [
"evaluate.evaluate"
] | [((505, 526), 'pathlib.Path', 'Path', (['"""../data/clean"""'], {}), "('../data/clean')\n", (509, 526), False, 'from pathlib import Path\n'), ((1334, 1354), 'utils.loadClean', 'loadClean', (['input_dir'], {}), '(input_dir)\n', (1343, 1354), False, 'from utils import loadClean, writeResults, preprocessClfParser\n'), ((1... |
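The imports extracted above outline a classic scikit-learn text pipeline: TF-IDF features feeding an estimator, with a train/test split. The repository's `loadClean` and `preprocessClfParser` utilities are not shown, so here is a self-contained sketch of that pattern on toy data; the tiny corpus and the choice of `LogisticRegression` are assumptions for illustration.

```python
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

# Tiny hypothetical corpus standing in for the repo's cleaned data.
docs = ["good movie", "bad movie", "great film", "terrible film"]
labels = [1, 0, 1, 0]

X_train, X_test, y_train, y_test = train_test_split(
    docs, labels, test_size=0.5, random_state=0, stratify=labels)

# Counts -> TF-IDF weighting -> linear classifier, as one pipeline object.
clf = make_pipeline(CountVectorizer(), TfidfTransformer(), LogisticRegression())
clf.fit(X_train, y_train)
print("held-out accuracy:", clf.score(X_test, y_test))
```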
import os
import os.path as osp
import argparse
import numpy as np
import json
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch... | [
"evaluate.language_eval"
] | [((1018, 1029), 'time.time', 'time.time', ([], {}), '()\n', (1027, 1029), False, 'import time\n'), ((1039, 1095), 'coco_loader.coco_loader', 'coco_loader', (['args.coco_root'], {'split': 'split', 'ncap_per_img': '(1)'}), '(args.coco_root, split=split, ncap_per_img=1)\n', (1050, 1095), False, 'from coco_loader import co... |
import tensorflow as tf
from train import train
from evaluate import evaluate
if __name__ == '__main__':
# Parameters
# Data loading parameters
tf.app.flags.DEFINE_float("dev_sample_rate", .05,
"Percentage of the training data to use for validation(default:0.05)")
tf.app.f... | [
"evaluate.evaluate"
] | [((157, 282), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""dev_sample_rate"""', '(0.05)', '"""Percentage of the training data to use for validation(default:0.05)"""'], {}), "('dev_sample_rate', 0.05,\n 'Percentage of the training data to use for validation(default:0.05)')\n", (182, 282), T... |
'''
@Author: dzy
@Date: 2021-09-13 11:07:48
@LastEditTime: 2021-09-26 20:25:17
@LastEditors: dzy
@Description: Helper functions or classes used for the model.
@FilePath: /JDProductSummaryGeneration/src/train.py
'''
import pickle
import os
import sys
import pathlib
import numpy as np
from torch import optim
from torch... | [
"evaluate.evaluate"
] | [((536, 561), 'sys.path.append', 'sys.path.append', (['abs_path'], {}), '(abs_path)\n', (551, 561), False, 'import sys\n'), ((1173, 1222), 'torch.device', 'torch.device', (["('cuda' if config.is_cuda else 'cpu')"], {}), "('cuda' if config.is_cuda else 'cpu')\n", (1185, 1222), False, 'import torch\n'), ((1236, 1246), 'm... |
import os
import re
import sys
sys.path.append('.')
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from sci... | [
"evaluate.coco_eval.get_outputs"
] | [((31, 51), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (46, 51), False, 'import sys\n'), ((2565, 2590), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2588, 2590), False, 'import argparse\n'), ((3055, 3079), 'lib.config.update_config', 'update_config', (['cfg', 'args']... |
"""
Model training and evaluation.
The model is evaluated when (1) loss < 0.001 or (2) the number of
epochs is reached. The best model is saved in the experiment folder.
"""
from evaluate import evaluate
from time import time
import utils as ut
import numpy as np
import csv
import os
#from tqdm import tqdm
def train... | [
"evaluate.evaluate"
] | [((1764, 1782), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (1771, 1782), True, 'import numpy as np\n'), ((2145, 2151), 'time.time', 'time', ([], {}), '()\n', (2149, 2151), False, 'from time import time\n'), ((1464, 1483), 'numpy.mean', 'np.mean', (['loss_batch'], {}), '(loss_batch)\n', (1471, 1483),... |
"""Train Neural Network
Adapted from CS230 code examples for computer vision.
Source: https://github.com/cs230-stanford/cs230-code-examples/tree/master/pytorch
"""
import argparse
import logging
import os
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from tqdm import... | [
"evaluate.evaluate"
] | [((544, 569), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (567, 569), False, 'import argparse\n'), ((2222, 2253), 'deep_net_utils.RunningAverage', 'deep_net_utils.RunningAverage', ([], {}), '()\n', (2251, 2253), False, 'import deep_net_utils\n'), ((4205, 4255), 'logging.info', 'logging.info'... |
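The extraction above references `deep_net_utils.RunningAverage`, a small helper from the CS230 example code this row says it adapts. Its implementation is not visible in the row, but the canonical version is just a mean accumulator; the sketch below assumes that shape.

```python
class RunningAverage:
    """Accumulates a running mean of a scalar quantity (e.g., batch loss)."""

    def __init__(self):
        self.steps = 0
        self.total = 0.0

    def update(self, val: float) -> None:
        self.total += val
        self.steps += 1

    def __call__(self) -> float:
        # Avoid division by zero before the first update.
        return self.total / self.steps if self.steps else 0.0

# Typical use inside a training loop:
#   loss_avg = RunningAverage()
#   loss_avg.update(loss.item())
#   print(f"avg loss: {loss_avg():.4f}")
```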
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: penghuailiang
# @Date : 2019-09-20
import utils
import ops
import logging
import torch
import align
import cv2
import os
import util.logit as log
import numpy as np
from dataset import FaceDataset
from imitator import Imitator
from extractor import Extractor
fr... | [
"evaluate.Evaluate"
] | [((552, 577), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (575, 577), False, 'import torch\n'), ((582, 660), 'util.logit.info', 'log.info', (['"""neural face network use gpu: %s"""', '(support_gpu and arguments.use_gpu)'], {}), "('neural face network use gpu: %s', support_gpu and arguments.u... |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | [
"evaluate.evaluate"
] | [((623, 645), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (638, 645), False, 'import sys\n'), ((1134, 1167), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1157, 1167), False, 'import warnings\n'), ((1211, 1241), 'paddle.set_device', 'paddle.se... |
import numpy as np
from rebuild import rebuild
from render import render
from depth import depth
from evaluate import evaluate
from PIL import Image
def rendering(dir):
#z has the same scale as x and y; its size matches the test image, with positions mapping one-to-one to the test image's pixels
#imgs is the rendering result; its size matches the test image, with positions mapping one-to-one to the test image's pixels
train_lvectors = np.zeros([7, 3])  # the direction of light
... | [
"evaluate.evaluate"
] | [((277, 293), 'numpy.zeros', 'np.zeros', (['[7, 3]'], {}), '([7, 3])\n', (285, 293), True, 'import numpy as np\n'), ((677, 694), 'numpy.zeros', 'np.zeros', (['[10, 3]'], {}), '([10, 3])\n', (685, 694), True, 'import numpy as np\n'), ((1073, 1096), 'numpy.zeros', 'np.zeros', (['[7, 168, 168]'], {}), '([7, 168, 168])\n',... |
import numpy as np
import os
import pickle
import copy
import json
import warnings
warnings.filterwarnings("ignore")
os.environ["CUDA_VISIBLE_DEVICES"]="6,7"
from load_img import Load_from_Folder, Load_Images
from evaluate import Time, MSE, PSNR
from MBMBVQ import MBMBVQ
from EntropyCoding import EntropyCoding
class ... | [
"evaluate.PSNR",
"evaluate.MSE"
] | [((84, 117), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (107, 117), False, 'import warnings\n'), ((1844, 1903), 'load_img.Load_from_Folder', 'Load_from_Folder', ([], {'folder': '"""./test_data/"""', 'color': '"""YUV"""', 'ct': '(-1)'}), "(folder='./test_data/', color='... |
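`evaluate.MSE` and `evaluate.PSNR` in the final row are project-local helpers whose signatures are not visible; the sketch below uses the textbook definitions (mean squared error, and peak signal-to-noise ratio over an assumed 8-bit peak of 255), which is presumably what they compute.

```python
import numpy as np

def mse(a: np.ndarray, b: np.ndarray) -> float:
    """Mean squared error between two images of the same shape."""
    diff = a.astype(np.float64) - b.astype(np.float64)
    return float(np.mean(diff ** 2))

def psnr(a: np.ndarray, b: np.ndarray, peak: float = 255.0) -> float:
    """Peak signal-to-noise ratio in dB; `peak` assumes 8-bit images."""
    m = mse(a, b)
    return float("inf") if m == 0.0 else 10.0 * np.log10(peak ** 2 / m)
```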