Python tqdm.tqdm Method Code Examples

This article collects typical usage examples of Python's tqdm.tqdm method. If you have been wondering what tqdm.tqdm does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the tqdm module itself.

Shown below are 27 code examples of the tqdm.tqdm method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
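Before diving in, here is a minimal sketch of the two basic ways to use tqdm.tqdm: wrapping an iterable, and driving the bar manually. The loop bodies are placeholders:

from tqdm import tqdm
import time

# 1. Wrap any iterable; the bar advances once per iteration.
for item in tqdm(range(100), desc='processing', unit='it'):
    time.sleep(0.01)  # stand-in for real work

# 2. Drive the bar manually when progress isn't one step per item,
#    e.g. counting bytes; unit_scale renders counts as K/M/G.
chunks = [b'x' * 4096] * 16  # stand-in for downloaded data
with tqdm(total=sum(len(c) for c in chunks), unit='B', unit_scale=True) as bar:
    for chunk in chunks:
        bar.update(len(chunk))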

Example 1: encode

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def encode(self, texts, verbose=True):
        texts_tokens = []
        if verbose:
            for text in tqdm(texts, ncols=80, leave=False):
                text = self.nlp(text_standardize(ftfy.fix_text(text)))
                text_tokens = []
                for token in text:
                    text_tokens.extend(
                        [self.encoder.get(t, 0) for t in
                         self.bpe(token.text.lower()).split(' ')])
                texts_tokens.append(text_tokens)
        else:
            for text in texts:
                text = self.nlp(text_standardize(ftfy.fix_text(text)))
                text_tokens = []
                for token in text:
                    text_tokens.extend(
                        [self.encoder.get(t, 0) for t in
                         self.bpe(token.text.lower()).split(' ')])
                texts_tokens.append(text_tokens)
        return texts_tokens 
Author: atcbosselut, Project: comet-commonsense, Lines: 23, Source: utils.py
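The two branches above differ only in whether a progress bar is shown. tqdm's disable flag can collapse them into a single loop; a minimal sketch of the same tokenization logic, assuming the same self.nlp, self.bpe, and self.encoder attributes:

def encode(self, texts, verbose=True):
    texts_tokens = []
    # disable=True makes tqdm a plain pass-through iterator
    for text in tqdm(texts, ncols=80, leave=False, disable=not verbose):
        text = self.nlp(text_standardize(ftfy.fix_text(text)))
        text_tokens = []
        for token in text:
            text_tokens.extend(
                [self.encoder.get(t, 0) for t in
                 self.bpe(token.text.lower()).split(' ')])
        texts_tokens.append(text_tokens)
    return texts_tokens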


Example 2: run_test

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def run_test(work_type: str, job_sets: Sequence, trials: int,
             pool_class: type, worker_count: int) -> Sequence[Mapping]:
    pool = pool_class(worker_count)
    if work_type == 'compute':
        test_func = pool.run_compute_test
    elif work_type == 'network':
        test_func = pool.run_network_test
    else:
        raise Exception("Invalid work type: {}".format(work_type))
    results = map(
        lambda jobs: test_func(jobs, trials, show_progress=True),
        tqdm(job_sets, desc=pool_class.__name__),
    )
    summarized_results = list(map(summarize_test, results))
    pool.destroy_pool()
    return summarized_results 
Author: JohnStarich, Project: python-pool-performance, Lines: 18, Source: pools.py


Example 3: auto_inverse

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def auto_inverse(self, whole_spectrum):
        whole_spectrum = np.copy(whole_spectrum).astype(complex)
        whole_spectrum[whole_spectrum < 1] = 1
        overwrap = self.buffer_size * 2
        height = whole_spectrum.shape[0]
        parallel_dif = (height-overwrap) // self.parallel
        if height < self.parallel*overwrap:
            raise Exception('voice length is too small to use gpu, or parallel number is too big')
        spec = [self.inverse(whole_spectrum[range(i, i+parallel_dif*self.parallel, parallel_dif), :]) for i in tqdm.tqdm(range(parallel_dif+overwrap))]
        spec = spec[overwrap:]
        spec = np.concatenate(spec, axis=1)
        spec = spec.reshape(-1, self.wave_len)
        # The code below doesn't yet account for wave_len and wave_dif; to be fixed.
        wave = np.fft.ifft(spec, axis=1).real
        pad = np.zeros((wave.shape[0], 2), dtype=float)
        wave = np.concatenate([wave, pad], axis=1)
        dst = np.zeros((wave.shape[0]+3)*self.wave_dif, dtype=float)
        for i in range(4):
            w = wave[range(i, wave.shape[0], 4),:]
            w = w.reshape(-1)
            dst[i*self.wave_dif:i*self.wave_dif+len(w)] += w
        return dst*0.5 
Author: pstuvwx, Project: Deep_VoiceChanger, Lines: 27, Source: gla_gpu.py


Example 4: _read_file

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def _read_file(path):
        """
        :param path: path to the embedding file
        :return: dict mapping each word to its embedding vector
        """
        embed_dict = {}
        with open(path, encoding='utf-8') as f:
            lines = f.readlines()
            lines = tqdm.tqdm(lines)
            for line in lines:
                values = line.strip().split(' ')
                if len(values) <= 3:  # skip header or malformed lines
                    continue
                w, v = values[0], values[1:]
                embed_dict[w] = v
        return embed_dict 
Author: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines: 18, Source: Embed.py
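tqdm also accepts a bare file handle, so the readlines() call above can be avoided on large embedding files by streaming lines directly; a hypothetical sketch:

import tqdm

def read_embeddings(path):
    embed_dict = {}
    with open(path, encoding='utf-8') as f:
        # Iterating the handle streams lines instead of loading the whole file
        for line in tqdm.tqdm(f, unit=' lines'):
            values = line.strip().split(' ')
            if len(values) <= 3:  # skip header or malformed lines
                continue
            embed_dict[values[0]] = values[1:]
    return embed_dict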


Example 5: extract_features

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	else:
		raise ValueError('Unsupported model type: ' + model_type)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Author: dabasajay, Project: Image-Caption-Generator, Lines: 29, Source: preprocessing.py


Example 6: train

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def train(self, dataset):
        self.model.train()
        self.optimizer.zero_grad()
        total_loss = 0.0
        indices = torch.randperm(len(dataset), dtype=torch.long, device='cpu')
        for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1)):
            ltree, linput, rtree, rinput, label = dataset[indices[idx]]
            target = utils.map_label_to_target(label, dataset.num_classes)
            linput, rinput = linput.to(self.device), rinput.to(self.device)
            target = target.to(self.device)
            output = self.model(ltree, linput, rtree, rinput)
            loss = self.criterion(output, target)
            total_loss += loss.item()
            loss.backward()
            if idx % self.args.batchsize == 0 and idx > 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
        self.epoch += 1
        return total_loss / len(dataset)
    # helper function for testing 
Author: dasguptar, Project: treelstm.pytorch, Lines: 23, Source: trainer.py


Example 7: test

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def test(self, dataset):
        self.model.eval()
        with torch.no_grad():
            total_loss = 0.0
            predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
            indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')
            for idx in tqdm(range(len(dataset)), desc='Testing epoch  ' + str(self.epoch)):
                ltree, linput, rtree, rinput, label = dataset[idx]
                target = utils.map_label_to_target(label, dataset.num_classes)
                linput, rinput = linput.to(self.device), rinput.to(self.device)
                target = target.to(self.device)
                output = self.model(ltree, linput, rtree, rinput)
                loss = self.criterion(output, target)
                total_loss += loss.item()
                output = output.squeeze().to('cpu')
                predictions[idx] = torch.dot(indices, torch.exp(output))
        return total_loss / len(dataset), predictions 
Author: dasguptar, Project: treelstm.pytorch, Lines: 19, Source: trainer.py


Example 8: load_embedding

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def load_embedding(self, f, reset=[]):
        vectors = {}
        for line in tqdm(f.readlines(), desc='Loading embeddings'):
            tokens = line.rstrip('\n').split(' ')
            word = tokens[0].lower() if self.lower else tokens[0]
            if self.include_unseen:
                self.add(word)
            if word in self.tok2idx:
                vectors[word] = [float(x) for x in tokens[1:]]
        dim = len(next(iter(vectors.values())))
        def to_vector(tok):
            if tok in vectors and tok not in reset:
                return vectors[tok]
            elif tok not in vectors:
                return np.random.normal(-0.05, 0.05, size=dim)
            else:
                return [0.0]*dim
        # use to_vector so unseen words get a random init rather than zeros
        self.embed = mx.nd.array([to_vector(tok) for tok in self.idx2tok])
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: dataset.py


Example 9: _process_repo_serial

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def _process_repo_serial(git_repo_dir, sqlite_db_file, commits, extraction_settings):
    """ Processes all commits in a given git repository in a serial manner.
    Args:
        git_repo_dir: path to the git repository that is mined
        sqlite_db_file: path (including database name) where the sqlite database will be created
        commits: list of commits that have to be processed
        extraction_settings: settings for the extraction
    Returns:
        None; the sqlite database is written to the specified location.
    """
    git_repo = pydriller.GitRepository(git_repo_dir)
    con = sqlite3.connect(sqlite_db_file)
    for commit in tqdm(commits, desc='Serial'):
        args = {'git_repo_dir': git_repo_dir, 'commit_hash': commit.hash, 'extraction_settings': extraction_settings}
        result = _process_commit(args)
        if not result['edits'].empty:
            result['edits'].to_sql('edits', con, if_exists='append', index=False)
        if not result['commit'].empty:
            result['commit'].to_sql('commits', con, if_exists='append', index=False) 
Author: gotec, Project: git2net, Lines: 27, Source: extraction.py


Example 10: convert_images2bmp

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def convert_images2bmp():
    # cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
    for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
        folder = os.sep + Path(path).name
        output = path.replace(folder, folder + 'bmp')
        if os.path.exists(output):
            shutil.rmtree(output)  # delete output folder
        os.makedirs(output)  # make new output folder
        for f in tqdm(glob.glob('%s*.jpg' % path)):
            save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
            cv2.imwrite(save_name, cv2.imread(f))
    for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
        with open(label_path, 'r') as file:
            lines = file.read()
        lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
            '/Users/glennjocher/PycharmProjects/', '../')
        with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
            file.write(lines) 
Author: zbyuan, Project: pruning_yolov3, Lines: 22, Source: datasets.py


Example 11: crop_images_random

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]
            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width
            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)
            # crop to the random box and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax]) 
Author: zbyuan, Project: pruning_yolov3, Lines: 23, Source: utils.py


Example 12: coco_single_class_labels

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images 
Author: zbyuan, Project: pruning_yolov3, Lines: 22, Source: utils.py


Example 13: input_file_to_training_data

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def input_file_to_training_data(args, input_file, epoch, tokenizer, num_files):
    print(input_file)
    with DocumentDatabase(reduce_memory=args.reduce_memory) as docs:
        with open(input_file) as f:
            doc = []
            for line in tqdm(f, desc="Loading Dataset", unit=" lines"):
                line = line.strip()
                if line == "":
                    docs.add_document(doc)
                    doc = []
                else:
                    tokens = tokenizer.tokenize(line)
                    doc.append(tokens)
            if doc:
                docs.add_document(doc)  # If the last doc didn't end on a newline, make sure it still gets added
        if len(docs) <= 1:
            exit("ERROR: No document breaks were found in the input file! These are necessary to allow the script to "
                 "ensure that random NextSentences are not sampled from the same document. Please add blank lines to "
                 "indicate breaks between documents in your input file. If your dataset does not contain multiple "
                 "documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, "
                 "sections or paragraphs.")
        for i in range(args.epochs_to_generate):
            create_training_file(docs, tokenizer, args, epoch + i * num_files) 
Author: allenai, Project: tpu_pretrain, Lines: 26, Source: pregenerate_training_data.py


Example 14: train

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def train(self):
        """
        Training loop based on the number of episodes
        :return:
        """
        for episode in tqdm(range(self.current_episode, self.config.num_episodes)):
            self.current_episode = episode
            # reset environment
            self.env.reset()
            self.train_one_epoch()
            # The target network has its weights kept frozen most of the time
            if self.current_episode % self.config.target_update == 0:
                self.target_model.load_state_dict(self.policy_model.state_dict())
        self.env.render()
        self.env.close() 
Author: moemen95, Project: Pytorch-Project-Template, Lines: 18, Source: dqn.py


Example 15: test

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def test():
    data = np.random.randint(0, 1000, size=(N_OBS, N_FEATURE))
    y = np.random.randint(2, size=N_OBS)
    train = data[0:N_OBS // 2]
    ytrain = y[0:N_OBS // 2]
    test = data[N_OBS // 2:]
    ytest = y[N_OBS // 2:]
    learner = ClassificationTree(number_of_features=N_FEATURE)
    for t, x in enumerate(tqdm(train)):
        learner.update(x, ytrain[t])
    correct_num = 0
    for t, x in enumerate(tqdm(test)):
        y_pred = learner.predict(x)
        if y_pred == ytest[t]:
            correct_num += 1
    print(correct_num) 
Author: jeongyoonlee, Project: Kaggler, Lines: 23, Source: test_classification_tree.py


Example 16: save_tfrecord

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def save_tfrecord(filename, dataset, verbose=False):
    observations = len(dataset['length'])
    serialized = []
    with Pool(processes=4) as pool:
        for serialized_string in tqdm(pool.imap(
            tfrecord_serializer,
            zip(dataset['length'], dataset['source'], dataset['target']),
            chunksize=10
        ), total=observations, disable=not verbose):
            serialized.append(serialized_string)
    # Save serialized dataset
    writer = tf.python_io.TFRecordWriter(
        filename,
        options=tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB
        )
    )
    for serialized_string in tqdm(serialized, disable=not verbose):
        writer.write(serialized_string)
    writer.close() 
Author: distillpub, Project: post--memorization-in-rnns, Lines: 26, Source: generate.py


Example 17: http_get

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def http_get(url: str, temp_file: IO) -> None:
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close() 
Author: ymcui, Project: cmrc2019, Lines: 12, Source: file_utils.py
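A hedged usage sketch for the helper above (the URL is a placeholder); passing unit_scale=True to the tqdm call would additionally render the byte count as KB/MB:

import tempfile

url = 'https://example.com/model.bin'  # placeholder URL
with tempfile.NamedTemporaryFile() as temp_file:
    http_get(url, temp_file)
    temp_file.flush()
    # read or copy temp_file.name here, before the file is deleted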


Example 18: set_progress_bar

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def set_progress_bar(num_examples):
    bar = tqdm(total=num_examples)
    bar.update(0)
    return bar 
Author: atcbosselut, Project: comet-commonsense, Lines: 6, Source: utils.py
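A short usage sketch: the returned bar is advanced manually with update() and closed when done; the loop body is a stand-in:

bar = set_progress_bar(500)
for example in range(500):
    # ... process one example ...
    bar.update(1)
bar.close()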


Example 19: get_generation_sequences

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def get_generation_sequences(data, split, text_encoder, test,
                             max_e1=10, max_e2=15):
    sequences = []
    count = 0
    final_event1 = None
    final_event2 = None
    final_relation = None
    discarded = []
    for event1, relation, event2, _ in tqdm(data[split]["total"]):
        e1, r, e2 = do_example(text_encoder, event1, relation, event2)
        if (split == "train" and len(e1) > max_e1 or
                len(e2) > max_e2):
            discarded.append(count)
            count += 1
            continue
        final = compile_final_sequence(
            e1, e2, r, text_encoder)
        sequences.append(final)
        count += 1
        if count > 10 and test:
            break
    return sequences, discarded 
Author: atcbosselut, Project: comet-commonsense, Lines: 33, Source: conceptnet.py


Example 20: get_generation_sequences

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def get_generation_sequences(opt, data, split, text_encoder, test):
    sequences = []
    count = 0
    final_prefix = None
    final_suffix = None
    for prefix, category, suffix in tqdm(data[split]["total"]):
        final_prefix, final_suffix = do_example(
            text_encoder, prefix, suffix, True, True)
        # if do_prefix:
        #     if "___" in prefix:
        #         final_prefix = handle_underscores(prefix, text_encoder, True)
        #     else:
        #         final_prefix = text_encoder.encode([prefix], verbose=False)[0]
        # if do_suffix:
        #     if "_" in suffix:
        #         final_suffix = handle_underscores(suffix, text_encoder)
        #     else:
        #         final_suffix = text_encoder.encode([suffix], verbose=False)[0]
        final = compile_final_sequence(
            opt, final_prefix, final_suffix, category, text_encoder)
        sequences.append(final)
        count += 1
        if count > 10 and test:
            break
    return sequences 
Author: atcbosselut, Project: comet-commonsense, Lines: 34, Source: atomic.py


Example 21: _run_test

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def _run_test(self, work_func: FunctionType, work_resource: object,
                  jobs: int, trials: int,
                  show_progress: bool=False) -> Mapping:
        results = {
            'jobs': jobs,
            'trials': trials,
            'time': [],
            'blocks': [],
        }
        # Forcibly evaluate the inputs to prevent time/resources taken up later
        inputs = list(zip(
            [work_resource] * jobs,
            range(jobs)
        ))
        trial_iter = range(trials)
        if show_progress is True and trials > 2:
            trial_iter = tqdm(trial_iter, desc='trials')
        gc.collect()
        for _ in trial_iter:
            # Run trial of pool map function and measure it
            gc.collect()
            blocks_start = sys.getallocatedblocks()
            time_start = time.time()
            list(self.map(work_func, inputs))
            time_end = time.time()
            results['time'].append(time_end - time_start)
            # Get allocated blocks before garbage collection to show peak usage
            blocks_end = sys.getallocatedblocks()
            results['blocks'].append(blocks_end - blocks_start)
        return results 
Author: JohnStarich, Project: python-pool-performance, Lines: 32, Source: pool.py


Example 22: get_mc_predictions

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param nb_iter:
    :param batch_size:
    :return:
    """
    output_dim = model.layers[-1].output.shape[-1].value
    get_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-1].output]
    )
    def predict():
        n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
        output = np.zeros(shape=(len(X), output_dim))
        for i in range(n_batches):
            output[i * batch_size:(i + 1) * batch_size] = \
                get_output([X[i * batch_size:(i + 1) * batch_size], 1])[0]
        return output
    preds_mc = []
    for i in tqdm(range(nb_iter)):
        preds_mc.append(predict())
    return np.asarray(preds_mc) 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 30, Source: util.py


Example 23: attack

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def attack(self, X, Y):
        """
        Perform the L_2 attack on the given images for the given targets.
        :param X: samples to generate advs
        :param Y: the original class labels
        If self.targeted is true, then the targets represents the target labels.
        If self.targeted is false, then targets are the original class labels.
        """
        nb_classes = Y.shape[1]
        # random select target class for targeted attack
        y_target = np.copy(Y)
        if self.TARGETED:
            for i in range(Y.shape[0]):
                current = int(np.argmax(Y[i]))
                target = np.random.choice(other_classes(nb_classes, current))
                y_target[i] = np.eye(nb_classes)[target]
        X_adv = np.zeros_like(X)
        for i in tqdm(range(0, X.shape[0], self.batch_size)):
            start = i
            end = i + self.batch_size
            end = np.minimum(end, X.shape[0])
            X_adv[start:end] = self.attack_batch(X[start:end], y_target[start:end])
        return X_adv 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 29, Source: cw_attacks.py


Example 24: pre_encode

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def pre_encode():
    import tqdm
    path = input('enter wave path...')
    ds = WaveDataset(path, -1, True)
    num = ds.max // dif
    imgs = [ds.get_example(i) for i in tqdm.tqdm(range(num))]
    dst = np.concatenate(imgs, axis=1)
    print(dst.shape)
    np.save(path[:-3]+'npy', dst)
    print('encoded file saved at', path[:-3]+'npy') 
Author: pstuvwx, Project: Deep_VoiceChanger, Lines: 15, Source: dataset.py


Example 25: extract_features_wrapper

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def extract_features_wrapper(paths, path2gt, model='vggish', save_as=False):
    """Wrapper function for extracting features (MusiCNN, VGGish or OpenL3) per batch.
       If a save_as string argument is passed, the features will be saved in
       the specified file.
    """
    if model == 'vggish':
        feature_extractor = extract_vggish_features
    elif model == 'openl3' or model == 'musicnn':
        feature_extractor = extract_other_features
    else:
        raise NotImplementedError('Current implementation only supports MusiCNN, VGGish and OpenL3 features')
    batch_size = config['batch_size']
    first_batch = True
    for batch_id in tqdm(range(ceil(len(paths)/batch_size))):
        batch_paths = paths[(batch_id)*batch_size:(batch_id+1)*batch_size]
        [x, y, refs] = feature_extractor(batch_paths, path2gt, model)
        if first_batch:
            [X, Y, IDS] = [x, y, refs]
            first_batch = False
        else:
            X = np.concatenate((X, x), axis=0)
            Y = np.concatenate((Y, y), axis=0)
            IDS = np.concatenate((IDS, refs), axis=0)
    if save_as:  # save data to file
        # create a directory where to store the extracted training features
        audio_representations_folder = DATA_FOLDER + 'audio_representations/'
        if not os.path.exists(audio_representations_folder):
            os.makedirs(audio_representations_folder)
        np.savez(audio_representations_folder + save_as, X=X, Y=Y, IDS=IDS)
        print('Audio features stored: ', save_as)
    return [X, Y, IDS] 
Author: jordipons, Project: sklearn-audio-transfer-learning, Lines: 36, Source: audio_transfer_learning.py


Example 26: evaluate_model

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def evaluate_model(model, images, captions, tokenizer, max_length):
	actual, predicted = list(), list()
	for image_id, caption_list in tqdm(captions.items()):
		yhat = generate_caption(model, tokenizer, images[image_id], max_length)
		ground_truth = [caption.split() for caption in caption_list]
		actual.append(ground_truth)
		predicted.append(yhat.split())
	print('BLEU Scores :')
	print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')
	print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
	print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
	print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
	print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25))) 
Author: dabasajay, Project: Image-Caption-Generator, Lines: 15, Source: model.py


Example 27: evaluate_model_beam_search

# Required module: import tqdm [as alias]
# Or: from tqdm import tqdm [as alias]
def evaluate_model_beam_search(model, images, captions, tokenizer, max_length, beam_index=3):
	actual, predicted = list(), list()
	for image_id, caption_list in tqdm(captions.items()):
		yhat = generate_caption_beam_search(model, tokenizer, images[image_id], max_length, beam_index=beam_index)
		ground_truth = [caption.split() for caption in caption_list]
		actual.append(ground_truth)
		predicted.append(yhat.split())
	print('BLEU Scores :')
	print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')
	print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
	print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
	print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
	print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25))) 
Author: dabasajay, Project: Image-Caption-Generator, Lines: 15, Source: model.py



Note: The tqdm.tqdm method examples above were collected from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright of the source code remains with the original authors. Please consult each project's License before distributing or reusing the code, and do not repost without permission.