Python tqdm.trange方法代码示例

本文整理汇总了Python中tqdm.trange方法的典型用法代码示例。如果您正苦于以下问题:Python tqdm.trange方法的具体用法?Python tqdm.trange怎么用?Python tqdm.trange使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块tqdm的用法示例。

在下文中一共展示了tqdm.trange方法的27个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: validate_on_lfw

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def validate_on_lfw(model, lfw_160_path):
    """Evaluate a face-embedding model on the LFW verification benchmark.

    Computes embeddings for every image pair, then prints accuracy,
    validation rate @ FAR, AUC and EER. Returns nothing.
    """
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)
    # One 512-D embedding per image; each pair contributes two consecutive rows.
    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        # cv2 loads BGR; [:, :, ::-1] flips channels to RGB for the model.
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings
    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    # EER: the point on the interpolated ROC curve where FPR == 1 - TPR.
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
开发者ID:ppwwyyxx,项目名称:Adversarial-Face-Attack,代码行数:27,代码来源:face_attack.py


示例2: create_training_file

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def create_training_file(docs, tokenizer, args, epoch_num):
    """Generate one epoch of pre-training data.

    Serializes every instance produced from every document into
    ``epoch_<n>.json`` (one JSON object per line) and records the total
    instance count plus the max sequence length in a sidecar
    ``epoch_<n>_metrics.json`` file.
    """
    epoch_path = args.output_dir / "epoch_{}.json".format(epoch_num)
    num_instances = 0
    with epoch_path.open('w') as out_file:
        for doc_idx in trange(len(docs), desc="Document"):
            instances = create_instances_from_document(
                docs, doc_idx, max_seq_length=args.max_seq_len, short_seq_prob=args.short_seq_prob,
                masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq,
                whole_word_mask=args.do_whole_word_mask, tokenizer=tokenizer,
                next_sent_prediction=args.do_next_sent_prediction)
            # One JSON document per line, counted as we go.
            for instance in instances:
                out_file.write(json.dumps(instance) + '\n')
                num_instances += 1
    metrics_path = args.output_dir / "epoch_{}_metrics.json".format(epoch_num)
    with metrics_path.open('w') as metrics_file:
        metrics_file.write(json.dumps({
            "num_training_examples": num_instances,
            "max_seq_len": args.max_seq_len
        }))
开发者ID:allenai,项目名称:tpu_pretrain,代码行数:23,代码来源:pregenerate_training_data.py


示例3: _preprocess

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _preprocess(self, ids, ids_file):
        """Filter COCO image ids to those whose segmentation mask covers more
        than 1000 pixels, cache the result with ``torch.save`` to ``ids_file``,
        and return the filtered id list.
        """
        print("Preprocessing mask, this will take a while. " + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            # All annotations attached to this image.
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                                 format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        # Cache to disk so subsequent runs skip this pass entirely.
        torch.save(new_ids, ids_file)
        return new_ids
开发者ID:clovaai,项目名称:overhaul-distillation,代码行数:21,代码来源:coco.py


示例4: start

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def start(self):
        """
        Start testing with a progress bar.

        Optionally consumes ``self.warmup`` datapoints first (excluded from
        the timed run), then iterates ``self.test_size`` datapoints from the
        dataflow while showing a smoothed tqdm bar.
        """
        # reset_state() must run once before iteration; skip if caller did it.
        if not self._reset_called:
            self.ds.reset_state()
        itr = self.ds.__iter__()
        if self.warmup:
            for _ in tqdm.trange(self.warmup, **get_tqdm_kwargs()):
                next(itr)
        # add smoothing for speed benchmark
        with get_tqdm(total=self.test_size,
                      leave=True, smoothing=0.2) as pbar:
            for idx, dp in enumerate(itr):
                pbar.update()
                if idx == self.test_size - 1:
                    break
开发者ID:tensorpack,项目名称:dataflow,代码行数:19,代码来源:common.py


示例5: test

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def test(data):
    """Run batched inference over ``data.ids`` and return predicted labels.

    Loads model weights from ``data.model_path``, decodes each batch's tag
    sequence back to labels (restoring the original character order via
    ``char_recover``), and concatenates the per-batch results.
    """
    print('Testing model...')
    model = Model(data).to(device)
    model.load_state_dict(torch.load(data.model_path))
    instances = data.ids
    pred_results = []
    model.eval()
    test_num = len(instances)
    # +1 so a trailing partial batch is visited; empty slices are skipped below.
    total_batch = test_num // data.batch_size + 1
    for batch in trange(total_batch):
        start, end = slice_set(batch, data.batch_size, test_num)
        instance = instances[start:end]
        if not instance: continue
        _, mask, *model_input, char_recover = load_batch(instance, True)
        tag_seq = model(mask, *model_input)
        pred_label = seq2label(tag_seq, mask, data.label_alphabet, char_recover)
        pred_results += pred_label
    return pred_results
开发者ID:kdsec,项目名称:chinese-opinion-target-extraction,代码行数:20,代码来源:main.py


示例6: create_and_train_model

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def create_and_train_model(self):
        """
        Model training and scoring.

        Builds the signed GCN, then runs ``args.epochs`` full-batch Adam
        updates, logging per-epoch wall time and (when a test split exists)
        scoring the model after each epoch.
        """
        print("\nTraining started.\n")
        self.model = SignedGraphConvolutionalNetwork(self.device, self.args, self.X).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.args.learning_rate,
                                          weight_decay=self.args.weight_decay)
        self.model.train()
        # trange doubles as the epoch iterator and a live loss display.
        self.epochs = trange(self.args.epochs, desc="Loss")
        for epoch in self.epochs:
            start_time = time.time()
            self.optimizer.zero_grad()
            loss, _ = self.model(self.positive_edges, self.negative_edges, self.y)
            loss.backward()
            self.epochs.set_description("SGCN (Loss=%g)" % round(loss.item(), 4))
            self.optimizer.step()
            self.logs["training_time"].append([epoch+1, time.time()-start_time])
            if self.args.test_size > 0:
                self.score_model(epoch)
开发者ID:benedekrozemberczki,项目名称:SGCN,代码行数:23,代码来源:sgcn.py


示例7: _preprocess

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _preprocess(self, ids, ids_file):
        """Filter ``ids`` down to images whose segmentation mask contains more
        than 1000 labelled pixels, pickle the surviving ids to ``ids_file``,
        and return them.
        """
        print("Preprocessing mask, this will take a while."
              "But don't worry, it only run once for each split.")
        progress = trange(len(ids))
        qualified = []
        for pos in progress:
            image_id = ids[pos]
            annotations = self.coco.loadAnns(self.coco.getAnnIds(imgIds=image_id))
            metadata = self.coco.loadImgs(image_id)[0]
            seg_mask = self._gen_seg_mask(annotations, metadata['height'],
                                          metadata['width'])
            # Keep only images with more than 1k labelled pixels.
            if (seg_mask > 0).sum() > 1000:
                qualified.append(image_id)
            progress.set_description(
                'Doing: {}/{}, got {} qualified images'.format(
                    pos, len(ids), len(qualified)))
        print('Found number of qualified images: ', len(qualified))
        # Persist the filtered ids so later runs can skip this pass.
        with open(ids_file, 'wb') as cache:
            pickle.dump(qualified, cache)
        return qualified
开发者ID:LikeLy-Journey,项目名称:SegmenTron,代码行数:21,代码来源:mscoco.py


示例8: create_dataset

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def create_dataset(seqs: List[List[str]],
                   tags: List[List[str]],
                   word_to_ix: Mapping[str, int],
                   max_seq_len: int,
                   pad_ix: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert token and tag string sequences into padded index tensors.

    Returns:
        seqs_tensor: shape=[num_seqs, max_seq_len], long.
        seqs_mask: shape=[num_seqs, max_seq_len], 1.0 where a real token sits.
        tags_tesnor: shape=[num_seqs, max_seq_len], long.
    """
    assert len(seqs) == len(tags)
    n = len(seqs)
    # Start every position at the padding index; real tokens overwrite below.
    token_ids = torch.ones(n, max_seq_len) * pad_ix
    tag_ids = torch.ones(n, max_seq_len) * pad_ix
    mask = torch.zeros(n, max_seq_len)
    for row in trange(n):
        mask[row, : len(seqs[row])] = 1
        for col, token in enumerate(seqs[row]):
            token_ids[row, col] = word_to_ix.get(token, word_to_ix['[UNK]'])
        for col, tag in enumerate(tags[row]):
            tag_ids[row, col] = word_to_ix.get(tag, word_to_ix['[UNK]'])
    return token_ids.long(), mask, tag_ids.long()
开发者ID:WiseDoge,项目名称:CoupletAI,代码行数:25,代码来源:preprocess.py


示例9: train

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def train(unet, batch_size, epochs, epoch_lapse, threshold, learning_rate, criterion, optimizer, x_train, y_train, x_val, y_val, width_out, height_out):
    """Train ``unet`` for ``epochs`` epochs, reporting the validation loss
    every ``epoch_lapse`` epochs.
    """
    batches_per_epoch = np.ceil(x_train.shape[0] / batch_size).astype(int)
    for epoch in trange(epochs, leave=True):
        total_loss = 0
        for b in range(batches_per_epoch):
            lo, hi = b * batch_size, (b + 1) * batch_size
            batch_train_x = torch.from_numpy(x_train[lo:hi]).float()
            batch_train_y = torch.from_numpy(y_train[lo:hi]).long()
            if use_gpu:
                batch_train_x = batch_train_x.cuda()
                batch_train_y = batch_train_y.cuda()
            total_loss += train_step(batch_train_x, batch_train_y, optimizer,
                                     criterion, unet, width_out, height_out)
        if (epoch + 1) % epoch_lapse == 0:
            val_loss = get_val_loss(x_val, y_val, width_out, height_out, unet)
            print("Total loss in epoch %f : %f and validation loss : %f" % (epoch + 1, total_loss, val_loss))
    gc.collect()
开发者ID:Hsankesara,项目名称:DeepResearch,代码行数:19,代码来源:run_unet.py


示例10: count_seqs_with_words

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def count_seqs_with_words(seqs, halflength, ming, maxg, alpha, revcomp, desc):
    """Tally gapped k-mer occurrences across ``seqs``.

    For each gap length g in [ming, maxg], every window of length
    ``g + 2 * halflength`` is reduced to a gapped word (first and last
    ``halflength`` letters kept, middle replaced by ambiguity characters)
    and accumulated into a dict via ``update_gapped_kmer_dict``.
    Returns that dict.
    """
    # The ambiguity character depends on the alphabet (protein vs nucleotide).
    if alpha == 'protein':
        ambiguous_character = 'X'
    else:
        ambiguous_character = 'N'
    gapped_kmer_dict = {}  # each key is the gapped k-mer word
    for g in trange(ming, maxg + 1, 1, desc=desc):
        w = g+2*halflength # length of the word
        gap = g * ambiguous_character
        for seq in seqs:
            slen = len(seq)
            for i in range(0, slen-w+1):
                word = seq[i : i+w]
                # skip word if it contains an ambiguous character
                if ambiguous_character in word:
                    continue
                # convert word to a gapped word. Only the first and last half-length letters are preserved
                word = word[0:halflength] + gap + word[-halflength:]
                update_gapped_kmer_dict(gapped_kmer_dict, word, revcomp)
    return gapped_kmer_dict
开发者ID:daquang,项目名称:YAMDA,代码行数:22,代码来源:initialize.py


示例11: image_copy_to_dir

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def image_copy_to_dir(mode, x_paths, y_paths):
    """Resize cityscape images and their masks to quarter scale and copy them
    into ``<target>/<mode>/image`` and ``<target>/<mode>/mask``.
    """
    target_path = '/run/media/tkwoo/myWorkspace/workspace/01.dataset/03.Mask_data/cityscape'
    target_path = os.path.join(target_path, mode)
    for idx in trange(len(x_paths)):
        img = cv2.imread(x_paths[idx], 1)   # color (BGR) image
        lbl = cv2.imread(y_paths[idx], 0)   # grayscale label mask
        # Quarter scale; nearest-neighbour on the mask so class ids are not
        # blended by interpolation.
        img = cv2.resize(img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_LINEAR)
        lbl = cv2.resize(lbl, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_NEAREST)
        cv2.imwrite(os.path.join(target_path, 'image', os.path.basename(x_paths[idx])), img)
        cv2.imwrite(os.path.join(target_path, 'mask', os.path.basename(y_paths[idx])), lbl)
开发者ID:dhkim0225,项目名称:keras-image-segmentation,代码行数:24,代码来源:h5_test.py


示例12: _make_progress_bar

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _make_progress_bar(self, iterations):
        """
        Creates a progress bar using :class:`tqdm`.

        The bar is scaled to KiB using ``self._chunksize``.

        Parameters
        ----------
        iterations: `int`
            Number of iterations to be performed.

        Returns
        -------
        progress_bar: :class:`tqdm.std.tqdm`
            An iterator object.
        """
        kib_bar_format = ('{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}KiB '
                          '[{elapsed}<{remaining}, {rate_fmt}{postfix}]')
        return tqdm.trange(
            iterations,
            unit_scale=(self._chunksize // 1024),
            unit="KiB",
            dynamic_ncols=True,
            bar_format=kib_bar_format,
        )
开发者ID:ritiek,项目名称:spotify-downloader,代码行数:26,代码来源:track.py


示例13: _preprocess

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _preprocess(self, ids, ids_file):
        """Filter COCO image ids to those whose generated segmentation mask
        has more than 1000 labelled pixels; pickle the surviving ids to
        ``ids_file`` and return them.
        """
        print("Preprocessing mask, this will take a while." + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            # All annotations for this image.
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'.\
                format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        # Cache to disk so subsequent runs skip this pass.
        with open(ids_file, 'wb') as f:
            pickle.dump(new_ids, f)
        return new_ids
开发者ID:dmlc,项目名称:gluon-cv,代码行数:22,代码来源:segmentation.py


示例14: _preprocess

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _preprocess(self, ids, ids_file):
        """Keep only image ids whose segmentation mask has more than 1000
        labelled pixels; cache the list with ``torch.save`` and return it.
        """
        print("Preprocessing mask, this will take a while." + \
            "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            # All annotations for this image.
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'.\
                format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        # Cache so later runs can skip this pass entirely.
        torch.save(new_ids, ids_file)
        return new_ids
开发者ID:zhanghang1989,项目名称:PyTorch-Encoding,代码行数:21,代码来源:coco.py


示例15: _filter_idx

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _filter_idx(self,
                    idx,
                    idx_file,
                    pixels_thr=1000):
        """Filter image ids by minimum mask size.

        Keeps ids whose segmentation mask has more than ``pixels_thr``
        labelled pixels, saves the survivors to ``idx_file`` as an int32
        .npy array, and returns the filtered list.
        """
        logging.info("Filtering mask index")
        tbar = trange(len(idx))
        filtered_idx = []
        for i in tbar:
            img_id = idx[i]
            # All annotations attached to this image.
            coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(
                coco_target,
                img_metadata["height"],
                img_metadata["width"])
            if (mask > 0).sum() > pixels_thr:
                filtered_idx.append(img_id)
            tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx), len(filtered_idx)))
        logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
        # Persist so subsequent runs can load instead of re-filtering.
        np.save(idx_file, np.array(filtered_idx, np.int32))
        return filtered_idx
开发者ID:osmr,项目名称:imgclsmob,代码行数:23,代码来源:coco_seg_dataset.py


示例16: _filter_idx

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _filter_idx(self,
                    idx_list,
                    idx_file_path,
                    pixels_thr=1000):
        """Drop image ids whose segmentation mask has at most ``pixels_thr``
        foreground pixels; save the survivors to ``idx_file_path`` as an
        int32 .npy array and return them.
        """
        logging.info("Filtering mask index:")
        progress = trange(len(idx_list))
        kept = []
        for pos in progress:
            image_id = idx_list[pos]
            annotations = self.coco.loadAnns(self.coco.getAnnIds(imgIds=image_id))
            info = self.coco.loadImgs(image_id)[0]
            seg_mask = self._gen_seg_mask(annotations,
                                          info["height"],
                                          info["width"])
            # Keep only masks with enough labelled area.
            if (seg_mask > 0).sum() > pixels_thr:
                kept.append(image_id)
            progress.set_description("Doing: {}/{}, got {} qualified images".format(pos, len(idx_list), len(kept)))
        logging.info("Found number of qualified images: {}".format(len(kept)))
        np.save(idx_file_path, np.array(kept, np.int32))
        return kept
开发者ID:osmr,项目名称:imgclsmob,代码行数:23,代码来源:coco_seg_dataset.py


示例17: fit

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def fit(self):
        """
        Fitting a model.

        Pipeline: fit the base embedding, create persona splits, set up the
        joint model, then optimize it over the shuffled persona walks while
        tracking an average loss in the progress-bar description.
        """
        self.base_model_fit()
        self.create_split()
        self.setup_model()
        self.model.train()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        self.optimizer.zero_grad()
        print("\nLearning the joint model.\n")
        # Shuffle so walks are visited in random order each run.
        random.shuffle(self.persona_walker.paths)
        self.walk_steps = trange(len(self.persona_walker.paths), desc="Loss")
        for step in self.walk_steps:
            self.reset_average_loss(step)
            walk = self.persona_walker.paths[step]
            self.process_walk(walk)
            loss_score = self.optimize()
            self.update_average_loss(loss_score)
开发者ID:benedekrozemberczki,项目名称:Splitter,代码行数:21,代码来源:splitter.py


示例18: chunk

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def chunk(*data, **kwargs):
    """Yield ``(index_array, chunked_arrays)`` pairs over the first axis of
    every array in ``data``.

    Keyword options (all optional):
        chunk_size (int): rows per chunk, default 100.
        shuffle (bool): visit rows in a random order, default False.
        show_progress (str or None): tqdm description; None disables the bar.
    """
    chunk_size = kwargs.pop('chunk_size', 100)
    shuffle = kwargs.pop('shuffle', False)
    show_progress = kwargs.pop('show_progress', None)
    total = len(data[0])
    order = np.random.permutation(total) if shuffle else np.arange(total)
    # Round the chunk count up so a trailing partial chunk is included.
    num_chunks = total // chunk_size + (1 if total % chunk_size else 0)
    if show_progress is not None:
        chunk_iter = tqdm.trange(num_chunks, desc=show_progress)
    else:
        chunk_iter = range(num_chunks)
    for c in chunk_iter:
        idx = order[c * chunk_size:(c + 1) * chunk_size]
        yield idx, tuple(d[idx] for d in data)
开发者ID:sharadmv,项目名称:parasol,代码行数:19,代码来源:nn.py


示例19: rollouts

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def rollouts(self, num_rollouts, num_steps, show_progress=False,
                 noise=None,
                 callback=lambda x: None,
                 **kwargs):
        """Collect ``num_rollouts`` trajectories of ``num_steps`` steps each.

        Parameters:
            show_progress: display a tqdm bar over rollouts.
            noise: optional zero-argument factory producing a fresh noise
                source per rollout.
            callback: invoked once per rollout with the rollout index; if it
                returns a context manager, the rollout runs inside it.
            **kwargs: forwarded to ``self.rollout``.

        Returns:
            (states, actions, costs, infos) with leading shape
            [num_rollouts, num_steps] for the arrays.
        """
        states, actions, costs = (
            np.empty([num_rollouts, num_steps] + [self.get_state_dim()]),
            np.empty([num_rollouts, num_steps] + [self.get_action_dim()]),
            np.empty([num_rollouts, num_steps])
        )
        infos = [None] * num_rollouts
        rollout_iter = tqdm.trange(num_rollouts, desc='Rollouts') if show_progress else range(num_rollouts)
        for i in rollout_iter:
            with contextlib.ExitStack() as stack:
                context = callback(i)
                if context is not None:
                    # Enter the context the callback already produced. The old
                    # code called callback(i) a second time here, running the
                    # callback twice per rollout and never entering (leaking)
                    # the first returned context manager.
                    stack.enter_context(context)
                n = noise() if noise is not None else None
                states[i], actions[i], costs[i], infos[i] = \
                        self.rollout(num_steps, noise=n, **kwargs)
        return states, actions, costs, infos
开发者ID:sharadmv,项目名称:parasol,代码行数:24,代码来源:env.py


示例20: train_network

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def train_network(start_epoch, epochs, scheduler, model, train_loader, val_loader, optimizer, criterion, device, dtype,
                  batch_size, log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5, best_test):
    """Top-level training loop.

    Per epoch: step the LR scheduler (non-cyclic only here; CyclicLR is
    presumably stepped per batch inside ``train``, which receives the
    scheduler -- confirm), train, validate, append error rates to the CSV
    logger, checkpoint (flagging a new best top-1), and refresh the plot.
    Prints the best top-1 accuracy at the end.
    """
    for epoch in trange(start_epoch, epochs + 1):
        # Only epoch-wise schedulers advance here.
        if not isinstance(scheduler, CyclicLR):
            scheduler.step()
        train_loss, train_accuracy1, train_accuracy5, = train(model, train_loader, epoch, optimizer, criterion, device,
                                                              dtype, batch_size, log_interval, scheduler)
        test_loss, test_accuracy1, test_accuracy5 = test(model, val_loader, criterion, device, dtype)
        csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - test_accuracy1, 'val_error5': 1 - test_accuracy5,
                          'val_loss': test_loss, 'train_error1': 1 - train_accuracy1,
                          'train_error5': 1 - train_accuracy5, 'train_loss': train_loss})
        # Checkpoint every epoch; the boolean marks whether this is a new best.
        save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_test,
                         'optimizer': optimizer.state_dict()}, test_accuracy1 > best_test, filepath=save_path)
        csv_logger.plot_progress(claimed_acc1=claimed_acc1, claimed_acc5=claimed_acc5)
        if test_accuracy1 > best_test:
            best_test = test_accuracy1
    csv_logger.write_text('Best accuracy is {:.2f}% top-1'.format(best_test * 100.))
开发者ID:Randl,项目名称:MobileNetV2-pytorch,代码行数:22,代码来源:imagenet.py


示例21: get_training_bbox

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def get_training_bbox(bbox_dir, imglist):
        """Load per-image bounding boxes from PASCAL-VOC-style XML files.

        Args:
            bbox_dir: directory containing the XML annotations.
            imglist: list of (filename, ...) tuples; the filename's extension
                is replaced by 'xml' to locate the annotation file.

        Returns:
            A list aligned with ``imglist``: a float32 box array per image,
            or None where no usable annotation exists.
        """
        import xml.etree.ElementTree as ET
        ret = []

        def parse_bbox(fname):
            # Any structural problem (missing file/element, bad text) raises
            # and is treated as "no bounding box" by the caller below.
            root = ET.parse(fname).getroot()
            # Element.getchildren() was removed in Python 3.9 -- iterate the
            # element directly. The old code also fed lazy map() objects (a
            # Python 2 leftover) to np.asarray, which cannot convert them;
            # build a real list of floats instead.
            bndbox = root.find('object').find('bndbox')
            box = [float(child.text) for child in bndbox]
            return np.asarray(box, dtype='float32')

        with timed_operation('Loading Bounding Boxes ...'):
            cnt = 0
            for k in tqdm.trange(len(imglist)):
                fname = imglist[k][0]
                fname = fname[:-4] + 'xml'
                fname = os.path.join(bbox_dir, fname)
                try:
                    ret.append(parse_bbox(fname))
                    cnt += 1
                except Exception:
                    ret.append(None)
            logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
        return ret
开发者ID:tensorpack,项目名称:dataflow,代码行数:28,代码来源:ilsvrc.py


示例22: _evaluate

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def _evaluate(self, data_generator, metrics, num_batches, l2_regularization, description='Running evaluation'):
        '''
        Internal method used by both `evaluate()` and `train()` that performs
        the actual evaluation. For the first three arguments, please refer
        to the documentation of the public `evaluate()` method.

        Accumulates the metrics over `num_batches` batches drawn from
        `data_generator`, stores the final values in `self.metric_values`,
        and prints a one-line summary.

        Arguments:
            description (string, optional): A description string that will be prepended
                to the progress bar while the evaluation is being processed. During
                training, this description is used to clarify whether the evaluation
                is being performed on the training or validation dataset.
        '''
        # Reset all metrics' accumulator variables.
        self.sess.run(self.metrics_reset_op)
        # Set up the progress bar.
        tr = trange(num_batches, file=sys.stdout)
        tr.set_description(description)
        # Accumulate metrics in batches.
        for step in tr:
            batch_images, batch_labels = next(data_generator)
            # keep_prob=1.0: dropout disabled during evaluation.
            self.sess.run(self.metric_update_ops,
                          feed_dict={self.image_input: batch_images,
                                     self.labels: batch_labels,
                                     self.keep_prob: 1.0,
                                     self.l2_regularization_rate: l2_regularization})
        # Compute final metric values.
        self.metric_values = self.sess.run(self.metric_value_tensors)
        evaluation_results_string = ''
        for i, metric_name in enumerate(self.metric_names):
            evaluation_results_string += metric_name + ': {:.4f}  '.format(self.metric_values[i])
        print(evaluation_results_string)
开发者ID:pierluigiferrari,项目名称:fcn8s_tensorflow,代码行数:40,代码来源:fcn8s_tensorflow.py


示例23: find_bounds_clr

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def find_bounds_clr(model, loader, optimizer, criterion, device, dtype, min_lr=8e-6, max_lr=8e-5, step_size=2000,
                    mode='triangular', save_path='.'):
    """LR range test for cyclical learning rates.

    Trains for ``step_size`` batches while the learning rate sweeps
    ``min_lr``..``max_lr``, recording per-batch top-1 accuracy, then plots
    accuracy vs. learning rate, saves the figure to ``find_bounds_clr.pdf``
    and the raw accuracies to ``acc.npy`` under ``save_path``.
    """
    model.train()
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr, step_size_up=step_size, mode=mode)
    epoch_count = step_size // len(loader)  # Assuming step_size is multiple of batch per epoch
    accuracy = []
    for _ in trange(epoch_count):
        for batch_idx, (data, target) in enumerate(tqdm(loader)):
            # Advance the cyclic LR once per batch.
            if scheduler is not None:
                scheduler.step()
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            corr = correct(output, target)
            accuracy.append(corr[0] / data.shape[0])
    lrs = np.linspace(min_lr, max_lr, step_size)
    plt.plot(lrs, accuracy)
    # Save BEFORE show(): show() blocks and may tear the figure down, so the
    # previous savefig-after-show wrote an empty PDF.
    plt.savefig(os.path.join(save_path, 'find_bounds_clr.pdf'))
    plt.show()
    np.save(os.path.join(save_path, 'acc.npy'), accuracy)
    return
开发者ID:Randl,项目名称:MobileNetV3-pytorch,代码行数:31,代码来源:run.py


示例24: train

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def train(data):
    """Train the tagger with RMSprop and per-epoch LR decay, then save the
    final weights to ``data.model_path``.
    """
    print('Training model...')
    # Persist the data configuration alongside the model.
    save_data_setting(data)
    model = Model(data).to(device)
    optimizer = optim.RMSprop(model.parameters(), lr=data.lr, momentum=data.momentum)
    for epoch in range(data.epoch):
        print('Epoch: %s/%s' % (epoch, data.epoch))
        optimizer = lr_decay(optimizer, epoch, data.lr_decay, data.lr)
        total_loss = 0
        # Shuffle training instances each epoch.
        random.shuffle(data.ids)
        model.train()
        model.zero_grad()
        train_num = len(data.ids)
        # +1 so a trailing partial batch is visited; empty slices are skipped.
        total_batch = train_num // data.batch_size + 1
        for batch in trange(total_batch):
            start, end = slice_set(batch, data.batch_size, train_num)
            instance = data.ids[start:end]
            if not instance: continue
            *model_input, _ = load_batch(instance)
            loss = model.neg_log_likelihood_loss(*model_input)
            total_loss += loss.data.item()
            loss.backward()
            optimizer.step()
            model.zero_grad()
        print('Epoch %d loss = %.3f' % (epoch, total_loss))
    torch.save(model.state_dict(), data.model_path)
开发者ID:kdsec,项目名称:chinese-opinion-target-extraction,代码行数:28,代码来源:main.py


示例25: benchmark_ds

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def benchmark_ds(ds, count, warmup=200):
    """Benchmark a TF1 ``tf.data`` dataset: pull ``count`` batches through a
    session (after ``warmup`` untimed iterations) with tqdm reporting the
    iteration rate.
    """
    itr = ds.make_initializable_iterator()
    dp = itr.get_next()
    # Group the output tensors so one session.run fetches the whole batch.
    dpop = tf.group(*dp)
    with tf.Session(config=get_default_sess_config()) as sess:
        sess.run(itr.initializer)
        for _ in tqdm.trange(warmup):
            sess.run(dpop)
        # smoothing dampens rate fluctuations in the displayed throughput.
        for _ in tqdm.trange(count, smoothing=0.1):
            sess.run(dpop)
开发者ID:tensorpack,项目名称:benchmarks,代码行数:13,代码来源:benchmark-tfdata.py


示例26: simulate

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def simulate(runs, time, bandits):
    """Run each bandit for ``runs`` independent runs of ``time`` steps.

    Returns:
        mean_best_action_counts: [len(bandits), time] fraction of runs in
            which the optimal action was chosen at each step.
        mean_rewards: [len(bandits), time] reward averaged over runs.
    """
    shape = (len(bandits), runs, time)
    rewards = np.zeros(shape)
    best_action_counts = np.zeros(shape)
    for b, bandit in enumerate(bandits):
        for run in trange(runs):
            bandit.reset()
            for step in range(time):
                action = bandit.act()
                rewards[b, run, step] = bandit.step(action)
                # Record whether the chosen action was the optimal one.
                if action == bandit.best_action:
                    best_action_counts[b, run, step] = 1
    return best_action_counts.mean(axis=1), rewards.mean(axis=1)
开发者ID:ShangtongZhang,项目名称:reinforcement-learning-an-introduction,代码行数:17,代码来源:ten_armed_testbed.py


示例27: ensemble_test

# 需要导入模块: import tqdm [as 别名]
# 或者: from tqdm import trange [as 别名]
def ensemble_test(input_path, output_path):
    """Ensemble box/landmark predictions from several CSVs into one CSV.

    Args:
        input_path: comma-separated list of prediction CSV paths (rows must
            be aligned across files).
        output_path: destination CSV path.
    """
    df_landmarks = [pd.read_csv(p) for p in input_path.split(',')]
    records = []
    for i in tqdm.trange(len(df_landmarks[0])):
        filename = df_landmarks[0].iloc[i]['filename']
        box_and_landmarks = [get_box_and_landmark(df_landmarks[j].iloc[i])
                             for j in range(len(df_landmarks))]
        boxes = [v[0] for v in box_and_landmarks]
        landmarks = [v[1] for v in box_and_landmarks]
        # Disagreement between the models' predictions for this row.
        l2_boxes = get_error(boxes)
        l2_landmarks = get_error(landmarks)
        records.append((filename, boxes, landmarks, l2_boxes, l2_landmarks))
    final_records = []
    # Process rows in order of decreasing landmark disagreement.
    records = list(sorted(records, key=lambda v: v[4], reverse=True))
    for filename, boxes, landmarks, l2_boxes, l2_landmarks in records:
        # Average the two most-overlapping box predictions (and the matching
        # landmark predictions).
        i, j, iou = get_top2(boxes)
        box = (boxes[i] + boxes[j]) / 2
        landmark = (landmarks[i] + landmarks[j]) / 2
        # NOTE(review): xc/yc equal half the width/height, not the box center
        # ((box[0] + box[2]) / 2). If the output 'x'/'y' columns are meant to
        # be a center point this looks like a bug -- confirm against the
        # downstream consumer before changing.
        xc = (box[2] - box[0]) / 2
        yc = (box[3] - box[1]) / 2
        w = box[2] - box[0]
        h = box[3] - box[1]
        xl, yl = landmark[0][0], landmark[0][1]
        xn, yn = landmark[1][0], landmark[1][1]
        xr, yr = landmark[2][0], landmark[2][1]
        xd, yd = landmark[3][0], landmark[3][1]
        final_records.append((filename, xc, yc, w, h, xl, yl, xn, yn, xr, yr, xd, yd))
    columns = ['filename', 'x', 'y', 'w', 'h', 'xl', 'yl', 'xn', 'yn', 'xr', 'yr', 'xd', 'yd']
    df = pd.DataFrame.from_records(final_records, columns=columns)
    df.to_csv(output_path, index=False)
开发者ID:pudae,项目名称:kaggle-humpback,代码行数:38,代码来源:ensemble_landmarks.py



注:本文中的tqdm.trange方法示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。