From d75ea7c21c1fc2ddecb931a7b55b7bd19c6c64cf Mon Sep 17 00:00:00 2001
From: Leng Yue
Date: Sun, 5 Mar 2023 16:07:22 -0500
Subject: [PATCH] Reorganize folders & better inference archs (#34)

* move files for future archs
* remove redundent code
* organize files
* continue reorganize
* split gradio ui from inference & integrate batch inference
* fix imports
* move files to correct folder
* moved files & optimize for future archs
* move files & optimize for future archs
* fix onnx export
---
 README.en.md                                  |  12 +-
 README.md                                     |  12 +-
 fish_diffusion/archs/diffsinger.py            | 109 ---
 .../archs/diffsinger/diffsinger.py            | 202 ++---
 .../diffsinger}/diffusions/__init__.py        |   0
 .../archs/diffsinger/diffusions/builder.py    |   8 +
 .../diffsinger}/diffusions/diffusion.py       |  14 +-
 .../diffsinger}/diffusions/noise_predictor.py |   0
 fish_diffusion/denoisers/__init__.py          |   5 -
 fish_diffusion/denoisers/attention.py         |  57 --
 fish_diffusion/denoisers/builder.py           |   3 -
 fish_diffusion/diffusions/builder.py          |   3 -
 .../{ => modules}/encoders/__init__.py        |   0
 .../{ => modules}/encoders/attention.py       |   0
 .../{ => modules}/encoders/builder.py         |   0
 .../{ => modules}/encoders/fast_speech.py     | 206 ++++-
 .../{ => modules}/encoders/identity.py        |   0
 .../encoders/naive_projection.py              |   0
 .../feature_extractors/__init__.py            |   2 -
 .../{ => modules}/feature_extractors/base.py  |   1 -
 .../feature_extractors/builder.py             |   0
 .../feature_extractors/chinese_hubert.py      |   0
 .../feature_extractors/content_vec.py         |   0
 .../feature_extractors/hubert_soft.py         |   0
 .../opencpop_transcription.py                 |   0
 .../feature_extractors/whisper.py             |   0
 .../pitch_extractors}/__init__.py             |   0
 .../pitch_extractors}/builder.py              |   0
 .../pitch_extractors}/crepe.py                |   0
 .../pitch_extractors}/parsel_mouth.py         |   0
 .../pitch_extractors}/world.py                |   0
 .../modules/positional_embedding.py           | 205 -----
 .../{ => modules}/vocoders/__init__.py        |   0
 .../{ => modules}/vocoders/builder.py         |   0
 .../vocoders/istft_net/istft_net.py           |   0
 .../{ => modules}/vocoders/istft_net/mel.py   |   0
 .../vocoders/istft_net/models.py              |   0
 .../vocoders/nsf_hifigan/__init__.py          |   0
 .../vocoders/nsf_hifigan/models.py            |   0
 .../vocoders/nsf_hifigan/nsf_hifigan.py       |   0
 .../{denoisers => modules}/wavenet.py         |  12 +-
 fish_diffusion/schedulers/__init__.py         |   3 +
 .../scheduler.py => schedulers/builder.py}    |   0
 fish_diffusion/utils/audio.py                 |  31 +-
 fish_diffusion/utils/inference.py             |   8 +-
 fish_diffusion/utils/ssim.py                  | 114 ---
 inference.py                                  | 431 ----------
 poetry.lock                                   | 772 +++++++++---------
 tests/test_nsf_hifigan.py                     |   4 +-
 tests/test_visualize_pitch.py                 |   2 +-
 tools/batch_inference.py                      |  53 --
 .../clean_speaker_embeddings.py               |   0
 tools/{ => diffusion}/diff_svc_converter.py   |   1 -
 .../flask_api.py}                             |  87 +-
 tools/diffusion/gradio_ui.py                  |  92 +++
 tools/diffusion/inference.py                  | 354 ++++
 .../diffusion/inference_svs.py                |  13 +-
 .../diffusion/inference_svs_ds.py             |  14 +-
 tools/{ => diffusion}/onnx/demo.py            |   7 +-
 tools/{ => diffusion}/onnx/export.py          |  15 +-
 tools/{ => diffusion}/onnx/export_moess.py    |  14 +-
 tools/diffusion/train.py                      |  93 +++
 tools/nsf_hifigan/export.py                   |   2 +-
 tools/nsf_hifigan/train.py                    |   6 +-
 tools/preprocessing/extract_features.py       |  11 +-
 tools/whisper/train.py                        |   2 +-
 66 files changed, 1373 insertions(+), 1607 deletions(-)
 delete mode 100644 fish_diffusion/archs/diffsinger.py
 rename train.py => fish_diffusion/archs/diffsinger/diffsinger.py (57%)
 rename fish_diffusion/{ => archs/diffsinger}/diffusions/__init__.py (100%)
 create mode 100644 fish_diffusion/archs/diffsinger/diffusions/builder.py
 rename fish_diffusion/{ => archs/diffsinger}/diffusions/diffusion.py (95%)
 rename fish_diffusion/{ => archs/diffsinger}/diffusions/noise_predictor.py (100%)
 delete mode 100644 fish_diffusion/denoisers/__init__.py
 delete mode 100644 fish_diffusion/denoisers/attention.py
 delete mode 100644 fish_diffusion/denoisers/builder.py
 delete mode 100644 fish_diffusion/diffusions/builder.py
 rename fish_diffusion/{ => modules}/encoders/__init__.py (100%)
 rename fish_diffusion/{ => modules}/encoders/attention.py (100%)
 rename fish_diffusion/{ => modules}/encoders/builder.py (100%)
 rename fish_diffusion/{ => modules}/encoders/fast_speech.py (76%)
 rename fish_diffusion/{ => modules}/encoders/identity.py (100%)
 rename fish_diffusion/{ => modules}/encoders/naive_projection.py (100%)
 rename fish_diffusion/{ => modules}/feature_extractors/__init__.py (88%)
 rename fish_diffusion/{ => modules}/feature_extractors/base.py (97%)
 rename fish_diffusion/{ => modules}/feature_extractors/builder.py (100%)
 rename fish_diffusion/{ => modules}/feature_extractors/chinese_hubert.py (100%)
 rename fish_diffusion/{ => modules}/feature_extractors/content_vec.py (100%)
 rename fish_diffusion/{ => modules}/feature_extractors/hubert_soft.py (100%)
 rename fish_diffusion/{ => modules}/feature_extractors/opencpop_transcription.py (100%)
 rename fish_diffusion/{ => modules}/feature_extractors/whisper.py (100%)
 rename fish_diffusion/{feature_extractors/pitch => modules/pitch_extractors}/__init__.py (100%)
 rename fish_diffusion/{feature_extractors/pitch => modules/pitch_extractors}/builder.py (100%)
 rename fish_diffusion/{feature_extractors/pitch => modules/pitch_extractors}/crepe.py (100%)
 rename fish_diffusion/{feature_extractors/pitch => modules/pitch_extractors}/parsel_mouth.py (100%)
 rename fish_diffusion/{feature_extractors/pitch => modules/pitch_extractors}/world.py (100%)
 delete mode 100644 fish_diffusion/modules/positional_embedding.py
 rename fish_diffusion/{ => modules}/vocoders/__init__.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/builder.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/istft_net/istft_net.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/istft_net/mel.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/istft_net/models.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/nsf_hifigan/__init__.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/nsf_hifigan/models.py (100%)
 rename fish_diffusion/{ => modules}/vocoders/nsf_hifigan/nsf_hifigan.py (100%)
 rename fish_diffusion/{denoisers => modules}/wavenet.py (97%)
 create mode 100644 fish_diffusion/schedulers/__init__.py
 rename fish_diffusion/{utils/scheduler.py => schedulers/builder.py} (100%)
 delete mode 100644 fish_diffusion/utils/ssim.py
 delete mode 100644 inference.py
 delete mode 100644 tools/batch_inference.py
 rename tools/{ => diffusion}/clean_speaker_embeddings.py (100%)
 rename tools/{ => diffusion}/diff_svc_converter.py (99%)
 rename tools/{vst_flask_api.py => diffusion/flask_api.py} (50%)
 create mode 100644 tools/diffusion/gradio_ui.py
 create mode 100644 tools/diffusion/inference.py
 rename inference_svs.py => tools/diffusion/inference_svs.py (94%)
 rename inference_svs_ds.py => tools/diffusion/inference_svs_ds.py (96%)
 rename tools/{ => diffusion}/onnx/demo.py (90%)
 rename tools/{ => diffusion}/onnx/export.py (95%)
 rename tools/{ => diffusion}/onnx/export_moess.py (96%)
 create mode 100644 tools/diffusion/train.py

diff --git a/README.en.md b/README.en.md
index 52e51c23..fb335982 100644
--- a/README.en.md
+++ b/README.en.md
@@ -99,34 +99,34 @@ python
tools/preprocessing/extract_features.py --config configs/svc_hubert_soft. ```bash # Single machine single card / multi-card training -python train.py --config configs/svc_hubert_soft.py +python tools/diffusion/train.py --config configs/svc_hubert_soft.py # Resume training -python train.py --config configs/svc_hubert_soft.py --resume [checkpoint] +python tools/diffusion/train.py --config configs/svc_hubert_soft.py --resume [checkpoint] # Fine-tune the pre-trained model # Note: You should adjust the learning rate scheduler in the config file to warmup_cosine_finetune -python train.py --config configs/svc_cn_hubert_soft_finetune.py --pretrained [checkpoint] +python tools/diffusion/train.py --config configs/svc_cn_hubert_soft_finetune.py --pretrained [checkpoint] ``` ## Inference ```bash # Inference using shell, you can use --help to view more parameters -python inference.py --config [config] \ +python tools/diffusion/inference.py --config [config] \ --checkpoint [checkpoint] \ --input [input audio] \ --output [output audio] # Gradio Web Inference, other parameters will be used as gradio default parameters -python inference.py --config [config] \ +python tools/diffusion/inference.py --config [config] \ --checkpoint [checkpoint] \ --gradio ``` ## Convert a DiffSVC model to Fish Diffusion ```bash -python tools/diff_svc_converter.py --config configs/svc_hubert_soft_diff_svc.py \ +python tools/diffusion/diff_svc_converter.py --config configs/svc_hubert_soft_diff_svc.py \ --input-path [DiffSVC ckpt] \ --output-path [Fish Diffusion ckpt] ``` diff --git a/README.md b/README.md index 2539d767..91083498 100644 --- a/README.md +++ b/README.md @@ -118,34 +118,34 @@ python tools/preprocessing/extract_features.py --config configs/svc_hubert_soft. ```bash # 单机单卡 / 单机多卡训练 -python train.py --config configs/svc_hubert_soft.py +python tools/diffusion/train.py --config configs/svc_hubert_soft.py # 继续训练 -python train.py --config configs/svc_hubert_soft.py --resume [checkpoint] +python tools/diffusion/train.py --config configs/svc_hubert_soft.py --resume [checkpoint] # 微调预训练模型 # 注意: 你应该调整配置文件中的学习率调度器为 warmup_cosine_finetune -python train.py --config configs/svc_cn_hubert_soft_finetune.py --pretrained [checkpoint] +python tools/diffusion/train.py --config configs/svc_cn_hubert_soft_finetune.py --pretrained [checkpoint] ``` ## 推理 ```bash # 命令行推理, 你可以使用 --help 查看更多参数 -python inference.py --config [config] \ +python tools/diffusion/inference.py --config [config] \ --checkpoint [checkpoint] \ --input [input audio] \ --output [output audio] # Gradio Web 推理, 其他参数会被转为 Gradio 默认参数 -python inference.py --config [config] \ +python tools/diffusion/inference.py --config [config] \ --checkpoint [checkpoint] \ --gradio ``` ## 将 DiffSVC 模型转换为 Fish Diffusion 模型 ```bash -python tools/diff_svc_converter.py --config configs/svc_hubert_soft_diff_svc.py \ +python tools/diffusion/diff_svc_converter.py --config configs/svc_hubert_soft_diff_svc.py \ --input-path [DiffSVC ckpt] \ --output-path [Fish Diffusion ckpt] ``` diff --git a/fish_diffusion/archs/diffsinger.py b/fish_diffusion/archs/diffsinger.py deleted file mode 100644 index fb823329..00000000 --- a/fish_diffusion/archs/diffsinger.py +++ /dev/null @@ -1,109 +0,0 @@ -import torch -import torch.nn as nn - -from fish_diffusion.diffusions import DIFFUSIONS -from fish_diffusion.encoders import ENCODERS - - -class DiffSinger(nn.Module): - """DiffSinger""" - - def __init__(self, model_config): - super(DiffSinger, self).__init__() - - self.text_encoder = 
ENCODERS.build(model_config.text_encoder) - self.diffusion = DIFFUSIONS.build(model_config.diffusion) - self.speaker_encoder = ENCODERS.build(model_config.speaker_encoder) - self.pitch_encoder = ENCODERS.build(model_config.pitch_encoder) - - if "pitch_shift_encoder" in model_config: - self.pitch_shift_encoder = ENCODERS.build(model_config.pitch_shift_encoder) - - @staticmethod - def get_mask_from_lengths(lengths, max_len=None): - batch_size = lengths.shape[0] - if max_len is None: - max_len = torch.max(lengths).item() - - ids = ( - torch.arange(0, max_len) - .unsqueeze(0) - .expand(batch_size, -1) - .to(lengths.device) - ) - mask = ids >= lengths.unsqueeze(1).expand(-1, max_len) - - return mask - - def forward_features( - self, - speakers, - contents, - contents_lens, - contents_max_len, - mel_lens=None, - mel_max_len=None, - pitches=None, - pitch_shift=None, - ): - src_masks = ( - self.get_mask_from_lengths(contents_lens, contents_max_len) - if contents_lens is not None - else None - ) - - features = self.text_encoder(contents, src_masks) - - speaker_embed = ( - self.speaker_encoder(speakers).unsqueeze(1).expand(-1, contents_max_len, -1) - ) - - features += speaker_embed - features += self.pitch_encoder(pitches) - - if pitch_shift is not None and hasattr(self, "pitch_shift_encoder"): - features += self.pitch_shift_encoder(pitch_shift)[:, None] - - mel_masks = ( - self.get_mask_from_lengths(mel_lens, mel_max_len) - if mel_lens is not None - else None - ) - - return dict( - features=features, - src_masks=src_masks, - mel_masks=mel_masks, - ) - - def forward( - self, - speakers, - contents, - contents_lens, - contents_max_len, - mel=None, - mel_lens=None, - mel_max_len=None, - pitches=None, - pitch_shift=None, - ): - features = self.forward_features( - speakers=speakers, - contents=contents, - contents_lens=contents_lens, - contents_max_len=contents_max_len, - mel_lens=mel_lens, - mel_max_len=mel_max_len, - pitches=pitches, - pitch_shift=pitch_shift, - ) - - output_dict = self.diffusion.train_step( - features["features"], mel, features["mel_masks"] - ) - - # For validation - output_dict["features"] = features["features"] - - return output_dict diff --git a/train.py b/fish_diffusion/archs/diffsinger/diffsinger.py similarity index 57% rename from train.py rename to fish_diffusion/archs/diffsinger/diffsinger.py index a9d4c941..c0a7ec10 100644 --- a/train.py +++ b/fish_diffusion/archs/diffsinger/diffsinger.py @@ -1,24 +1,125 @@ -from argparse import ArgumentParser - import matplotlib.pyplot as plt import pytorch_lightning as pl import torch +import torch.nn as nn import wandb -from loguru import logger -from mmengine import Config from mmengine.optim import OPTIMIZERS from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger -from fish_diffusion.archs.diffsinger import DiffSinger -from fish_diffusion.datasets.utils import build_loader_from_config -from fish_diffusion.utils.scheduler import LR_SCHEUDLERS +from fish_diffusion.modules.encoders import ENCODERS +from fish_diffusion.modules.vocoders import VOCODERS +from fish_diffusion.modules.vocoders.builder import VOCODERS +from fish_diffusion.schedulers import LR_SCHEUDLERS from fish_diffusion.utils.viz import viz_synth_sample -from fish_diffusion.vocoders import VOCODERS -torch.set_float32_matmul_precision("medium") +from .diffusions import DIFFUSIONS + + +class DiffSinger(nn.Module): + """DiffSinger""" + + def __init__(self, model_config): + super(DiffSinger, self).__init__() + + self.text_encoder = 
ENCODERS.build(model_config.text_encoder) + self.diffusion = DIFFUSIONS.build(model_config.diffusion) + self.speaker_encoder = ENCODERS.build(model_config.speaker_encoder) + self.pitch_encoder = ENCODERS.build(model_config.pitch_encoder) + + if "pitch_shift_encoder" in model_config: + self.pitch_shift_encoder = ENCODERS.build(model_config.pitch_shift_encoder) + + @staticmethod + def get_mask_from_lengths(lengths, max_len=None): + batch_size = lengths.shape[0] + if max_len is None: + max_len = torch.max(lengths).item() + + ids = ( + torch.arange(0, max_len) + .unsqueeze(0) + .expand(batch_size, -1) + .to(lengths.device) + ) + mask = ids >= lengths.unsqueeze(1).expand(-1, max_len) + + return mask + + def forward_features( + self, + speakers, + contents, + contents_lens, + contents_max_len, + mel_lens=None, + mel_max_len=None, + pitches=None, + pitch_shift=None, + ): + src_masks = ( + self.get_mask_from_lengths(contents_lens, contents_max_len) + if contents_lens is not None + else None + ) + + features = self.text_encoder(contents, src_masks) + + speaker_embed = ( + self.speaker_encoder(speakers).unsqueeze(1).expand(-1, contents_max_len, -1) + ) + + features += speaker_embed + features += self.pitch_encoder(pitches) + + if pitch_shift is not None and hasattr(self, "pitch_shift_encoder"): + features += self.pitch_shift_encoder(pitch_shift)[:, None] + + mel_masks = ( + self.get_mask_from_lengths(mel_lens, mel_max_len) + if mel_lens is not None + else None + ) + + return dict( + features=features, + src_masks=src_masks, + mel_masks=mel_masks, + ) + + def forward( + self, + speakers, + contents, + contents_lens, + contents_max_len, + mel=None, + mel_lens=None, + mel_max_len=None, + pitches=None, + pitch_shift=None, + ): + features = self.forward_features( + speakers=speakers, + contents=contents, + contents_lens=contents_lens, + contents_max_len=contents_max_len, + mel_lens=mel_lens, + mel_max_len=mel_max_len, + pitches=pitches, + pitch_shift=pitch_shift, + ) + + output_dict = self.diffusion.train_step( + features["features"], mel, features["mel_masks"] + ) + # For validation + output_dict["features"] = features["features"] -class FishDiffusion(pl.LightningModule): + return output_dict + + +class DiffSingerLightning(pl.LightningModule): def __init__(self, config): super().__init__() self.save_hyperparameters() @@ -129,84 +230,3 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): return self._step(batch, batch_idx, mode="valid") - - -if __name__ == "__main__": - pl.seed_everything(42, workers=True) - - parser = ArgumentParser() - parser.add_argument("--config", type=str, required=True) - parser.add_argument("--resume", type=str, default=None) - parser.add_argument( - "--tensorboard", - action="store_true", - default=False, - help="Use tensorboard logger, default is wandb.", - ) - parser.add_argument("--resume-id", type=str, default=None, help="Wandb run id.") - parser.add_argument("--entity", type=str, default=None, help="Wandb entity.") - parser.add_argument("--name", type=str, default=None, help="Wandb run name.") - parser.add_argument( - "--pretrained", type=str, default=None, help="Pretrained model." - ) - parser.add_argument( - "--only-train-speaker-embeddings", - action="store_true", - default=False, - help="Only train speaker embeddings.", - ) - - args = parser.parse_args() - - cfg = Config.fromfile(args.config) - - model = FishDiffusion(cfg) - - # We only load the state_dict of the model, not the optimizer. 
- if args.pretrained: - state_dict = torch.load(args.pretrained, map_location="cpu") - if "state_dict" in state_dict: - state_dict = state_dict["state_dict"] - - result = model.load_state_dict(state_dict, strict=False) - - missing_keys = set(result.missing_keys) - unexpected_keys = set(result.unexpected_keys) - - # Make sure incorrect keys are just noise predictor keys. - unexpected_keys = unexpected_keys - set( - i.replace(".naive_noise_predictor.", ".") for i in missing_keys - ) - - assert len(unexpected_keys) == 0 - - if args.only_train_speaker_embeddings: - for name, param in model.named_parameters(): - if "speaker_encoder" not in name: - param.requires_grad = False - - logger.info( - "Only train speaker embeddings, all other parameters are frozen." - ) - - logger = ( - TensorBoardLogger("logs", name=cfg.model.type) - if args.tensorboard - else WandbLogger( - project=cfg.model.type, - save_dir="logs", - log_model=True, - name=args.name, - entity=args.entity, - resume="must" if args.resume_id else False, - id=args.resume_id, - ) - ) - - trainer = pl.Trainer( - logger=logger, - **cfg.trainer, - ) - - train_loader, valid_loader = build_loader_from_config(cfg, trainer.num_devices) - trainer.fit(model, train_loader, valid_loader, ckpt_path=args.resume) diff --git a/fish_diffusion/diffusions/__init__.py b/fish_diffusion/archs/diffsinger/diffusions/__init__.py similarity index 100% rename from fish_diffusion/diffusions/__init__.py rename to fish_diffusion/archs/diffsinger/diffusions/__init__.py diff --git a/fish_diffusion/archs/diffsinger/diffusions/builder.py b/fish_diffusion/archs/diffsinger/diffusions/builder.py new file mode 100644 index 00000000..cd8a5dc1 --- /dev/null +++ b/fish_diffusion/archs/diffsinger/diffusions/builder.py @@ -0,0 +1,8 @@ +from mmengine import Registry + +from fish_diffusion.modules.wavenet import WaveNet + +DIFFUSIONS = Registry("diffusions") +DENOISERS = Registry("denoisers") + +DENOISERS.register_module(name="WaveNetDenoiser", module=WaveNet) diff --git a/fish_diffusion/diffusions/diffusion.py b/fish_diffusion/archs/diffsinger/diffusions/diffusion.py similarity index 95% rename from fish_diffusion/diffusions/diffusion.py rename to fish_diffusion/archs/diffsinger/diffusions/diffusion.py index 5e46ba5c..393428ad 100644 --- a/fish_diffusion/diffusions/diffusion.py +++ b/fish_diffusion/archs/diffsinger/diffusions/diffusion.py @@ -1,5 +1,4 @@ import json -from collections import deque from functools import partial import numpy as np @@ -8,14 +7,8 @@ from torch import nn from tqdm import tqdm -from fish_diffusion.denoisers import DENOISERS -from fish_diffusion.diffusions.noise_predictor import ( - NaiveNoisePredictor, - PLMSNoisePredictor, -) -from fish_diffusion.utils.ssim import ssim_loss - -from .builder import DIFFUSIONS +from .builder import DENOISERS, DIFFUSIONS +from .noise_predictor import NaiveNoisePredictor, PLMSNoisePredictor def get_noise_schedule_list(schedule_mode, timesteps, max_beta=0.01, s=0.008): @@ -157,9 +150,6 @@ def get_mel_loss(self, loss_fn, noise, epsilon): loss = F.smooth_l1_loss(noise, epsilon) elif loss_fn == "l2": loss = F.mse_loss(noise, epsilon) - elif loss_fn == "ssim": - # There is a bug we need to fix in the SSIM implementation - loss = ssim_loss(noise, epsilon) elif callable(loss_fn): loss = loss_fn(noise, epsilon) else: diff --git a/fish_diffusion/diffusions/noise_predictor.py b/fish_diffusion/archs/diffsinger/diffusions/noise_predictor.py similarity index 100% rename from fish_diffusion/diffusions/noise_predictor.py rename to 
fish_diffusion/archs/diffsinger/diffusions/noise_predictor.py diff --git a/fish_diffusion/denoisers/__init__.py b/fish_diffusion/denoisers/__init__.py deleted file mode 100644 index eba5d3ff..00000000 --- a/fish_diffusion/denoisers/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .attention import AttentionDenoiser -from .builder import DENOISERS -from .wavenet import WaveNetDenoiser - -__all__ = ["DENOISERS", "WaveNetDenoiser", "AttentionDenoiser"] diff --git a/fish_diffusion/denoisers/attention.py b/fish_diffusion/denoisers/attention.py deleted file mode 100644 index 7e9a85ff..00000000 --- a/fish_diffusion/denoisers/attention.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import Iterable - -from torch import nn -from torch.nn import functional as F -from whisper.model import LayerNorm, ResidualAttentionBlock, sinusoids - -from fish_diffusion.denoisers.wavenet import DiffusionEmbedding - -from .builder import DENOISERS - - -@DENOISERS.register_module() -class AttentionDenoiser(nn.Module): - def __init__( - self, - mel_channels: int = 128, - condition_channels: int = 128, - hidden_size: int = 256, - n_head: int = 4, - n_layer: int = 4, - max_len: int = 10000, - ): - super().__init__() - - self.mel_proj = nn.Linear(mel_channels, hidden_size) - self.diffusion_embedding = DiffusionEmbedding(hidden_size) - self.condition_proj = nn.Linear(condition_channels, hidden_size) - - self.register_buffer("positional_embedding", sinusoids(max_len, hidden_size)) - - self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList( - [ResidualAttentionBlock(hidden_size, n_head) for _ in range(n_layer)] - ) - - self.ln_post = LayerNorm(hidden_size) - self.output_proj = nn.Linear(hidden_size, mel_channels) - - def forward(self, x, diffusion_step, conditioner): - """ - - :param x: [B, M, T] - :param diffusion_step: [B,] - :param conditioner: [B, M, T] - :return: - """ - - x = F.gelu(self.mel_proj(x.transpose(1, 2))) - step = self.diffusion_embedding(diffusion_step)[:, None] - positional_embedding = self.positional_embedding[None, : x.shape[1]] - condition = F.gelu(self.condition_proj(conditioner.transpose(1, 2))) - - for block in self.blocks: - x = block(x + step + condition + positional_embedding) - - x = self.ln_post(x) - - return self.output_proj(x).transpose(1, 2) diff --git a/fish_diffusion/denoisers/builder.py b/fish_diffusion/denoisers/builder.py deleted file mode 100644 index d7558a09..00000000 --- a/fish_diffusion/denoisers/builder.py +++ /dev/null @@ -1,3 +0,0 @@ -from mmengine import Registry - -DENOISERS = Registry("denoiser") diff --git a/fish_diffusion/diffusions/builder.py b/fish_diffusion/diffusions/builder.py deleted file mode 100644 index c8975d36..00000000 --- a/fish_diffusion/diffusions/builder.py +++ /dev/null @@ -1,3 +0,0 @@ -from mmengine import Registry - -DIFFUSIONS = Registry("diffusions") diff --git a/fish_diffusion/encoders/__init__.py b/fish_diffusion/modules/encoders/__init__.py similarity index 100% rename from fish_diffusion/encoders/__init__.py rename to fish_diffusion/modules/encoders/__init__.py diff --git a/fish_diffusion/encoders/attention.py b/fish_diffusion/modules/encoders/attention.py similarity index 100% rename from fish_diffusion/encoders/attention.py rename to fish_diffusion/modules/encoders/attention.py diff --git a/fish_diffusion/encoders/builder.py b/fish_diffusion/modules/encoders/builder.py similarity index 100% rename from fish_diffusion/encoders/builder.py rename to fish_diffusion/modules/encoders/builder.py diff --git a/fish_diffusion/encoders/fast_speech.py 
b/fish_diffusion/modules/encoders/fast_speech.py similarity index 76% rename from fish_diffusion/encoders/fast_speech.py rename to fish_diffusion/modules/encoders/fast_speech.py index 6ee20678..107ff767 100644 --- a/fish_diffusion/encoders/fast_speech.py +++ b/fish_diffusion/modules/encoders/fast_speech.py @@ -4,14 +4,210 @@ import torch.nn as nn import torch.nn.functional as F -from fish_diffusion.modules.positional_embedding import ( - RelPositionalEncoding, - SinusoidalPositionalEmbedding, -) - from .builder import ENCODERS +class PositionalEncoding(torch.nn.Module): + """Positional encoding. + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + reverse (bool): Whether to reverse the input position. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False): + """Construct an PositionalEncoding object.""" + super(PositionalEncoding, self).__init__() + self.d_model = d_model + self.reverse = reverse + self.xscale = math.sqrt(self.d_model) + self.dropout = torch.nn.Dropout(p=dropout_rate) + self.pe = None + self.extend_pe(torch.tensor(0.0).expand(1, max_len)) + + def extend_pe(self, x): + """Reset the positional encodings.""" + if self.pe is not None: + if self.pe.size(1) >= x.size(1): + if self.pe.dtype != x.dtype or self.pe.device != x.device: + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + pe = torch.zeros(x.size(1), self.d_model) + if self.reverse: + position = torch.arange( + x.size(1) - 1, -1, -1.0, dtype=torch.float32 + ).unsqueeze(1) + else: + position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.d_model, 2, dtype=torch.float32) + * -(math.log(10000.0) / self.d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.pe = pe.to(device=x.device, dtype=x.dtype) + + def forward(self, x: torch.Tensor): + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + """ + self.extend_pe(x) + x = x * self.xscale + self.pe[:, : x.size(1)] + return self.dropout(x) + + +class ScaledPositionalEncoding(PositionalEncoding): + """Scaled positional encoding module. + See Sec. 3.2 https://arxiv.org/abs/1809.08895 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + """ + + def __init__(self, d_model, dropout_rate, max_len=5000): + """Initialize class.""" + super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len) + self.alpha = torch.nn.Parameter(torch.tensor(1.0)) + + def reset_parameters(self): + """Reset parameters.""" + self.alpha.data = torch.tensor(1.0) + + def forward(self, x): + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + """ + self.extend_pe(x) + x = x + self.alpha * self.pe[:, : x.size(1)] + return self.dropout(x) + + +class RelPositionalEncoding(PositionalEncoding): + """Relative positional encoding module. + See : Appendix B in https://arxiv.org/abs/1901.02860 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. 
+ """ + + def __init__(self, d_model, dropout_rate, max_len=5000): + """Initialize class.""" + super().__init__(d_model, dropout_rate, max_len, reverse=True) + + def forward(self, x): + """Compute positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + torch.Tensor: Positional embedding tensor (1, time, `*`). + """ + self.extend_pe(x) + x = x * self.xscale + pos_emb = self.pe[:, : x.size(1)] + + return self.dropout(x) + self.dropout(pos_emb) + + +class SinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length. + + Padding symbols are ignored. + """ + + def __init__(self, embedding_dim, padding_idx, init_size=1024): + super().__init__() + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + self.weights = SinusoidalPositionalEmbedding.get_embedding( + init_size, + embedding_dim, + padding_idx, + ) + self.register_buffer("_float_tensor", torch.FloatTensor(1)) + + @staticmethod + def get_embedding(num_embeddings, embedding_dim, padding_idx=None): + """Build sinusoidal embeddings. + + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". + """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) + emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( + 1 + ) * emb.unsqueeze(0) + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( + num_embeddings, -1 + ) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + if padding_idx is not None: + emb[padding_idx, :] = 0 + return emb + + @staticmethod + def make_positions(tensor, padding_idx): + """Replace non-padding symbols with their position numbers. + + Position numbers begin at padding_idx+1. Padding symbols are ignored. + """ + # The series of casts and type-conversions here are carefully + # balanced to both work with ONNX export and XLA. In particular XLA + # prefers ints, cumsum defaults to output longs, and ONNX doesn"t know + # how to handle the dtype kwarg in cumsum. 
+ mask = tensor.ne(padding_idx).int() + return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx + + def forward( + self, input, incremental_state=None, timestep=None, positions=None, **kwargs + ): + """Input is expected to be of size [bsz x seqlen].""" + bsz, seq_len = input.shape[:2] + max_pos = self.padding_idx + 1 + seq_len + if self.weights is None or max_pos > self.weights.size(0): + # recompute/expand embeddings if needed + self.weights = SinusoidalPositionalEmbedding.get_embedding( + max_pos, + self.embedding_dim, + self.padding_idx, + ) + self.weights = self.weights.to(self._float_tensor) + + if incremental_state is not None: + # positions is the same for every token when decoding a single step + pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len + return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) + + positions = ( + self.make_positions(input, self.padding_idx) + if positions is None + else positions + ) + return ( + self.weights.index_select(0, positions.view(-1)) + .view(bsz, seq_len, -1) + .detach() + ) + + def max_positions(self): + """Maximum number of supported positions.""" + return int(1e5) # an arbitrary large number + + class Swish(torch.autograd.Function): @staticmethod def forward(ctx, i): diff --git a/fish_diffusion/encoders/identity.py b/fish_diffusion/modules/encoders/identity.py similarity index 100% rename from fish_diffusion/encoders/identity.py rename to fish_diffusion/modules/encoders/identity.py diff --git a/fish_diffusion/encoders/naive_projection.py b/fish_diffusion/modules/encoders/naive_projection.py similarity index 100% rename from fish_diffusion/encoders/naive_projection.py rename to fish_diffusion/modules/encoders/naive_projection.py diff --git a/fish_diffusion/feature_extractors/__init__.py b/fish_diffusion/modules/feature_extractors/__init__.py similarity index 88% rename from fish_diffusion/feature_extractors/__init__.py rename to fish_diffusion/modules/feature_extractors/__init__.py index bcfb16f6..536d7762 100644 --- a/fish_diffusion/feature_extractors/__init__.py +++ b/fish_diffusion/modules/feature_extractors/__init__.py @@ -3,12 +3,10 @@ from .content_vec import ContentVec from .hubert_soft import HubertSoft from .opencpop_transcription import OpenCpopTranscriptionToPhonemesDuration -from .pitch import PITCH_EXTRACTORS from .whisper import AlignedWhisper __all__ = [ "FEATURE_EXTRACTORS", - "PITCH_EXTRACTORS", "ChineseHubertSoft", "HubertSoft", "OpenCpopTranscriptionToPhonemesDuration", diff --git a/fish_diffusion/feature_extractors/base.py b/fish_diffusion/modules/feature_extractors/base.py similarity index 97% rename from fish_diffusion/feature_extractors/base.py rename to fish_diffusion/modules/feature_extractors/base.py index bc1bb50c..21811279 100644 --- a/fish_diffusion/feature_extractors/base.py +++ b/fish_diffusion/modules/feature_extractors/base.py @@ -1,4 +1,3 @@ -import librosa import torchaudio from torch import nn diff --git a/fish_diffusion/feature_extractors/builder.py b/fish_diffusion/modules/feature_extractors/builder.py similarity index 100% rename from fish_diffusion/feature_extractors/builder.py rename to fish_diffusion/modules/feature_extractors/builder.py diff --git a/fish_diffusion/feature_extractors/chinese_hubert.py b/fish_diffusion/modules/feature_extractors/chinese_hubert.py similarity index 100% rename from fish_diffusion/feature_extractors/chinese_hubert.py rename to fish_diffusion/modules/feature_extractors/chinese_hubert.py diff --git 
a/fish_diffusion/feature_extractors/content_vec.py b/fish_diffusion/modules/feature_extractors/content_vec.py similarity index 100% rename from fish_diffusion/feature_extractors/content_vec.py rename to fish_diffusion/modules/feature_extractors/content_vec.py diff --git a/fish_diffusion/feature_extractors/hubert_soft.py b/fish_diffusion/modules/feature_extractors/hubert_soft.py similarity index 100% rename from fish_diffusion/feature_extractors/hubert_soft.py rename to fish_diffusion/modules/feature_extractors/hubert_soft.py diff --git a/fish_diffusion/feature_extractors/opencpop_transcription.py b/fish_diffusion/modules/feature_extractors/opencpop_transcription.py similarity index 100% rename from fish_diffusion/feature_extractors/opencpop_transcription.py rename to fish_diffusion/modules/feature_extractors/opencpop_transcription.py diff --git a/fish_diffusion/feature_extractors/whisper.py b/fish_diffusion/modules/feature_extractors/whisper.py similarity index 100% rename from fish_diffusion/feature_extractors/whisper.py rename to fish_diffusion/modules/feature_extractors/whisper.py diff --git a/fish_diffusion/feature_extractors/pitch/__init__.py b/fish_diffusion/modules/pitch_extractors/__init__.py similarity index 100% rename from fish_diffusion/feature_extractors/pitch/__init__.py rename to fish_diffusion/modules/pitch_extractors/__init__.py diff --git a/fish_diffusion/feature_extractors/pitch/builder.py b/fish_diffusion/modules/pitch_extractors/builder.py similarity index 100% rename from fish_diffusion/feature_extractors/pitch/builder.py rename to fish_diffusion/modules/pitch_extractors/builder.py diff --git a/fish_diffusion/feature_extractors/pitch/crepe.py b/fish_diffusion/modules/pitch_extractors/crepe.py similarity index 100% rename from fish_diffusion/feature_extractors/pitch/crepe.py rename to fish_diffusion/modules/pitch_extractors/crepe.py diff --git a/fish_diffusion/feature_extractors/pitch/parsel_mouth.py b/fish_diffusion/modules/pitch_extractors/parsel_mouth.py similarity index 100% rename from fish_diffusion/feature_extractors/pitch/parsel_mouth.py rename to fish_diffusion/modules/pitch_extractors/parsel_mouth.py diff --git a/fish_diffusion/feature_extractors/pitch/world.py b/fish_diffusion/modules/pitch_extractors/world.py similarity index 100% rename from fish_diffusion/feature_extractors/pitch/world.py rename to fish_diffusion/modules/pitch_extractors/world.py diff --git a/fish_diffusion/modules/positional_embedding.py b/fish_diffusion/modules/positional_embedding.py deleted file mode 100644 index 7deb8272..00000000 --- a/fish_diffusion/modules/positional_embedding.py +++ /dev/null @@ -1,205 +0,0 @@ -import math - -import torch -from torch import nn - - -class PositionalEncoding(torch.nn.Module): - """Positional encoding. - Args: - d_model (int): Embedding dimension. - dropout_rate (float): Dropout rate. - max_len (int): Maximum input length. - reverse (bool): Whether to reverse the input position. 
- """ - - def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False): - """Construct an PositionalEncoding object.""" - super(PositionalEncoding, self).__init__() - self.d_model = d_model - self.reverse = reverse - self.xscale = math.sqrt(self.d_model) - self.dropout = torch.nn.Dropout(p=dropout_rate) - self.pe = None - self.extend_pe(torch.tensor(0.0).expand(1, max_len)) - - def extend_pe(self, x): - """Reset the positional encodings.""" - if self.pe is not None: - if self.pe.size(1) >= x.size(1): - if self.pe.dtype != x.dtype or self.pe.device != x.device: - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - pe = torch.zeros(x.size(1), self.d_model) - if self.reverse: - position = torch.arange( - x.size(1) - 1, -1, -1.0, dtype=torch.float32 - ).unsqueeze(1) - else: - position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) - div_term = torch.exp( - torch.arange(0, self.d_model, 2, dtype=torch.float32) - * -(math.log(10000.0) / self.d_model) - ) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0) - self.pe = pe.to(device=x.device, dtype=x.dtype) - - def forward(self, x: torch.Tensor): - """Add positional encoding. - Args: - x (torch.Tensor): Input tensor (batch, time, `*`). - Returns: - torch.Tensor: Encoded tensor (batch, time, `*`). - """ - self.extend_pe(x) - x = x * self.xscale + self.pe[:, : x.size(1)] - return self.dropout(x) - - -class ScaledPositionalEncoding(PositionalEncoding): - """Scaled positional encoding module. - See Sec. 3.2 https://arxiv.org/abs/1809.08895 - Args: - d_model (int): Embedding dimension. - dropout_rate (float): Dropout rate. - max_len (int): Maximum input length. - """ - - def __init__(self, d_model, dropout_rate, max_len=5000): - """Initialize class.""" - super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len) - self.alpha = torch.nn.Parameter(torch.tensor(1.0)) - - def reset_parameters(self): - """Reset parameters.""" - self.alpha.data = torch.tensor(1.0) - - def forward(self, x): - """Add positional encoding. - Args: - x (torch.Tensor): Input tensor (batch, time, `*`). - Returns: - torch.Tensor: Encoded tensor (batch, time, `*`). - """ - self.extend_pe(x) - x = x + self.alpha * self.pe[:, : x.size(1)] - return self.dropout(x) - - -class RelPositionalEncoding(PositionalEncoding): - """Relative positional encoding module. - See : Appendix B in https://arxiv.org/abs/1901.02860 - Args: - d_model (int): Embedding dimension. - dropout_rate (float): Dropout rate. - max_len (int): Maximum input length. - """ - - def __init__(self, d_model, dropout_rate, max_len=5000): - """Initialize class.""" - super().__init__(d_model, dropout_rate, max_len, reverse=True) - - def forward(self, x): - """Compute positional encoding. - Args: - x (torch.Tensor): Input tensor (batch, time, `*`). - Returns: - torch.Tensor: Encoded tensor (batch, time, `*`). - torch.Tensor: Positional embedding tensor (1, time, `*`). - """ - self.extend_pe(x) - x = x * self.xscale - pos_emb = self.pe[:, : x.size(1)] - - return self.dropout(x) + self.dropout(pos_emb) - - -class SinusoidalPositionalEmbedding(nn.Module): - """This module produces sinusoidal positional embeddings of any length. - - Padding symbols are ignored. 
- """ - - def __init__(self, embedding_dim, padding_idx, init_size=1024): - super().__init__() - self.embedding_dim = embedding_dim - self.padding_idx = padding_idx - self.weights = SinusoidalPositionalEmbedding.get_embedding( - init_size, - embedding_dim, - padding_idx, - ) - self.register_buffer("_float_tensor", torch.FloatTensor(1)) - - @staticmethod - def get_embedding(num_embeddings, embedding_dim, padding_idx=None): - """Build sinusoidal embeddings. - - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". - """ - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) - emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( - 1 - ) * emb.unsqueeze(0) - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( - num_embeddings, -1 - ) - if embedding_dim % 2 == 1: - # zero pad - emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) - if padding_idx is not None: - emb[padding_idx, :] = 0 - return emb - - @staticmethod - def make_positions(tensor, padding_idx): - """Replace non-padding symbols with their position numbers. - - Position numbers begin at padding_idx+1. Padding symbols are ignored. - """ - # The series of casts and type-conversions here are carefully - # balanced to both work with ONNX export and XLA. In particular XLA - # prefers ints, cumsum defaults to output longs, and ONNX doesn"t know - # how to handle the dtype kwarg in cumsum. - mask = tensor.ne(padding_idx).int() - return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx - - def forward( - self, input, incremental_state=None, timestep=None, positions=None, **kwargs - ): - """Input is expected to be of size [bsz x seqlen].""" - bsz, seq_len = input.shape[:2] - max_pos = self.padding_idx + 1 + seq_len - if self.weights is None or max_pos > self.weights.size(0): - # recompute/expand embeddings if needed - self.weights = SinusoidalPositionalEmbedding.get_embedding( - max_pos, - self.embedding_dim, - self.padding_idx, - ) - self.weights = self.weights.to(self._float_tensor) - - if incremental_state is not None: - # positions is the same for every token when decoding a single step - pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len - return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) - - positions = ( - self.make_positions(input, self.padding_idx) - if positions is None - else positions - ) - return ( - self.weights.index_select(0, positions.view(-1)) - .view(bsz, seq_len, -1) - .detach() - ) - - def max_positions(self): - """Maximum number of supported positions.""" - return int(1e5) # an arbitrary large number diff --git a/fish_diffusion/vocoders/__init__.py b/fish_diffusion/modules/vocoders/__init__.py similarity index 100% rename from fish_diffusion/vocoders/__init__.py rename to fish_diffusion/modules/vocoders/__init__.py diff --git a/fish_diffusion/vocoders/builder.py b/fish_diffusion/modules/vocoders/builder.py similarity index 100% rename from fish_diffusion/vocoders/builder.py rename to fish_diffusion/modules/vocoders/builder.py diff --git a/fish_diffusion/vocoders/istft_net/istft_net.py b/fish_diffusion/modules/vocoders/istft_net/istft_net.py similarity index 100% rename from fish_diffusion/vocoders/istft_net/istft_net.py rename to fish_diffusion/modules/vocoders/istft_net/istft_net.py diff --git a/fish_diffusion/vocoders/istft_net/mel.py 
b/fish_diffusion/modules/vocoders/istft_net/mel.py similarity index 100% rename from fish_diffusion/vocoders/istft_net/mel.py rename to fish_diffusion/modules/vocoders/istft_net/mel.py diff --git a/fish_diffusion/vocoders/istft_net/models.py b/fish_diffusion/modules/vocoders/istft_net/models.py similarity index 100% rename from fish_diffusion/vocoders/istft_net/models.py rename to fish_diffusion/modules/vocoders/istft_net/models.py diff --git a/fish_diffusion/vocoders/nsf_hifigan/__init__.py b/fish_diffusion/modules/vocoders/nsf_hifigan/__init__.py similarity index 100% rename from fish_diffusion/vocoders/nsf_hifigan/__init__.py rename to fish_diffusion/modules/vocoders/nsf_hifigan/__init__.py diff --git a/fish_diffusion/vocoders/nsf_hifigan/models.py b/fish_diffusion/modules/vocoders/nsf_hifigan/models.py similarity index 100% rename from fish_diffusion/vocoders/nsf_hifigan/models.py rename to fish_diffusion/modules/vocoders/nsf_hifigan/models.py diff --git a/fish_diffusion/vocoders/nsf_hifigan/nsf_hifigan.py b/fish_diffusion/modules/vocoders/nsf_hifigan/nsf_hifigan.py similarity index 100% rename from fish_diffusion/vocoders/nsf_hifigan/nsf_hifigan.py rename to fish_diffusion/modules/vocoders/nsf_hifigan/nsf_hifigan.py diff --git a/fish_diffusion/denoisers/wavenet.py b/fish_diffusion/modules/wavenet.py similarity index 97% rename from fish_diffusion/denoisers/wavenet.py rename to fish_diffusion/modules/wavenet.py index dd5e66ad..2028d2cb 100644 --- a/fish_diffusion/denoisers/wavenet.py +++ b/fish_diffusion/modules/wavenet.py @@ -4,8 +4,6 @@ import torch.nn.functional as F from torch import nn -from .builder import DENOISERS - class Mish(nn.Module): def forward(self, x): @@ -151,9 +149,11 @@ def forward(self, x): return x -@DENOISERS.register_module() -class WaveNetDenoiser(nn.Module): - """Conditional Diffusion Denoiser""" +class WaveNet(nn.Module): + """ + WaveNet + https://www.deepmind.com/blog/wavenet-a-generative-model-for-raw-audio + """ def __init__( self, @@ -164,7 +164,7 @@ def __init__( use_linear_bias=False, dilation_cycle=None, ): - super(WaveNetDenoiser, self).__init__() + super(WaveNet, self).__init__() self.input_projection = ConvNorm(mel_channels, residual_channels, kernel_size=1) self.diffusion_embedding = DiffusionEmbedding(residual_channels) diff --git a/fish_diffusion/schedulers/__init__.py b/fish_diffusion/schedulers/__init__.py new file mode 100644 index 00000000..13444963 --- /dev/null +++ b/fish_diffusion/schedulers/__init__.py @@ -0,0 +1,3 @@ +from .builder import LR_SCHEUDLERS + +__all__ = ["LR_SCHEUDLERS"] diff --git a/fish_diffusion/utils/scheduler.py b/fish_diffusion/schedulers/builder.py similarity index 100% rename from fish_diffusion/utils/scheduler.py rename to fish_diffusion/schedulers/builder.py diff --git a/fish_diffusion/utils/audio.py b/fish_diffusion/utils/audio.py index b583a049..79c32eb8 100644 --- a/fish_diffusion/utils/audio.py +++ b/fish_diffusion/utils/audio.py @@ -1,9 +1,10 @@ import math -from typing import Iterable +from typing import Iterable, Union import librosa import numpy as np import torch +from fish_audio_preprocess.utils import loudness_norm, separate_audio from torchaudio.transforms import MelSpectrogram @@ -116,3 +117,31 @@ def slice_audio( for i in range(start, end, chunk_size): yield i, i + chunk_size + + +def separate_vocals( + audio: np.ndarray, sr: int, device: Union[str, torch.device] = "cpu" +): + model = separate_audio.init_model("htdemucs", device=device) + audio = librosa.resample(audio, orig_sr=sr, 
target_sr=model.samplerate)[None] + + # To two channels + audio = np.concatenate([audio, audio], axis=0) + audio = torch.from_numpy(audio).to(device) + tracks = separate_audio.separate_audio( + model, audio, shifts=1, num_workers=0, progress=True + ) + audio = separate_audio.merge_tracks(tracks, filter=["vocals"]).cpu().numpy() + non_vocals = ( + separate_audio.merge_tracks(tracks, filter=["drums", "bass", "other"]) + .cpu() + .numpy() + ) + + vocals = librosa.resample(audio[0], orig_sr=model.samplerate, target_sr=sr) + non_vocals = librosa.resample(non_vocals[0], orig_sr=model.samplerate, target_sr=sr) + + # Normalize loudness + non_vocals = loudness_norm.loudness_norm(non_vocals, sr) + + return vocals, non_vocals diff --git a/fish_diffusion/utils/inference.py b/fish_diffusion/utils/inference.py index bb40f4fc..e0869cd4 100644 --- a/fish_diffusion/utils/inference.py +++ b/fish_diffusion/utils/inference.py @@ -1,9 +1,9 @@ import torch -from train import FishDiffusion +from fish_diffusion.archs.diffsinger.diffsinger import DiffSingerLightning -def load_checkpoint(config, checkpoint, device="cuda") -> FishDiffusion: +def load_checkpoint(config, checkpoint, device="cuda", model_cls=DiffSingerLightning): """Load checkpoint from path Args: @@ -12,10 +12,10 @@ def load_checkpoint(config, checkpoint, device="cuda") -> FishDiffusion: device: device Returns: - FishDiffusion: model + model """ - model = FishDiffusion(config) + model = model_cls(config) state_dict = torch.load(checkpoint, map_location="cpu") if "state_dict" in state_dict: # Checkpoint is saved by pl diff --git a/fish_diffusion/utils/ssim.py b/fish_diffusion/utils/ssim.py deleted file mode 100644 index 6a192abe..00000000 --- a/fish_diffusion/utils/ssim.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim -""" - -from math import exp - -import numpy as np -import torch -import torch.nn.functional as F -from torch.autograd import Variable - - -def gaussian(window_size, sigma): - gauss = torch.Tensor( - [ - exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2)) - for x in range(window_size) - ] - ) - return gauss / gauss.sum() - - -def create_window(window_size, channel): - _1D_window = gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - window = Variable( - _2D_window.expand(channel, 1, window_size, window_size).contiguous() - ) - return window - - -def _ssim(img1, img2, window, window_size, channel, size_average=True): - mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) - mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = ( - F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq - ) - sigma2_sq = ( - F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq - ) - sigma12 = ( - F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - - mu1_mu2 - ) - - C1 = 0.01**2 - C2 = 0.03**2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ( - (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2) - ) - - if size_average: - return ssim_map.mean() - else: - return ssim_map.mean(1) - - -class SSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True): - super(SSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = 1 - self.window = create_window(window_size, 
self.channel) - - def forward(self, img1, img2): - (_, channel, _, _) = img1.size() - - if channel == self.channel and self.window.data.type() == img1.data.type(): - window = self.window - else: - window = create_window(self.window_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - self.window = window - self.channel = channel - - return _ssim(img1, img2, window, self.window_size, channel, self.size_average) - - -window = None - - -def ssim(img1, img2, window_size=11, size_average=True): - (_, channel, _, _) = img1.size() - global window - if window is None: - window = create_window(window_size, channel) - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - return _ssim(img1, img2, window, window_size, channel, size_average) - - -def ssim_loss(decoder_output, target, bias=6.0): - # decoder_output : B x T x n_mel - # target : B x T x n_mel - assert decoder_output.shape == target.shape - - decoder_output = decoder_output[:, None] + bias - target = target[:, None] + bias - ssim_loss = 1 - ssim(decoder_output, target, size_average=False) - ssim_loss = ssim_loss.mean() - - return ssim_loss diff --git a/inference.py b/inference.py deleted file mode 100644 index eddc256e..00000000 --- a/inference.py +++ /dev/null @@ -1,431 +0,0 @@ -import argparse -import json -import os -from functools import partial -from typing import Union - -import gradio as gr -import librosa -import numpy as np -import soundfile as sf -import torch -from fish_audio_preprocess.utils import loudness_norm, separate_audio -from loguru import logger -from mmengine import Config - -from fish_diffusion.feature_extractors import FEATURE_EXTRACTORS, PITCH_EXTRACTORS -from fish_diffusion.utils.audio import get_mel_from_audio, slice_audio -from fish_diffusion.utils.inference import load_checkpoint -from fish_diffusion.utils.tensor import repeat_expand - - -@torch.no_grad() -def inference( - config, - checkpoint, - input_path, - output_path, - speaker_id=0, - pitch_adjust=0, - silence_threshold=60, - max_slice_duration=30.0, - extract_vocals=True, - merge_non_vocals=True, - vocals_loudness_gain=0.0, - sampler_interval=None, - sampler_progress=False, - device="cuda", - gradio_progress=None, -): - """Inference - - Args: - config: config - checkpoint: checkpoint path - input_path: input path - output_path: output path - speaker_id: speaker id - pitch_adjust: pitch adjust - silence_threshold: silence threshold of librosa.effects.split - max_slice_duration: maximum duration of each slice - extract_vocals: extract vocals - merge_non_vocals: merge non-vocals, only works when extract_vocals is True - vocals_loudness_gain: loudness gain of vocals (dB) - sampler_interval: sampler interval, lower value means higher quality - sampler_progress: show sampler progress - device: device - gradio_progress: gradio progress callback - """ - - if sampler_interval is not None: - config.model.diffusion.sampler_interval = sampler_interval - - if os.path.isdir(checkpoint): - # Find the latest checkpoint - checkpoints = sorted(os.listdir(checkpoint)) - logger.info(f"Found {len(checkpoints)} checkpoints, using {checkpoints[-1]}") - checkpoint = os.path.join(checkpoint, checkpoints[-1]) - - audio, sr = librosa.load(input_path, sr=config.sampling_rate, mono=True) - - # Extract vocals - - if extract_vocals: - logger.info("Extracting vocals...") - - if gradio_progress is not None: - gradio_progress(0, "Extracting vocals...") - - model = 
separate_audio.init_model("htdemucs", device=device) - audio = librosa.resample(audio, orig_sr=sr, target_sr=model.samplerate)[None] - - # To two channels - audio = np.concatenate([audio, audio], axis=0) - audio = torch.from_numpy(audio).to(device) - tracks = separate_audio.separate_audio( - model, audio, shifts=1, num_workers=0, progress=True - ) - audio = separate_audio.merge_tracks(tracks, filter=["vocals"]).cpu().numpy() - non_vocals = ( - separate_audio.merge_tracks(tracks, filter=["drums", "bass", "other"]) - .cpu() - .numpy() - ) - - audio = librosa.resample(audio[0], orig_sr=model.samplerate, target_sr=sr) - non_vocals = librosa.resample( - non_vocals[0], orig_sr=model.samplerate, target_sr=sr - ) - - # Normalize loudness - non_vocals = loudness_norm.loudness_norm(non_vocals, sr) - - # Normalize loudness - audio = loudness_norm.loudness_norm(audio, sr) - - # Slice into segments - segments = list( - slice_audio( - audio, sr, max_duration=max_slice_duration, top_db=silence_threshold - ) - ) - logger.info(f"Sliced into {len(segments)} segments") - - # Load models - text_features_extractor = FEATURE_EXTRACTORS.build( - config.preprocessing.text_features_extractor - ).to(device) - text_features_extractor.eval() - - model = load_checkpoint(config, checkpoint, device=device) - - pitch_extractor = PITCH_EXTRACTORS.build(config.preprocessing.pitch_extractor) - assert pitch_extractor is not None, "Pitch extractor not found" - - generated_audio = np.zeros_like(audio) - audio_torch = torch.from_numpy(audio).to(device)[None] - - for idx, (start, end) in enumerate(segments): - if gradio_progress is not None: - gradio_progress(idx / len(segments), "Generating audio...") - - segment = audio_torch[:, start:end] - logger.info( - f"Processing segment {idx + 1}/{len(segments)}, duration: {segment.shape[-1] / sr:.2f}s" - ) - - # Extract mel - mel = get_mel_from_audio(segment, sr) - - # Extract pitch (f0) - pitch = pitch_extractor(segment, sr, pad_to=mel.shape[-1]).float() - pitch *= 2 ** (pitch_adjust / 12) - - # Extract text features - text_features = text_features_extractor(segment, sr)[0] - text_features = repeat_expand(text_features, mel.shape[-1]).T - - # Pitch shift should always be 0 for inference to avoid distortion - pitch_shift = None - if config.model.get("pitch_shift_encoder"): - pitch_shift = torch.zeros((1, 1), device=device) - - # Predict - contents_lens = torch.tensor([mel.shape[-1]]).to(device) - - features = model.model.forward_features( - speakers=torch.tensor([speaker_id]).long().to(device), - contents=text_features[None].to(device), - contents_lens=contents_lens, - contents_max_len=max(contents_lens), - mel_lens=contents_lens, - mel_max_len=max(contents_lens), - pitches=pitch[None].to(device), - pitch_shift=pitch_shift, - ) - - result = model.model.diffusion(features["features"], progress=sampler_progress) - wav = model.vocoder.spec2wav(result[0].T, f0=pitch).cpu().numpy() - max_wav_len = generated_audio.shape[-1] - start - generated_audio[start : start + wav.shape[-1]] = wav[:max_wav_len] - - # Loudness normalization - generated_audio = loudness_norm.loudness_norm(generated_audio, sr) - - # Loudness gain - loudness_float = 10 ** (vocals_loudness_gain / 20) - generated_audio = generated_audio * loudness_float - - # Merge non-vocals - if extract_vocals and merge_non_vocals: - generated_audio = (generated_audio + non_vocals) / 2 - - logger.info("Done") - - if output_path is not None: - sf.write(output_path, generated_audio, sr) - - return generated_audio, sr - - -def parse_args(): - 
parser = argparse.ArgumentParser() - - parser.add_argument( - "--config", - type=str, - required=True, - help="Path to the config file", - ) - - parser.add_argument( - "--checkpoint", - type=str, - required=True, - help="Path to the checkpoint file", - ) - - parser.add_argument( - "--gradio", - action="store_true", - help="Run in gradio mode", - ) - - parser.add_argument( - "--gradio_share", - action="store_true", - help="Share gradio app", - ) - - parser.add_argument( - "--input", - type=str, - required=False, - help="Path to the input audio file", - ) - - parser.add_argument( - "--output", - type=str, - required=False, - help="Path to the output audio file", - ) - - parser.add_argument( - "--speaker_id", - type=int, - default=0, - help="Speaker id", - ) - - parser.add_argument( - "--speaker_mapping", - type=str, - default=None, - help="Speaker mapping file (gradio mode only)", - ) - - parser.add_argument( - "--pitch_adjust", - type=int, - default=0, - help="Pitch adjustment in semitones", - ) - - parser.add_argument( - "--extract_vocals", - action="store_true", - help="Extract vocals", - ) - - parser.add_argument( - "--merge_non_vocals", - action="store_true", - help="Merge non-vocals", - ) - - parser.add_argument( - "--vocals_loudness_gain", - type=float, - default=0, - help="Loudness gain for vocals", - ) - - parser.add_argument( - "--sampler_interval", - type=int, - default=None, - required=False, - help="Sampler interval, if not specified, will be taken from config", - ) - - parser.add_argument( - "--sampler_progress", - action="store_true", - help="Show sampler progress", - ) - - parser.add_argument( - "--device", - type=str, - default=None, - required=False, - help="Device to use", - ) - - return parser.parse_args() - - -def run_inference( - config_path: str, - model_path: str, - input_path: str, - speaker: Union[int, str], - pitch_adjust: int, - sampler_interval: int, - extract_vocals: bool, - device: str, - progress=gr.Progress(), - speaker_mapping: dict = None, -): - if speaker_mapping is not None and isinstance(speaker, str): - speaker = speaker_mapping[speaker] - - audio, sr = inference( - Config.fromfile(config_path), - model_path, - input_path=input_path, - output_path=None, - speaker_id=speaker, - pitch_adjust=pitch_adjust, - sampler_interval=round(sampler_interval), - extract_vocals=extract_vocals, - merge_non_vocals=False, - device=device, - gradio_progress=progress, - ) - - return (sr, audio) - - -def launch_gradio(args): - with gr.Blocks(title="Fish Diffusion") as app: - gr.Markdown("# Fish Diffusion SVC Inference") - - with gr.Row(): - with gr.Column(): - input_audio = gr.Audio( - label="Input Audio", - type="filepath", - value=args.input, - ) - output_audio = gr.Audio(label="Output Audio") - - with gr.Column(): - if args.speaker_mapping is not None: - speaker_mapping = json.load(open(args.speaker_mapping)) - - speaker = gr.Dropdown( - label="Speaker Name (Used for Multi-Speaker Models)", - choices=list(speaker_mapping.keys()), - value=list(speaker_mapping.keys())[0], - ) - else: - speaker_mapping = None - speaker = gr.Number( - label="Speaker ID (Used for Multi-Speaker Models)", - value=args.speaker_id, - ) - - pitch_adjust = gr.Number( - label="Pitch Adjust (Semitones)", value=args.pitch_adjust - ) - sampler_interval = gr.Slider( - label="Sampler Interval (⬆️ Faster Generation, ⬇️ Better Quality)", - value=args.sampler_interval or 10, - minimum=1, - maximum=100, - ) - extract_vocals = gr.Checkbox( - label="Extract Vocals (For low quality audio)", - 
value=args.extract_vocals, - ) - device = gr.Radio( - label="Device", choices=["cuda", "cpu"], value=args.device or "cuda" - ) - - run_btn = gr.Button(label="Run") - - run_btn.click( - partial( - run_inference, - args.config, - args.checkpoint, - speaker_mapping=speaker_mapping, - ), - [ - input_audio, - speaker, - pitch_adjust, - sampler_interval, - extract_vocals, - device, - ], - output_audio, - ) - - app.queue(concurrency_count=2).launch(share=args.gradio_share) - - -if __name__ == "__main__": - args = parse_args() - - assert args.gradio or ( - args.input is not None and args.output is not None - ), "Either --gradio or --input and --output should be specified" - - if args.device is None: - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - device = torch.device(args.device) - - if args.gradio: - args.device = device - launch_gradio(args) - - else: - - inference( - Config.fromfile(args.config), - args.checkpoint, - args.input, - args.output, - speaker_id=args.speaker_id, - pitch_adjust=args.pitch_adjust, - extract_vocals=args.extract_vocals, - merge_non_vocals=args.merge_non_vocals, - vocals_loudness_gain=args.vocals_loudness_gain, - sampler_interval=args.sampler_interval, - sampler_progress=args.sampler_progress, - device=device, - ) diff --git a/poetry.lock b/poetry.lock index 3726a07c..2c3695dc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -283,19 +283,16 @@ files = [ [[package]] name = "babel" -version = "2.11.0" +version = "2.12.1" description = "Internationalization utilities" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "Babel-2.11.0-py3-none-any.whl", hash = "sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe"}, - {file = "Babel-2.11.0.tar.gz", hash = "sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6"}, + {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, + {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, ] -[package.dependencies] -pytz = ">=2015.7" - [[package]] name = "beautifulsoup4" version = "4.11.2" @@ -1112,14 +1109,14 @@ testing = ["covdefaults (>=2.2.2)", "coverage (>=7.0.1)", "pytest (>=7.2)", "pyt [[package]] name = "fish-audio-preprocess" -version = "0.1.9" +version = "0.1.10" description = "Preprocess audio data" category = "main" optional = false python-versions = ">=3.9,<4.0" files = [ - {file = "fish_audio_preprocess-0.1.9-py3-none-any.whl", hash = "sha256:12de186e6401d5b1d321a1e11ec03fffb78065caf47696db6c596dcc22161bd7"}, - {file = "fish_audio_preprocess-0.1.9.tar.gz", hash = "sha256:2652d9516539c8b2601496b9704f8caa7d06241aa303d2896a15d28785bd1ae8"}, + {file = "fish_audio_preprocess-0.1.10-py3-none-any.whl", hash = "sha256:ec76e8675030fa9170f10da617dd4061e92cf1285d00e3c316eedfd24fb2a7b8"}, + {file = "fish_audio_preprocess-0.1.10.tar.gz", hash = "sha256:86ebb6adfedbd16b2e88be6eb44981978a3623bbd5fb44c408b29d5871e13d03"}, ] [package.dependencies] @@ -1181,14 +1178,14 @@ Six = "*" [[package]] name = "flatbuffers" -version = "23.1.21" +version = "23.3.3" description = "The FlatBuffers serialization format for Python" category = "main" optional = false python-versions = "*" files = [ - {file = "flatbuffers-23.1.21-py2.py3-none-any.whl", hash = "sha256:2e4101b291b14f21e87ea20b7bf7127b11563f6084e352d2d708bddd545c9265"}, - {file = "flatbuffers-23.1.21.tar.gz", hash = 
"sha256:a948913bbb5d83c43a1193d7943c90e6c0ab732e7f2983111104250aeb61ff85"}, + {file = "flatbuffers-23.3.3-py2.py3-none-any.whl", hash = "sha256:5ad36d376240090757e8f0a2cfaf6abcc81c6536c0dc988060375fd0899121f8"}, + {file = "flatbuffers-23.3.3.tar.gz", hash = "sha256:cabd87c4882f37840f6081f094b2c5bc28cefc2a6357732746936d055ab45c3d"}, ] [[package]] @@ -1386,14 +1383,14 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.30" -description = "GitPython is a python library used to interact with Git repositories" +version = "3.1.31" +description = "GitPython is a Python library used to interact with Git repositories" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"}, - {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"}, + {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, + {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, ] [package.dependencies] @@ -1401,14 +1398,14 @@ gitdb = ">=4.0.1,<5" [[package]] name = "google-auth" -version = "2.16.0" +version = "2.16.2" description = "Google Authentication Library" category = "main" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" files = [ - {file = "google-auth-2.16.0.tar.gz", hash = "sha256:ed7057a101af1146f0554a769930ac9de506aeca4fd5af6543ebe791851a9fbd"}, - {file = "google_auth-2.16.0-py2.py3-none-any.whl", hash = "sha256:5045648c821fb72384cdc0e82cc326df195f113a33049d9b62b74589243d2acc"}, + {file = "google-auth-2.16.2.tar.gz", hash = "sha256:07e14f34ec288e3f33e00e2e3cc40c8942aa5d4ceac06256a28cd8e786591420"}, + {file = "google_auth-2.16.2-py2.py3-none-any.whl", hash = "sha256:2fef3cf94876d1a0e204afece58bb4d83fb57228aaa366c64045039fda6770a2"}, ] [package.dependencies] @@ -1445,14 +1442,14 @@ tool = ["click (>=6.0.0)"] [[package]] name = "gradio" -version = "3.18.0" +version = "3.20.0" description = "Python library for easily interacting with trained machine learning models" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "gradio-3.18.0-py3-none-any.whl", hash = "sha256:de608f310584d1b16c9554015352adfa4804e6319d20a6b66e271377d2bbb31d"}, - {file = "gradio-3.18.0.tar.gz", hash = "sha256:f66d19c651c740da6cfa2b411b0e19942579532e0ffc5c41f71a2adbf0bc5c30"}, + {file = "gradio-3.20.0-py3-none-any.whl", hash = "sha256:0beb7bf889d25f414082a5d3c57719d3d6f6427d5748a1755e91b823d215b910"}, + {file = "gradio-3.20.0.tar.gz", hash = "sha256:bcc5a9071c03cdce1b4780995298991f748284217a462c292ede84da4b0f273c"}, ] [package.dependencies] @@ -1464,9 +1461,10 @@ ffmpy = "*" fsspec = "*" httpx = "*" jinja2 = "*" -markdown-it-py = {version = ">=2.0.0", extras = ["linkify", "plugins"]} +markdown-it-py = {version = ">=2.0.0", extras = ["linkify"]} markupsafe = "*" matplotlib = "*" +mdit-py-plugins = "<=0.3.3" numpy = "*" orjson = "*" pandas = "*" @@ -1483,61 +1481,61 @@ websockets = ">=10.0" [[package]] name = "grpcio" -version = "1.51.1" +version = "1.51.3" description = "HTTP/2-based RPC framework" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-1.51.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:cc2bece1737b44d878cc1510ea04469a8073dbbcdd762175168937ae4742dfb3"}, - {file = 
"grpcio-1.51.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:e223a9793522680beae44671b9ed8f6d25bbe5ddf8887e66aebad5e0686049ef"}, - {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:24ac1154c4b2ab4a0c5326a76161547e70664cd2c39ba75f00fc8a2170964ea2"}, - {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4ef09f8997c4be5f3504cefa6b5c6cc3cf648274ce3cede84d4342a35d76db6"}, - {file = "grpcio-1.51.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8a0b77e992c64880e6efbe0086fe54dfc0bbd56f72a92d9e48264dcd2a3db98"}, - {file = "grpcio-1.51.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:eacad297ea60c72dd280d3353d93fb1dcca952ec11de6bb3c49d12a572ba31dd"}, - {file = "grpcio-1.51.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:16c71740640ba3a882f50b01bf58154681d44b51f09a5728180a8fdc66c67bd5"}, - {file = "grpcio-1.51.1-cp310-cp310-win32.whl", hash = "sha256:29cb97d41a4ead83b7bcad23bdb25bdd170b1e2cba16db6d3acbb090bc2de43c"}, - {file = "grpcio-1.51.1-cp310-cp310-win_amd64.whl", hash = "sha256:9ff42c5620b4e4530609e11afefa4a62ca91fa0abb045a8957e509ef84e54d30"}, - {file = "grpcio-1.51.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:bc59f7ba87972ab236f8669d8ca7400f02a0eadf273ca00e02af64d588046f02"}, - {file = "grpcio-1.51.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3c2b3842dcf870912da31a503454a33a697392f60c5e2697c91d133130c2c85d"}, - {file = "grpcio-1.51.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b011674090594f1f3245960ced7386f6af35485a38901f8afee8ad01541dbd"}, - {file = "grpcio-1.51.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d680356a975d9c66a678eb2dde192d5dc427a7994fb977363634e781614f7c"}, - {file = "grpcio-1.51.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:094e64236253590d9d4075665c77b329d707b6fca864dd62b144255e199b4f87"}, - {file = "grpcio-1.51.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:257478300735ce3c98d65a930bbda3db172bd4e00968ba743e6a1154ea6edf10"}, - {file = "grpcio-1.51.1-cp311-cp311-win32.whl", hash = "sha256:5a6ebcdef0ef12005d56d38be30f5156d1cb3373b52e96f147f4a24b0ddb3a9d"}, - {file = "grpcio-1.51.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f9b0023c2c92bebd1be72cdfca23004ea748be1813a66d684d49d67d836adde"}, - {file = "grpcio-1.51.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:cd3baccea2bc5c38aeb14e5b00167bd4e2373a373a5e4d8d850bd193edad150c"}, - {file = "grpcio-1.51.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:17ec9b13cec4a286b9e606b48191e560ca2f3bbdf3986f91e480a95d1582e1a7"}, - {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:fbdbe9a849854fe484c00823f45b7baab159bdd4a46075302281998cb8719df5"}, - {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31bb6bc7ff145e2771c9baf612f4b9ebbc9605ccdc5f3ff3d5553de7fc0e0d79"}, - {file = "grpcio-1.51.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e473525c28251558337b5c1ad3fa969511e42304524a4e404065e165b084c9e4"}, - {file = "grpcio-1.51.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6f0b89967ee11f2b654c23b27086d88ad7bf08c0b3c2a280362f28c3698b2896"}, - {file = "grpcio-1.51.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7942b32a291421460d6a07883033e392167d30724aa84987e6956cd15f1a21b9"}, - {file = "grpcio-1.51.1-cp37-cp37m-win32.whl", hash = "sha256:f96ace1540223f26fbe7c4ebbf8a98e3929a6aa0290c8033d12526847b291c0f"}, - {file = 
"grpcio-1.51.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f1fec3abaf274cdb85bf3878167cfde5ad4a4d97c68421afda95174de85ba813"}, - {file = "grpcio-1.51.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:0e1a9e1b4a23808f1132aa35f968cd8e659f60af3ffd6fb00bcf9a65e7db279f"}, - {file = "grpcio-1.51.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:6df3b63538c362312bc5fa95fb965069c65c3ea91d7ce78ad9c47cab57226f54"}, - {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:172405ca6bdfedd6054c74c62085946e45ad4d9cec9f3c42b4c9a02546c4c7e9"}, - {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:506b9b7a4cede87d7219bfb31014d7b471cfc77157da9e820a737ec1ea4b0663"}, - {file = "grpcio-1.51.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb93051331acbb75b49a2a0fd9239c6ba9528f6bdc1dd400ad1cb66cf864292"}, - {file = "grpcio-1.51.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5dca372268c6ab6372d37d6b9f9343e7e5b4bc09779f819f9470cd88b2ece3c3"}, - {file = "grpcio-1.51.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:471d39d3370ca923a316d49c8aac66356cea708a11e647e3bdc3d0b5de4f0a40"}, - {file = "grpcio-1.51.1-cp38-cp38-win32.whl", hash = "sha256:75e29a90dc319f0ad4d87ba6d20083615a00d8276b51512e04ad7452b5c23b04"}, - {file = "grpcio-1.51.1-cp38-cp38-win_amd64.whl", hash = "sha256:f1158bccbb919da42544a4d3af5d9296a3358539ffa01018307337365a9a0c64"}, - {file = "grpcio-1.51.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:59dffade859f157bcc55243714d57b286da6ae16469bf1ac0614d281b5f49b67"}, - {file = "grpcio-1.51.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:dad6533411d033b77f5369eafe87af8583178efd4039c41d7515d3336c53b4f1"}, - {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:4c4423ea38a7825b8fed8934d6d9aeebdf646c97e3c608c3b0bcf23616f33877"}, - {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dc5354e38e5adf2498312f7241b14c7ce3484eefa0082db4297189dcbe272e6"}, - {file = "grpcio-1.51.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97d67983189e2e45550eac194d6234fc38b8c3b5396c153821f2d906ed46e0ce"}, - {file = "grpcio-1.51.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:538d981818e49b6ed1e9c8d5e5adf29f71c4e334e7d459bf47e9b7abb3c30e09"}, - {file = "grpcio-1.51.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9235dcd5144a83f9ca6f431bd0eccc46b90e2c22fe27b7f7d77cabb2fb515595"}, - {file = "grpcio-1.51.1-cp39-cp39-win32.whl", hash = "sha256:aacb54f7789ede5cbf1d007637f792d3e87f1c9841f57dd51abf89337d1b8472"}, - {file = "grpcio-1.51.1-cp39-cp39-win_amd64.whl", hash = "sha256:2b170eaf51518275c9b6b22ccb59450537c5a8555326fd96ff7391b5dd75303c"}, - {file = "grpcio-1.51.1.tar.gz", hash = "sha256:e6dfc2b6567b1c261739b43d9c59d201c1b89e017afd9e684d85aa7a186c9f7a"}, + {file = "grpcio-1.51.3-cp310-cp310-linux_armv7l.whl", hash = "sha256:f601aaeae18dab81930fb8d4f916b0da21e89bb4b5f7367ef793f46b4a76b7b0"}, + {file = "grpcio-1.51.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:eef0450a4b5ed11feab639bf3eb1b6e23d0efa9b911bf7b06fb60e14f5f8a585"}, + {file = "grpcio-1.51.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:82b0ad8ac825d4bb31bff9f638557c045f4a6d824d84b21e893968286f88246b"}, + {file = "grpcio-1.51.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3667c06e37d6cd461afdd51cefe6537702f3d1dc5ff4cac07e88d8b4795dc16f"}, + {file = "grpcio-1.51.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:3709048fe0aa23dda09b3e69849a12055790171dab9e399a72ea8f9dfbf9ac80"}, + {file = "grpcio-1.51.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:200d69857f9910f7458b39b9bcf83ee4a180591b40146ba9e49314e3a7419313"}, + {file = "grpcio-1.51.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cd9a5e68e79c5f031500e67793048a90209711e0854a9ddee8a3ce51728de4e5"}, + {file = "grpcio-1.51.3-cp310-cp310-win32.whl", hash = "sha256:6604f614016127ae10969176bbf12eb0e03d2fb3d643f050b3b69e160d144fb4"}, + {file = "grpcio-1.51.3-cp310-cp310-win_amd64.whl", hash = "sha256:e95c7ccd4c5807adef1602005513bf7c7d14e5a41daebcf9d8d30d8bf51b8f81"}, + {file = "grpcio-1.51.3-cp311-cp311-linux_armv7l.whl", hash = "sha256:5e77ee138100f0bb55cbd147840f87ee6241dbd25f09ea7cd8afe7efff323449"}, + {file = "grpcio-1.51.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:68a7514b754e38e8de9075f7bb4dee919919515ec68628c43a894027e40ddec4"}, + {file = "grpcio-1.51.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c1b9f8afa62ff265d86a4747a2990ec5a96e4efce5d5888f245a682d66eca47"}, + {file = "grpcio-1.51.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8de30f0b417744288cec65ec8cf84b8a57995cf7f1e84ccad2704d93f05d0aae"}, + {file = "grpcio-1.51.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b69c7adc7ed60da1cb1b502853db61f453fc745f940cbcc25eb97c99965d8f41"}, + {file = "grpcio-1.51.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d81528ffe0e973dc840ec73a4132fd18b8203ad129d7410155d951a0a7e4f5d0"}, + {file = "grpcio-1.51.3-cp311-cp311-win32.whl", hash = "sha256:040eb421613b57c696063abde405916dd830203c184c9000fc8c3b3b3c950325"}, + {file = "grpcio-1.51.3-cp311-cp311-win_amd64.whl", hash = "sha256:2a8e17286c4240137d933b8ca506465472248b4ce0fe46f3404459e708b65b68"}, + {file = "grpcio-1.51.3-cp37-cp37m-linux_armv7l.whl", hash = "sha256:d5cd1389669a847555df54177b911d9ff6f17345b2a6f19388707b7a9f724c88"}, + {file = "grpcio-1.51.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:be1bf35ce82cdbcac14e39d5102d8de4079a1c1a6a06b68e41fcd9ef64f9dd28"}, + {file = "grpcio-1.51.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:5eed34994c095e2bf7194ffac7381c6068b057ef1e69f8f08db77771350a7566"}, + {file = "grpcio-1.51.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9a7d88082b2a17ae7bd3c2354d13bab0453899e0851733f6afa6918373f476"}, + {file = "grpcio-1.51.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c8abbc5f837111e7bd619612eedc223c290b0903b952ce0c7b00840ea70f14"}, + {file = "grpcio-1.51.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:165b05af77e6aecb4210ae7663e25acf234ba78a7c1c157fa5f2efeb0d6ec53c"}, + {file = "grpcio-1.51.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:54e36c2ee304ff15f2bfbdc43d2b56c63331c52d818c364e5b5214e5bc2ad9f6"}, + {file = "grpcio-1.51.3-cp37-cp37m-win32.whl", hash = "sha256:cd0daac21d9ef5e033a5100c1d3aa055bbed28bfcf070b12d8058045c4e821b1"}, + {file = "grpcio-1.51.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2fdd6333ce96435408565a9dbbd446212cd5d62e4d26f6a3c0feb1e3c35f1cc8"}, + {file = "grpcio-1.51.3-cp38-cp38-linux_armv7l.whl", hash = "sha256:54b0c29bdd9a3b1e1b61443ab152f060fc719f1c083127ab08d03fac5efd51be"}, + {file = "grpcio-1.51.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:ffaaf7e93fcb437356b5a4b23bf36e8a3d0221399ff77fd057e4bc77776a24be"}, + {file = "grpcio-1.51.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = 
"sha256:eafbe7501a3268d05f2e450e1ddaffb950d842a8620c13ec328b501d25d2e2c3"}, + {file = "grpcio-1.51.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:881ecb34feabf31c6b3b9bbbddd1a5b57e69f805041e5a2c6c562a28574f71c4"}, + {file = "grpcio-1.51.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e860a3222139b41d430939bbec2ec9c3f6c740938bf7a04471a9a8caaa965a2e"}, + {file = "grpcio-1.51.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:49ede0528e9dac7e8a9fe30b16c73b630ddd9a576bf4b675eb6b0c53ee5ca00f"}, + {file = "grpcio-1.51.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6972b009638b40a448d10e1bc18e2223143b8a7aa20d7def0d78dd4af4126d12"}, + {file = "grpcio-1.51.3-cp38-cp38-win32.whl", hash = "sha256:5694448256e3cdfe5bd358f1574a3f2f51afa20cc834713c4b9788d60b7cc646"}, + {file = "grpcio-1.51.3-cp38-cp38-win_amd64.whl", hash = "sha256:3ea4341efe603b049e8c9a5f13c696ca37fcdf8a23ca35f650428ad3606381d9"}, + {file = "grpcio-1.51.3-cp39-cp39-linux_armv7l.whl", hash = "sha256:6c677581ce129f5fa228b8f418cee10bd28dd449f3a544ea73c8ba590ee49d0b"}, + {file = "grpcio-1.51.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:30e09b5e0531685e176f49679b6a3b190762cc225f4565e55a899f5e14b3aa62"}, + {file = "grpcio-1.51.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c831f31336e81243f85b6daff3e5e8a123302ce0ea1f2726ad752fd7a59f3aee"}, + {file = "grpcio-1.51.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2cd2e4cefb724cab1ba2df4b7535a9980531b9ec51b4dbb5f137a1f3a3754ef0"}, + {file = "grpcio-1.51.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a0d0bf44438869d307f85a54f25a896ad6b4b0ca12370f76892ad732928d87"}, + {file = "grpcio-1.51.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c02abd55409bfb293371554adf6a4401197ec2133dd97727c01180889014ba4d"}, + {file = "grpcio-1.51.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2f8ff75e61e1227ba7a3f16b2eadbcc11d0a54096d52ab75a6b88cfbe56f55d1"}, + {file = "grpcio-1.51.3-cp39-cp39-win32.whl", hash = "sha256:6c99a73a6260bdf844b2e5ddad02dcd530310f80e1fa72c300fa19c1c7496962"}, + {file = "grpcio-1.51.3-cp39-cp39-win_amd64.whl", hash = "sha256:22bdfac4f7f27acdd4da359b5e7e1973dc74bf1ed406729b07d0759fde2f064b"}, + {file = "grpcio-1.51.3.tar.gz", hash = "sha256:be7b2265b7527bb12109a7727581e274170766d5b3c9258d4e466f4872522d7a"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.51.1)"] +protobuf = ["grpcio-tools (>=1.51.3)"] [[package]] name = "h11" @@ -1599,14 +1597,14 @@ socks = ["socksio (>=1.0.0,<2.0.0)"] [[package]] name = "huggingface-hub" -version = "0.12.0" +version = "0.12.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" category = "main" optional = false python-versions = ">=3.7.0" files = [ - {file = "huggingface_hub-0.12.0-py3-none-any.whl", hash = "sha256:93809eabbfb2058a808bddf8b2a70f645de3f9df73ce87ddf5163d4c74b71c0c"}, - {file = "huggingface_hub-0.12.0.tar.gz", hash = "sha256:da82c9ec8f9d8f976ffd3fd8249d20bb35c2dd3145a9f7ca1106f0ebefd9afa0"}, + {file = "huggingface_hub-0.12.1-py3-none-any.whl", hash = "sha256:867586cc8543fe1bd43a219fedbea7d71690021ad80f0c46f35c4751069278d7"}, + {file = "huggingface_hub-0.12.1.tar.gz", hash = "sha256:6f960f6246ef9c3446d0d6275e853485515682c350917fdaf2a59705f8b9ebb3"}, ] [package.dependencies] @@ -1715,13 +1713,13 @@ files = [ [[package]] name = "jaconv" -version = "0.3.3" +version = "0.3.4" description = "Pure-Python Japanese character interconverter for 
Hiragana, Katakana, Hankaku, Zenkaku and more" category = "main" optional = false python-versions = "*" files = [ - {file = "jaconv-0.3.3.tar.gz", hash = "sha256:2aff90c082ad9676a35ad7fa6da68e008789fe24a18b5e42389bd84802ad9cca"}, + {file = "jaconv-0.3.4.tar.gz", hash = "sha256:9e7c55f3f0b0e2dbad62f6c9fa0c30fc6fffdbb78297955509d90856b3a31d6d"}, ] [[package]] @@ -1968,14 +1966,14 @@ tests = ["contextlib2", "matplotlib (>=3.3.0)", "pytest", "pytest-cov", "pytest- [[package]] name = "lightning-utilities" -version = "0.6.0.post0" +version = "0.7.1" description = "PyTorch Lightning Sample project." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "lightning-utilities-0.6.0.post0.tar.gz", hash = "sha256:6f02cfe59e6576487e709a0e66e07671563bde9e21b40e1c567918e4d753278c"}, - {file = "lightning_utilities-0.6.0.post0-py3-none-any.whl", hash = "sha256:81edf3ce5ebd43389238afc1bca96ea0c6dcd3b4b442f8365c719dd3a82009dc"}, + {file = "lightning-utilities-0.7.1.tar.gz", hash = "sha256:9748a5466490d6e45c2df60c1ee77ff37a1a660ea6313bc0832ab7317a081cef"}, + {file = "lightning_utilities-0.7.1-py3-none-any.whl", hash = "sha256:a7c58e67831c17712736e38e8ad5b81dbf64184ce28684a502e896ecca939b67"}, ] [package.dependencies] @@ -1986,17 +1984,18 @@ typing-extensions = "*" cli = ["fire"] docs = ["sphinx (>=4.0,<5.0)"] test = ["coverage (==6.5.0)"] +typing = ["mypy (>=1.0.0)"] [[package]] name = "linkify-it-py" -version = "1.0.3" +version = "2.0.0" description = "Links recognition library with FULL unicode support." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "linkify-it-py-1.0.3.tar.gz", hash = "sha256:2b3f168d5ce75e3a425e34b341a6b73e116b5d9ed8dbbbf5dc7456843b7ce2ee"}, - {file = "linkify_it_py-1.0.3-py3-none-any.whl", hash = "sha256:11e29f00150cddaa8f434153f103c14716e7e097a8fd372d9eb1ed06ed91524d"}, + {file = "linkify-it-py-2.0.0.tar.gz", hash = "sha256:476464480906bed8b2fa3813bf55566282e55214ad7e41b7d1c2b564666caf2f"}, + {file = "linkify_it_py-2.0.0-py3-none-any.whl", hash = "sha256:1bff43823e24e507a099e328fc54696124423dd6320c75a9da45b4b754b748ad"}, ] [package.dependencies] @@ -2191,26 +2190,25 @@ testing = ["coverage", "pyyaml"] [[package]] name = "markdown-it-py" -version = "2.1.0" +version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "markdown-it-py-2.1.0.tar.gz", hash = "sha256:cf7e59fed14b5ae17c0006eff14a2d9a00ed5f3a846148153899a0224e2c07da"}, - {file = "markdown_it_py-2.1.0-py3-none-any.whl", hash = "sha256:93de681e5c021a432c63147656fe21790bc01231e0cd2da73626f1aa3ac0fe27"}, + {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, + {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, ] [package.dependencies] -linkify-it-py = {version = ">=1.0,<2.0", optional = true, markers = "extra == \"linkify\""} -mdit-py-plugins = {version = "*", optional = true, markers = "extra == \"plugins\""} +linkify-it-py = {version = ">=1,<3", optional = true, markers = "extra == \"linkify\""} mdurl = ">=0.1,<1.0" [package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark (>=3.2,<4.0)"] -code-style = ["pre-commit (==2.6)"] -compare = ["commonmark (>=0.9.1,<0.10.0)", "markdown (>=3.3.6,<3.4.0)", "mistletoe (>=0.8.1,<0.9.0)", "mistune (>=2.0.2,<2.1.0)", "panflute (>=2.1.3,<2.2.0)"] -linkify = ["linkify-it-py (>=1.0,<2.0)"] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] @@ -2278,53 +2276,53 @@ files = [ [[package]] name = "matplotlib" -version = "3.7.0" +version = "3.7.1" description = "Python plotting package" category = "main" optional = false python-versions = ">=3.8" files = [ - {file = "matplotlib-3.7.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:3da8b9618188346239e51f1ea6c0f8f05c6e218cfcc30b399dd7dd7f52e8bceb"}, - {file = "matplotlib-3.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c0592ba57217c22987b7322df10f75ef95bc44dce781692b4b7524085de66019"}, - {file = "matplotlib-3.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:21269450243d6928da81a9bed201f0909432a74e7d0d65db5545b9fa8a0d0223"}, - {file = "matplotlib-3.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb2e76cd429058d8954121c334dddfcd11a6186c6975bca61f3f248c99031b05"}, - {file = "matplotlib-3.7.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de20eb1247725a2f889173d391a6d9e7e0f2540feda24030748283108b0478ec"}, - {file = "matplotlib-3.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5465735eaaafd1cfaec3fed60aee776aeb3fd3992aa2e49f4635339c931d443"}, - {file = "matplotlib-3.7.0-cp310-cp310-win32.whl", hash = "sha256:092e6abc80cdf8a95f7d1813e16c0e99ceda8d5b195a3ab859c680f3487b80a2"}, - {file = "matplotlib-3.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:4f640534ec2760e270801056bc0d8a10777c48b30966eef78a7c35d8590915ba"}, - {file = "matplotlib-3.7.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f336e7014889c38c59029ebacc35c59236a852e4b23836708cfd3f43d1eaeed5"}, - {file = "matplotlib-3.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a10428d4f8d1a478ceabd652e61a175b2fdeed4175ab48da4a7b8deb561e3fa"}, - {file = "matplotlib-3.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:46ca923e980f76d34c1c633343a72bb042d6ba690ecc649aababf5317997171d"}, - {file = "matplotlib-3.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c849aa94ff2a70fb71f318f48a61076d1205c6013b9d3885ade7f992093ac434"}, - {file = "matplotlib-3.7.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:827e78239292e561cfb70abf356a9d7eaf5bf6a85c97877f254009f20b892f89"}, - {file = "matplotlib-3.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:691ef1f15360e439886186d0db77b5345b24da12cbc4fc57b26c4826db4d6cab"}, - {file = "matplotlib-3.7.0-cp311-cp311-win32.whl", hash = "sha256:21a8aeac39b4a795e697265d800ce52ab59bdeb6bb23082e2d971f3041074f02"}, - {file = "matplotlib-3.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:01681566e95b9423021b49dea6a2395c16fa054604eacb87f0f4c439750f9114"}, - {file = "matplotlib-3.7.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cf119eee4e57389fba5ac8b816934e95c256535e55f0b21628b4205737d1de85"}, - {file = "matplotlib-3.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:21bd4033c40b95abd5b8453f036ed5aa70856e56ecbd887705c37dce007a4c21"}, - {file = "matplotlib-3.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:111ef351f28fd823ed7177632070a6badd6f475607122bc9002a526f2502a0b5"}, - {file = "matplotlib-3.7.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f91d35b3ef51d29d9c661069b9e4ba431ce283ffc533b981506889e144b5b40e"}, - {file = "matplotlib-3.7.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0a776462a4a63c0bfc9df106c15a0897aa2dbab6795c693aa366e8e283958854"}, - {file = "matplotlib-3.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dfd4a0cbd151f6439e6d7f8dca5292839ca311e7e650596d073774847ca2e4f"}, - {file = "matplotlib-3.7.0-cp38-cp38-win32.whl", hash = "sha256:56b7b79488209041a9bf7ddc34f1b069274489ce69e34dc63ae241d0d6b4b736"}, - {file = "matplotlib-3.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:8665855f3919c80551f377bc16df618ceabf3ef65270bc14b60302dce88ca9ab"}, - {file = "matplotlib-3.7.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:f910d924da8b9fb066b5beae0b85e34ed1b6293014892baadcf2a51da1c65807"}, - {file = "matplotlib-3.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf6346644e8fe234dc847e6232145dac199a650d3d8025b3ef65107221584ba4"}, - {file = "matplotlib-3.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d1e52365d8d5af699f04581ca191112e1d1220a9ce4386b57d807124d8b55e6"}, - {file = "matplotlib-3.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c869b646489c6a94375714032e5cec08e3aa8d3f7d4e8ef2b0fb50a52b317ce6"}, - {file = "matplotlib-3.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4ddac5f59e78d04b20469bc43853a8e619bb6505c7eac8ffb343ff2c516d72f"}, - {file = "matplotlib-3.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb0304c1cd802e9a25743414c887e8a7cd51d96c9ec96d388625d2cd1c137ae3"}, - {file = "matplotlib-3.7.0-cp39-cp39-win32.whl", hash = "sha256:a06a6c9822e80f323549c6bc9da96d4f233178212ad9a5f4ab87fd153077a507"}, - {file = "matplotlib-3.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:cb52aa97b92acdee090edfb65d1cb84ea60ab38e871ba8321a10bbcebc2a3540"}, - {file = "matplotlib-3.7.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3493b48e56468c39bd9c1532566dff3b8062952721b7521e1f394eb6791495f4"}, - {file = "matplotlib-3.7.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:7d0dcd1a0bf8d56551e8617d6dc3881d8a1c7fb37d14e5ec12cbb293f3e6170a"}, - {file = "matplotlib-3.7.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51fb664c37714cbaac69c16d6b3719f517a13c96c3f76f4caadd5a0aa7ed0329"}, - {file = "matplotlib-3.7.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4497d88c559b76da320b7759d64db442178beeea06a52dc0c629086982082dcd"}, - {file = "matplotlib-3.7.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9d85355c48ef8b9994293eb7c00f44aa8a43cad7a297fbf0770a25cdb2244b91"}, - {file = "matplotlib-3.7.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:03eb2c8ff8d85da679b71e14c7c95d16d014c48e0c0bfa14db85f6cdc5c92aad"}, - {file = "matplotlib-3.7.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71b751d06b2ed1fd017de512d7439c0259822864ea16731522b251a27c0b2ede"}, - {file = "matplotlib-3.7.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b51ab8a5d5d3bbd4527af633a638325f492e09e45e78afdf816ef55217a09664"}, - {file = "matplotlib-3.7.0.tar.gz", hash = "sha256:8f6efd313430d7ef70a38a3276281cb2e8646b3a22b3b21eb227da20e15e6813"}, + {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:95cbc13c1fc6844ab8812a525bbc237fa1470863ff3dace7352e910519e194b1"}, + {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:08308bae9e91aca1ec6fd6dda66237eef9f6294ddb17f0d0b3c863169bf82353"}, + {file = "matplotlib-3.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:544764ba51900da4639c0f983b323d288f94f65f4024dc40ecb1542d74dc0500"}, + {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56d94989191de3fcc4e002f93f7f1be5da476385dde410ddafbb70686acf00ea"}, + {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99bc9e65901bb9a7ce5e7bb24af03675cbd7c70b30ac670aa263240635999a4"}, + {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb7d248c34a341cd4c31a06fd34d64306624c8cd8d0def7abb08792a5abfd556"}, + {file = "matplotlib-3.7.1-cp310-cp310-win32.whl", hash = "sha256:ce463ce590f3825b52e9fe5c19a3c6a69fd7675a39d589e8b5fbe772272b3a24"}, + {file = "matplotlib-3.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d7bc90727351fb841e4d8ae620d2d86d8ed92b50473cd2b42ce9186104ecbba"}, + {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:770a205966d641627fd5cf9d3cb4b6280a716522cd36b8b284a8eb1581310f61"}, + {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f67bfdb83a8232cb7a92b869f9355d677bce24485c460b19d01970b64b2ed476"}, + {file = "matplotlib-3.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2bf092f9210e105f414a043b92af583c98f50050559616930d884387d0772aba"}, + {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89768d84187f31717349c6bfadc0e0d8c321e8eb34522acec8a67b1236a66332"}, + {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83111e6388dec67822e2534e13b243cc644c7494a4bb60584edbff91585a83c6"}, + {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a867bf73a7eb808ef2afbca03bcdb785dae09595fbe550e1bab0cd023eba3de0"}, + {file = "matplotlib-3.7.1-cp311-cp311-win32.whl", hash = "sha256:fbdeeb58c0cf0595efe89c05c224e0a502d1aa6a8696e68a73c3efc6bc354304"}, + {file = 
"matplotlib-3.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:c0bd19c72ae53e6ab979f0ac6a3fafceb02d2ecafa023c5cca47acd934d10be7"}, + {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6eb88d87cb2c49af00d3bbc33a003f89fd9f78d318848da029383bfc08ecfbfb"}, + {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:cf0e4f727534b7b1457898c4f4ae838af1ef87c359b76dcd5330fa31893a3ac7"}, + {file = "matplotlib-3.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:46a561d23b91f30bccfd25429c3c706afe7d73a5cc64ef2dfaf2b2ac47c1a5dc"}, + {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8704726d33e9aa8a6d5215044b8d00804561971163563e6e6591f9dcf64340cc"}, + {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4cf327e98ecf08fcbb82685acaf1939d3338548620ab8dfa02828706402c34de"}, + {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:617f14ae9d53292ece33f45cba8503494ee199a75b44de7717964f70637a36aa"}, + {file = "matplotlib-3.7.1-cp38-cp38-win32.whl", hash = "sha256:7c9a4b2da6fac77bcc41b1ea95fadb314e92508bf5493ceff058e727e7ecf5b0"}, + {file = "matplotlib-3.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:14645aad967684e92fc349493fa10c08a6da514b3d03a5931a1bac26e6792bd1"}, + {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:81a6b377ea444336538638d31fdb39af6be1a043ca5e343fe18d0f17e098770b"}, + {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:28506a03bd7f3fe59cd3cd4ceb2a8d8a2b1db41afede01f66c42561b9be7b4b7"}, + {file = "matplotlib-3.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c587963b85ce41e0a8af53b9b2de8dddbf5ece4c34553f7bd9d066148dc719c"}, + {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bf26ade3ff0f27668989d98c8435ce9327d24cffb7f07d24ef609e33d582439"}, + {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:def58098f96a05f90af7e92fd127d21a287068202aa43b2a93476170ebd99e87"}, + {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f883a22a56a84dba3b588696a2b8a1ab0d2c3d41be53264115c71b0a942d8fdb"}, + {file = "matplotlib-3.7.1-cp39-cp39-win32.whl", hash = "sha256:4f99e1b234c30c1e9714610eb0c6d2f11809c9c78c984a613ae539ea2ad2eb4b"}, + {file = "matplotlib-3.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:3ba2af245e36990facf67fde840a760128ddd71210b2ab6406e640188d69d136"}, + {file = "matplotlib-3.7.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3032884084f541163f295db8a6536e0abb0db464008fadca6c98aaf84ccf4717"}, + {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a2cb34336110e0ed8bb4f650e817eed61fa064acbefeb3591f1b33e3a84fd96"}, + {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b867e2f952ed592237a1828f027d332d8ee219ad722345b79a001f49df0936eb"}, + {file = "matplotlib-3.7.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:57bfb8c8ea253be947ccb2bc2d1bb3862c2bccc662ad1b4626e1f5e004557042"}, + {file = "matplotlib-3.7.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:438196cdf5dc8d39b50a45cb6e3f6274edbcf2254f85fa9b895bf85851c3a613"}, + {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e9cff1a58d42e74d01153360de92b326708fb205250150018a52c70f43c290"}, + {file 
= "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d4725d70b7c03e082bbb8a34639ede17f333d7247f56caceb3801cb6ff703d"}, + {file = "matplotlib-3.7.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:97cc368a7268141afb5690760921765ed34867ffb9655dd325ed207af85c7529"}, + {file = "matplotlib-3.7.1.tar.gz", hash = "sha256:7b73305f25eab4541bd7ee0b96d87e53ae9c9f1823be5659b806cd85786fe882"}, ] [package.dependencies] @@ -2398,14 +2396,14 @@ tests = ["coverage", "lmdb", "parameterized", "pytest"] [[package]] name = "more-itertools" -version = "9.0.0" +version = "9.1.0" description = "More routines for operating on iterables, beyond itertools" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "more-itertools-9.0.0.tar.gz", hash = "sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab"}, - {file = "more_itertools-9.0.0-py3-none-any.whl", hash = "sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41"}, + {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, + {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, ] [[package]] @@ -2728,41 +2726,43 @@ typing-extensions = "*" [[package]] name = "onnxruntime" -version = "1.14.0" +version = "1.14.1" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" category = "main" optional = false python-versions = "*" files = [ - {file = "onnxruntime-1.14.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:8951b9c8ad180cb8099cab4965126ff24bee4ac70b660682bcaf739b17ec634c"}, - {file = "onnxruntime-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e065b6d58833e8b822f5163839bced7b9288b9f6195ca3096170aa7d5256598"}, - {file = "onnxruntime-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d92aa2cf4f8afc019adaac9bde443bb91d252d8065ae9f0334e64214055c050a"}, - {file = "onnxruntime-1.14.0-cp310-cp310-manylinux_2_27_aarch64.whl", hash = "sha256:001e95fa1df9e855e565de47e535a4310930fb80e36499b643ba4b703b133daa"}, - {file = "onnxruntime-1.14.0-cp310-cp310-manylinux_2_27_x86_64.whl", hash = "sha256:6d7652dd803bbff4d5f3dc0d162c17da880114f9cc1d1fb293d7006ccab77b9e"}, - {file = "onnxruntime-1.14.0-cp310-cp310-win32.whl", hash = "sha256:386066c35ffe5fea2e494b0af253244c08e14d40a485b0327c4918e0715c8ab3"}, - {file = "onnxruntime-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:a43e8558be3084f9dc95a2eb9f7f248cc7b87be1dcb9b899db68f24d63bc0d6f"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:6c5df10a5863b106f5e2fda3eec851e867c753f5e32ea6de94cd9a565d644c4c"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d4aae776dfcddef0a3faeb235bc15ecbede0881579abae761c9e063249d70cb"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37e4f6f676ab8a6d22370f02fc34f3b7e232bcb37dc319ffc5b5822162ad8852"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-manylinux_2_27_aarch64.whl", hash = "sha256:0f77c0e5a9e48afc4e7447cbf38ca51587099bf11bba3a07d6818557ea8f6ef3"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-manylinux_2_27_x86_64.whl", hash = "sha256:d87dd7532d2a927e7ec040e0f6869530cb5cdb5b27ee94559d97e6a7d6f178f1"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-win32.whl", hash = 
"sha256:5d400dd1f4c8649b45d2734bcb8481a8b791d157ab7c584adf51132133401507"}, - {file = "onnxruntime-1.14.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b3beac39282bda3617faef8d631fb891f3e1017370a66ee1d7a37a6b9b594d1"}, - {file = "onnxruntime-1.14.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:cd5852f4d222a981b6c9da6d26f8dffff584353bb40ac702e1e6ab7320a0a16d"}, - {file = "onnxruntime-1.14.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2b9fc633391787d7286d947a32e28e7388a528cff038af0e44ba8e5c05426726"}, - {file = "onnxruntime-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade64278c4d843d18e0f1a101f1a198e7861e04a82d37b229b975e64d6df976d"}, - {file = "onnxruntime-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f060f48c1ae469f0ef13daee911b1fc991ab4e212102a4aa6c2523c1dc48a28"}, - {file = "onnxruntime-1.14.0-cp38-cp38-manylinux_2_27_aarch64.whl", hash = "sha256:abc7bdd52189c938b318ab353f09d241e2ecb717a0ac78ab7ee9c9fc6dac3496"}, - {file = "onnxruntime-1.14.0-cp38-cp38-manylinux_2_27_x86_64.whl", hash = "sha256:c0cc4b00b4bd7c3b2cb797ab0a786cd95307e762b4621209ffce1b65ded944ec"}, - {file = "onnxruntime-1.14.0-cp38-cp38-win32.whl", hash = "sha256:6f0e447205a18edd9a523de4e52def7e8b6c258e3b6a802b806653d74a1d816e"}, - {file = "onnxruntime-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:775e39720684bed634dbfe6de7a6aab91b0c88c11f653ba3aa63f87712b90b72"}, - {file = "onnxruntime-1.14.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:f7d6f6308cd2b0397ce13c1abc65fc60fada9f9bf5cf48d65147720020c98237"}, - {file = "onnxruntime-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f71c7ad1bf52cadb4ce705dda954c8e6d024eaaad9fd187d55c0be080ddc85f"}, - {file = "onnxruntime-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ff9d4b34255721c47aecbe734e40e442069f78a48e9b2a6bc521bba8c392755"}, - {file = "onnxruntime-1.14.0-cp39-cp39-manylinux_2_27_aarch64.whl", hash = "sha256:0e62bfe6bb7c3a73eca17f42e49a8dc4ffd0890f93a3b5a9058e3b955620ae4f"}, - {file = "onnxruntime-1.14.0-cp39-cp39-manylinux_2_27_x86_64.whl", hash = "sha256:65d2de7edbaacf09733d9564e901d5bb848f28a2a6e1bcb6acd1e3d4f634eeec"}, - {file = "onnxruntime-1.14.0-cp39-cp39-win32.whl", hash = "sha256:62cc72f9cf8f8dad84d976c73da3faf912859db829e41596a42782e7ad97f0f4"}, - {file = "onnxruntime-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:c12c2814c9a3f886939113508805a53c0a2c8661a2834b31b5c23a7a327500bf"}, + {file = "onnxruntime-1.14.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:193ef1ac512e530c6e6e259c26e67212e2cd3f2bfaad6ff935ed3f4281053056"}, + {file = "onnxruntime-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2853bbb36cb272d99f6c225e5040eb0ddb37a667fce20d186ecdf0a6fac8af8"}, + {file = "onnxruntime-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e1b173365c6894616b8207e23cbb891da9638c5373668d6653e4081ef5f04d0"}, + {file = "onnxruntime-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24bf0401c5f92be7230ac660ff07ba06f7c175e99e225d5d48ff09062a3b76e9"}, + {file = "onnxruntime-1.14.1-cp310-cp310-manylinux_2_27_aarch64.whl", hash = "sha256:0a2d09260bbdbe1df678e0a237a5f7b1a44fd11a2f52688d8b6a53a9d03a26db"}, + {file = "onnxruntime-1.14.1-cp310-cp310-manylinux_2_27_x86_64.whl", hash = "sha256:d99d35b9d5c3f46cad1673a39cc753fb57d60784369b59e6f8cd3dfb77df1885"}, + {file = "onnxruntime-1.14.1-cp310-cp310-win32.whl", hash = 
"sha256:f400356df1b27d9adc5513319e8a89753e48ef0d6c5084caf5db8e132f46e7e8"}, + {file = "onnxruntime-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:96a4059dbab162fe5cdb6750f8c70b2106ef2de5d49a7f72085171937d0e36d3"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:fa23df6a349218636290f9fe56d7baaceb1a50cf92255234d495198b47d92327"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc70e44d9e123d126648da24ffb39e56464272a1660a3eb91f4f5b74263be3ba"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deff8138045a3affb6be064b598e3ec69a88e4d445359c50464ee5379b8eaf19"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-manylinux_2_27_aarch64.whl", hash = "sha256:7c02acdc1107cbf698dcbf6dadc6f5b6aa179e7fa9a026251e99cf8613bd3129"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-manylinux_2_27_x86_64.whl", hash = "sha256:6efa3b2f4b1eaa6c714c07861993bfd9bb33bd73cdbcaf5b4aadcf1ec13fcaf7"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:72fc0acc82c54bf03eba065ad9025baa438c00c54a2ee0beb8ae4b6085cd3a0d"}, + {file = "onnxruntime-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4d6f08ea40d63ccf90f203f4a2a498f4e590737dcaf16867075cc8e0a86c5554"}, + {file = "onnxruntime-1.14.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:c2d9e8f1bc6037f14d8aaa480492792c262fc914936153e40b06b3667bb25549"}, + {file = "onnxruntime-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e7424d3befdd95b537c90787bbfaa053b2bb19eb60135abb898cb0e099d7d7ad"}, + {file = "onnxruntime-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9066d275e6e41d0597e234d2d88c074d4325e650c74a9527a52cadbcf42a0fe2"}, + {file = "onnxruntime-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8224d3c1f2cd0b899cea7b5a39f28b971debe0da30fcbc61382801d97d6f5740"}, + {file = "onnxruntime-1.14.1-cp38-cp38-manylinux_2_27_aarch64.whl", hash = "sha256:f4ac52ff4ac793683ebd1fbd1ee24197e3b4ca825ee68ff739296a820867debe"}, + {file = "onnxruntime-1.14.1-cp38-cp38-manylinux_2_27_x86_64.whl", hash = "sha256:b1dd8cdd3be36c32ddd8f5763841ed571c3e81da59439a622947bd97efee6e77"}, + {file = "onnxruntime-1.14.1-cp38-cp38-win32.whl", hash = "sha256:95d0f0cd95360c07f1c3ba20962b9bb813627df4bfc1b4b274e1d40044df5ad1"}, + {file = "onnxruntime-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:de40a558e00fc00f92e298d5be99eb8075dba51368dabcb259670a00f4670e56"}, + {file = "onnxruntime-1.14.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:c65b587a42a89fceceaad367bd69d071ee5c9c7010b76e2adac5e9efd9356fb5"}, + {file = "onnxruntime-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e47ef6a2c6e6dd6ff48bc13f2331d124dff00e1d76627624bb3268c8058f19c"}, + {file = "onnxruntime-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0afd0f671d068dd99b9d071d88e93a9a57a5ed59af440c0f4d65319ee791603f"}, + {file = "onnxruntime-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc65e9061349cdf98ce16b37722b557109f16076632fbfed9a3151895cfd3bb7"}, + {file = "onnxruntime-1.14.1-cp39-cp39-manylinux_2_27_aarch64.whl", hash = "sha256:2ff17c71187391a71e6ccc78ca89aed83bcaed1c085c95267ab1a70897868bdd"}, + {file = "onnxruntime-1.14.1-cp39-cp39-manylinux_2_27_x86_64.whl", hash = "sha256:9b795189916942ce848192200dde5b1f32799ee6c84fc600969a44d88e8a5404"}, + {file = "onnxruntime-1.14.1-cp39-cp39-win32.whl", hash = 
"sha256:17ca3100112af045118750d24643a01ed4e6d86071a8efaef75cc1d434ea64aa"}, + {file = "onnxruntime-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:b5e8c489329ba0fa0639dfd7ec02d6b07cece1bab52ef83884b537247efbda74"}, ] [package.dependencies] @@ -2797,25 +2797,26 @@ dev = ["pytest"] [[package]] name = "opencv-python" -version = "4.7.0.68" +version = "4.7.0.72" description = "Wrapper package for OpenCV python bindings." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "opencv-python-4.7.0.68.tar.gz", hash = "sha256:9829e6efedde1d1b8419c5bd4d62d289ecbf44ae35b843c6da9e3cbcba1a9a8a"}, - {file = "opencv_python-4.7.0.68-cp37-abi3-macosx_10_13_x86_64.whl", hash = "sha256:abc6adfa8694f71a4caffa922b279bd9d96954a37eee40b147f613c64310b411"}, - {file = "opencv_python-4.7.0.68-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:86f4b60b9536948f16d2170ba3a9b22d3955a957dc61a9bc56e53692c6db2c7e"}, - {file = "opencv_python-4.7.0.68-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d1c993811f92ddd7919314ada7b9be1f23db1c73f1384915c834dee8549c0b9"}, - {file = "opencv_python-4.7.0.68-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a00e12546e5578f6bb7ed408c37fcfea533d74e9691cfaf40926f6b43295577"}, - {file = "opencv_python-4.7.0.68-cp37-abi3-win32.whl", hash = "sha256:e770e9f653a0e5e72b973adb8213fae2df4642730ba1faf31e73a54287a4d5d4"}, - {file = "opencv_python-4.7.0.68-cp37-abi3-win_amd64.whl", hash = "sha256:7a08f9d1f9dd52de63a7bb448ab7d6d4a1a85b767c2358501d968d1e4d95098d"}, + {file = "opencv-python-4.7.0.72.tar.gz", hash = "sha256:3424794a711f33284581f3c1e4b071cfc827d02b99d6fd9a35391f517c453306"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:d4f8880440c433a0025d78804dda6901d1e8e541a561dda66892d90290aef881"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:7a297e7651e22eb17c265ddbbc80e2ba2a8ff4f4a1696a67c45e5f5798245842"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd08343654c6b88c5a8c25bf425f8025aed2e3189b4d7306b5861d32affaf737"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebfc0a3a2f57716e709028b992e4de7fd8752105d7a768531c4f434043c6f9ff"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-win32.whl", hash = "sha256:eda115797b114fc16ca6f182b91c5d984f0015c19bec3145e55d33d708e9bae1"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-win_amd64.whl", hash = "sha256:812af57553ec1c6709060c63f6b7e9ad07ddc0f592f3ccc6d00c71e0fe0e6376"}, ] [package.dependencies] numpy = [ {version = ">=1.21.2", markers = "python_version >= \"3.10\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, + {version = ">=1.22.0", markers = "python_version >= \"3.11\""}, {version = ">=1.19.3", markers = "python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\" or python_version >= \"3.9\""}, {version = ">=1.17.0", markers = "python_version >= \"3.7\""}, {version = ">=1.17.3", markers = "python_version >= \"3.8\""}, @@ -2847,56 +2848,56 @@ tests = ["asteroid-filterbanks (>=0.3.2)", "musdb (>=0.4.0)", "museval (>=0.4.0) [[package]] name = "orjson" -version = "3.8.6" +version = "3.8.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = 
"orjson-3.8.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:062a9a74c10c439acc35cf67f31ac88d9464a11025700bab421e6cdf54a54a35"}, - {file = "orjson-3.8.6-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:692c255109867cc8211267f4416d2915845273bf4f403bbca5419f5b15ac9175"}, - {file = "orjson-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a20905c7a5ebc280343704c4dd19343ef966c9dea5a38ade6e0461a6deb8eda"}, - {file = "orjson-3.8.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34ce4a8b8f0fea483bce6985c015953f475540b7d756efd48a571b1803c318ee"}, - {file = "orjson-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57ecad7616ec842d8c382ed42a778cdcdadc67cfb46b804b43079f937b63b31"}, - {file = "orjson-3.8.6-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:323065cf14fdd4096dbf93ea1634e7e030044af8c1000803bcdc132fbfd395f5"}, - {file = "orjson-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4cb4f37fca8cf8309de421634447437f229bc03b240cec8ad4ac241fd4b1bcf4"}, - {file = "orjson-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:32353b14c5e0b55b6a8759e993482a2d8c44d492489840718b74658de67671e2"}, - {file = "orjson-3.8.6-cp310-none-win_amd64.whl", hash = "sha256:3e44f78db3a15902b5e8386119979691ed3dd61d1ded10bad2c7106fd50641ef"}, - {file = "orjson-3.8.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:c59ec129d523abd4f2d65c0733d0e57af7dd09c69142f1aa564b04358f04ace3"}, - {file = "orjson-3.8.6-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:d44d89314a66e98e690ce64c8771d963eb64ae6cea662d0a1d077ed024627228"}, - {file = "orjson-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:865ef341c4d310ac2689bf811dbc0930b2f13272f8eade1511dc40b186f6d562"}, - {file = "orjson-3.8.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:52809a37a0daa6992835ee0625aca22b4c0693dba3cb465948e6c9796de927b0"}, - {file = "orjson-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7402121d06d11fafcaed7d06f9d68b11bbe39868e0e1bc19239ee5b6b98b2b"}, - {file = "orjson-3.8.6-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:583338b7dabb509ca4c3b4f160f58a5228bf6c6e0f8a2981663f683791f39d45"}, - {file = "orjson-3.8.6-cp311-none-win_amd64.whl", hash = "sha256:4a6c0a0ef2f535ba7a5d01f014b53d05eeb372d43556edb25c75a4d52690a123"}, - {file = "orjson-3.8.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:9d35573e7f5817a26d8ce1134c3463d31bc3b39aad3ad7ae06bb67d6078fa9c0"}, - {file = "orjson-3.8.6-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:94d8fdc12adc0450994931d722cb38be5e4caa273219881abb96c15a9e9f151f"}, - {file = "orjson-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8fc43bfb73d394b9bf12062cd6dab72abf728ac7869f972e4bb7327fd3330b8"}, - {file = "orjson-3.8.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a38387387139695a7e52b9f568e39c1632b22eb34939afc5efed265fa8277b84"}, - {file = "orjson-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e048c6df7453c3da4de10fa5c44f6c655b157b712628888ce880cd5bbf30013"}, - {file = "orjson-3.8.6-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:d3b0950d792b25c0aa52505faf09237fd98136d09616a0837f7cdb0fde9e2730"}, - {file = "orjson-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", 
hash = "sha256:38bc8a388080d8fd297388bcff4939e350ffafe4a006567e0dd81cdb8c7b86fa"}, - {file = "orjson-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5b3251ab7113f2400d76f2b4a2d6592e7d5a5cf45fa948c894340553671ef8f1"}, - {file = "orjson-3.8.6-cp37-none-win_amd64.whl", hash = "sha256:2c83a33cf389fd286bd9ef0befc406307444b9553d2e9ba14b90b9332524cfa6"}, - {file = "orjson-3.8.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:53f51c23398cfe818d9bb09079d31a60c6cd77e7eee1d555cfcc735460db4190"}, - {file = "orjson-3.8.6-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:6190e23a2fb9fc78228b289b3ec295094671ca0299319c8c72727aa9e7dbe06f"}, - {file = "orjson-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61fff8a8b4cd4e489b291fe5105b6138b1831490f1a0dc726d5e17ebe811d595"}, - {file = "orjson-3.8.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c192813f527f886bd85abc5a9e8d9dde16ffa06d7305de526a7c4657730dbf4e"}, - {file = "orjson-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aae1487fba9d955b2679f0a697665ed8fc32563b3252acc240e097184c184e29"}, - {file = "orjson-3.8.6-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cd2bd48e9a14f2130790a3c2dcb897bd93c2e5c244919799430a6d9b8212cb50"}, - {file = "orjson-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:006178fd654a0a4f14f5912b8320ba9a26ab9c0ae7ce1c7eeb4b5249d6cada29"}, - {file = "orjson-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9d5ad2fddccc89ab64b6333823b250ce8430fc51f014954e5a2d4c933f5deb9f"}, - {file = "orjson-3.8.6-cp38-none-win_amd64.whl", hash = "sha256:aef3d558f5bd809733ebf2cbce7e1338ce62812db317478427236b97036aba0f"}, - {file = "orjson-3.8.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7d216a5f3d23eac2c7c654e7bd30280c27cdf5edc32325e6ad8e880d36c265b7"}, - {file = "orjson-3.8.6-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:004122c95e08db7201b80224de3a8f2ad79b9717040e6884c6015f27b010127d"}, - {file = "orjson-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:006c492577ad046cb7e50237a8d8935131a35f7e7f8320fbc3514da6fbc0b436"}, - {file = "orjson-3.8.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67554103b415349b6ee2db82d2422da1c8f4c2d280d20772217f6d1d227410b6"}, - {file = "orjson-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa5053f19584816f063c887d94385db481fc01d995d6a717ce4fbb929653ec2"}, - {file = "orjson-3.8.6-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2bdd64566870a8a0bdcf8c7df2f4452391dd55070f5cd98cc581914e8c263d85"}, - {file = "orjson-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:550a4dec128d1adfd0262ef9ad7878d62d1cc0bddaaa05e41d8ca28414dc86bc"}, - {file = "orjson-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3f5ad9442e8a99fb436279a8614a00aca272ea8dabb692cadee70a4874d6e03"}, - {file = "orjson-3.8.6-cp39-none-win_amd64.whl", hash = "sha256:aa7b112e3273d1744f7bc983ffd3dd0d004062c69dfa68e119515a7e115c46c8"}, - {file = "orjson-3.8.6.tar.gz", hash = "sha256:91ef8a554d33fbc5bb61c3972f3e8baa994f72c4967671e64e7dac1cc06f50e1"}, + {file = "orjson-3.8.7-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:f98c82850b7b4b7e27785ca43706fa86c893cdb88d54576bbb9b0d9c1070e421"}, + {file = "orjson-3.8.7-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = 
"sha256:1dee503c6c1a0659c5b46f5f39d9ca9d3657b11ca8bb4af8506086df416887d9"}, + {file = "orjson-3.8.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc4fa83831f42ce5c938f8cefc2e175fa1df6f661fdeaba3badf26d2b8cfcf73"}, + {file = "orjson-3.8.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e432c6c9c8b97ad825276d5795286f7cc9689f377a97e3b7ecf14918413303f"}, + {file = "orjson-3.8.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee519964a5a0efb9633f38b1129fd242807c5c57162844efeeaab1c8de080051"}, + {file = "orjson-3.8.7-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:109b539ce5bf60a121454d008fa67c3b67e5a3249e47d277012645922cf74bd0"}, + {file = "orjson-3.8.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ad4d441fbde4133af6fee37f67dbf23181b9c537ecc317346ec8c3b4c8ec7705"}, + {file = "orjson-3.8.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89dc786419e1ce2588345f58dd6a434e6728bce66b94989644234bcdbe39b603"}, + {file = "orjson-3.8.7-cp310-none-win_amd64.whl", hash = "sha256:697abde7350fb8076d44bcb6b4ab3ce415ae2b5a9bb91efc460e5ab0d96bb5d3"}, + {file = "orjson-3.8.7-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:1c19f47b35b9966a3abadf341b18ee4a860431bf2b00fd8d58906d51cf78aa70"}, + {file = "orjson-3.8.7-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:3ffaabb380cd0ee187b4fc362516df6bf739808130b1339445c7d8878fca36e7"}, + {file = "orjson-3.8.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d88837002c5a8af970745b8e0ca1b0fdb06aafbe7f1279e110d338ea19f3d23"}, + {file = "orjson-3.8.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff60187d1b7e0bfab376b6002b08c560b7de06c87cf3a8ac639ecf58f84c5f3b"}, + {file = "orjson-3.8.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0110970aed35dec293f30ed1e09f8604afd5d15c5ef83de7f6c427619b3ba47b"}, + {file = "orjson-3.8.7-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:51b275475d4e36118b65ad56f9764056a09d985c5d72e64579bf8816f1356a5e"}, + {file = "orjson-3.8.7-cp311-none-win_amd64.whl", hash = "sha256:63144d27735f3b60f079f247ac9a289d80dfe49a7f03880dfa0c0ba64d6491d5"}, + {file = "orjson-3.8.7-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a16273d77db746bb1789a2bbfded81148a60743fd6f9d5185e02d92e3732fa18"}, + {file = "orjson-3.8.7-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:5bb32259ea22cc9dd47a6fdc4b8f9f1e2f798fcf56c7c1122a7df0f4c5d33bf3"}, + {file = "orjson-3.8.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad02e9102d4ba67db30a136e631e32aeebd1dce26c9f5942a457b02df131c5d0"}, + {file = "orjson-3.8.7-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbcfcec2b7ac52deb7be3685b551addc28ee8fa454ef41f8b714df6ba0e32a27"}, + {file = "orjson-3.8.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a0e5504a5fc86083cc210c6946e8d61e13fe9f1d7a7bf81b42f7050a49d4fb"}, + {file = "orjson-3.8.7-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:7bd4fd37adb03b1f2a1012d43c9f95973a02164e131dfe3ff804d7e180af5653"}, + {file = "orjson-3.8.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:188ed9f9a781333ad802af54c55d5a48991e292239aef41bd663b6e314377eb8"}, + {file = "orjson-3.8.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cc52f58c688cb10afd810280e450f56fbcb27f52c053463e625c8335c95db0dc"}, + {file = 
"orjson-3.8.7-cp37-none-win_amd64.whl", hash = "sha256:403c8c84ac8a02c40613b0493b74d5256379e65196d39399edbf2ed3169cbeb5"}, + {file = "orjson-3.8.7-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:7d6ac5f8a2a17095cd927c4d52abbb38af45918e0d3abd60fb50cfd49d71ae24"}, + {file = "orjson-3.8.7-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:0295a7bfd713fa89231fd0822c995c31fc2343c59a1d13aa1b8b6651335654f5"}, + {file = "orjson-3.8.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feb32aaaa34cf2f891eb793ad320d4bb6731328496ae59b6c9eb1b620c42b529"}, + {file = "orjson-3.8.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7a3ab1a473894e609b6f1d763838c6689ba2b97620c256a32c4d9f10595ac179"}, + {file = "orjson-3.8.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e8c430d82b532c5ab95634e034bbf6ca7432ffe175a3e63eadd493e00b3a555"}, + {file = "orjson-3.8.7-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:366cc75f7e09106f9dac95a675aef413367b284f25507d21e55bd7f45f445e80"}, + {file = "orjson-3.8.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:84d154d07e8b17d97e990d5d710b719a031738eb1687d8a05b9089f0564ff3e0"}, + {file = "orjson-3.8.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06180014afcfdc167ca984b312218aa62ce20093965c437c5f9166764cb65ef7"}, + {file = "orjson-3.8.7-cp38-none-win_amd64.whl", hash = "sha256:41244431ba13f2e6ef22b52c5cf0202d17954489f4a3c0505bd28d0e805c3546"}, + {file = "orjson-3.8.7-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:b20f29fa8371b8023f1791df035a2c3ccbd98baa429ac3114fc104768f7db6f8"}, + {file = "orjson-3.8.7-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:226bfc1da2f21ee74918cee2873ea9a0fec1a8830e533cb287d192d593e99d02"}, + {file = "orjson-3.8.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75c11023ac29e29fd3e75038d0e8dd93f9ea24d7b9a5e871967a8921a88df24"}, + {file = "orjson-3.8.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:78604d3acfd7cd502f6381eea0c42281fe2b74755b334074ab3ebc0224100be1"}, + {file = "orjson-3.8.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7129a6847f0494aa1427167486ef6aea2e835ba05f6c627df522692ee228f65"}, + {file = "orjson-3.8.7-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1a1a8f4980059f48483782c608145b0f74538c266e01c183d9bcd9f8b71dbada"}, + {file = "orjson-3.8.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d60304172a33705ce4bd25a6261ab84bed2dab0b3d3b79672ea16c7648af4832"}, + {file = "orjson-3.8.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4f733062d84389c32c0492e5a4929056fac217034a94523debe0430bcc602cda"}, + {file = "orjson-3.8.7-cp39-none-win_amd64.whl", hash = "sha256:010e2970ec9e826c332819e0da4b14b29b19641da0f1a6af4cec91629ef9b988"}, + {file = "orjson-3.8.7.tar.gz", hash = "sha256:8460c8810652dba59c38c80d27c325b5092d189308d8d4f3e688dbd8d4f3b2dc"}, ] [[package]] @@ -3075,14 +3076,14 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa [[package]] name = "platformdirs" -version = "3.0.0" +version = "3.1.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "platformdirs-3.0.0-py3-none-any.whl", hash = "sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567"}, - {file = "platformdirs-3.0.0.tar.gz", hash = "sha256:8a1228abb1ef82d788f74139988b137e78692984ec7b08eaa6c65f1723af28f9"}, + {file = "platformdirs-3.1.0-py3-none-any.whl", hash = "sha256:13b08a53ed71021350c9e300d4ea8668438fb0046ab3937ac9a29913a1a1350a"}, + {file = "platformdirs-3.1.0.tar.gz", hash = "sha256:accc3665857288317f32c7bebb5a8e482ba717b474f3fc1d18ca7f9214be0cef"}, ] [package.extras] @@ -3091,19 +3092,19 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "pooch" -version = "1.6.0" +version = "1.7.0" description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pooch-1.6.0-py3-none-any.whl", hash = "sha256:3bf0e20027096836b8dbce0152dbb785a269abeb621618eb4bdd275ff1e23c9c"}, - {file = "pooch-1.6.0.tar.gz", hash = "sha256:57d20ec4b10dd694d2b05bb64bc6b109c6e85a6c1405794ce87ed8b341ab3f44"}, + {file = "pooch-1.7.0-py3-none-any.whl", hash = "sha256:74258224fc33d58f53113cf955e8d51bf01386b91492927d0d1b6b341a765ad7"}, + {file = "pooch-1.7.0.tar.gz", hash = "sha256:f174a1041b6447f0eef8860f76d17f60ed2f857dc0efa387a7f08228af05d998"}, ] [package.dependencies] -appdirs = ">=1.3.0" packaging = ">=20.0" +platformdirs = ">=2.5.0" requests = ">=2.19.0" [package.extras] @@ -3216,26 +3217,25 @@ numpy = ">=1.7.0" [[package]] name = "protobuf" -version = "4.21.12" +version = "4.22.0" description = "" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "protobuf-4.21.12-cp310-abi3-win32.whl", hash = "sha256:b135410244ebe777db80298297a97fbb4c862c881b4403b71bac9d4107d61fd1"}, - {file = "protobuf-4.21.12-cp310-abi3-win_amd64.whl", hash = "sha256:89f9149e4a0169cddfc44c74f230d7743002e3aa0b9472d8c28f0388102fc4c2"}, - {file = "protobuf-4.21.12-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:299ea899484ee6f44604deb71f424234f654606b983cb496ea2a53e3c63ab791"}, - {file = "protobuf-4.21.12-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:d1736130bce8cf131ac7957fa26880ca19227d4ad68b4888b3be0dea1f95df97"}, - {file = "protobuf-4.21.12-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:78a28c9fa223998472886c77042e9b9afb6fe4242bd2a2a5aced88e3f4422aa7"}, - {file = "protobuf-4.21.12-cp37-cp37m-win32.whl", hash = "sha256:3d164928ff0727d97022957c2b849250ca0e64777ee31efd7d6de2e07c494717"}, - {file = "protobuf-4.21.12-cp37-cp37m-win_amd64.whl", hash = "sha256:f45460f9ee70a0ec1b6694c6e4e348ad2019275680bd68a1d9314b8c7e01e574"}, - {file = "protobuf-4.21.12-cp38-cp38-win32.whl", hash = "sha256:6ab80df09e3208f742c98443b6166bcb70d65f52cfeb67357d52032ea1ae9bec"}, - {file = "protobuf-4.21.12-cp38-cp38-win_amd64.whl", hash = "sha256:1f22ac0ca65bb70a876060d96d914dae09ac98d114294f77584b0d2644fa9c30"}, - {file = "protobuf-4.21.12-cp39-cp39-win32.whl", hash = "sha256:27f4d15021da6d2b706ddc3860fac0a5ddaba34ab679dc182b60a8bb4e1121cc"}, - {file = "protobuf-4.21.12-cp39-cp39-win_amd64.whl", hash = "sha256:237216c3326d46808a9f7c26fd1bd4b20015fb6867dc5d263a493ef9a539293b"}, - {file = "protobuf-4.21.12-py2.py3-none-any.whl", hash = "sha256:a53fd3f03e578553623272dc46ac2f189de23862e68565e83dde203d41b76fc5"}, - {file = 
"protobuf-4.21.12-py3-none-any.whl", hash = "sha256:b98d0148f84e3a3c569e19f52103ca1feacdac0d2df8d6533cf983d1fda28462"}, - {file = "protobuf-4.21.12.tar.gz", hash = "sha256:7cd532c4566d0e6feafecc1059d04c7915aec8e182d1cf7adee8b24ef1e2e6ab"}, + {file = "protobuf-4.22.0-cp310-abi3-win32.whl", hash = "sha256:b2fea9dc8e3c0f32c38124790ef16cba2ee0628fe2022a52e435e1117bfef9b1"}, + {file = "protobuf-4.22.0-cp310-abi3-win_amd64.whl", hash = "sha256:a33a273d21852f911b8bda47f39f4383fe7c061eb1814db2c76c9875c89c2491"}, + {file = "protobuf-4.22.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:e894e9ae603e963f0842498c4cd5d39c6a60f0d7e4c103df50ee939564298658"}, + {file = "protobuf-4.22.0-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:7c535d126e7dcc714105ab20b418c4fedbd28f8b8afc42b7350b1e317bbbcc71"}, + {file = "protobuf-4.22.0-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:86c3d20428b007537ba6792b475c0853bba7f66b1f60e610d913b77d94b486e4"}, + {file = "protobuf-4.22.0-cp37-cp37m-win32.whl", hash = "sha256:1669cb7524221a8e2d9008d0842453dbefdd0fcdd64d67672f657244867635fb"}, + {file = "protobuf-4.22.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ab4d043865dd04e6b09386981fe8f80b39a1e46139fb4a3c206229d6b9f36ff6"}, + {file = "protobuf-4.22.0-cp38-cp38-win32.whl", hash = "sha256:29288813aacaa302afa2381db1d6e0482165737b0afdf2811df5fa99185c457b"}, + {file = "protobuf-4.22.0-cp38-cp38-win_amd64.whl", hash = "sha256:e474b63bab0a2ea32a7b26a4d8eec59e33e709321e5e16fb66e766b61b82a95e"}, + {file = "protobuf-4.22.0-cp39-cp39-win32.whl", hash = "sha256:47d31bdf58222dd296976aa1646c68c6ee80b96d22e0a3c336c9174e253fd35e"}, + {file = "protobuf-4.22.0-cp39-cp39-win_amd64.whl", hash = "sha256:c27f371f0159feb70e6ea52ed7e768b3f3a4c5676c1900a7e51a24740381650e"}, + {file = "protobuf-4.22.0-py3-none-any.whl", hash = "sha256:c3325803095fb4c2a48649c321d2fbde59f8fbfcb9bfc7a86df27d112831c571"}, + {file = "protobuf-4.22.0.tar.gz", hash = "sha256:652d8dfece122a24d98eebfef30e31e455d300efa41999d1182e015984ac5930"}, ] [[package]] @@ -3349,48 +3349,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.4" +version = "1.10.5" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854"}, - {file = "pydantic-1.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6"}, - {file = "pydantic-1.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53"}, - {file = 
"pydantic-1.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857"}, - {file = "pydantic-1.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3"}, - {file = "pydantic-1.10.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8"}, - {file = "pydantic-1.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416"}, - {file = "pydantic-1.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f"}, - {file = "pydantic-1.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4"}, - {file = "pydantic-1.10.4-py3-none-any.whl", hash = "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774"}, - {file = "pydantic-1.10.4.tar.gz", hash = "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648"}, + {file = "pydantic-1.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5920824fe1e21cbb3e38cf0f3dd24857c8959801d1031ce1fac1d50857a03bfb"}, + {file = "pydantic-1.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3bb99cf9655b377db1a9e47fa4479e3330ea96f4123c6c8200e482704bf1eda2"}, + {file = "pydantic-1.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2185a3b3d98ab4506a3f6707569802d2d92c3a7ba3a9a35683a7709ea6c2aaa2"}, + {file = "pydantic-1.10.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f582cac9d11c227c652d3ce8ee223d94eb06f4228b52a8adaafa9fa62e73d5c9"}, + {file = "pydantic-1.10.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c9e5b778b6842f135902e2d82624008c6a79710207e28e86966cd136c621bfee"}, + {file = "pydantic-1.10.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:72ef3783be8cbdef6bca034606a5de3862be6b72415dc5cb1fb8ddbac110049a"}, + {file = "pydantic-1.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:45edea10b75d3da43cfda12f3792833a3fa70b6eee4db1ed6aed528cef17c74e"}, + {file = "pydantic-1.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:63200cd8af1af2c07964546b7bc8f217e8bda9d0a2ef0ee0c797b36353914984"}, + {file = "pydantic-1.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:305d0376c516b0dfa1dbefeae8c21042b57b496892d721905a6ec6b79494a66d"}, + {file = "pydantic-1.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd326aff5d6c36f05735c7c9b3d5b0e933b4ca52ad0b6e4b38038d82703d35b"}, + {file = "pydantic-1.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bb0452d7b8516178c969d305d9630a3c9b8cf16fcf4713261c9ebd465af0d73"}, + {file = "pydantic-1.10.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9a9d9155e2a9f38b2eb9374c88f02fd4d6851ae17b65ee786a87d032f87008f8"}, + {file = "pydantic-1.10.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f836444b4c5ece128b23ec36a446c9ab7f9b0f7981d0d27e13a7c366ee163f8a"}, + {file = "pydantic-1.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:8481dca324e1c7b715ce091a698b181054d22072e848b6fc7895cd86f79b4449"}, + {file = "pydantic-1.10.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:87f831e81ea0589cd18257f84386bf30154c5f4bed373b7b75e5cb0b5d53ea87"}, + {file = "pydantic-1.10.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ce1612e98c6326f10888df951a26ec1a577d8df49ddcaea87773bfbe23ba5cc"}, + {file = "pydantic-1.10.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58e41dd1e977531ac6073b11baac8c013f3cd8706a01d3dc74e86955be8b2c0c"}, + {file = "pydantic-1.10.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:6a4b0aab29061262065bbdede617ef99cc5914d1bf0ddc8bcd8e3d7928d85bd6"}, + {file = "pydantic-1.10.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:36e44a4de37b8aecffa81c081dbfe42c4d2bf9f6dff34d03dce157ec65eb0f15"}, + {file = "pydantic-1.10.5-cp37-cp37m-win_amd64.whl", hash = "sha256:261f357f0aecda005934e413dfd7aa4077004a174dafe414a8325e6098a8e419"}, + {file = "pydantic-1.10.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b429f7c457aebb7fbe7cd69c418d1cd7c6fdc4d3c8697f45af78b8d5a7955760"}, + {file = "pydantic-1.10.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:663d2dd78596c5fa3eb996bc3f34b8c2a592648ad10008f98d1348be7ae212fb"}, + {file = "pydantic-1.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51782fd81f09edcf265823c3bf43ff36d00db246eca39ee765ef58dc8421a642"}, + {file = "pydantic-1.10.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c428c0f64a86661fb4873495c4fac430ec7a7cef2b8c1c28f3d1a7277f9ea5ab"}, + {file = "pydantic-1.10.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:76c930ad0746c70f0368c4596020b736ab65b473c1f9b3872310a835d852eb19"}, + {file = "pydantic-1.10.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3257bd714de9db2102b742570a56bf7978e90441193acac109b1f500290f5718"}, + {file = "pydantic-1.10.5-cp38-cp38-win_amd64.whl", hash = "sha256:f5bee6c523d13944a1fdc6f0525bc86dbbd94372f17b83fa6331aabacc8fd08e"}, + {file = "pydantic-1.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:532e97c35719f137ee5405bd3eeddc5c06eb91a032bc755a44e34a712420daf3"}, + {file = "pydantic-1.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ca9075ab3de9e48b75fa8ccb897c34ccc1519177ad8841d99f7fd74cf43be5bf"}, + {file = "pydantic-1.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd46a0e6296346c477e59a954da57beaf9c538da37b9df482e50f836e4a7d4bb"}, + {file = "pydantic-1.10.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3353072625ea2a9a6c81ad01b91e5c07fa70deb06368c71307529abf70d23325"}, + {file = "pydantic-1.10.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3f9d9b2be177c3cb6027cd67fbf323586417868c06c3c85d0d101703136e6b31"}, + {file = "pydantic-1.10.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b473d00ccd5c2061fd896ac127b7755baad233f8d996ea288af14ae09f8e0d1e"}, + {file = "pydantic-1.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:5f3bc8f103b56a8c88021d481410874b1f13edf6e838da607dcb57ecff9b4594"}, + {file = "pydantic-1.10.5-py3-none-any.whl", hash = "sha256:7c5b94d598c90f2f46b3a983ffb46ab806a67099d118ae0da7ef21a2a4033b28"}, + {file = "pydantic-1.10.5.tar.gz", hash = "sha256:9e337ac83686645a46db0e825acceea8e02fca4062483f40e9ae178e8bd1103a"}, ] [package.dependencies] @@ -3579,28 +3579,29 @@ six = ">=1.5" [[package]] name = "python-multipart" -version = "0.0.5" +version = "0.0.6" description = "A streaming multipart parser for Python" category = "main" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "python-multipart-0.0.5.tar.gz", hash = "sha256:f7bb5f611fc600d15fa47b3974c8aa16e93724513b49b5f95c81e6624c83fa43"}, + {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"}, + {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"}, ] -[package.dependencies] -six = ">=1.4.0" +[package.extras] +dev = ["atomicwrites (==1.2.1)", 
"attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"] [[package]] name = "pytorch-lightning" -version = "1.9.1" +version = "1.9.4" description = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pytorch-lightning-1.9.1.tar.gz", hash = "sha256:45b1031f1bdf68d9350fa42e5ec01ff8492d5badda9685a2ae48e5fd8598510a"}, - {file = "pytorch_lightning-1.9.1-py3-none-any.whl", hash = "sha256:c143ee0a7e4c5779b54aa1bf1ae5faa19ed3e5546e31dad4a0298db6b115cc21"}, + {file = "pytorch-lightning-1.9.4.tar.gz", hash = "sha256:188a7f4468acf23512e7f4903253d86fc7929a49f0c09d699872e364162001e8"}, + {file = "pytorch_lightning-1.9.4-py3-none-any.whl", hash = "sha256:a2d2bd7657716087c294b076fe385ed17879764d6daaad0a541394a8f7164f93"}, ] [package.dependencies] @@ -3615,15 +3616,16 @@ tqdm = ">=4.57.0" typing-extensions = ">=4.0.0" [package.extras] -all = ["deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)", "torchvision (>=0.11.1)"] +all = ["colossalai (>=0.2.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)", "torchvision (>=0.11.1)"] +colossalai = ["colossalai (>=0.2.0)"] deepspeed = ["deepspeed (>=0.6.0)"] -dev = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "coverage (==6.5.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "fastapi (<0.87.0)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "rich (>=10.14.0,!=10.15.0.a)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchvision (>=0.11.1)", "uvicorn (<0.19.1)"] +dev = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "colossalai (>=0.2.0)", "coverage (==6.5.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "fastapi (<0.87.0)", "gym[classic-control] (>=0.17.0)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)", "hydra-core (>=1.0.5)", "ipython[all] (<8.7.1)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "rich (>=10.14.0,!=10.15.0.a)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "tensorboardX (>=2.2)", "torchvision (>=0.11.1)", "uvicorn (<0.19.1)"] examples = ["gym[classic-control] (>=0.17.0)", "ipython[all] (<8.7.1)", "torchvision 
(>=0.11.1)"] extra = ["hydra-core (>=1.0.5)", "jsonargparse[signatures] (>=4.18.0)", "matplotlib (>3.1)", "omegaconf (>=2.0.5)", "rich (>=10.14.0,!=10.15.0.a)", "tensorboardX (>=2.2)"] fairscale = ["fairscale (>=0.4.5)"] hivemind = ["hivemind (==1.1.5)"] horovod = ["horovod (>=0.21.2,!=0.24.0)"] -strategies = ["deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)"] +strategies = ["colossalai (>=0.2.0)", "deepspeed (>=0.6.0)", "fairscale (>=0.4.5)", "hivemind (==1.1.5)", "horovod (>=0.21.2,!=0.24.0)"] test = ["cloudpickle (>=1.3)", "codecov (==2.1.12)", "coverage (==6.5.0)", "fastapi (<0.87.0)", "onnxruntime (<1.14.0)", "pandas (>1.0)", "pre-commit (==2.20.0)", "protobuf (<=3.20.1)", "psutil (<5.9.5)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-forked (==1.4.0)", "pytest-rerunfailures (==10.3)", "scikit-learn (>0.22.1)", "tensorboard (>=2.9.1)", "uvicorn (<0.19.1)"] [[package]] @@ -3933,19 +3935,19 @@ idna2008 = ["idna"] [[package]] name = "rich" -version = "13.3.1" +version = "13.3.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" category = "main" optional = false python-versions = ">=3.7.0" files = [ - {file = "rich-13.3.1-py3-none-any.whl", hash = "sha256:8aa57747f3fc3e977684f0176a88e789be314a99f99b43b75d1e9cb5dc6db9e9"}, - {file = "rich-13.3.1.tar.gz", hash = "sha256:125d96d20c92b946b983d0d392b84ff945461e5a06d3867e9f9e575f8697b67f"}, + {file = "rich-13.3.2-py3-none-any.whl", hash = "sha256:a104f37270bf677148d8acb07d33be1569eeee87e2d1beb286a4e9113caf6f2f"}, + {file = "rich-13.3.2.tar.gz", hash = "sha256:91954fe80cfb7985727a467ca98a7618e5dd15178cc2da10f553b36a93859001"}, ] [package.dependencies] -markdown-it-py = ">=2.1.0,<3.0.0" -pygments = ">=2.14.0,<3.0.0" +markdown-it-py = ">=2.2.0,<3.0.0" +pygments = ">=2.13.0,<3.0.0" [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -4089,14 +4091,14 @@ test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "sciki [[package]] name = "sentry-sdk" -version = "1.15.0" +version = "1.16.0" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = false python-versions = "*" files = [ - {file = "sentry-sdk-1.15.0.tar.gz", hash = "sha256:69ecbb2e1ff4db02a06c4f20f6f69cb5dfe3ebfbc06d023e40d77cf78e9c37e7"}, - {file = "sentry_sdk-1.15.0-py2.py3-none-any.whl", hash = "sha256:7ad4d37dd093f4a7cb5ad804c6efe9e8fab8873f7ffc06042dc3f3fd700a93ec"}, + {file = "sentry-sdk-1.16.0.tar.gz", hash = "sha256:a900845bd78c263d49695d48ce78a4bce1030bbd917e0b6cc021fc000c901113"}, + {file = "sentry_sdk-1.16.0-py2.py3-none-any.whl", hash = "sha256:633edefead34d976ff22e7edc367cdf57768e24bc714615ccae746d9d91795ae"}, ] [package.dependencies] @@ -4105,6 +4107,7 @@ urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] +arq = ["arq (>=0.23)"] beam = ["apache-beam (>=2.12)"] bottle = ["bottle (>=0.12.13)"] celery = ["celery (>=3)"] @@ -4214,14 +4217,14 @@ test = ["pytest"] [[package]] name = "setuptools" -version = "67.3.1" +version = "67.4.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "setuptools-67.3.1-py3-none-any.whl", hash = "sha256:23c86b4e44432bfd8899384afc08872ec166a24f48a3f99f293b0a557e6a6b5d"}, - {file = "setuptools-67.3.1.tar.gz", hash = "sha256:daec07fd848d80676694d6bf69c009d28910aeece68a38dbe88b7e1bb6dba12e"}, + 
{file = "setuptools-67.4.0-py3-none-any.whl", hash = "sha256:f106dee1b506dee5102cc3f3e9e68137bbad6d47b616be7991714b0c62204251"}, + {file = "setuptools-67.4.0.tar.gz", hash = "sha256:e5fd0a713141a4a105412233c63dc4e17ba0090c8e8334594ac790ec97792330"}, ] [package.extras] @@ -4808,14 +4811,14 @@ tqdm = "*" [[package]] name = "torchmetrics" -version = "0.11.1" +version = "0.11.3" description = "PyTorch native Metrics" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "torchmetrics-0.11.1-py3-none-any.whl", hash = "sha256:9987d7c21b081cceef246a72be1ce25bf29c842764f59dda54f59e3b4cd1970b"}, - {file = "torchmetrics-0.11.1.tar.gz", hash = "sha256:de2e9feb3316f798ab08b318302ff04e764f47e691f0847f780044279fa176ca"}, + {file = "torchmetrics-0.11.3-py3-none-any.whl", hash = "sha256:7797c6e86f7474b6e0beb46f979044354a831e012199e96e52d2208a15ebe418"}, + {file = "torchmetrics-0.11.3.tar.gz", hash = "sha256:6a2bcc17361f0e4c1668c92595b12ef30ccf9ef1d03263bee7c6136a882afe30"}, ] [package.dependencies] @@ -4824,14 +4827,12 @@ packaging = "*" torch = ">=1.8.1" [package.extras] -all = ["lpips", "nltk (>=3.6)", "pycocotools", "pystoi", "pytorch-lightning (>=1.5)", "regex (>=2021.9.24)", "scipy", "torch-fidelity", "torchvision", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>=4.10.0)"] -audio = ["pystoi"] -detection = ["pycocotools", "torchvision (>=0.8)"] -docs = ["docutils (>=0.16)", "myst-parser", "nbsphinx (>=0.8)", "pandoc (>=1.0)", "sphinx (>=4.0,<5.0)", "sphinx-autodoc-typehints (>=1.0)", "sphinx-copybutton (>=0.3)", "sphinx-paramlinks (>=0.5.1)", "sphinx-togglebutton (>=0.2)", "sphinxcontrib-fulltoc (>=1.0)", "sphinxcontrib-mockautodoc"] -image = ["lpips", "scipy", "torch-fidelity", "torchvision"] -integrate = ["pytorch-lightning (>=1.5)"] +all = ["lpips (<=0.1.4)", "nltk (>=3.6)", "pycocotools (>2.0.0)", "pystoi (<=0.3.3)", "regex (>=2021.9.24)", "scipy (>1.0.0)", "torch-fidelity (<=0.3.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>=4.10.0)"] +audio = ["pystoi (<=0.3.3)"] +detection = ["pycocotools (>2.0.0)", "torchvision (>=0.8)"] +image = ["lpips (<=0.1.4)", "scipy (>1.0.0)", "torch-fidelity (<=0.3.0)", "torchvision (>=0.8)"] multimodal = ["transformers (>=4.10.0)"] -test = ["bert-score (==0.3.10)", "check-manifest", "cloudpickle (>=1.3)", "coverage (>5.2)", "dython", "fast-bss-eval (>=0.1.0)", "fire", "huggingface-hub (<0.7)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "mir-eval (>=0.6)", "mypy (==0.982)", "netcal", "pandas", "phmdoctest (>=1.1.1)", "pre-commit (>=1.0)", "psutil", "pycocotools", "pypesq (>1.2)", "pytest (>=6.0.0,<7.0.0)", "pytest-cov (>2.10)", "pytest-doctestplus (>=0.9.0)", "pytest-rerunfailures (>=10.0)", "pytest-timeout", "pytorch-msssim (==0.2.1)", "requests", "rouge-score (>=0.0.4)", "sacrebleu (>=2.0.0)", "scikit-image (>0.17.1)", "scikit-learn (>1.0,<1.1.1)", "scipy", "torch-complex", "transformers (>4.4.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] +test = ["bert-score (==0.3.13)", "cloudpickle (>1.3)", "coverage (>5.2)", "dython (<=0.7.3)", "fast-bss-eval (>=0.1.0)", "fire (<=0.5.0)", "huggingface-hub (<0.7)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "mir-eval (>=0.6)", "mypy (==0.982)", "netcal (>1.0.0)", "pandas (>1.0.0)", "phmdoctest (>=1.1.1)", "psutil (<=5.9.4)", "pypesq (>1.2)", "pytest (>=6.0.0)", "pytest-cov (>2.10)", "pytest-doctestplus (>=0.9.0)", "pytest-rerunfailures (>=10.0)", "pytest-timeout (<=2.1.0)", "pytorch-msssim (==0.2.1)", 
"requests (<=2.28.2)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>0.17.1)", "scikit-learn (>1.0)", "scipy (>1.0.0)", "torch-complex (<=0.4.3)", "transformers (>4.4.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"] text = ["nltk (>=3.6)", "regex (>=2021.9.24)", "tqdm (>=4.41.0)"] [[package]] @@ -4857,14 +4858,14 @@ files = [ [[package]] name = "tqdm" -version = "4.64.1" +version = "4.65.0" description = "Fast, Extensible Progress Meter" category = "main" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +python-versions = ">=3.7" files = [ - {file = "tqdm-4.64.1-py2.py3-none-any.whl", hash = "sha256:6fee160d6ffcd1b1c68c65f14c829c22832bc401726335ce92c52d395944a6a1"}, - {file = "tqdm-4.64.1.tar.gz", hash = "sha256:5f4f682a004951c1b450bc753c710e9280c5746ce6ffedee253ddbcbf54cf1e4"}, + {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, + {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, ] [package.dependencies] @@ -5183,76 +5184,87 @@ dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] [[package]] name = "wrapt" -version = "1.14.1" +version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ - {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, - {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, - {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, - {file = 
"wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, - {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, - {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, - {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, - {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, - {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, - {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, - {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, - {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, - {file = 
"wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, - {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, - {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, - {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, - {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, - {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, - {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, - {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, - {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", 
hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, - {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, - {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, - {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, + {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, + {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, + {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, + {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, + {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, + {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, + {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, + {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, + {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, + {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, + {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, + {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, + {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, + {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, + {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, + {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, + {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, + {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, + {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, ] [[package]] diff --git a/tests/test_nsf_hifigan.py b/tests/test_nsf_hifigan.py index cb87b032..717b57b9 100644 --- a/tests/test_nsf_hifigan.py +++ b/tests/test_nsf_hifigan.py @@ -1,9 +1,9 @@ import soundfile as sf import torchaudio - -from fish_diffusion.feature_extractors.pitch import ParselMouthPitchExtractor from fish_diffusion.vocoders import NsfHifiGAN +from fish_diffusion.modules.pitch_extractors import ParselMouthPitchExtractor + source = "dataset/valid/opencpop/TruE-干音_0000/0000.wav" gan = NsfHifiGAN() diff --git a/tests/test_visualize_pitch.py b/tests/test_visualize_pitch.py index e2f19682..550ef15e 100644 --- a/tests/test_visualize_pitch.py +++ b/tests/test_visualize_pitch.py @@ -5,7 +5,7 @@ from loguru import logger from matplotlib import pyplot as plt -from fish_diffusion.feature_extractors.pitch import ( +from fish_diffusion.modules.pitch_extractors import ( CrepePitchExtractor, DioPitchExtractor, HarvestPitchExtractor, diff --git a/tools/batch_inference.py b/tools/batch_inference.py deleted file mode 100644 index 0404b9e7..00000000 --- a/tools/batch_inference.py +++ /dev/null @@ -1,53 +0,0 @@ -import argparse -import os - -parser = argparse.ArgumentParser(description="Fish-SVC batch inferencing tool") -parser.add_argument( - "--config", type=str, required=True, help="Path of the config you want to use" -) -parser.add_argument( - "--sampler_interval", - type=int, - default=10, - help="Speedup value for the inference process. 
Higher values will decrease render quality but increase inference speed.", ) -parser.add_argument( - "--checkpoint", - type=str, - required=True, - help="Path to the checkpoint file to use for rendering.", -) -parser.add_argument( - "--input_audio_folder", - type=str, - required=True, - help="Path to the folder containing input audio WAV files.", -) -parser.add_argument( - "--output_folder", - type=str, - required=True, - help="Path to the folder where rendered audio WAV files will be saved.", -) -parser.add_argument( - "--pitch_adjust", - type=int, - default=0, - help="Pitch adjustment value for the rendered audio, in semitones. Positive values increase pitch, negative values decrease pitch.", -) -args = parser.parse_args() - -config = args.config -sampler_interval = args.sampler_interval -checkpoint = args.checkpoint -input_audio_folder = args.input_audio_folder -output_folder = args.output_folder -pitch_adjust = args.pitch_adjust - -for name in os.listdir(input_audio_folder): - if name.endswith(".wav"): - input_file = os.path.join(input_audio_folder, name) - print(input_file) - render = os.path.join(output_folder, name) - command = f"python inference.py --config {config} --checkpoint {checkpoint} --sampler_interval {sampler_interval} --pitch_adjust {pitch_adjust} --input {input_audio_folder} --output {output_folder}" - os.system(command) diff --git a/tools/clean_speaker_embeddings.py b/tools/diffusion/clean_speaker_embeddings.py similarity index 100% rename from tools/clean_speaker_embeddings.py rename to tools/diffusion/clean_speaker_embeddings.py diff --git a/tools/diff_svc_converter.py b/tools/diffusion/diff_svc_converter.py similarity index 99% rename from tools/diff_svc_converter.py rename to tools/diffusion/diff_svc_converter.py index a1c8c184..94e04ad4 100644 --- a/tools/diff_svc_converter.py +++ b/tools/diffusion/diff_svc_converter.py @@ -3,7 +3,6 @@ import torch from loguru import logger from mmengine import Config - from train import FishDiffusion diff --git a/tools/vst_flask_api.py b/tools/diffusion/flask_api.py similarity index 50% rename from tools/vst_flask_api.py rename to tools/diffusion/flask_api.py index 32502160..63b34f28 100644 --- a/tools/vst_flask_api.py +++ b/tools/diffusion/flask_api.py @@ -7,11 +7,12 @@ import librosa import soundfile +import torch from flask import Flask, request, send_file from flask_cors import CORS from mmengine import Config -from inference import inference +from tools.diffusion.inference import SVCInference app = Flask(__name__) @@ -27,63 +28,38 @@ def voice_change_model(): # Pitch change information f_pitch_change = float(request_form.get("fPitchChange", 0)) # Get the speaker id - int_speak_Id = int(request_form.get("sSpeakId", 0)) + int_speaker_id = int(request_form.get("sSpeakId", 0)) if enable_spk_id_cover: - int_speak_Id = spk_id - print("说话人:" + str(int_speak_Id)) + int_speaker_id = spk_id + + print(f"Speaker: {int_speaker_id}, pitch: {f_pitch_change}") + # Sample rate required by the DAW daw_sample = int(float(request_form.get("sampleRate", 0))) + # Read the wav file from the HTTP request and convert it input_wav_file = io.BytesIO(wave_file.read()) + # Model inference - _audio, _model_sr = svc_model.infer(input_wav_file, f_pitch_change, int_speak_Id) + _audio, _model_sr = model.inference( + input_path=input_wav_file, + output_path=None, + speaker=int_speaker_id, + pitch_adjust=f_pitch_change, + silence_threshold=silence_threshold, + max_slice_duration=max_slice_duration, + extract_vocals=extract_vocals, + sampler_interval=sampler_interval, + ) + tar_audio = librosa.resample(_audio, _model_sr, daw_sample) + # Return the audio out_wav_path = 
io.BytesIO() soundfile.write(out_wav_path, tar_audio, daw_sample, format="wav") out_wav_path.seek(0) - return send_file(out_wav_path, download_name="temp.wav", as_attachment=True) - -class SvcFish: - def __init__( - self, - checkpoint_path, - config_path, - sampler_interval=None, - extract_vocals=True, - merge_non_vocals=True, - vocals_loudness_gain=0.0, - silence_threshold=60, - max_slice_duration=30.0, - ): - self.config = Config.fromfile(config_path) - self.checkpoint_path = checkpoint_path - self.sampler_interval = sampler_interval - self.silence_threshold = silence_threshold - self.max_slice_duration = max_slice_duration - self.extract_vocals = extract_vocals - self.merge_non_vocals = merge_non_vocals - self.vocals_loudness_gain = vocals_loudness_gain - - def infer(self, input_path, pitch_adjust, speaker_id): - return inference( - config=self.config, - checkpoint=self.checkpoint_path, - input_path=input_path, - output_path=None, - speaker_id=speaker_id, - pitch_adjust=pitch_adjust, - silence_threshold=self.silence_threshold, - max_slice_duration=self.max_slice_duration, - extract_vocals=self.extract_vocals, - merge_non_vocals=self.merge_non_vocals, - vocals_loudness_gain=self.vocals_loudness_gain, - sampler_interval=self.sampler_interval, - sampler_progress=True, - device="cuda", - gradio_progress=None, - ) + return send_file(out_wav_path, download_name="temp.wav", as_attachment=True) if __name__ == "__main__": @@ -94,10 +70,8 @@ def infer(self, input_path, pitch_adjust, speaker_id): config_path = "configs/svc_cn_hubert_soft_ms.py" # Speedup factor; None uses the value from the config file sampler_interval = None - # Whether to extract vocals, merge non-vocals, and the vocal loudness gain + # Whether to extract vocals extract_vocals = False - merge_non_vocals = False - vocals_loudness_gain = 0.0 # Default speaker, and whether to override the speaker id passed in from the VST plugin. spk_id = 0 enable_spk_id_cover = False @@ -106,16 +80,11 @@ def infer(self, input_path, pitch_adjust, speaker_id): # Silence threshold silence_threshold = 60 - svc_model = SvcFish( - checkpoint_path, - config_path, - sampler_interval=sampler_interval, - extract_vocals=extract_vocals, - merge_non_vocals=merge_non_vocals, - vocals_loudness_gain=vocals_loudness_gain, - silence_threshold=silence_threshold, - max_slice_duration=max_slice_duration, - ) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + config = Config.fromfile(config_path) + model = SVCInference(config, checkpoint_path) + model = model.to(device) # This matches the VST plugin; changing it is not recommended app.run(port=6842, host="0.0.0.0", debug=False, threaded=False) diff --git a/tools/diffusion/gradio_ui.py b/tools/diffusion/gradio_ui.py new file mode 100644 index 00000000..67acad9a --- /dev/null +++ b/tools/diffusion/gradio_ui.py @@ -0,0 +1,92 @@ +from functools import partial +from typing import Union + +import gradio as gr + + +def run_inference( + inference_fn, + input_path: str, + speaker: Union[int, str], + pitch_adjust: int, + sampler_interval: int, + extract_vocals: bool, + progress=gr.Progress(), +): + audio, sr = inference_fn( + input_path=input_path, + output_path=None, + speaker=speaker, + pitch_adjust=pitch_adjust, + sampler_interval=sampler_interval, + extract_vocals=extract_vocals, + gradio_progress=progress, + ) + + return sr, audio + + +def launch_gradio( + config, + inference_fn, + speaker, + pitch_adjust: int, + sampler_interval: int, + extract_vocals: bool, + share: bool = False, +): + with gr.Blocks(title="Fish Diffusion") as app: + gr.Markdown("# Fish Diffusion SVC Inference") + + with gr.Row(): + with gr.Column(): + input_audio = gr.Audio( + label="Input Audio", + type="filepath", + ) + output_audio = 
gr.Audio(label="Output Audio") + + with gr.Column(): + if hasattr(config, "speaker_mapping"): + speaker_mapping = config.speaker_mapping + speaker = gr.Dropdown( + label="Speaker Name (Used for Multi-Speaker Models)", + choices=list(speaker_mapping.keys()), + value=speaker + if speaker in speaker_mapping + else list(speaker_mapping.keys())[0], + ) + else: + speaker = gr.Number( + label="Speaker ID (Used for Multi-Speaker Models)", + value=int(speaker), + ) + + pitch_adjust = gr.Number( + label="Pitch Adjust (Semitones)", value=pitch_adjust + ) + sampler_interval = gr.Slider( + label="Sampler Interval (⬆️ Faster Generation, ⬇️ Better Quality)", + value=sampler_interval or 10, + minimum=1, + maximum=100, + ) + extract_vocals = gr.Checkbox( + label="Extract Vocals (For low quality audio)", + value=extract_vocals, + ) + run_btn = gr.Button(label="Run") + + run_btn.click( + partial(run_inference, inference_fn), + [ + input_audio, + speaker, + pitch_adjust, + sampler_interval, + extract_vocals, + ], + output_audio, + ) + + app.queue(concurrency_count=2).launch(share=share) diff --git a/tools/diffusion/inference.py b/tools/diffusion/inference.py new file mode 100644 index 00000000..7f794625 --- /dev/null +++ b/tools/diffusion/inference.py @@ -0,0 +1,354 @@ +import argparse +import json +import os +from typing import Optional + +import librosa +import numpy as np +import soundfile as sf +import torch +from fish_audio_preprocess.utils import loudness_norm +from loguru import logger +from mmengine import Config +from torch import nn + +from fish_diffusion.modules.feature_extractors import FEATURE_EXTRACTORS +from fish_diffusion.modules.pitch_extractors import PITCH_EXTRACTORS +from fish_diffusion.utils.audio import get_mel_from_audio, separate_vocals, slice_audio +from fish_diffusion.utils.inference import load_checkpoint +from fish_diffusion.utils.tensor import repeat_expand +from tools.diffusion.gradio_ui import launch_gradio + + +class SVCInference(nn.Module): + def __init__(self, config, checkpoint): + super().__init__() + + self.config = config + + self.text_features_extractor = FEATURE_EXTRACTORS.build( + config.preprocessing.text_features_extractor + ) + self.pitch_extractor = PITCH_EXTRACTORS.build( + config.preprocessing.pitch_extractor + ) + + if os.path.isdir(checkpoint): + # Find the latest checkpoint + checkpoints = sorted(os.listdir(checkpoint)) + logger.info( + f"Found {len(checkpoints)} checkpoints, using {checkpoints[-1]}" + ) + checkpoint = os.path.join(checkpoint, checkpoints[-1]) + + self.model = load_checkpoint(config, checkpoint) + + @property + def device(self): + return next(self.parameters()).device + + @torch.no_grad() + def forward( + self, + audio: torch.Tensor, + sr: int, + pitch_adjust: int = 0, + speaker_id: int = 0, + sampler_progress: bool = False, + sampler_interval: Optional[int] = None, + ): + mel = get_mel_from_audio(audio, sr) + + # Extract and process pitch + pitch = self.pitch_extractor(audio, sr, pad_to=mel.shape[-1]).float() + pitch *= 2 ** (pitch_adjust / 12) + + # Extract and process text features + text_features = self.text_features_extractor(audio, sr)[0] + text_features = repeat_expand(text_features, mel.shape[-1]).T + + # Pitch shift should always be 0 for inference to avoid distortion + pitch_shift = None + if self.config.model.get("pitch_shift_encoder"): + pitch_shift = torch.zeros((1, 1), device=self.device) + + # Predict + contents_lens = torch.tensor([mel.shape[-1]]).to(self.device) + + features = self.model.model.forward_features( + 
speakers=torch.tensor([speaker_id]).long().to(self.device), + contents=text_features[None].to(self.device), + contents_lens=contents_lens, + contents_max_len=max(contents_lens), + mel_lens=contents_lens, + mel_max_len=max(contents_lens), + pitches=pitch[None].to(self.device), + pitch_shift=pitch_shift, + ) + + result = self.model.model.diffusion( + features["features"], + progress=sampler_progress, + sampler_interval=sampler_interval, + ) + wav = self.model.vocoder.spec2wav(result[0].T, f0=pitch).cpu().numpy() + + return wav + + @torch.no_grad() + def inference( + self, + input_path, + output_path, + speaker=0, + pitch_adjust=0, + silence_threshold=60, + max_slice_duration=30.0, + extract_vocals=True, + sampler_progress=False, + sampler_interval=None, + gradio_progress=None, + ): + """Inference + + Args: + input_path: input path + output_path: output path + speaker: speaker id or speaker name + pitch_adjust: pitch adjust + silence_threshold: silence threshold of librosa.effects.split + max_slice_duration: maximum duration of each slice + extract_vocals: extract vocals + sampler_progress: show sampler progress + sampler_interval: sampler interval + gradio_progress: gradio progress callback + """ + + if os.path.isdir(input_path): + # Batch inference + if output_path is None: + logger.error("Output path is required for batch inference") + return + + if os.path.exists(output_path) and not os.path.isdir(output_path): + logger.error( + f"Output path {output_path} already exists, and it's not a directory" + ) + return + + for file in os.listdir(input_path): + self.inference( + os.path.join(input_path, file), + os.path.join(output_path, file), + speaker, + pitch_adjust, + silence_threshold, + max_slice_duration, + extract_vocals, + sampler_progress, + sampler_interval, + gradio_progress, + ) + + return + + # Process speaker + try: + speaker_id = self.config.speaker_mapping[speaker] + except KeyError: + # Parse speaker id + speaker_id = int(speaker) + + # Load audio + audio, sr = librosa.load(input_path, sr=self.config.sampling_rate, mono=True) + + # Extract vocals + + if extract_vocals: + logger.info("Extracting vocals...") + + if gradio_progress is not None: + gradio_progress(0, "Extracting vocals...") + + audio, _ = separate_vocals(audio, sr, self.device) + + # Normalize loudness + audio = loudness_norm.loudness_norm(audio, sr) + + # Slice into segments + segments = list( + slice_audio( + audio, sr, max_duration=max_slice_duration, top_db=silence_threshold + ) + ) + logger.info(f"Sliced into {len(segments)} segments") + + generated_audio = np.zeros_like(audio) + audio_torch = torch.from_numpy(audio).to(self.device)[None] + + for idx, (start, end) in enumerate(segments): + if gradio_progress is not None: + gradio_progress(idx / len(segments), "Generating audio...") + + segment = audio_torch[:, start:end] + logger.info( + f"Processing segment {idx + 1}/{len(segments)}, duration: {segment.shape[-1] / sr:.2f}s" + ) + + wav = self( + segment, + sr, + pitch_adjust=pitch_adjust, + speaker_id=speaker_id, + sampler_progress=sampler_progress, + sampler_interval=sampler_interval, + ) + max_wav_len = generated_audio.shape[-1] - start + generated_audio[start : start + wav.shape[-1]] = wav[:max_wav_len] + + # Loudness normalization + generated_audio = loudness_norm.loudness_norm(generated_audio, sr) + + logger.info("Done") + + if output_path is not None: + sf.write(output_path, generated_audio, sr) + + return generated_audio, sr + + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--config", 
type=str, + required=True, + help="Path to the config file", + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint file", + ) + + parser.add_argument( + "--gradio", + action="store_true", + help="Run in gradio mode", + ) + + parser.add_argument( + "--gradio_share", + action="store_true", + help="Share gradio app", + ) + + parser.add_argument( + "--input", + type=str, + required=False, + help="Path to the input audio file", + ) + + parser.add_argument( + "--output", + type=str, + required=False, + help="Path to the output audio file", + ) + + parser.add_argument( + "--speaker", + type=str, + default="0", + help="Speaker id or speaker name", + ) + + parser.add_argument( + "--speaker_mapping", + type=str, + default=None, + help="Speaker mapping file (if not specified, will be taken from config)", + ) + + parser.add_argument( + "--pitch_adjust", + type=int, + default=0, + help="Pitch adjustment in semitones", + ) + + parser.add_argument( + "--extract_vocals", + action="store_true", + help="Extract vocals", + ) + + parser.add_argument( + "--sampler_interval", + type=int, + default=None, + required=False, + help="Sampler interval, if not specified, will be taken from config", + ) + + parser.add_argument( + "--sampler_progress", + action="store_true", + help="Show sampler progress", + ) + + parser.add_argument( + "--device", + type=str, + default=None, + required=False, + help="Device to use", + ) + + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + assert args.gradio or ( + args.input is not None and args.output is not None + ), "Either --gradio or --input and --output should be specified" + + if args.device is None: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + else: + device = torch.device(args.device) + + config = Config.fromfile(args.config) + + if args.speaker_mapping is not None: + config.speaker_mapping = json.load(open(args.speaker_mapping)) + + model = SVCInference(config, args.checkpoint) + model = model.to(device) + + if args.gradio: + launch_gradio( + config, + model.inference, + speaker=args.speaker, + pitch_adjust=args.pitch_adjust, + sampler_interval=args.sampler_interval, + extract_vocals=args.extract_vocals, + share=args.gradio_share, + ) + + else: + model.inference( + input_path=args.input, + output_path=args.output, + speaker=args.speaker, + pitch_adjust=args.pitch_adjust, + extract_vocals=args.extract_vocals, + sampler_progress=args.sampler_progress, + sampler_interval=args.sampler_interval, + ) diff --git a/inference_svs.py b/tools/diffusion/inference_svs.py similarity index 94% rename from inference_svs.py rename to tools/diffusion/inference_svs.py index 0073fe00..3fc7b98e 100644 --- a/inference_svs.py +++ b/tools/diffusion/inference_svs.py @@ -10,9 +10,10 @@ from loguru import logger from mmengine import Config -from fish_diffusion.feature_extractors import FEATURE_EXTRACTORS, PITCH_EXTRACTORS +from fish_diffusion.archs.diffsinger.diffsinger import DiffSingerLightning +from fish_diffusion.modules.feature_extractors import FEATURE_EXTRACTORS +from fish_diffusion.modules.pitch_extractors import PITCH_EXTRACTORS from fish_diffusion.utils.tensor import repeat_expand -from train import FishDiffusion @torch.no_grad() @@ -56,7 +57,7 @@ def inference( ).to(device) phoneme_features_extractor.eval() - model = FishDiffusion(config) + model = DiffSingerLightning(config) state_dict = torch.load(checkpoint, map_location="cpu") if "state_dict" in state_dict: # Checkpoint 
is saved by pl @@ -134,10 +135,10 @@ def inference( features = model.model.forward_features( speakers=torch.tensor([speaker_id]).long().to(device), contents=phoneme_features[None].to(device), - src_lens=src_lens, - max_src_len=max(src_lens), + contents_lens=src_lens, + contents_max_len=max(src_lens), mel_lens=src_lens, - max_mel_len=max(src_lens), + mel_max_len=max(src_lens), pitches=f0_seq[None], ) diff --git a/inference_svs_ds.py b/tools/diffusion/inference_svs_ds.py similarity index 96% rename from inference_svs_ds.py rename to tools/diffusion/inference_svs_ds.py index 594dfcb9..d50b9f2d 100644 --- a/inference_svs_ds.py +++ b/tools/diffusion/inference_svs_ds.py @@ -3,7 +3,6 @@ import math import os -import librosa import numpy as np import soundfile as sf import torch @@ -11,9 +10,9 @@ from loguru import logger from mmengine import Config -from fish_diffusion.feature_extractors import FEATURE_EXTRACTORS, PITCH_EXTRACTORS +from fish_diffusion.archs.diffsinger.diffsinger import DiffSingerLightning +from fish_diffusion.modules.pitch_extractors import PITCH_EXTRACTORS from fish_diffusion.utils.tensor import repeat_expand -from train import FishDiffusion @torch.no_grad() @@ -52,7 +51,7 @@ def inference( checkpoint = os.path.join(checkpoint, checkpoints[-1]) # Load models - model = FishDiffusion(config) + model = DiffSingerLightning(config) state_dict = torch.load(checkpoint, map_location="cpu") if "state_dict" in state_dict: # Checkpoint is saved by pl @@ -98,7 +97,6 @@ def inference( phones.append(phones_list.index(phone)) durations.append(float(duration)) - print(phones, durations) phones = np.array(phones) durations = np.array(durations) @@ -162,10 +160,10 @@ def inference( features = model.model.forward_features( speakers=torch.tensor([speaker_id]).long().to(device), contents=phoneme_features[None].to(device), - src_lens=src_lens, - max_src_len=max(src_lens), + contents_lens=src_lens, + contents_max_len=max(src_lens), mel_lens=src_lens, - max_mel_len=max(src_lens), + mel_max_len=max(src_lens), pitches=f0_seq[None], ) diff --git a/tools/onnx/demo.py b/tools/diffusion/onnx/demo.py similarity index 90% rename from tools/onnx/demo.py rename to tools/diffusion/onnx/demo.py index dac28994..16fc2e37 100644 --- a/tools/onnx/demo.py +++ b/tools/diffusion/onnx/demo.py @@ -5,7 +5,7 @@ import torch from loguru import logger -from fish_diffusion.feature_extractors.pitch import ParselMouthPitchExtractor +from fish_diffusion.modules.pitch_extractors import ParselMouthPitchExtractor from fish_diffusion.utils.tensor import repeat_expand @@ -14,7 +14,7 @@ def test(): feature_extractor = ort.InferenceSession("exported/feature_extractor.onnx") feature_embedding = ort.InferenceSession("exported/feature_embedding.onnx") diffusion = ort.InferenceSession("exported/diffusion.onnx") - vocoder = ort.InferenceSession("checkpoints/nsf_hifigan_onnx/nsf_hifigan.onnx") + vocoder = ort.InferenceSession("checkpoints/nsf_hifigan/nsf_hifigan.onnx") logger.info("All models loaded.") audio, sr = librosa.load("raw/一半一半.wav", sr=44100, mono=True) @@ -73,8 +73,7 @@ def test(): }, )[0] - audio = np.squeeze(audio, 0) - sf.write("generated.wav", audio, 44100) + sf.write("generated.wav", audio[0, 0], 44100) logger.info("Congratulations! You have generated a speech sample! 
🎉") diff --git a/tools/onnx/export.py b/tools/diffusion/onnx/export.py similarity index 95% rename from tools/onnx/export.py rename to tools/diffusion/onnx/export.py index 2a5df072..4eac0767 100644 --- a/tools/onnx/export.py +++ b/tools/diffusion/onnx/export.py @@ -7,9 +7,9 @@ from loguru import logger from mmengine import Config -from fish_diffusion.archs.diffsinger import DiffSinger -from fish_diffusion.feature_extractors import FEATURE_EXTRACTORS -from train import FishDiffusion +from fish_diffusion.archs.diffsinger.diffsinger import DiffSinger, DiffSingerLightning +from fish_diffusion.modules.feature_extractors import FEATURE_EXTRACTORS +from fish_diffusion.utils.inference import load_checkpoint class FeatureEmbeddingWrapper(torch.nn.Module): @@ -248,14 +248,7 @@ def main(config: str, checkpoint: str): device = "cpu" config = Config.fromfile(config) - model = FishDiffusion(config) - state_dict = torch.load( - checkpoint, - map_location=device, - )["state_dict"] - model.load_state_dict(state_dict, strict=False) - model.eval() - model.to(device) + model = load_checkpoint(config, checkpoint, device, model_cls=DiffSingerLightning) # Ignore vocoder model = model.model diff --git a/tools/onnx/export_moess.py b/tools/diffusion/onnx/export_moess.py similarity index 96% rename from tools/onnx/export_moess.py rename to tools/diffusion/onnx/export_moess.py index bb25cd2a..44201516 100644 --- a/tools/onnx/export_moess.py +++ b/tools/diffusion/onnx/export_moess.py @@ -1,7 +1,6 @@ from pathlib import Path import click -import numpy as np import onnxruntime as ort import torch from export import export_feature_extractor @@ -9,8 +8,8 @@ from mmengine import Config from torch.nn import functional as F -from fish_diffusion.archs.diffsinger import DiffSinger -from train import FishDiffusion +from fish_diffusion.archs.diffsinger.diffsinger import DiffSinger, DiffSingerLightning +from fish_diffusion.utils.inference import load_checkpoint def denorm_f0(f0, pitch_padding=None): @@ -278,14 +277,7 @@ def main(config: str, checkpoint: str): device = "cpu" config = Config.fromfile(config) - model = FishDiffusion(config) - state_dict = torch.load( - checkpoint, - map_location=device, - )["state_dict"] - model.load_state_dict(state_dict, strict=False) - model.eval() - model.to(device) + model = load_checkpoint(config, checkpoint, device, model_cls=DiffSingerLightning) # Ignore vocoder model = model.model diff --git a/tools/diffusion/train.py b/tools/diffusion/train.py new file mode 100644 index 00000000..e2e78dea --- /dev/null +++ b/tools/diffusion/train.py @@ -0,0 +1,93 @@ +from argparse import ArgumentParser + +import pytorch_lightning as pl +import torch +from loguru import logger +from mmengine import Config +from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger + +from fish_diffusion.archs.diffsinger.diffsinger import DiffSingerLightning +from fish_diffusion.datasets.utils import build_loader_from_config + +torch.set_float32_matmul_precision("medium") + + +if __name__ == "__main__": + pl.seed_everything(42, workers=True) + + parser = ArgumentParser() + parser.add_argument("--config", type=str, required=True) + parser.add_argument("--resume", type=str, default=None) + parser.add_argument( + "--tensorboard", + action="store_true", + default=False, + help="Use tensorboard logger, default is wandb.", + ) + parser.add_argument("--resume-id", type=str, default=None, help="Wandb run id.") + parser.add_argument("--entity", type=str, default=None, help="Wandb entity.") + parser.add_argument("--name", 
type=str, default=None, help="Wandb run name.") + parser.add_argument( + "--pretrained", type=str, default=None, help="Pretrained model." + ) + parser.add_argument( + "--only-train-speaker-embeddings", + action="store_true", + default=False, + help="Only train speaker embeddings.", + ) + + args = parser.parse_args() + + cfg = Config.fromfile(args.config) + + model = DiffSingerLightning(cfg) + + # We only load the state_dict of the model, not the optimizer. + if args.pretrained: + state_dict = torch.load(args.pretrained, map_location="cpu") + if "state_dict" in state_dict: + state_dict = state_dict["state_dict"] + + result = model.load_state_dict(state_dict, strict=False) + + missing_keys = set(result.missing_keys) + unexpected_keys = set(result.unexpected_keys) + + # Make sure incorrect keys are just noise predictor keys. + unexpected_keys = unexpected_keys - set( + i.replace(".naive_noise_predictor.", ".") for i in missing_keys + ) + + assert len(unexpected_keys) == 0 + + if args.only_train_speaker_embeddings: + for name, param in model.named_parameters(): + if "speaker_encoder" not in name: + param.requires_grad = False + + logger.info( + "Only train speaker embeddings, all other parameters are frozen." + ) + + logger = ( + TensorBoardLogger("logs", name=cfg.model.type) + if args.tensorboard + else WandbLogger( + project=cfg.model.type, + save_dir="logs", + log_model=True, + name=args.name, + entity=args.entity, + resume="must" if args.resume_id else False, + id=args.resume_id, + ) + ) + + trainer = pl.Trainer( + logger=logger, + **cfg.trainer, + ) + + train_loader, valid_loader = build_loader_from_config(cfg, trainer.num_devices) + trainer.fit(model, train_loader, valid_loader, ckpt_path=args.resume) diff --git a/tools/nsf_hifigan/export.py b/tools/nsf_hifigan/export.py index f7e3aaa0..ff5fbf82 100644 --- a/tools/nsf_hifigan/export.py +++ b/tools/nsf_hifigan/export.py @@ -6,7 +6,7 @@ import torch from loguru import logger -from fish_diffusion.vocoders.nsf_hifigan.nsf_hifigan import NsfHifiGAN +from fish_diffusion.modules.vocoders.nsf_hifigan.nsf_hifigan import NsfHifiGAN class ExportableNsfHiFiGAN(NsfHifiGAN): diff --git a/tools/nsf_hifigan/train.py b/tools/nsf_hifigan/train.py index 2877813e..9d0eda95 100644 --- a/tools/nsf_hifigan/train.py +++ b/tools/nsf_hifigan/train.py @@ -14,9 +14,7 @@ from torchaudio.transforms import MelSpectrogram from fish_diffusion.datasets.utils import build_loader_from_config -from fish_diffusion.utils.audio import dynamic_range_compression -from fish_diffusion.utils.viz import plot_mel -from fish_diffusion.vocoders.nsf_hifigan.models import ( +from fish_diffusion.modules.vocoders.nsf_hifigan.models import ( AttrDict, Generator, MultiPeriodDiscriminator, @@ -25,6 +23,8 @@ feature_loss, generator_loss, ) +from fish_diffusion.utils.audio import dynamic_range_compression +from fish_diffusion.utils.viz import plot_mel torch.set_float32_matmul_precision("medium") diff --git a/tools/preprocessing/extract_features.py b/tools/preprocessing/extract_features.py index 2923ce8f..d2b15e6f 100644 --- a/tools/preprocessing/extract_features.py +++ b/tools/preprocessing/extract_features.py @@ -16,12 +16,13 @@ from mmengine import Config from tqdm import tqdm -from fish_diffusion.feature_extractors import FEATURE_EXTRACTORS, PITCH_EXTRACTORS -from fish_diffusion.feature_extractors.base import BaseFeatureExtractor -from fish_diffusion.feature_extractors.pitch.builder import BasePitchExtractor +from fish_diffusion.modules.feature_extractors import FEATURE_EXTRACTORS +from 
fish_diffusion.modules.feature_extractors.base import BaseFeatureExtractor +from fish_diffusion.modules.pitch_extractors import PITCH_EXTRACTORS +from fish_diffusion.modules.pitch_extractors.builder import BasePitchExtractor +from fish_diffusion.modules.vocoders import VOCODERS +from fish_diffusion.modules.vocoders.nsf_hifigan.nsf_hifigan import NsfHifiGAN from fish_diffusion.utils.tensor import repeat_expand -from fish_diffusion.vocoders import VOCODERS -from fish_diffusion.vocoders.nsf_hifigan.nsf_hifigan import NsfHifiGAN model_caches = None diff --git a/tools/whisper/train.py b/tools/whisper/train.py index abd1e279..2cc6a105 100644 --- a/tools/whisper/train.py +++ b/tools/whisper/train.py @@ -15,7 +15,7 @@ from torch.utils.data import DataLoader, Dataset from whisper import log_mel_spectrogram, pad_or_trim -from fish_diffusion.feature_extractors.whisper import AlignedWhisper +from fish_diffusion.modules.feature_extractors.whisper import AlignedWhisper phonemes = [] for i in open("dictionaries/opencpop-strict.txt"):