Noename committed on
Commit 013f84f · verified · 1 Parent(s): 31cfeac

Upload audioldm_original_medium.yaml

Files changed (1)
  1. audioldm_original_medium.yaml +146 -0
audioldm_original_medium.yaml ADDED
@@ -0,0 +1,146 @@
+ metadata_root: "./synthesize_audio/dataset_root.json"
+ log_directory: "./log/latent_diffusion"
+ project: "audioldm"
+ precision: "high"
+
+ variables:
+   sampling_rate: &sampling_rate 16000
+   mel_bins: &mel_bins 64
+   latent_embed_dim: &latent_embed_dim 8
+   latent_t_size: &latent_t_size 256 # TODO might need to change
+   latent_f_size: &latent_f_size 16
+   in_channels: &unet_in_channels 8
+   optimize_ddpm_parameter: &optimize_ddpm_parameter true
+   optimize_gpt: &optimize_gpt true
+   warmup_steps: &warmup_steps 2000
+
+ data:
+   train: [ "synthesize_audio" ]
+   test: "synthesize_audio"
+   val: "synthesize_audio"
+   class_label_indices: "synthesize_audio"
+   dataloader_add_ons: []
+
+ step:
+   validation_every_n_epochs: 5
+   save_checkpoint_every_n_steps: 5000
+   # limit_val_batches: 2
+   max_steps: 800000
+   save_top_k: 1
+
+ preprocessing:
+   audio:
+     sampling_rate: *sampling_rate
+     max_wav_value: 32768.0
+     duration: 10.24
+   stft:
+     filter_length: 1024
+     hop_length: 160
+     win_length: 1024
+   mel:
+     n_mel_channels: *mel_bins
+     mel_fmin: 0
+     mel_fmax: 8000
+
+ augmentation:
+   mixup: 0.0
+
+ model:
+   target: audioldm_train.modules.latent_diffusion.ddpm.LatentDiffusion
+   params:
+     # Autoencoder
+     first_stage_config:
+       base_learning_rate: 8.0e-06
+       target: audioldm_train.modules.latent_encoder.autoencoder.AutoencoderKL
+       params:
+         reload_from_ckpt: "data/checkpoints/vae_mel_16k_64bins.ckpt"
+         sampling_rate: *sampling_rate
+         batchsize: 4
+         monitor: val/rec_loss
+         image_key: fbank
+         subband: 1
+         embed_dim: *latent_embed_dim
+         time_shuffle: 1
+         lossconfig:
+           target: audioldm_train.losses.LPIPSWithDiscriminator
+           params:
+             disc_start: 50001
+             kl_weight: 1000.0
+             disc_weight: 0.5
+             disc_in_channels: 1
+         ddconfig:
+           double_z: true
+           mel_bins: *mel_bins # The frequency bins of the mel spectrogram
+           z_channels: 8
+           resolution: 256
+           downsample_time: false
+           in_channels: 1
+           out_ch: 1
+           ch: 128
+           ch_mult:
+             - 1
+             - 2
+             - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+
+     # Other parameters
+     base_learning_rate: 1.0e-4
+     warmup_steps: *warmup_steps
+     optimize_ddpm_parameter: *optimize_ddpm_parameter
+     sampling_rate: *sampling_rate
+     batchsize: 2
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     unconditional_prob_cfg: 0.1
+     parameterization: eps # [eps, x0, v]
+     first_stage_key: fbank
+     latent_t_size: *latent_t_size # TODO might need to change
+     latent_f_size: *latent_f_size
+     channels: *latent_embed_dim # TODO might need to change
+     monitor: val/loss_simple_ema
+     scale_by_std: true
+     unet_config:
+       target: audioldm_train.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 64
+         extra_film_condition_dim: 512 # Set this when using FiLM as the extra condition. For example, two conditioning vectors of dimension 512 each would make this 1024
+         # context_dim:
+         # - 768
+         in_channels: *unet_in_channels # The input channel of the UNet model
+         out_channels: *latent_embed_dim # TODO might need to change
+         model_channels: 192 # TODO might need to change
+         attention_resolutions:
+           - 8
+           - 4
+           - 2
+         num_res_blocks: 2
+         channel_mult:
+           - 1
+           - 2
+           - 3
+           - 5
+         num_head_channels: 32
+         use_spatial_transformer: true
+         transformer_depth: 1
+         extra_sa_layer: False
+
+     cond_stage_config:
+       film_clap_cond1:
+         cond_stage_key: text
+         conditioning_key: film
+         target: audioldm_train.conditional_models.CLAPAudioEmbeddingClassifierFreev2
+         params:
+           pretrained_path: data/checkpoints/clap_music_speech_audioset_epoch_15_esc_89.98.pt
+           sampling_rate: 16000
+           embed_mode: text # or audio
+           amodel: HTSAT-base
+
+     evaluation_params:
+       unconditional_guidance_scale: 3.5
+       ddim_sampling_steps: 200
+       n_candidates_per_samples: 3