kevinwang676 committed
Commit 65ec5fa · verified · 1 parent: d6cd530

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. cosyvoice/__pycache__/__init__.cpython-310.pyc +0 -0
  2. cosyvoice/cli/.ipynb_checkpoints/model-checkpoint.py +466 -0
  3. cosyvoice/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  4. cosyvoice/cli/__pycache__/cosyvoice.cpython-310.pyc +0 -0
  5. cosyvoice/cli/__pycache__/frontend.cpython-310.pyc +0 -0
  6. cosyvoice/cli/__pycache__/model.cpython-310.pyc +0 -0
  7. cosyvoice/dataset/__init__.py +0 -0
  8. cosyvoice/dataset/__pycache__/__init__.cpython-310.pyc +0 -0
  9. cosyvoice/dataset/__pycache__/processor.cpython-310.pyc +0 -0
  10. cosyvoice/dataset/processor.py +435 -0
  11. cosyvoice/flow/__pycache__/decoder.cpython-310.pyc +0 -0
  12. cosyvoice/flow/__pycache__/flow.cpython-310.pyc +0 -0
  13. cosyvoice/flow/__pycache__/flow_matching.cpython-310.pyc +0 -0
  14. cosyvoice/flow/decoder.py +301 -0
  15. cosyvoice/flow/flow.py +239 -0
  16. cosyvoice/flow/flow_matching.py +264 -0
  17. cosyvoice/flow/length_regulator.py +69 -0
  18. cosyvoice/hifigan/__pycache__/f0_predictor.cpython-310.pyc +0 -0
  19. cosyvoice/hifigan/discriminator.py +140 -0
  20. cosyvoice/hifigan/f0_predictor.py +55 -0
  21. cosyvoice/hifigan/generator.py +411 -0
  22. cosyvoice/hifigan/hifigan.py +67 -0
  23. cosyvoice/llm/__pycache__/llm.cpython-310.pyc +0 -0
  24. cosyvoice/utils/class_utils.py +83 -0
  25. cosyvoice/utils/common.py +166 -0
  26. cosyvoice/utils/executor.py +172 -0
  27. cosyvoice/utils/frontend_utils.py +136 -0
  28. cosyvoice/utils/mask.py +267 -0
  29. cosyvoice/utils/scheduler.py +738 -0
  30. cosyvoice/utils/train_utils.py +345 -0
  31. examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml +257 -0
  32. examples/libritts/cosyvoice/conf/cosyvoice.yaml +257 -0
  33. examples/libritts/cosyvoice/conf/ds_stage2.json +42 -0
  34. examples/libritts/cosyvoice/local/download_and_untar.sh +97 -0
  35. examples/libritts/cosyvoice/local/prepare_data.py +53 -0
  36. examples/libritts/cosyvoice/path.sh +3 -0
  37. examples/libritts/cosyvoice/run.sh +126 -0
  38. examples/libritts/cosyvoice/tts_text.json +5 -0
  39. examples/magicdata-read/cosyvoice/path.sh +3 -0
  40. examples/magicdata-read/cosyvoice/run.sh +111 -0
  41. runtime/python/grpc/.ipynb_checkpoints/client-checkpoint.py +106 -0
  42. runtime/python/grpc/.ipynb_checkpoints/cosyvoice_pb2-checkpoint.py +39 -0
  43. runtime/python/grpc/.ipynb_checkpoints/cosyvoice_pb2_grpc-checkpoint.py +66 -0
  44. runtime/python/grpc/__pycache__/cosyvoice_pb2.cpython-310.pyc +0 -0
  45. third_party/Matcha-TTS/configs/callbacks/model_checkpoint.yaml +17 -0
  46. third_party/Matcha-TTS/configs/callbacks/model_summary.yaml +5 -0
  47. third_party/Matcha-TTS/configs/logger/comet.yaml +12 -0
  48. third_party/Matcha-TTS/configs/logger/csv.yaml +7 -0
  49. third_party/Matcha-TTS/configs/logger/many_loggers.yaml +9 -0
  50. third_party/Matcha-TTS/configs/logger/mlflow.yaml +12 -0
cosyvoice/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes).
 
cosyvoice/cli/.ipynb_checkpoints/model-checkpoint.py ADDED
@@ -0,0 +1,466 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Generator
16
+ import torch
17
+ import numpy as np
18
+ import threading
19
+ import time
20
+ from torch.nn import functional as F
21
+ from contextlib import nullcontext
22
+ import uuid
23
+ from cosyvoice.utils.common import fade_in_out
24
+ from cosyvoice.utils.file_utils import convert_onnx_to_trt
25
+ from cosyvoice.flow.flow_matching import EstimatorWrapper
26
+ import queue
27
+
28
+ class CosyVoiceModel:
29
+
30
+ def __init__(self,
31
+ llm: torch.nn.Module,
32
+ flow: torch.nn.Module,
33
+ hift: torch.nn.Module,
34
+ fp16: bool):
35
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
36
+ self.llm = llm
37
+ self.flow = flow
38
+ self.hift = hift
39
+ self.fp16 = fp16
40
+ self.llm.fp16 = fp16
41
+ self.flow.fp16 = fp16
42
+ if self.fp16 is True:
43
+ self.llm.half()
44
+ self.flow.half()
45
+ self.token_min_hop_len = 2 * self.flow.input_frame_rate
46
+ self.token_max_hop_len = 4 * self.flow.input_frame_rate
47
+ self.token_overlap_len = 20
48
+ # here we fix flow.decoder.estimator.static_chunk_size to 0 for compatibility
49
+ self.flow.decoder.estimator.static_chunk_size = 0
50
+ # mel fade in out
51
+ self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
52
+ self.mel_window = np.hamming(2 * self.mel_overlap_len)
53
+ # hift cache
54
+ self.mel_cache_len = 20
55
+ self.source_cache_len = int(self.mel_cache_len * 256)
56
+ # speech fade in out
57
+ self.speech_window = np.hamming(2 * self.source_cache_len)
58
+ # rtf and decoding related
59
+ self.stream_scale_factor = 1
60
+ assert self.stream_scale_factor >= 1, 'stream_scale_factor should be at least 1, change it according to your actual rtf'
61
+ self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
62
+ self.lock = threading.Lock()
63
+ # dict used to store session related variable
64
+ self.tts_speech_token_dict = {}
65
+ self.llm_end_dict = {}
66
+ self.mel_overlap_dict = {}
67
+ self.flow_cache_dict = {}
68
+ self.hift_cache_dict = {}
69
+
70
+ self.stream_context_pool = queue.Queue()
71
+ for _ in range(10):
72
+ self.stream_context_pool.put(torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext())
73
+
74
+ self.is_cuda_available = torch.cuda.is_available()
75
+
76
+ def load(self, llm_model, flow_model, hift_model):
77
+ self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=True)
78
+ self.llm.to(self.device).eval()
79
+ self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
80
+ self.flow.to(self.device).eval()
81
+ # in case hift_model is a hifigan model
82
+ hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
83
+ self.hift.load_state_dict(hift_state_dict, strict=True)
84
+ self.hift.to(self.device).eval()
85
+
86
+ def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
87
+ llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
88
+ self.llm.text_encoder = llm_text_encoder
89
+ llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
90
+ self.llm.llm = llm_llm
91
+ flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
92
+ self.flow.encoder = flow_encoder
93
+
94
+ def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, fp16, estimator_count=8): # use 8 estimators
95
+ assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
96
+ if not os.path.exists(flow_decoder_estimator_model):
97
+ convert_onnx_to_trt(flow_decoder_estimator_model, flow_decoder_onnx_model, fp16)
98
+ if os.path.getsize(flow_decoder_estimator_model) == 0:
99
+ raise ValueError('{} is empty file, delete it and export again!'.format(flow_decoder_estimator_model))
100
+ del self.flow.decoder.estimator
101
+ import tensorrt as trt
102
+ with open(flow_decoder_estimator_model, 'rb') as f:
103
+ self.flow.decoder.estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
104
+ if self.flow.decoder.estimator_engine is None:
105
+ raise ValueError('failed to load trt {}'.format(flow_decoder_estimator_model))
106
+ self.flow.decoder.estimator = EstimatorWrapper(self.flow.decoder.estimator_engine, estimator_count=estimator_count)
107
+
108
+ def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
109
+ with self.llm_context:
110
+ if isinstance(text, Generator):
111
+ assert isinstance(self, CosyVoice2Model), 'streaming input text is only implemented for CosyVoice2!'
112
+ for i in self.llm.inference_bistream(text=text,
113
+ prompt_text=prompt_text.to(self.device),
114
+ prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
115
+ prompt_speech_token=llm_prompt_speech_token.to(self.device),
116
+ prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
117
+ embedding=llm_embedding.to(self.device)):
118
+ self.tts_speech_token_dict[uuid].append(i)
119
+ else:
120
+ for i in self.llm.inference(text=text.to(self.device),
121
+ text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
122
+ prompt_text=prompt_text.to(self.device),
123
+ prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
124
+ prompt_speech_token=llm_prompt_speech_token.to(self.device),
125
+ prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
126
+ embedding=llm_embedding.to(self.device)):
127
+ self.tts_speech_token_dict[uuid].append(i)
128
+ self.llm_end_dict[uuid] = True
129
+
130
+ def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
131
+ tts_mel, flow_cache = self.flow.inference(token=token.to(self.device),
132
+ token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
133
+ prompt_token=prompt_token.to(self.device),
134
+ prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
135
+ prompt_feat=prompt_feat.to(self.device),
136
+ prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
137
+ embedding=embedding.to(self.device),
138
+ flow_cache=self.flow_cache_dict[uuid])
139
+ self.flow_cache_dict[uuid] = flow_cache
140
+
141
+ # mel overlap fade in out
142
+ if self.mel_overlap_dict[uuid].shape[2] != 0:
143
+ tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
144
+ # append hift cache
145
+ if self.hift_cache_dict[uuid] is not None:
146
+ hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
147
+ tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
148
+ else:
149
+ hift_cache_source = torch.zeros(1, 1, 0)
150
+ # keep overlap mel and hift cache
151
+ if finalize is False:
152
+ self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
153
+ tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
154
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
155
+ if self.hift_cache_dict[uuid] is not None:
156
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
157
+ self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
158
+ 'source': tts_source[:, :, -self.source_cache_len:],
159
+ 'speech': tts_speech[:, -self.source_cache_len:]}
160
+ tts_speech = tts_speech[:, :-self.source_cache_len]
161
+ else:
162
+ if speed != 1.0:
163
+ assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
164
+ tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
165
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
166
+ if self.hift_cache_dict[uuid] is not None:
167
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
168
+ return tts_speech
169
+
170
+ def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
171
+ prompt_text=torch.zeros(1, 0, dtype=torch.int32),
172
+ llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
173
+ flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
174
+ prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
175
+ # this_uuid is used to track variables related to this inference thread
176
+
177
+ stream_context = self.stream_context_pool.get()
178
+ with stream_context:
179
+
180
+ this_uuid = str(uuid.uuid1())
181
+ with self.lock:
182
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
183
+ self.hift_cache_dict[this_uuid] = None
184
+ self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
185
+ self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
186
+ p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
187
+ p.start()
188
+ if stream is True:
189
+ token_hop_len = self.token_min_hop_len
190
+ while True:
191
+ time.sleep(0.1)
192
+ if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
193
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
194
+ .unsqueeze(dim=0)
195
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
196
+ prompt_token=flow_prompt_speech_token,
197
+ prompt_feat=prompt_speech_feat,
198
+ embedding=flow_embedding,
199
+ uuid=this_uuid,
200
+ finalize=False)
201
+ yield {'tts_speech': this_tts_speech.cpu()}
202
+ with self.lock:
203
+ self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
204
+ # increase token_hop_len for better speech quality
205
+ token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
206
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
207
+ break
208
+ p.join()
209
+ # deal with remaining tokens; make sure the remaining token length equals token_hop_len when cache_speech is not None
210
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
211
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
212
+ prompt_token=flow_prompt_speech_token,
213
+ prompt_feat=prompt_speech_feat,
214
+ embedding=flow_embedding,
215
+ uuid=this_uuid,
216
+ finalize=True)
217
+ yield {'tts_speech': this_tts_speech.cpu()}
218
+ else:
219
+ # deal with all tokens
220
+ p.join()
221
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
222
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
223
+ prompt_token=flow_prompt_speech_token,
224
+ prompt_feat=prompt_speech_feat,
225
+ embedding=flow_embedding,
226
+ uuid=this_uuid,
227
+ finalize=True,
228
+ speed=speed)
229
+ yield {'tts_speech': this_tts_speech.cpu()}
230
+ with self.lock:
231
+ self.tts_speech_token_dict.pop(this_uuid)
232
+ self.llm_end_dict.pop(this_uuid)
233
+ self.mel_overlap_dict.pop(this_uuid)
234
+ self.hift_cache_dict.pop(this_uuid)
235
+ self.flow_cache_dict.pop(this_uuid)
236
+
237
+ self.synchronize_stream()
238
+ self.stream_context_pool.put(stream_context)
239
+ torch.cuda.empty_cache()
240
+
241
+ def vc(self, source_speech_token, flow_prompt_speech_token, prompt_speech_feat, flow_embedding, stream=False, speed=1.0, **kwargs):
242
+ # this_uuid is used to track variables related to this inference thread
243
+ this_uuid = str(uuid.uuid1())
244
+ with self.lock:
245
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = source_speech_token.flatten().tolist(), True
246
+ self.hift_cache_dict[this_uuid] = None
247
+ self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
248
+ self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
249
+ if stream is True:
250
+ token_hop_len = self.token_min_hop_len
251
+ while True:
252
+ if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
253
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
254
+ .unsqueeze(dim=0)
255
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
256
+ prompt_token=flow_prompt_speech_token,
257
+ prompt_feat=prompt_speech_feat,
258
+ embedding=flow_embedding,
259
+ uuid=this_uuid,
260
+ finalize=False)
261
+ yield {'tts_speech': this_tts_speech.cpu()}
262
+ with self.lock:
263
+ self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
264
+ # increase token_hop_len for better speech quality
265
+ token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
266
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
267
+ break
268
+ # deal with remaining tokens; make sure the remaining token length equals token_hop_len when cache_speech is not None
269
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
270
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
271
+ prompt_token=flow_prompt_speech_token,
272
+ prompt_feat=prompt_speech_feat,
273
+ embedding=flow_embedding,
274
+ uuid=this_uuid,
275
+ finalize=True)
276
+ yield {'tts_speech': this_tts_speech.cpu()}
277
+ else:
278
+ # deal with all tokens
279
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
280
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
281
+ prompt_token=flow_prompt_speech_token,
282
+ prompt_feat=prompt_speech_feat,
283
+ embedding=flow_embedding,
284
+ uuid=this_uuid,
285
+ finalize=True,
286
+ speed=speed)
287
+ yield {'tts_speech': this_tts_speech.cpu()}
288
+ with self.lock:
289
+ self.tts_speech_token_dict.pop(this_uuid)
290
+ self.llm_end_dict.pop(this_uuid)
291
+ self.mel_overlap_dict.pop(this_uuid)
292
+ self.hift_cache_dict.pop(this_uuid)
293
+ torch.cuda.empty_cache()
294
+
295
+ def synchronize_stream(self):
296
+ if self.is_cuda_available:
297
+ torch.cuda.current_stream().synchronize()
298
+
299
+
300
+ class CosyVoice2Model(CosyVoiceModel):
301
+
302
+ def __init__(self,
303
+ llm: torch.nn.Module,
304
+ flow: torch.nn.Module,
305
+ hift: torch.nn.Module,
306
+ fp16: bool):
307
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
308
+ self.llm = llm
309
+ self.flow = flow
310
+ self.hift = hift
311
+ self.fp16 = fp16
312
+ self.llm.fp16 = fp16
313
+ self.flow.fp16 = fp16
314
+ if self.fp16 is True:
315
+ self.llm.half()
316
+ self.flow.half()
317
+ self.token_hop_len = 2 * self.flow.input_frame_rate
318
+ # here we fix the flow encoder/decoder decoding_chunk_size; in the future it will be passed as an argument, or a cache will be used
319
+ self.flow.encoder.static_chunk_size = 2 * self.flow.input_frame_rate
320
+ self.flow.decoder.estimator.static_chunk_size = 2 * self.flow.input_frame_rate * self.flow.token_mel_ratio
321
+ # hift cache
322
+ self.mel_cache_len = 8
323
+ self.source_cache_len = int(self.mel_cache_len * 480)
324
+ # speech fade in out
325
+ self.speech_window = np.hamming(2 * self.source_cache_len)
326
+ # rtf and decoding related
327
+ self.stream_scale_factor = 1
328
+ self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
329
+ self.lock = threading.Lock()
330
+ # dict used to store session related variable
331
+ self.tts_speech_token_dict = {}
332
+ self.llm_end_dict = {}
333
+ self.hift_cache_dict = {}
334
+
335
+ self.stream_context_pool = queue.Queue()
336
+ for _ in range(10):
337
+ self.stream_context_pool.put(torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext())
338
+
339
+ self.is_cuda_available = torch.cuda.is_available()
340
+
341
+ def load_jit(self, flow_encoder_model):
342
+ flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
343
+ self.flow.encoder = flow_encoder
344
+
345
+ def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, token_offset, finalize=False, speed=1.0):
346
+
347
+ tts_mel, _ = self.flow.inference(token=token.to(self.device),
348
+ token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
349
+ prompt_token=prompt_token.to(self.device),
350
+ prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
351
+ prompt_feat=prompt_feat.to(self.device),
352
+ prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
353
+ embedding=embedding.to(self.device),
354
+ finalize=finalize)
355
+ tts_mel = tts_mel[:, :, token_offset * self.flow.token_mel_ratio:]
356
+ # append hift cache
357
+ if self.hift_cache_dict[uuid] is not None:
358
+ hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
359
+ tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
360
+ else:
361
+ hift_cache_source = torch.zeros(1, 1, 0)
362
+ # keep overlap mel and hift cache
363
+ if finalize is False:
364
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
365
+ if self.hift_cache_dict[uuid] is not None:
366
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
367
+ self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
368
+ 'source': tts_source[:, :, -self.source_cache_len:],
369
+ 'speech': tts_speech[:, -self.source_cache_len:]}
370
+ tts_speech = tts_speech[:, :-self.source_cache_len]
371
+ else:
372
+ if speed != 1.0:
373
+ assert self.hift_cache_dict[uuid] is None, 'speed change only supports non-streaming inference mode'
374
+ tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
375
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
376
+ if self.hift_cache_dict[uuid] is not None:
377
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
378
+ return tts_speech
379
+
380
+ def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
381
+ prompt_text=torch.zeros(1, 0, dtype=torch.int32),
382
+ llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
383
+ flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
384
+ prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
385
+ # this_uuid is used to track variables related to this inference thread
386
+ self.synchronize_stream()
387
+ stream_context = self.stream_context_pool.get()
388
+ with stream_context:
389
+
390
+ this_uuid = str(uuid.uuid1())
391
+ with self.lock:
392
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
393
+ self.hift_cache_dict[this_uuid] = None
394
+ p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
395
+ p.start()
396
+ if stream is True:
397
+ token_offset = 0
398
+ while True:
399
+ time.sleep(0.1)
400
+ if len(self.tts_speech_token_dict[this_uuid]) - token_offset >= self.token_hop_len + self.flow.pre_lookahead_len:
401
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_offset + self.token_hop_len + self.flow.pre_lookahead_len]).unsqueeze(dim=0)
402
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
403
+ prompt_token=flow_prompt_speech_token,
404
+ prompt_feat=prompt_speech_feat,
405
+ embedding=flow_embedding,
406
+ uuid=this_uuid,
407
+ token_offset=token_offset,
408
+ finalize=False)
409
+ token_offset += self.token_hop_len
410
+ yield {'tts_speech': this_tts_speech.cpu()}
411
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) - token_offset < self.token_hop_len + self.flow.pre_lookahead_len:
412
+ break
413
+ p.join()
414
+ # deal with remaining tokens; make sure the remaining token length equals token_hop_len when cache_speech is not None
415
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
416
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
417
+ prompt_token=flow_prompt_speech_token,
418
+ prompt_feat=prompt_speech_feat,
419
+ embedding=flow_embedding,
420
+ uuid=this_uuid,
421
+ token_offset=token_offset,
422
+ finalize=True)
423
+ yield {'tts_speech': this_tts_speech.cpu()}
424
+ else:
425
+ # deal with all tokens
426
+ p.join()
427
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
428
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
429
+ prompt_token=flow_prompt_speech_token,
430
+ prompt_feat=prompt_speech_feat,
431
+ embedding=flow_embedding,
432
+ uuid=this_uuid,
433
+ token_offset=0,
434
+ finalize=True,
435
+ speed=speed)
436
+ yield {'tts_speech': this_tts_speech.cpu()}
437
+ with self.lock:
438
+ self.tts_speech_token_dict.pop(this_uuid)
439
+ self.llm_end_dict.pop(this_uuid)
440
+
441
+ self.synchronize_stream()
442
+ self.stream_context_pool.put(stream_context)
443
+ torch.cuda.empty_cache()
444
+
445
+
446
+ class VllmCosyVoice2Model(CosyVoice2Model):
447
+ def __init__(self,
448
+ model_dir: str,
449
+ flow: torch.nn.Module,
450
+ hift: torch.nn.Module,
451
+ fp16: bool):
452
+ try:
453
+ from cosyvoice.llm.llm_vllm import VllmQwen2LM
454
+ except Exception as e:
455
+ raise e
456
+ llm = VllmQwen2LM(model_dir)
457
+ super().__init__(llm, flow, hift, fp16)
458
+
459
+ def load(self, llm_model, flow_model, hift_model):
460
+ self.flow.load_state_dict(torch.load(flow_model, weights_only=True, map_location=self.device), strict=True)
461
+ self.flow.to(self.device).eval()
462
+ # in case hift_model is a hifigan model
463
+ hift_state_dict = {k.replace('generator.', ''): v for k, v in
464
+ torch.load(hift_model, weights_only=True, map_location=self.device).items()}
465
+ self.hift.load_state_dict(hift_state_dict, strict=True)
466
+ self.hift.to(self.device).eval()
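
A minimal sketch of how the streaming path of CosyVoiceModel.tts() above could be consumed, assuming a loaded model instance and pre-tokenized inputs are already available (in the full project this preparation is done by the CosyVoice frontend, which is not part of this commit). The names synthesize_streaming, text_tokens, spk_embedding and demo.wav are illustrative only:

import torch
import torchaudio

def synthesize_streaming(model, text_tokens, spk_embedding, out_path='demo.wav'):
    # tts() is a generator; with stream=True it yields {'tts_speech': Tensor[1, n]} chunks
    chunks = []
    for chunk in model.tts(text=text_tokens,              # (1, T) int32 text tokens
                           flow_embedding=spk_embedding,  # (1, 192) speaker embedding
                           llm_embedding=spk_embedding,
                           stream=True):
        chunks.append(chunk['tts_speech'])
    speech = torch.concat(chunks, dim=1)
    # 22050 Hz matches the mel settings hard-coded in CosyVoiceModel above
    torchaudio.save(out_path, speech, 22050)
    return speech
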
cosyvoice/cli/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes).
 
cosyvoice/cli/__pycache__/cosyvoice.cpython-310.pyc ADDED
Binary file (8.89 kB).
 
cosyvoice/cli/__pycache__/frontend.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
cosyvoice/cli/__pycache__/model.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
cosyvoice/dataset/__init__.py ADDED
File without changes
cosyvoice/dataset/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes).
 
cosyvoice/dataset/__pycache__/processor.cpython-310.pyc ADDED
Binary file (12.9 kB).
 
cosyvoice/dataset/processor.py ADDED
@@ -0,0 +1,435 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ import random
16
+
17
+ import pyarrow.parquet as pq
18
+ from io import BytesIO
19
+ import torch
20
+ import torchaudio
21
+ from torch.nn.utils.rnn import pad_sequence
22
+ import torch.nn.functional as F
23
+ import pyworld as pw
24
+
25
+
26
+ AUDIO_FORMAT_SETS = {'flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'}
27
+
28
+
29
+ def parquet_opener(data, mode='train', tts_data={}):
30
+ """ Given a url or local file, yield sample dicts
31
+ Inplace operation.
32
+
33
+ Args:
34
+ data(Iterable[str]): url or local file list
35
+
36
+ Returns:
37
+ Iterable[{src, stream}]
38
+ """
39
+ for sample in data:
40
+ assert 'src' in sample
41
+ url = sample['src']
42
+ try:
43
+ for df in pq.ParquetFile(url).iter_batches(batch_size=64):
44
+ df = df.to_pandas()
45
+ for i in range(len(df)):
46
+ if mode == 'inference' and df.loc[i, 'utt'] not in tts_data:
47
+ continue
48
+ sample.update(dict(df.loc[i]))
49
+ if mode == 'train':
50
+ # NOTE do not return sample directly, must initialize a new dict
51
+ yield {**sample}
52
+ else:
53
+ for index, text in enumerate(tts_data[df.loc[i, 'utt']]):
54
+ yield {**sample, 'tts_index': index, 'tts_text': text}
55
+ except Exception as ex:
56
+ logging.warning('Failed to open {}, ex info {}'.format(url, ex))
57
+
58
+
59
+ def filter(data,
60
+ max_length=10240,
61
+ min_length=10,
62
+ token_max_length=200,
63
+ token_min_length=1,
64
+ min_output_input_ratio=0.0005,
65
+ max_output_input_ratio=1,
66
+ mode='train'):
67
+ """ Filter sample according to feature and label length
68
+ Inplace operation.
69
+
70
+ Args::
71
+ data: Iterable[{key, wav, label, sample_rate}]
72
+ max_length: drop utterance which is greater than max_length(10ms)
73
+ min_length: drop utterance which is less than min_length(10ms)
74
+ token_max_length: drop utterance which is greater than
75
+ token_max_length, especially when use char unit for
76
+ english modeling
77
+ token_min_length: drop utterance which is
78
+ less than token_min_length
79
+ min_output_input_ratio: minimal ratio of
80
+ token_length / feats_length(10ms)
81
+ max_output_input_ratio: maximum ratio of
82
+ token_length / feats_length(10ms)
83
+
84
+ Returns:
85
+ Iterable[{key, wav, label, sample_rate}]
86
+ """
87
+ for sample in data:
88
+ sample['speech'], sample['sample_rate'] = torchaudio.load(BytesIO(sample['audio_data']))
89
+ sample['speech'] = sample['speech'].mean(dim=0, keepdim=True)
90
+ del sample['audio_data']
91
+ # sample['speech'] is a torch.Tensor; we have 100 frames every second
92
+ num_frames = sample['speech'].size(1) / sample['sample_rate'] * 100
93
+ if num_frames < min_length:
94
+ continue
95
+ if num_frames > max_length:
96
+ continue
97
+ if len(sample['text_token']) < token_min_length:
98
+ continue
99
+ if len(sample['text_token']) > token_max_length:
100
+ continue
101
+ if len(sample['speech_token']) == 0:
102
+ continue
103
+ if num_frames != 0:
104
+ if len(sample['text_token']) / num_frames < min_output_input_ratio:
105
+ continue
106
+ if len(sample['text_token']) / num_frames > max_output_input_ratio:
107
+ continue
108
+ yield sample
109
+
110
+
111
+ def resample(data, resample_rate=22050, min_sample_rate=16000, mode='train'):
112
+ """ Resample data.
113
+ Inplace operation.
114
+
115
+ Args:
116
+ data: Iterable[{key, wav, label, sample_rate}]
117
+ resample_rate: target resample rate
118
+
119
+ Returns:
120
+ Iterable[{key, wav, label, sample_rate}]
121
+ """
122
+ for sample in data:
123
+ assert 'sample_rate' in sample
124
+ assert 'speech' in sample
125
+ sample_rate = sample['sample_rate']
126
+ waveform = sample['speech']
127
+ if sample_rate != resample_rate:
128
+ if sample_rate < min_sample_rate:
129
+ continue
130
+ sample['sample_rate'] = resample_rate
131
+ sample['speech'] = torchaudio.transforms.Resample(
132
+ orig_freq=sample_rate, new_freq=resample_rate)(waveform)
133
+ max_val = sample['speech'].abs().max()
134
+ if max_val > 1:
135
+ sample['speech'] /= max_val
136
+ yield sample
137
+
138
+
139
+ def truncate(data, truncate_length=24576, mode='train'):
140
+ """ Truncate data.
141
+
142
+ Args:
143
+ data: Iterable[{key, wav, label, sample_rate}]
144
+ truncate_length: truncate length
145
+
146
+ Returns:
147
+ Iterable[{key, wav, label, sample_rate}]
148
+ """
149
+ for sample in data:
150
+ waveform = sample['speech']
151
+ if waveform.shape[1] > truncate_length:
152
+ start = random.randint(0, waveform.shape[1] - truncate_length)
153
+ waveform = waveform[:, start: start + truncate_length]
154
+ else:
155
+ waveform = torch.concat([waveform, torch.zeros(1, truncate_length - waveform.shape[1])], dim=1)
156
+ sample['speech'] = waveform
157
+ yield sample
158
+
159
+
160
+ def compute_fbank(data,
161
+ feat_extractor,
162
+ mode='train'):
163
+ """ Extract fbank
164
+
165
+ Args:
166
+ data: Iterable[{key, wav, label, sample_rate}]
167
+
168
+ Returns:
169
+ Iterable[{key, feat, label}]
170
+ """
171
+ for sample in data:
172
+ assert 'sample_rate' in sample
173
+ assert 'speech' in sample
174
+ assert 'utt' in sample
175
+ assert 'text_token' in sample
176
+ waveform = sample['speech']
177
+ mat = feat_extractor(waveform).squeeze(dim=0).transpose(0, 1)
178
+ sample['speech_feat'] = mat
179
+ yield sample
180
+
181
+
182
+ def compute_f0(data, sample_rate, hop_size, mode='train'):
183
+ """ Extract f0
184
+
185
+ Args:
186
+ data: Iterable[{key, wav, label, sample_rate}]
187
+
188
+ Returns:
189
+ Iterable[{key, feat, label}]
190
+ """
191
+ frame_period = hop_size * 1000 / sample_rate
192
+ for sample in data:
193
+ assert 'sample_rate' in sample
194
+ assert 'speech' in sample
195
+ assert 'utt' in sample
196
+ assert 'text_token' in sample
197
+ waveform = sample['speech']
198
+ _f0, t = pw.harvest(waveform.squeeze(dim=0).numpy().astype('double'), sample_rate, frame_period=frame_period)
199
+ if sum(_f0 != 0) < 5: # this happens when the algorithm fails
200
+ _f0, t = pw.dio(waveform.squeeze(dim=0).numpy().astype('double'), sample_rate, frame_period=frame_period) # if harvest fails, try dio
201
+ f0 = pw.stonemask(waveform.squeeze(dim=0).numpy().astype('double'), _f0, t, sample_rate)
202
+ f0 = F.interpolate(torch.from_numpy(f0).view(1, 1, -1), size=sample['speech_feat'].shape[0], mode='linear').view(-1)
203
+ sample['pitch_feat'] = f0
204
+ yield sample
205
+
206
+
207
+ def parse_embedding(data, normalize, mode='train'):
208
+ """ Parse utt_embedding/spk_embedding
209
+
210
+ Args:
211
+ data: Iterable[{key, wav, label, sample_rate}]
212
+
213
+ Returns:
214
+ Iterable[{key, feat, label}]
215
+ """
216
+ for sample in data:
217
+ sample['utt_embedding'] = torch.tensor(sample['utt_embedding'], dtype=torch.float32)
218
+ sample['spk_embedding'] = torch.tensor(sample['spk_embedding'], dtype=torch.float32)
219
+ if normalize:
220
+ sample['utt_embedding'] = F.normalize(sample['utt_embedding'], dim=0)
221
+ sample['spk_embedding'] = F.normalize(sample['spk_embedding'], dim=0)
222
+ yield sample
223
+
224
+
225
+ def tokenize(data, get_tokenizer, allowed_special, mode='train'):
226
+ """ Tokenize text into chars or BPE
227
+ Inplace operation
228
+
229
+ Args:
230
+ data: Iterable[{key, wav, txt, sample_rate}]
231
+
232
+ Returns:
233
+ Iterable[{key, wav, txt, tokens, label, sample_rate}]
234
+ """
235
+ tokenizer = get_tokenizer()
236
+ for sample in data:
237
+ assert 'text' in sample
238
+ sample['text_token'] = tokenizer.encode(sample['text'], allowed_special=allowed_special)
239
+ if mode == 'inference':
240
+ sample['tts_text_token'] = tokenizer.encode(sample['tts_text'], allowed_special=allowed_special)
241
+ yield sample
242
+
243
+
244
+ def shuffle(data, shuffle_size=10000, mode='train'):
245
+ """ Local shuffle the data
246
+
247
+ Args:
248
+ data: Iterable[{key, feat, label}]
249
+ shuffle_size: buffer size for shuffle
250
+
251
+ Returns:
252
+ Iterable[{key, feat, label}]
253
+ """
254
+ buf = []
255
+ for sample in data:
256
+ buf.append(sample)
257
+ if len(buf) >= shuffle_size:
258
+ random.shuffle(buf)
259
+ for x in buf:
260
+ yield x
261
+ buf = []
262
+ # The sample left over
263
+ random.shuffle(buf)
264
+ for x in buf:
265
+ yield x
266
+
267
+
268
+ def sort(data, sort_size=500, mode='train'):
269
+ """ Sort the data by feature length.
270
+ Sort is used after shuffle and before batch, so we can group
271
+ utts with similar lengths into a batch, and `sort_size` should
272
+ be less than `shuffle_size`
273
+
274
+ Args:
275
+ data: Iterable[{key, feat, label}]
276
+ sort_size: buffer size for sort
277
+
278
+ Returns:
279
+ Iterable[{key, feat, label}]
280
+ """
281
+
282
+ buf = []
283
+ for sample in data:
284
+ buf.append(sample)
285
+ if len(buf) >= sort_size:
286
+ buf.sort(key=lambda x: x['speech_feat'].size(0))
287
+ for x in buf:
288
+ yield x
289
+ buf = []
290
+ # The sample left over
291
+ buf.sort(key=lambda x: x['speech_feat'].size(0))
292
+ for x in buf:
293
+ yield x
294
+
295
+
296
+ def static_batch(data, batch_size=16):
297
+ """ Static batch the data by `batch_size`
298
+
299
+ Args:
300
+ data: Iterable[{key, feat, label}]
301
+ batch_size: batch size
302
+
303
+ Returns:
304
+ Iterable[List[{key, feat, label}]]
305
+ """
306
+ buf = []
307
+ for sample in data:
308
+ buf.append(sample)
309
+ if len(buf) >= batch_size:
310
+ yield buf
311
+ buf = []
312
+ if len(buf) > 0:
313
+ yield buf
314
+
315
+
316
+ def dynamic_batch(data, max_frames_in_batch=12000, mode='train'):
317
+ """ Dynamic batch the data until the total frames in batch
318
+ reach `max_frames_in_batch`
319
+
320
+ Args:
321
+ data: Iterable[{key, feat, label}]
322
+ max_frames_in_batch: max_frames in one batch
323
+
324
+ Returns:
325
+ Iterable[List[{key, feat, label}]]
326
+ """
327
+ buf = []
328
+ longest_frames = 0
329
+ for sample in data:
330
+ assert 'speech_feat' in sample
331
+ assert isinstance(sample['speech_feat'], torch.Tensor)
332
+ new_sample_frames = sample['speech_feat'].size(0)
333
+ longest_frames = max(longest_frames, new_sample_frames)
334
+ frames_after_padding = longest_frames * (len(buf) + 1)
335
+ if frames_after_padding > max_frames_in_batch:
336
+ yield buf
337
+ buf = [sample]
338
+ longest_frames = new_sample_frames
339
+ else:
340
+ buf.append(sample)
341
+ if len(buf) > 0:
342
+ yield buf
343
+
344
+
345
+ def batch(data, batch_type='static', batch_size=16, max_frames_in_batch=12000, mode='train'):
346
+ """ Wrapper for static/dynamic batch
347
+ """
348
+ if mode == 'inference':
349
+ return static_batch(data, 1)
350
+ else:
351
+ if batch_type == 'static':
352
+ return static_batch(data, batch_size)
353
+ elif batch_type == 'dynamic':
354
+ return dynamic_batch(data, max_frames_in_batch)
355
+ else:
356
+ logging.fatal('Unsupported batch type {}'.format(batch_type))
357
+
358
+
359
+ def padding(data, use_spk_embedding, mode='train', gan=False):
360
+ """ Padding the data into training data
361
+
362
+ Args:
363
+ data: Iterable[List[{key, feat, label}]]
364
+
365
+ Returns:
366
+ Iterable[Tuple(keys, feats, labels, feats lengths, label lengths)]
367
+ """
368
+ for sample in data:
369
+ assert isinstance(sample, list)
370
+ speech_feat_len = torch.tensor([x['speech_feat'].size(1) for x in sample],
371
+ dtype=torch.int32)
372
+ order = torch.argsort(speech_feat_len, descending=True)
373
+
374
+ utts = [sample[i]['utt'] for i in order]
375
+ speech = [sample[i]['speech'].squeeze(dim=0) for i in order]
376
+ speech_len = torch.tensor([i.size(0) for i in speech], dtype=torch.int32)
377
+ speech = pad_sequence(speech, batch_first=True, padding_value=0)
378
+ speech_token = [torch.tensor(sample[i]['speech_token']) for i in order]
379
+ speech_token_len = torch.tensor([i.size(0) for i in speech_token], dtype=torch.int32)
380
+ speech_token = pad_sequence(speech_token,
381
+ batch_first=True,
382
+ padding_value=0)
383
+ speech_feat = [sample[i]['speech_feat'] for i in order]
384
+ speech_feat_len = torch.tensor([i.size(0) for i in speech_feat], dtype=torch.int32)
385
+ speech_feat = pad_sequence(speech_feat,
386
+ batch_first=True,
387
+ padding_value=0)
388
+ text = [sample[i]['text'] for i in order]
389
+ text_token = [torch.tensor(sample[i]['text_token']) for i in order]
390
+ text_token_len = torch.tensor([i.size(0) for i in text_token], dtype=torch.int32)
391
+ text_token = pad_sequence(text_token, batch_first=True, padding_value=0)
392
+ utt_embedding = torch.stack([sample[i]['utt_embedding'] for i in order], dim=0)
393
+ spk_embedding = torch.stack([sample[i]['spk_embedding'] for i in order], dim=0)
394
+ batch = {
395
+ "utts": utts,
396
+ "speech": speech,
397
+ "speech_len": speech_len,
398
+ "speech_token": speech_token,
399
+ "speech_token_len": speech_token_len,
400
+ "speech_feat": speech_feat,
401
+ "speech_feat_len": speech_feat_len,
402
+ "text": text,
403
+ "text_token": text_token,
404
+ "text_token_len": text_token_len,
405
+ "utt_embedding": utt_embedding,
406
+ "spk_embedding": spk_embedding,
407
+ }
408
+ if gan is True:
409
+ # in gan train, we need pitch_feat
410
+ pitch_feat = [sample[i]['pitch_feat'] for i in order]
411
+ pitch_feat_len = torch.tensor([i.size(0) for i in pitch_feat], dtype=torch.int32)
412
+ pitch_feat = pad_sequence(pitch_feat,
413
+ batch_first=True,
414
+ padding_value=0)
415
+ batch["pitch_feat"] = pitch_feat
416
+ batch["pitch_feat_len"] = pitch_feat_len
417
+ else:
418
+ # only gan train needs speech, delete it to save memory
419
+ del batch["speech"]
420
+ del batch["speech_len"]
421
+ if mode == 'inference':
422
+ tts_text = [sample[i]['tts_text'] for i in order]
423
+ tts_index = [sample[i]['tts_index'] for i in order]
424
+ tts_text_token = [torch.tensor(sample[i]['tts_text_token']) for i in order]
425
+ tts_text_token_len = torch.tensor([i.size(0) for i in tts_text_token], dtype=torch.int32)
426
+ tts_text_token = pad_sequence(tts_text_token, batch_first=True, padding_value=-1)
427
+ batch.update({'tts_text': tts_text,
428
+ 'tts_index': tts_index,
429
+ 'tts_text_token': tts_text_token,
430
+ 'tts_text_token_len': tts_text_token_len})
431
+ if use_spk_embedding is True:
432
+ batch["embedding"] = batch["spk_embedding"]
433
+ else:
434
+ batch["embedding"] = batch["utt_embedding"]
435
+ yield batch
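
The functions in processor.py above are generator stages meant to be chained into a data pipeline; in the recipes they are wired together through cosyvoice.dataset.dataset, which is not part of this diff, so the wiring below is a hedged sketch only. It assumes the parquet shards already carry the audio_data / text / utt / speech_token / embedding columns used above, and that get_tokenizer and feat_extractor are supplied by the training config:

from cosyvoice.dataset.processor import (parquet_opener, tokenize, filter, resample,
                                          compute_fbank, parse_embedding, shuffle,
                                          sort, batch, padding)

def build_pipeline(parquet_list, get_tokenizer, feat_extractor):
    data = ({'src': p} for p in parquet_list)        # one dict per parquet shard
    data = parquet_opener(data)                      # parquet rows -> sample dicts
    data = tokenize(data, get_tokenizer, allowed_special='all')
    data = filter(data)                              # drop too short/long utterances
    data = resample(data, resample_rate=22050)
    data = compute_fbank(data, feat_extractor)
    data = parse_embedding(data, normalize=True)
    data = shuffle(data, shuffle_size=10000)
    data = sort(data, sort_size=500)
    data = batch(data, batch_type='dynamic', max_frames_in_batch=12000)
    data = padding(data, use_spk_embedding=False)
    return data                                      # yields padded training batches
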
cosyvoice/flow/__pycache__/decoder.cpython-310.pyc ADDED
Binary file (8.16 kB).
 
cosyvoice/flow/__pycache__/flow.cpython-310.pyc ADDED
Binary file (6.47 kB).
 
cosyvoice/flow/__pycache__/flow_matching.cpython-310.pyc ADDED
Binary file (8.3 kB).
 
cosyvoice/flow/decoder.py ADDED
@@ -0,0 +1,301 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+ import torch.nn as nn
16
+ import torch.nn.functional as F
17
+ from einops import pack, rearrange, repeat
18
+ from cosyvoice.utils.common import mask_to_bias
19
+ from cosyvoice.utils.mask import add_optional_chunk_mask
20
+ from matcha.models.components.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, TimestepEmbedding, Upsample1D
21
+ from matcha.models.components.transformer import BasicTransformerBlock
22
+
23
+
24
+ class Transpose(torch.nn.Module):
25
+ def __init__(self, dim0: int, dim1: int):
26
+ super().__init__()
27
+ self.dim0 = dim0
28
+ self.dim1 = dim1
29
+
30
+ def forward(self, x: torch.Tensor):
31
+ x = torch.transpose(x, self.dim0, self.dim1)
32
+ return x
33
+
34
+
35
+ class CausalBlock1D(Block1D):
36
+ def __init__(self, dim: int, dim_out: int):
37
+ super(CausalBlock1D, self).__init__(dim, dim_out)
38
+ self.block = torch.nn.Sequential(
39
+ CausalConv1d(dim, dim_out, 3),
40
+ Transpose(1, 2),
41
+ nn.LayerNorm(dim_out),
42
+ Transpose(1, 2),
43
+ nn.Mish(),
44
+ )
45
+
46
+ def forward(self, x: torch.Tensor, mask: torch.Tensor):
47
+ output = self.block(x * mask)
48
+ return output * mask
49
+
50
+
51
+ class CausalResnetBlock1D(ResnetBlock1D):
52
+ def __init__(self, dim: int, dim_out: int, time_emb_dim: int, groups: int = 8):
53
+ super(CausalResnetBlock1D, self).__init__(dim, dim_out, time_emb_dim, groups)
54
+ self.block1 = CausalBlock1D(dim, dim_out)
55
+ self.block2 = CausalBlock1D(dim_out, dim_out)
56
+
57
+
58
+ class CausalConv1d(torch.nn.Conv1d):
59
+ def __init__(
60
+ self,
61
+ in_channels: int,
62
+ out_channels: int,
63
+ kernel_size: int,
64
+ stride: int = 1,
65
+ dilation: int = 1,
66
+ groups: int = 1,
67
+ bias: bool = True,
68
+ padding_mode: str = 'zeros',
69
+ device=None,
70
+ dtype=None
71
+ ) -> None:
72
+ super(CausalConv1d, self).__init__(in_channels, out_channels,
73
+ kernel_size, stride,
74
+ padding=0, dilation=dilation,
75
+ groups=groups, bias=bias,
76
+ padding_mode=padding_mode,
77
+ device=device, dtype=dtype)
78
+ assert stride == 1
79
+ self.causal_padding = (kernel_size - 1, 0)
80
+
81
+ def forward(self, x: torch.Tensor):
82
+ x = F.pad(x, self.causal_padding)
83
+ x = super(CausalConv1d, self).forward(x)
84
+ return x
85
+
86
+
87
+ class ConditionalDecoder(nn.Module):
88
+ def __init__(
89
+ self,
90
+ in_channels,
91
+ out_channels,
92
+ causal=False,
93
+ channels=(256, 256),
94
+ dropout=0.05,
95
+ attention_head_dim=64,
96
+ n_blocks=1,
97
+ num_mid_blocks=2,
98
+ num_heads=4,
99
+ act_fn="snake",
100
+ ):
101
+ """
102
+ This decoder requires an input with the same shape as the target. So, if your text content
103
+ is shorter or longer than the output, please resample it before feeding it to the decoder.
104
+ """
105
+ super().__init__()
106
+ channels = tuple(channels)
107
+ self.in_channels = in_channels
108
+ self.out_channels = out_channels
109
+ self.causal = causal
110
+ self.time_embeddings = SinusoidalPosEmb(in_channels)
111
+ time_embed_dim = channels[0] * 4
112
+ self.time_mlp = TimestepEmbedding(
113
+ in_channels=in_channels,
114
+ time_embed_dim=time_embed_dim,
115
+ act_fn="silu",
116
+ )
117
+ self.down_blocks = nn.ModuleList([])
118
+ self.mid_blocks = nn.ModuleList([])
119
+ self.up_blocks = nn.ModuleList([])
120
+
121
+ output_channel = in_channels
122
+ for i in range(len(channels)): # pylint: disable=consider-using-enumerate
123
+ input_channel = output_channel
124
+ output_channel = channels[i]
125
+ is_last = i == len(channels) - 1
126
+ resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
127
+ ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
128
+ transformer_blocks = nn.ModuleList(
129
+ [
130
+ BasicTransformerBlock(
131
+ dim=output_channel,
132
+ num_attention_heads=num_heads,
133
+ attention_head_dim=attention_head_dim,
134
+ dropout=dropout,
135
+ activation_fn=act_fn,
136
+ )
137
+ for _ in range(n_blocks)
138
+ ]
139
+ )
140
+ downsample = (
141
+ Downsample1D(output_channel) if not is_last else
142
+ CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
143
+ )
144
+ self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))
145
+
146
+ for _ in range(num_mid_blocks):
147
+ input_channel = channels[-1]
148
+ out_channels = channels[-1]
149
+ resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
150
+ ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
151
+
152
+ transformer_blocks = nn.ModuleList(
153
+ [
154
+ BasicTransformerBlock(
155
+ dim=output_channel,
156
+ num_attention_heads=num_heads,
157
+ attention_head_dim=attention_head_dim,
158
+ dropout=dropout,
159
+ activation_fn=act_fn,
160
+ )
161
+ for _ in range(n_blocks)
162
+ ]
163
+ )
164
+
165
+ self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))
166
+
167
+ channels = channels[::-1] + (channels[0],)
168
+ for i in range(len(channels) - 1):
169
+ input_channel = channels[i] * 2
170
+ output_channel = channels[i + 1]
171
+ is_last = i == len(channels) - 2
172
+ resnet = CausalResnetBlock1D(
173
+ dim=input_channel,
174
+ dim_out=output_channel,
175
+ time_emb_dim=time_embed_dim,
176
+ ) if self.causal else ResnetBlock1D(
177
+ dim=input_channel,
178
+ dim_out=output_channel,
179
+ time_emb_dim=time_embed_dim,
180
+ )
181
+ transformer_blocks = nn.ModuleList(
182
+ [
183
+ BasicTransformerBlock(
184
+ dim=output_channel,
185
+ num_attention_heads=num_heads,
186
+ attention_head_dim=attention_head_dim,
187
+ dropout=dropout,
188
+ activation_fn=act_fn,
189
+ )
190
+ for _ in range(n_blocks)
191
+ ]
192
+ )
193
+ upsample = (
194
+ Upsample1D(output_channel, use_conv_transpose=True)
195
+ if not is_last
196
+ else CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
197
+ )
198
+ self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
199
+ self.final_block = CausalBlock1D(channels[-1], channels[-1]) if self.causal else Block1D(channels[-1], channels[-1])
200
+ self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
201
+ self.initialize_weights()
202
+
203
+ def initialize_weights(self):
204
+ for m in self.modules():
205
+ if isinstance(m, nn.Conv1d):
206
+ nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
207
+ if m.bias is not None:
208
+ nn.init.constant_(m.bias, 0)
209
+ elif isinstance(m, nn.GroupNorm):
210
+ nn.init.constant_(m.weight, 1)
211
+ nn.init.constant_(m.bias, 0)
212
+ elif isinstance(m, nn.Linear):
213
+ nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
214
+ if m.bias is not None:
215
+ nn.init.constant_(m.bias, 0)
216
+
217
+ def forward(self, x, mask, mu, t, spks=None, cond=None):
218
+ """Forward pass of the UNet1DConditional model.
219
+
220
+ Args:
221
+ x (torch.Tensor): shape (batch_size, in_channels, time)
222
+ mask (_type_): shape (batch_size, 1, time)
223
+ t (_type_): shape (batch_size)
224
+ spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
225
+ cond (_type_, optional): placeholder for future use. Defaults to None.
226
+
227
+ Raises:
228
+ ValueError: _description_
229
+ ValueError: _description_
230
+
231
+ Returns:
232
+ _type_: _description_
233
+ """
234
+
235
+ t = self.time_embeddings(t).to(t.dtype)
236
+ t = self.time_mlp(t)
237
+
238
+ x = pack([x, mu], "b * t")[0]
239
+
240
+ if spks is not None:
241
+ spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
242
+ x = pack([x, spks], "b * t")[0]
243
+ if cond is not None:
244
+ x = pack([x, cond], "b * t")[0]
245
+
246
+ hiddens = []
247
+ masks = [mask]
248
+ for resnet, transformer_blocks, downsample in self.down_blocks:
249
+ mask_down = masks[-1]
250
+ x = resnet(x, mask_down, t)
251
+ x = rearrange(x, "b c t -> b t c").contiguous()
252
+ # attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down)
253
+ attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, self.static_chunk_size, -1)
254
+ attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
255
+ for transformer_block in transformer_blocks:
256
+ x = transformer_block(
257
+ hidden_states=x,
258
+ attention_mask=attn_mask,
259
+ timestep=t,
260
+ )
261
+ x = rearrange(x, "b t c -> b c t").contiguous()
262
+ hiddens.append(x) # Save hidden states for skip connections
263
+ x = downsample(x * mask_down)
264
+ masks.append(mask_down[:, :, ::2])
265
+ masks = masks[:-1]
266
+ mask_mid = masks[-1]
267
+
268
+ for resnet, transformer_blocks in self.mid_blocks:
269
+ x = resnet(x, mask_mid, t)
270
+ x = rearrange(x, "b c t -> b t c").contiguous()
271
+ # attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid)
272
+ attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, self.static_chunk_size, -1)
273
+ attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
274
+ for transformer_block in transformer_blocks:
275
+ x = transformer_block(
276
+ hidden_states=x,
277
+ attention_mask=attn_mask,
278
+ timestep=t,
279
+ )
280
+ x = rearrange(x, "b t c -> b c t").contiguous()
281
+
282
+ for resnet, transformer_blocks, upsample in self.up_blocks:
283
+ mask_up = masks.pop()
284
+ skip = hiddens.pop()
285
+ x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
286
+ x = resnet(x, mask_up, t)
287
+ x = rearrange(x, "b c t -> b t c").contiguous()
288
+ # attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up)
289
+ attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, self.static_chunk_size, -1)
290
+ attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
291
+ for transformer_block in transformer_blocks:
292
+ x = transformer_block(
293
+ hidden_states=x,
294
+ attention_mask=attn_mask,
295
+ timestep=t,
296
+ )
297
+ x = rearrange(x, "b t c -> b c t").contiguous()
298
+ x = upsample(x * mask_up)
299
+ x = self.final_block(x, mask_up)
300
+ output = self.final_proj(x * mask_up)
301
+ return output * mask
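
A small smoke test for the ConditionalDecoder above, sketched under the assumption that x, mu and cond are 80-bin mels and spks is the 80-dim projected speaker embedding, so the packed input has 4 x 80 = 320 channels. static_chunk_size is not set in __init__ and is normally assigned by the model wrapper (see model-checkpoint.py above), so it is set by hand here; block counts are left at the constructor defaults rather than the larger values in flow.py to keep the test light:

import torch
from cosyvoice.flow.decoder import ConditionalDecoder

estimator = ConditionalDecoder(in_channels=320, out_channels=80, causal=True,
                               channels=(256, 256), act_fn='gelu')
estimator.static_chunk_size = 0   # 0 = full-context attention, as set by CosyVoiceModel
x = torch.randn(1, 80, 100)       # noisy mel being denoised
mu = torch.randn(1, 80, 100)      # length-regulated encoder output
cond = torch.zeros(1, 80, 100)    # prompt-mel condition (zeros = no prompt)
spks = torch.randn(1, 80)         # projected speaker embedding
mask = torch.ones(1, 1, 100)
t = torch.rand(1)                 # flow-matching timestep
out = estimator(x, mask, mu, t, spks=spks, cond=cond)
print(out.shape)                  # expected: torch.Size([1, 80, 100])
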
cosyvoice/flow/flow.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ import random
16
+ from typing import Dict, Optional
17
+ import torch
18
+ import torch.nn as nn
19
+ from torch.nn import functional as F
20
+ from omegaconf import DictConfig
21
+ from cosyvoice.utils.mask import make_pad_mask
22
+
23
+
24
+ class MaskedDiffWithXvec(torch.nn.Module):
25
+ def __init__(self,
26
+ input_size: int = 512,
27
+ output_size: int = 80,
28
+ spk_embed_dim: int = 192,
29
+ output_type: str = "mel",
30
+ vocab_size: int = 4096,
31
+ input_frame_rate: int = 50,
32
+ only_mask_loss: bool = True,
33
+ encoder: torch.nn.Module = None,
34
+ length_regulator: torch.nn.Module = None,
35
+ decoder: torch.nn.Module = None,
36
+ decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
37
+ 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
38
+ 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
39
+ 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
40
+ 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
41
+ mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
42
+ 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
43
+ super().__init__()
44
+ self.input_size = input_size
45
+ self.output_size = output_size
46
+ self.decoder_conf = decoder_conf
47
+ self.mel_feat_conf = mel_feat_conf
48
+ self.vocab_size = vocab_size
49
+ self.output_type = output_type
50
+ self.input_frame_rate = input_frame_rate
51
+ logging.info(f"input frame rate={self.input_frame_rate}")
52
+ self.input_embedding = nn.Embedding(vocab_size, input_size)
53
+ self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
54
+ self.encoder = encoder
55
+ self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
56
+ self.decoder = decoder
57
+ self.length_regulator = length_regulator
58
+ self.only_mask_loss = only_mask_loss
59
+
60
+ def forward(
61
+ self,
62
+ batch: dict,
63
+ device: torch.device,
64
+ ) -> Dict[str, Optional[torch.Tensor]]:
65
+ token = batch['speech_token'].to(device)
66
+ token_len = batch['speech_token_len'].to(device)
67
+ feat = batch['speech_feat'].to(device)
68
+ feat_len = batch['speech_feat_len'].to(device)
69
+ embedding = batch['embedding'].to(device)
70
+
71
+ # xvec projection
72
+ embedding = F.normalize(embedding, dim=1)
73
+ embedding = self.spk_embed_affine_layer(embedding)
74
+
75
+ # concat text and prompt_text
76
+ mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
77
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
78
+
79
+ # text encode
80
+ h, h_lengths = self.encoder(token, token_len)
81
+ h = self.encoder_proj(h)
82
+ h, h_lengths = self.length_regulator(h, feat_len)
83
+
84
+ # get conditions
85
+ conds = torch.zeros(feat.shape, device=token.device)
86
+ for i, j in enumerate(feat_len):
87
+ if random.random() < 0.5:
88
+ continue
89
+ index = random.randint(0, int(0.3 * j))
90
+ conds[i, :index] = feat[i, :index]
91
+ conds = conds.transpose(1, 2)
92
+
93
+ mask = (~make_pad_mask(feat_len)).to(h)
94
+ feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1)
95
+ loss, _ = self.decoder.compute_loss(
96
+ feat.transpose(1, 2).contiguous(),
97
+ mask.unsqueeze(1),
98
+ h.transpose(1, 2).contiguous(),
99
+ embedding,
100
+ cond=conds
101
+ )
102
+ return {'loss': loss}
103
+
104
+ @torch.inference_mode()
105
+ def inference(self,
106
+ token,
107
+ token_len,
108
+ prompt_token,
109
+ prompt_token_len,
110
+ prompt_feat,
111
+ prompt_feat_len,
112
+ embedding,
113
+ flow_cache):
114
+ if self.fp16 is True:
115
+ prompt_feat = prompt_feat.half()
116
+ embedding = embedding.half()
117
+
118
+ assert token.shape[0] == 1
119
+ # xvec projection
120
+ embedding = F.normalize(embedding, dim=1)
121
+ embedding = self.spk_embed_affine_layer(embedding)
122
+
123
+ # concat text and prompt_text
124
+ token_len1, token_len2 = prompt_token.shape[1], token.shape[1]
125
+ token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
126
+ mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
127
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
128
+
129
+ # text encode
130
+ h, h_lengths = self.encoder(token, token_len)
131
+ h = self.encoder_proj(h)
132
+ mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256)
133
+ h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate)
134
+
135
+ # get conditions
136
+ conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
137
+ conds[:, :mel_len1] = prompt_feat
138
+ conds = conds.transpose(1, 2)
139
+
140
+ mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
141
+ feat, flow_cache = self.decoder(
142
+ mu=h.transpose(1, 2).contiguous(),
143
+ mask=mask.unsqueeze(1),
144
+ spks=embedding,
145
+ cond=conds,
146
+ n_timesteps=10,
147
+ prompt_len=mel_len1,
148
+ flow_cache=flow_cache
149
+ )
150
+ feat = feat[:, :, mel_len1:]
151
+ assert feat.shape[2] == mel_len2
152
+ return feat.float(), flow_cache
153
+
154
+
155
+ class CausalMaskedDiffWithXvec(torch.nn.Module):
156
+ def __init__(self,
157
+ input_size: int = 512,
158
+ output_size: int = 80,
159
+ spk_embed_dim: int = 192,
160
+ output_type: str = "mel",
161
+ vocab_size: int = 4096,
162
+ input_frame_rate: int = 50,
163
+ only_mask_loss: bool = True,
164
+ token_mel_ratio: int = 2,
165
+ pre_lookahead_len: int = 3,
166
+ encoder: torch.nn.Module = None,
167
+ decoder: torch.nn.Module = None,
168
+ decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
169
+ 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
170
+ 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
171
+ 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
172
+ 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
173
+ mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
174
+ 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
175
+ super().__init__()
176
+ self.input_size = input_size
177
+ self.output_size = output_size
178
+ self.decoder_conf = decoder_conf
179
+ self.mel_feat_conf = mel_feat_conf
180
+ self.vocab_size = vocab_size
181
+ self.output_type = output_type
182
+ self.input_frame_rate = input_frame_rate
183
+ logging.info(f"input frame rate={self.input_frame_rate}")
184
+ self.input_embedding = nn.Embedding(vocab_size, input_size)
185
+ self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
186
+ self.encoder = encoder
187
+ self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
188
+ self.decoder = decoder
189
+ self.only_mask_loss = only_mask_loss
190
+ self.token_mel_ratio = token_mel_ratio
191
+ self.pre_lookahead_len = pre_lookahead_len
192
+
193
+ @torch.inference_mode()
194
+ def inference(self,
195
+ token,
196
+ token_len,
197
+ prompt_token,
198
+ prompt_token_len,
199
+ prompt_feat,
200
+ prompt_feat_len,
201
+ embedding,
202
+ finalize):
203
+ if self.fp16 is True:
204
+ prompt_feat = prompt_feat.half()
205
+ embedding = embedding.half()
206
+
207
+ assert token.shape[0] == 1
208
+ # xvec projection
209
+ embedding = F.normalize(embedding, dim=1)
210
+ embedding = self.spk_embed_affine_layer(embedding)
211
+
212
+ # concat text and prompt_text
213
+ token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
214
+ mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
215
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
216
+
217
+ # text encode
218
+ h, h_lengths = self.encoder(token, token_len)
219
+ if finalize is False:
220
+ h = h[:, :-self.pre_lookahead_len * self.token_mel_ratio]
221
+ mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1]
222
+ h = self.encoder_proj(h)
223
+
224
+ # get conditions
225
+ conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
226
+ conds[:, :mel_len1] = prompt_feat
227
+ conds = conds.transpose(1, 2)
228
+
229
+ mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
230
+ feat, _ = self.decoder(
231
+ mu=h.transpose(1, 2).contiguous(),
232
+ mask=mask.unsqueeze(1),
233
+ spks=embedding,
234
+ cond=conds,
235
+ n_timesteps=10
236
+ )
237
+ feat = feat[:, :, mel_len1:]
238
+ assert feat.shape[2] == mel_len2
239
+ return feat.float(), None
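Editor's note (not part of the commit): a worked example of the token-to-mel-frame arithmetic used for mel_len2 in the inference paths above, with the default 50 Hz token rate and the 22050 Hz / 256-hop mel configuration; the token count is hypothetical.

input_frame_rate = 50                     # speech tokens per second
sampling_rate, hop_size = 22050, 256      # from the default mel_feat_conf

token_len2 = 100                          # hypothetical: 100 generated speech tokens = 2.0 s
seconds = token_len2 / input_frame_rate               # 2.0
mel_len2 = int(seconds * sampling_rate / hop_size)    # 172 mel frames
print(mel_len2)                                       # 172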
cosyvoice/flow/flow_matching.py ADDED
@@ -0,0 +1,264 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import threading
15
+ import torch
16
+ import torch.nn.functional as F
17
+ from matcha.models.components.flow_matching import BASECFM
18
+ import queue
19
+
20
+ class EstimatorWrapper:
21
+ def __init__(self, estimator_engine, estimator_count=2,):
22
+ self.estimators = queue.Queue()
23
+ self.estimator_engine = estimator_engine
24
+ for _ in range(estimator_count):
25
+ estimator = estimator_engine.create_execution_context()
26
+ if estimator is not None:
27
+ self.estimators.put(estimator)
28
+
29
+ if self.estimators.empty():
30
+ raise Exception("No available estimator")
31
+
32
+ def acquire_estimator(self):
33
+ return self.estimators.get(), self.estimator_engine
34
+
35
+ def release_estimator(self, estimator):
36
+ self.estimators.put(estimator)
37
+ return
38
+
39
+ class ConditionalCFM(BASECFM):
40
+ def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
41
+ super().__init__(
42
+ n_feats=in_channels,
43
+ cfm_params=cfm_params,
44
+ n_spks=n_spks,
45
+ spk_emb_dim=spk_emb_dim,
46
+ )
47
+ self.t_scheduler = cfm_params.t_scheduler
48
+ self.training_cfg_rate = cfm_params.training_cfg_rate
49
+ self.inference_cfg_rate = cfm_params.inference_cfg_rate
50
+ in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
51
+ # Just change the architecture of the estimator here
52
+ self.estimator = estimator
53
+ self.lock = threading.Lock()
54
+
55
+ @torch.inference_mode()
56
+ def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, flow_cache=torch.zeros(1, 80, 0, 2)):
57
+ """Forward diffusion
58
+
59
+ Args:
60
+ mu (torch.Tensor): output of encoder
61
+ shape: (batch_size, n_feats, mel_timesteps)
62
+ mask (torch.Tensor): output_mask
63
+ shape: (batch_size, 1, mel_timesteps)
64
+ n_timesteps (int): number of diffusion steps
65
+ temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
66
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
67
+ shape: (batch_size, spk_emb_dim)
68
+ cond: Not used but kept for future purposes
69
+
70
+ Returns:
71
+ sample: generated mel-spectrogram
72
+ shape: (batch_size, n_feats, mel_timesteps)
73
+ """
74
+
75
+ z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature
76
+ cache_size = flow_cache.shape[2]
77
+ # fix prompt and overlap part mu and z
78
+ if cache_size != 0:
79
+ z[:, :, :cache_size] = flow_cache[:, :, :, 0]
80
+ mu[:, :, :cache_size] = flow_cache[:, :, :, 1]
81
+ z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2)
82
+ mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2)
83
+ flow_cache = torch.stack([z_cache, mu_cache], dim=-1)
84
+
85
+ t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
86
+ if self.t_scheduler == 'cosine':
87
+ t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
88
+ return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), flow_cache
89
+
90
+ def solve_euler(self, x, t_span, mu, mask, spks, cond):
91
+ """
92
+ Fixed euler solver for ODEs.
93
+ Args:
94
+ x (torch.Tensor): random noise
95
+ t_span (torch.Tensor): n_timesteps interpolated
96
+ shape: (n_timesteps + 1,)
97
+ mu (torch.Tensor): output of encoder
98
+ shape: (batch_size, n_feats, mel_timesteps)
99
+ mask (torch.Tensor): output_mask
100
+ shape: (batch_size, 1, mel_timesteps)
101
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
102
+ shape: (batch_size, spk_emb_dim)
103
+ cond: Not used but kept for future purposes
104
+ """
105
+ t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
106
+ t = t.unsqueeze(dim=0)
107
+
108
+ # I am storing this because I can later plot it by putting a debugger here and saving it to a file
109
+ # Or in future might add like a return_all_steps flag
110
+ sol = []
111
+
112
+ # Do not use concat; it may change the memory format and make the TRT engine infer wrong results!
113
+ x_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
114
+ mask_in = torch.zeros([2, 1, x.size(2)], device=x.device, dtype=x.dtype)
115
+ mu_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
116
+ t_in = torch.zeros([2], device=x.device, dtype=x.dtype)
117
+ spks_in = torch.zeros([2, 80], device=x.device, dtype=x.dtype)
118
+ cond_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
119
+ for step in range(1, len(t_span)):
120
+ # Classifier-Free Guidance inference introduced in VoiceBox
121
+ x_in[:] = x
122
+ mask_in[:] = mask
123
+ mu_in[0] = mu
124
+ t_in[:] = t.unsqueeze(0)
125
+ spks_in[0] = spks
126
+ cond_in[0] = cond
127
+ dphi_dt = self.forward_estimator(
128
+ x_in, mask_in,
129
+ mu_in, t_in,
130
+ spks_in,
131
+ cond_in
132
+ )
133
+ dphi_dt, cfg_dphi_dt = torch.split(dphi_dt, [x.size(0), x.size(0)], dim=0)
134
+ dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - self.inference_cfg_rate * cfg_dphi_dt)
135
+ x = x + dt * dphi_dt
136
+ t = t + dt
137
+ sol.append(x)
138
+ if step < len(t_span) - 1:
139
+ dt = t_span[step + 1] - t
140
+
141
+ return sol[-1].float()
142
+
143
+ def forward_estimator(self, x, mask, mu, t, spks, cond):
144
+ if isinstance(self.estimator, torch.nn.Module):
145
+ return self.estimator.forward(x, mask, mu, t, spks, cond)
146
+ else:
147
+ if isinstance(self.estimator, EstimatorWrapper):
148
+ estimator, engine = self.estimator.acquire_estimator()
149
+
150
+ estimator.set_input_shape('x', (2, 80, x.size(2)))
151
+ estimator.set_input_shape('mask', (2, 1, x.size(2)))
152
+ estimator.set_input_shape('mu', (2, 80, x.size(2)))
153
+ estimator.set_input_shape('t', (2,))
154
+ estimator.set_input_shape('spks', (2, 80))
155
+ estimator.set_input_shape('cond', (2, 80, x.size(2)))
156
+
157
+ data_ptrs = [x.contiguous().data_ptr(),
158
+ mask.contiguous().data_ptr(),
159
+ mu.contiguous().data_ptr(),
160
+ t.contiguous().data_ptr(),
161
+ spks.contiguous().data_ptr(),
162
+ cond.contiguous().data_ptr(),
163
+ x.data_ptr()]
164
+
165
+ for idx, data_ptr in enumerate(data_ptrs):
166
+ estimator.set_tensor_address(engine.get_tensor_name(idx), data_ptr)
167
+
168
+ # run trt engine
169
+ estimator.execute_async_v3(torch.cuda.current_stream().cuda_stream)
170
+
171
+ torch.cuda.current_stream().synchronize()
172
+ self.estimator.release_estimator(estimator)
173
+ return x
174
+ else:
175
+ with self.lock:
176
+ self.estimator.set_input_shape('x', (2, 80, x.size(2)))
177
+ self.estimator.set_input_shape('mask', (2, 1, x.size(2)))
178
+ self.estimator.set_input_shape('mu', (2, 80, x.size(2)))
179
+ self.estimator.set_input_shape('t', (2,))
180
+ self.estimator.set_input_shape('spks', (2, 80))
181
+ self.estimator.set_input_shape('cond', (2, 80, x.size(2)))
182
+ # run trt engine
183
+ self.estimator.execute_v2([x.contiguous().data_ptr(),
184
+ mask.contiguous().data_ptr(),
185
+ mu.contiguous().data_ptr(),
186
+ t.contiguous().data_ptr(),
187
+ spks.contiguous().data_ptr(),
188
+ cond.contiguous().data_ptr(),
189
+ x.data_ptr()])
190
+ return x
191
+
192
+ def compute_loss(self, x1, mask, mu, spks=None, cond=None):
193
+ """Computes diffusion loss
194
+
195
+ Args:
196
+ x1 (torch.Tensor): Target
197
+ shape: (batch_size, n_feats, mel_timesteps)
198
+ mask (torch.Tensor): target mask
199
+ shape: (batch_size, 1, mel_timesteps)
200
+ mu (torch.Tensor): output of encoder
201
+ shape: (batch_size, n_feats, mel_timesteps)
202
+ spks (torch.Tensor, optional): speaker embedding. Defaults to None.
203
+ shape: (batch_size, spk_emb_dim)
204
+
205
+ Returns:
206
+ loss: conditional flow matching loss
207
+ y: conditional flow
208
+ shape: (batch_size, n_feats, mel_timesteps)
209
+ """
210
+ b, _, t = mu.shape
211
+
212
+ # random timestep
213
+ t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
214
+ if self.t_scheduler == 'cosine':
215
+ t = 1 - torch.cos(t * 0.5 * torch.pi)
216
+ # sample noise p(x_0)
217
+ z = torch.randn_like(x1)
218
+
219
+ y = (1 - (1 - self.sigma_min) * t) * z + t * x1
220
+ u = x1 - (1 - self.sigma_min) * z
221
+
222
+ # during training, we randomly drop condition to trade off mode coverage and sample fidelity
223
+ if self.training_cfg_rate > 0:
224
+ cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
225
+ mu = mu * cfg_mask.view(-1, 1, 1)
226
+ spks = spks * cfg_mask.view(-1, 1)
227
+ cond = cond * cfg_mask.view(-1, 1, 1)
228
+
229
+ pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond)
230
+ loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
231
+ return loss, y
232
+
233
+
234
+ class CausalConditionalCFM(ConditionalCFM):
235
+ def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
236
+ super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator)
237
+ self.rand_noise = torch.randn([1, 80, 50 * 300])
238
+
239
+ @torch.inference_mode()
240
+ def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
241
+ """Forward diffusion
242
+
243
+ Args:
244
+ mu (torch.Tensor): output of encoder
245
+ shape: (batch_size, n_feats, mel_timesteps)
246
+ mask (torch.Tensor): output_mask
247
+ shape: (batch_size, 1, mel_timesteps)
248
+ n_timesteps (int): number of diffusion steps
249
+ temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
250
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
251
+ shape: (batch_size, spk_emb_dim)
252
+ cond: Not used but kept for future purposes
253
+
254
+ Returns:
255
+ sample: generated mel-spectrogram
256
+ shape: (batch_size, n_feats, mel_timesteps)
257
+ """
258
+
259
+ z = self.rand_noise[:, :, :mu.size(2)].to(mu.device).to(mu.dtype) * temperature
260
+ # fix prompt and overlap part mu and z
261
+ t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
262
+ if self.t_scheduler == 'cosine':
263
+ t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
264
+ return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), None
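Editor's note (not part of the commit): a self-contained sketch of the two halves of ConditionalCFM above — the flow-matching target built in compute_loss and the fixed-step Euler integration done in solve_euler. The shapes are toy values and the estimator is a stand-in that returns the exact target, so the Euler solve recovers x1 up to the sigma_min term; the real estimator also receives the mask, mu, speaker embedding and condition.

import torch

sigma_min = 1e-6
x1 = torch.randn(2, 80, 10)                 # target mel
z = torch.randn_like(x1)                    # noise sample x_0
t = torch.rand(2, 1, 1)
t = 1 - torch.cos(t * 0.5 * torch.pi)       # cosine t-scheduler, as in compute_loss

y = (1 - (1 - sigma_min) * t) * z + t * x1  # point on the conditional probability path
u = x1 - (1 - sigma_min) * z                # regression target for the estimator

def dummy_estimator(x, t_cur):
    # stands in for the transformer estimator; here it predicts u exactly
    return u

# Euler integration from noise to data, mirroring solve_euler
t_span = torch.linspace(0, 1, 10 + 1)
t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
x = z.clone()
for step in range(1, len(t_span)):
    dt = t_span[step] - t_span[step - 1]
    x = x + dt * dummy_estimator(x, t_span[step - 1])
print((x - x1).abs().max())                 # tiny (on the order of sigma_min): the solve recovers the target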
cosyvoice/flow/length_regulator.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Tuple
15
+ import torch.nn as nn
16
+ import torch
17
+ from torch.nn import functional as F
18
+ from cosyvoice.utils.mask import make_pad_mask
19
+
20
+
21
+ class InterpolateRegulator(nn.Module):
22
+ def __init__(
23
+ self,
24
+ channels: int,
25
+ sampling_ratios: Tuple,
26
+ out_channels: int = None,
27
+ groups: int = 1,
28
+ ):
29
+ super().__init__()
30
+ self.sampling_ratios = sampling_ratios
31
+ out_channels = out_channels or channels
32
+ model = nn.ModuleList([])
33
+ if len(sampling_ratios) > 0:
34
+ for _ in sampling_ratios:
35
+ module = nn.Conv1d(channels, channels, 3, 1, 1)
36
+ norm = nn.GroupNorm(groups, channels)
37
+ act = nn.Mish()
38
+ model.extend([module, norm, act])
39
+ model.append(
40
+ nn.Conv1d(channels, out_channels, 1, 1)
41
+ )
42
+ self.model = nn.Sequential(*model)
43
+
44
+ def forward(self, x, ylens=None):
45
+ # x in (B, T, D)
46
+ mask = (~make_pad_mask(ylens)).to(x).unsqueeze(-1)
47
+ x = F.interpolate(x.transpose(1, 2).contiguous(), size=ylens.max(), mode='linear')
48
+ out = self.model(x).transpose(1, 2).contiguous()
49
+ olens = ylens
50
+ return out * mask, olens
51
+
52
+ def inference(self, x1, x2, mel_len1, mel_len2, input_frame_rate=50):
53
+ # in inference mode, interpolate the prompt token and the token (head/mid/tail) separately, so we can get a clear separation point of the mel
54
+ # x in (B, T, D)
55
+ if x2.shape[1] > 40:
56
+ x2_head = F.interpolate(x2[:, :20].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear')
57
+ x2_mid = F.interpolate(x2[:, 20:-20].transpose(1, 2).contiguous(), size=mel_len2 - int(20 / input_frame_rate * 22050 / 256) * 2,
58
+ mode='linear')
59
+ x2_tail = F.interpolate(x2[:, -20:].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear')
60
+ x2 = torch.concat([x2_head, x2_mid, x2_tail], dim=2)
61
+ else:
62
+ x2 = F.interpolate(x2.transpose(1, 2).contiguous(), size=mel_len2, mode='linear')
63
+ if x1.shape[1] != 0:
64
+ x1 = F.interpolate(x1.transpose(1, 2).contiguous(), size=mel_len1, mode='linear')
65
+ x = torch.concat([x1, x2], dim=2)
66
+ else:
67
+ x = x2
68
+ out = self.model(x).transpose(1, 2).contiguous()
69
+ return out, mel_len1 + mel_len2
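Editor's note (not part of the commit): the essence of InterpolateRegulator above is a linear time-interpolation of (B, T, D) features up to the mel length, followed by the small conv stack. A minimal sketch of just the interpolation step, with toy tensors:

import torch
import torch.nn.functional as F

h = torch.randn(1, 25, 80)                          # (B, T_token, D): 25 encoder frames
mel_len = 86                                        # desired number of mel frames
stretched = F.interpolate(h.transpose(1, 2), size=mel_len, mode='linear')
print(stretched.shape)                              # torch.Size([1, 80, 86])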
cosyvoice/hifigan/__pycache__/f0_predictor.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
cosyvoice/hifigan/discriminator.py ADDED
@@ -0,0 +1,140 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from torch.nn.utils.parametrizations import weight_norm
4
+ from typing import List, Optional, Tuple
5
+ from einops import rearrange
6
+ from torchaudio.transforms import Spectrogram
7
+
8
+
9
+ class MultipleDiscriminator(nn.Module):
10
+ def __init__(
11
+ self, mpd: nn.Module, mrd: nn.Module
12
+ ):
13
+ super().__init__()
14
+ self.mpd = mpd
15
+ self.mrd = mrd
16
+
17
+ def forward(self, y: torch.Tensor, y_hat: torch.Tensor):
18
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
19
+ this_y_d_rs, this_y_d_gs, this_fmap_rs, this_fmap_gs = self.mpd(y.unsqueeze(dim=1), y_hat.unsqueeze(dim=1))
20
+ y_d_rs += this_y_d_rs
21
+ y_d_gs += this_y_d_gs
22
+ fmap_rs += this_fmap_rs
23
+ fmap_gs += this_fmap_gs
24
+ this_y_d_rs, this_y_d_gs, this_fmap_rs, this_fmap_gs = self.mrd(y, y_hat)
25
+ y_d_rs += this_y_d_rs
26
+ y_d_gs += this_y_d_gs
27
+ fmap_rs += this_fmap_rs
28
+ fmap_gs += this_fmap_gs
29
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
30
+
31
+
32
+ class MultiResolutionDiscriminator(nn.Module):
33
+ def __init__(
34
+ self,
35
+ fft_sizes: Tuple[int, ...] = (2048, 1024, 512),
36
+ num_embeddings: Optional[int] = None,
37
+ ):
38
+ """
39
+ Multi-Resolution Discriminator module adapted from https://github.com/descriptinc/descript-audio-codec.
40
+ Additionally, it allows incorporating conditional information with a learned embeddings table.
41
+
42
+ Args:
43
+ fft_sizes (tuple[int]): Tuple of window lengths for FFT. Defaults to (2048, 1024, 512).
44
+ num_embeddings (int, optional): Number of embeddings. None means non-conditional discriminator.
45
+ Defaults to None.
46
+ """
47
+
48
+ super().__init__()
49
+ self.discriminators = nn.ModuleList(
50
+ [DiscriminatorR(window_length=w, num_embeddings=num_embeddings) for w in fft_sizes]
51
+ )
52
+
53
+ def forward(
54
+ self, y: torch.Tensor, y_hat: torch.Tensor, bandwidth_id: torch.Tensor = None
55
+ ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[List[torch.Tensor]], List[List[torch.Tensor]]]:
56
+ y_d_rs = []
57
+ y_d_gs = []
58
+ fmap_rs = []
59
+ fmap_gs = []
60
+
61
+ for d in self.discriminators:
62
+ y_d_r, fmap_r = d(x=y, cond_embedding_id=bandwidth_id)
63
+ y_d_g, fmap_g = d(x=y_hat, cond_embedding_id=bandwidth_id)
64
+ y_d_rs.append(y_d_r)
65
+ fmap_rs.append(fmap_r)
66
+ y_d_gs.append(y_d_g)
67
+ fmap_gs.append(fmap_g)
68
+
69
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
70
+
71
+
72
+ class DiscriminatorR(nn.Module):
73
+ def __init__(
74
+ self,
75
+ window_length: int,
76
+ num_embeddings: Optional[int] = None,
77
+ channels: int = 32,
78
+ hop_factor: float = 0.25,
79
+ bands: Tuple[Tuple[float, float], ...] = ((0.0, 0.1), (0.1, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)),
80
+ ):
81
+ super().__init__()
82
+ self.window_length = window_length
83
+ self.hop_factor = hop_factor
84
+ self.spec_fn = Spectrogram(
85
+ n_fft=window_length, hop_length=int(window_length * hop_factor), win_length=window_length, power=None
86
+ )
87
+ n_fft = window_length // 2 + 1
88
+ bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands]
89
+ self.bands = bands
90
+ convs = lambda: nn.ModuleList(
91
+ [
92
+ weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))),
93
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
94
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
95
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
96
+ weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))),
97
+ ]
98
+ )
99
+ self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))])
100
+
101
+ if num_embeddings is not None:
102
+ self.emb = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=channels)
103
+ torch.nn.init.zeros_(self.emb.weight)
104
+
105
+ self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1)))
106
+
107
+ def spectrogram(self, x):
108
+ # Remove DC offset
109
+ x = x - x.mean(dim=-1, keepdims=True)
110
+ # Peak normalize the volume of input audio
111
+ x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
112
+ x = self.spec_fn(x)
113
+ x = torch.view_as_real(x)
114
+ x = rearrange(x, "b f t c -> b c t f")
115
+ # Split into bands
116
+ x_bands = [x[..., b[0]: b[1]] for b in self.bands]
117
+ return x_bands
118
+
119
+ def forward(self, x: torch.Tensor, cond_embedding_id: torch.Tensor = None):
120
+ x_bands = self.spectrogram(x)
121
+ fmap = []
122
+ x = []
123
+ for band, stack in zip(x_bands, self.band_convs):
124
+ for i, layer in enumerate(stack):
125
+ band = layer(band)
126
+ band = torch.nn.functional.leaky_relu(band, 0.1)
127
+ if i > 0:
128
+ fmap.append(band)
129
+ x.append(band)
130
+ x = torch.cat(x, dim=-1)
131
+ if cond_embedding_id is not None:
132
+ emb = self.emb(cond_embedding_id)
133
+ h = (emb.view(1, -1, 1, 1) * x).sum(dim=1, keepdims=True)
134
+ else:
135
+ h = 0
136
+ x = self.conv_post(x)
137
+ fmap.append(x)
138
+ x += h
139
+
140
+ return x, fmap
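Editor's note (not part of the commit): for reference, the fractional band edges used by DiscriminatorR above resolve to the following STFT-bin ranges for the largest default window (2048 samples); the arithmetic mirrors the list comprehension in __init__.

window_length = 2048
n_fft_bins = window_length // 2 + 1                 # 1025 frequency bins
bands = ((0.0, 0.1), (0.1, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0))
bin_ranges = [(int(lo * n_fft_bins), int(hi * n_fft_bins)) for lo, hi in bands]
print(bin_ranges)   # [(0, 102), (102, 256), (256, 512), (512, 768), (768, 1025)]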
cosyvoice/hifigan/f0_predictor.py ADDED
@@ -0,0 +1,55 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+ import torch.nn as nn
16
+ from torch.nn.utils.parametrizations import weight_norm
17
+
18
+
19
+ class ConvRNNF0Predictor(nn.Module):
20
+ def __init__(self,
21
+ num_class: int = 1,
22
+ in_channels: int = 80,
23
+ cond_channels: int = 512
24
+ ):
25
+ super().__init__()
26
+
27
+ self.num_class = num_class
28
+ self.condnet = nn.Sequential(
29
+ weight_norm(
30
+ nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1)
31
+ ),
32
+ nn.ELU(),
33
+ weight_norm(
34
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
35
+ ),
36
+ nn.ELU(),
37
+ weight_norm(
38
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
39
+ ),
40
+ nn.ELU(),
41
+ weight_norm(
42
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
43
+ ),
44
+ nn.ELU(),
45
+ weight_norm(
46
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
47
+ ),
48
+ nn.ELU(),
49
+ )
50
+ self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class)
51
+
52
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
53
+ x = self.condnet(x)
54
+ x = x.transpose(1, 2)
55
+ return torch.abs(self.classifier(x).squeeze(-1))
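Editor's note (not part of the commit): a small usage sketch for ConvRNNF0Predictor above, assuming the cosyvoice package from this commit is on the Python path — the predictor maps an 80-bin mel spectrogram to one non-negative F0 value per frame.

import torch
from cosyvoice.hifigan.f0_predictor import ConvRNNF0Predictor

predictor = ConvRNNF0Predictor(in_channels=80, cond_channels=512)
mel = torch.randn(1, 80, 200)        # (batch, mel_bins, frames)
f0 = predictor(mel)                  # torch.abs(...) keeps the output non-negative
print(f0.shape)                      # torch.Size([1, 200]): one F0 estimate per frame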
cosyvoice/hifigan/generator.py ADDED
@@ -0,0 +1,411 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """HIFI-GAN"""
16
+
17
+ from typing import Dict, Optional, List
18
+ import numpy as np
19
+ from scipy.signal import get_window
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ from torch.nn import Conv1d
24
+ from torch.nn import ConvTranspose1d
25
+ from torch.nn.utils import remove_weight_norm
26
+ from torch.nn.utils.parametrizations import weight_norm
27
+ from torch.distributions.uniform import Uniform
28
+
29
+ from cosyvoice.transformer.activation import Snake
30
+ from cosyvoice.utils.common import get_padding
31
+ from cosyvoice.utils.common import init_weights
32
+
33
+
34
+ """hifigan based generator implementation.
35
+
36
+ This code is modified from https://github.com/jik876/hifi-gan
37
+ ,https://github.com/kan-bayashi/ParallelWaveGAN and
38
+ https://github.com/NVIDIA/BigVGAN
39
+
40
+ """
41
+
42
+
43
+ class ResBlock(torch.nn.Module):
44
+ """Residual block module in HiFiGAN/BigVGAN."""
45
+ def __init__(
46
+ self,
47
+ channels: int = 512,
48
+ kernel_size: int = 3,
49
+ dilations: List[int] = [1, 3, 5],
50
+ ):
51
+ super(ResBlock, self).__init__()
52
+ self.convs1 = nn.ModuleList()
53
+ self.convs2 = nn.ModuleList()
54
+
55
+ for dilation in dilations:
56
+ self.convs1.append(
57
+ weight_norm(
58
+ Conv1d(
59
+ channels,
60
+ channels,
61
+ kernel_size,
62
+ 1,
63
+ dilation=dilation,
64
+ padding=get_padding(kernel_size, dilation)
65
+ )
66
+ )
67
+ )
68
+ self.convs2.append(
69
+ weight_norm(
70
+ Conv1d(
71
+ channels,
72
+ channels,
73
+ kernel_size,
74
+ 1,
75
+ dilation=1,
76
+ padding=get_padding(kernel_size, 1)
77
+ )
78
+ )
79
+ )
80
+ self.convs1.apply(init_weights)
81
+ self.convs2.apply(init_weights)
82
+ self.activations1 = nn.ModuleList([
83
+ Snake(channels, alpha_logscale=False)
84
+ for _ in range(len(self.convs1))
85
+ ])
86
+ self.activations2 = nn.ModuleList([
87
+ Snake(channels, alpha_logscale=False)
88
+ for _ in range(len(self.convs2))
89
+ ])
90
+
91
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
92
+ for idx in range(len(self.convs1)):
93
+ xt = self.activations1[idx](x)
94
+ xt = self.convs1[idx](xt)
95
+ xt = self.activations2[idx](xt)
96
+ xt = self.convs2[idx](xt)
97
+ x = xt + x
98
+ return x
99
+
100
+ def remove_weight_norm(self):
101
+ for idx in range(len(self.convs1)):
102
+ remove_weight_norm(self.convs1[idx])
103
+ remove_weight_norm(self.convs2[idx])
104
+
105
+
106
+ class SineGen(torch.nn.Module):
107
+ """ Definition of sine generator
108
+ SineGen(samp_rate, harmonic_num = 0,
109
+ sine_amp = 0.1, noise_std = 0.003,
110
+ voiced_threshold = 0,
111
+ flag_for_pulse=False)
112
+ samp_rate: sampling rate in Hz
113
+ harmonic_num: number of harmonic overtones (default 0)
114
+ sine_amp: amplitude of sine waveform (default 0.1)
115
+ noise_std: std of Gaussian noise (default 0.003)
116
+ voiced_threshold: F0 threshold for U/V classification (default 0)
117
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
118
+ Note: when flag_for_pulse is True, the first time step of a voiced
119
+ segment is always sin(np.pi) or cos(0)
120
+ """
121
+
122
+ def __init__(self, samp_rate, harmonic_num=0,
123
+ sine_amp=0.1, noise_std=0.003,
124
+ voiced_threshold=0):
125
+ super(SineGen, self).__init__()
126
+ self.sine_amp = sine_amp
127
+ self.noise_std = noise_std
128
+ self.harmonic_num = harmonic_num
129
+ self.sampling_rate = samp_rate
130
+ self.voiced_threshold = voiced_threshold
131
+
132
+ def _f02uv(self, f0):
133
+ # generate uv signal
134
+ uv = (f0 > self.voiced_threshold).type(torch.float32)
135
+ return uv
136
+
137
+ @torch.no_grad()
138
+ def forward(self, f0):
139
+ """
140
+ :param f0: [B, 1, sample_len], Hz
141
+ :return: [B, 1, sample_len]
142
+ """
143
+
144
+ F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device)
145
+ for i in range(self.harmonic_num + 1):
146
+ F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate
147
+
148
+ theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1)
149
+ u_dist = Uniform(low=-np.pi, high=np.pi)
150
+ phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device)
151
+ phase_vec[:, 0, :] = 0
152
+
153
+ # generate sine waveforms
154
+ sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec)
155
+
156
+ # generate uv signal
157
+ uv = self._f02uv(f0)
158
+
159
+ # noise: for unvoiced frames the std should be similar to sine_amp
160
+ # std = self.sine_amp/3 -> max value ~ self.sine_amp
161
+ # for voiced regions the std is self.noise_std
162
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
163
+ noise = noise_amp * torch.randn_like(sine_waves)
164
+
165
+ # first: set the unvoiced part to 0 by uv
166
+ # then: additive noise
167
+ sine_waves = sine_waves * uv + noise
168
+ return sine_waves, uv, noise
169
+
170
+
171
+ class SourceModuleHnNSF(torch.nn.Module):
172
+ """ SourceModule for hn-nsf
173
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
174
+ add_noise_std=0.003, voiced_threshod=0)
175
+ sampling_rate: sampling_rate in Hz
176
+ harmonic_num: number of harmonic above F0 (default: 0)
177
+ sine_amp: amplitude of sine source signal (default: 0.1)
178
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
179
+ note that amplitude of noise in unvoiced is decided
180
+ by sine_amp
181
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
182
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
183
+ F0_sampled (batchsize, length, 1)
184
+ Sine_source (batchsize, length, 1)
185
+ noise_source (batchsize, length 1)
186
+ uv (batchsize, length, 1)
187
+ """
188
+
189
+ def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
190
+ add_noise_std=0.003, voiced_threshod=0):
191
+ super(SourceModuleHnNSF, self).__init__()
192
+
193
+ self.sine_amp = sine_amp
194
+ self.noise_std = add_noise_std
195
+
196
+ # to produce sine waveforms
197
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
198
+ sine_amp, add_noise_std, voiced_threshod)
199
+
200
+ # to merge source harmonics into a single excitation
201
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
202
+ self.l_tanh = torch.nn.Tanh()
203
+
204
+ def forward(self, x):
205
+ """
206
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
207
+ F0_sampled (batchsize, length, 1)
208
+ Sine_source (batchsize, length, 1)
209
+ noise_source (batchsize, length 1)
210
+ """
211
+ # source for harmonic branch
212
+ with torch.no_grad():
213
+ sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2))
214
+ sine_wavs = sine_wavs.transpose(1, 2)
215
+ uv = uv.transpose(1, 2)
216
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
217
+
218
+ # source for noise branch, in the same shape as uv
219
+ noise = torch.randn_like(uv) * self.sine_amp / 3
220
+ return sine_merge, noise, uv
221
+
222
+
223
+ class HiFTGenerator(nn.Module):
224
+ """
225
+ HiFTNet Generator: Neural Source Filter + ISTFTNet
226
+ https://arxiv.org/abs/2309.09493
227
+ """
228
+ def __init__(
229
+ self,
230
+ in_channels: int = 80,
231
+ base_channels: int = 512,
232
+ nb_harmonics: int = 8,
233
+ sampling_rate: int = 22050,
234
+ nsf_alpha: float = 0.1,
235
+ nsf_sigma: float = 0.003,
236
+ nsf_voiced_threshold: float = 10,
237
+ upsample_rates: List[int] = [8, 8],
238
+ upsample_kernel_sizes: List[int] = [16, 16],
239
+ istft_params: Dict[str, int] = {"n_fft": 16, "hop_len": 4},
240
+ resblock_kernel_sizes: List[int] = [3, 7, 11],
241
+ resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
242
+ source_resblock_kernel_sizes: List[int] = [7, 11],
243
+ source_resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5]],
244
+ lrelu_slope: float = 0.1,
245
+ audio_limit: float = 0.99,
246
+ f0_predictor: torch.nn.Module = None,
247
+ ):
248
+ super(HiFTGenerator, self).__init__()
249
+
250
+ self.out_channels = 1
251
+ self.nb_harmonics = nb_harmonics
252
+ self.sampling_rate = sampling_rate
253
+ self.istft_params = istft_params
254
+ self.lrelu_slope = lrelu_slope
255
+ self.audio_limit = audio_limit
256
+
257
+ self.num_kernels = len(resblock_kernel_sizes)
258
+ self.num_upsamples = len(upsample_rates)
259
+ self.m_source = SourceModuleHnNSF(
260
+ sampling_rate=sampling_rate,
261
+ upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"],
262
+ harmonic_num=nb_harmonics,
263
+ sine_amp=nsf_alpha,
264
+ add_noise_std=nsf_sigma,
265
+ voiced_threshod=nsf_voiced_threshold)
266
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"])
267
+
268
+ self.conv_pre = weight_norm(
269
+ Conv1d(in_channels, base_channels, 7, 1, padding=3)
270
+ )
271
+
272
+ # Up
273
+ self.ups = nn.ModuleList()
274
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
275
+ self.ups.append(
276
+ weight_norm(
277
+ ConvTranspose1d(
278
+ base_channels // (2**i),
279
+ base_channels // (2**(i + 1)),
280
+ k,
281
+ u,
282
+ padding=(k - u) // 2,
283
+ )
284
+ )
285
+ )
286
+
287
+ # Down
288
+ self.source_downs = nn.ModuleList()
289
+ self.source_resblocks = nn.ModuleList()
290
+ downsample_rates = [1] + upsample_rates[::-1][:-1]
291
+ downsample_cum_rates = np.cumprod(downsample_rates)
292
+ for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, source_resblock_dilation_sizes)):
293
+ if u == 1:
294
+ self.source_downs.append(
295
+ Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1)
296
+ )
297
+ else:
298
+ self.source_downs.append(
299
+ Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2))
300
+ )
301
+
302
+ self.source_resblocks.append(
303
+ ResBlock(base_channels // (2 ** (i + 1)), k, d)
304
+ )
305
+
306
+ self.resblocks = nn.ModuleList()
307
+ for i in range(len(self.ups)):
308
+ ch = base_channels // (2**(i + 1))
309
+ for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
310
+ self.resblocks.append(ResBlock(ch, k, d))
311
+
312
+ self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3))
313
+ self.ups.apply(init_weights)
314
+ self.conv_post.apply(init_weights)
315
+ self.reflection_pad = nn.ReflectionPad1d((1, 0))
316
+ self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32))
317
+ self.f0_predictor = f0_predictor
318
+
319
+ def remove_weight_norm(self):
320
+ print('Removing weight norm...')
321
+ for l in self.ups:
322
+ remove_weight_norm(l)
323
+ for l in self.resblocks:
324
+ l.remove_weight_norm()
325
+ remove_weight_norm(self.conv_pre)
326
+ remove_weight_norm(self.conv_post)
327
+ self.m_source.remove_weight_norm()
328
+ for l in self.source_downs:
329
+ remove_weight_norm(l)
330
+ for l in self.source_resblocks:
331
+ l.remove_weight_norm()
332
+
333
+ def _stft(self, x):
334
+ spec = torch.stft(
335
+ x,
336
+ self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device),
337
+ return_complex=True)
338
+ spec = torch.view_as_real(spec) # [B, F, TT, 2]
339
+ return spec[..., 0], spec[..., 1]
340
+
341
+ def _istft(self, magnitude, phase):
342
+ magnitude = torch.clip(magnitude, max=1e2)
343
+ real = magnitude * torch.cos(phase)
344
+ img = magnitude * torch.sin(phase)
345
+ inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"],
346
+ self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device))
347
+ return inverse_transform
348
+
349
+ def decode(self, x: torch.Tensor, s: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
350
+ s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
351
+ s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1)
352
+
353
+ x = self.conv_pre(x)
354
+ for i in range(self.num_upsamples):
355
+ x = F.leaky_relu(x, self.lrelu_slope)
356
+ x = self.ups[i](x)
357
+
358
+ if i == self.num_upsamples - 1:
359
+ x = self.reflection_pad(x)
360
+
361
+ # fusion
362
+ si = self.source_downs[i](s_stft)
363
+ si = self.source_resblocks[i](si)
364
+ x = x + si
365
+
366
+ xs = None
367
+ for j in range(self.num_kernels):
368
+ if xs is None:
369
+ xs = self.resblocks[i * self.num_kernels + j](x)
370
+ else:
371
+ xs += self.resblocks[i * self.num_kernels + j](x)
372
+ x = xs / self.num_kernels
373
+
374
+ x = F.leaky_relu(x)
375
+ x = self.conv_post(x)
376
+ magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :])
377
+ phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :]) # actually, the sin here is redundant
378
+
379
+ x = self._istft(magnitude, phase)
380
+ x = torch.clamp(x, -self.audio_limit, self.audio_limit)
381
+ return x
382
+
383
+ def forward(
384
+ self,
385
+ batch: dict,
386
+ device: torch.device,
387
+ ) -> Dict[str, Optional[torch.Tensor]]:
388
+ speech_feat = batch['speech_feat'].transpose(1, 2).to(device)
389
+ # mel->f0
390
+ f0 = self.f0_predictor(speech_feat)
391
+ # f0->source
392
+ s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
393
+ s, _, _ = self.m_source(s)
394
+ s = s.transpose(1, 2)
395
+ # mel+source->speech
396
+ generated_speech = self.decode(x=speech_feat, s=s)
397
+ return generated_speech, f0
398
+
399
+ @torch.inference_mode()
400
+ def inference(self, speech_feat: torch.Tensor, cache_source: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
401
+ # mel->f0
402
+ f0 = self.f0_predictor(speech_feat)
403
+ # f0->source
404
+ s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
405
+ s, _, _ = self.m_source(s)
406
+ s = s.transpose(1, 2)
407
+ # use cache_source to avoid glitch
408
+ if cache_source.shape[2] != 0:
409
+ s[:, :, :cache_source.shape[2]] = cache_source
410
+ generated_speech = self.decode(x=speech_feat, s=s)
411
+ return generated_speech, s
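Editor's note (not part of the commit): a standalone sketch, with toy values and the fundamental only, of the sine-source math used by SineGen above — the per-sample normalized frequency is cumulatively summed modulo 1 to obtain the phase that drives the harmonic excitation.

import numpy as np
import torch

sampling_rate, sine_amp = 22050, 0.1
f0 = torch.full((1, 1, 22050), 220.0)               # 1 s of a constant 220 Hz F0
norm_freq = f0 / sampling_rate                       # cycles per sample
theta = 2 * np.pi * (torch.cumsum(norm_freq, dim=-1) % 1)
sine = sine_amp * torch.sin(theta)                   # fundamental-only excitation
uv = (f0 > 0).float()                                # voiced/unvoiced flag, as in _f02uv
print(sine.shape, uv.mean().item())                  # torch.Size([1, 1, 22050]) 1.0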
cosyvoice/hifigan/hifigan.py ADDED
@@ -0,0 +1,67 @@
1
+ from typing import Dict, Optional
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from matcha.hifigan.models import feature_loss, generator_loss, discriminator_loss
6
+ from cosyvoice.utils.losses import tpr_loss, mel_loss
7
+
8
+
9
+ class HiFiGan(nn.Module):
10
+ def __init__(self, generator, discriminator, mel_spec_transform,
11
+ multi_mel_spectral_recon_loss_weight=45, feat_match_loss_weight=2.0,
12
+ tpr_loss_weight=1.0, tpr_loss_tau=0.04):
13
+ super(HiFiGan, self).__init__()
14
+ self.generator = generator
15
+ self.discriminator = discriminator
16
+ self.mel_spec_transform = mel_spec_transform
17
+ self.multi_mel_spectral_recon_loss_weight = multi_mel_spectral_recon_loss_weight
18
+ self.feat_match_loss_weight = feat_match_loss_weight
19
+ self.tpr_loss_weight = tpr_loss_weight
20
+ self.tpr_loss_tau = tpr_loss_tau
21
+
22
+ def forward(
23
+ self,
24
+ batch: dict,
25
+ device: torch.device,
26
+ ) -> Dict[str, Optional[torch.Tensor]]:
27
+ if batch['turn'] == 'generator':
28
+ return self.forward_generator(batch, device)
29
+ else:
30
+ return self.forward_discriminator(batch, device)
31
+
32
+ def forward_generator(self, batch, device):
33
+ real_speech = batch['speech'].to(device)
34
+ pitch_feat = batch['pitch_feat'].to(device)
35
+ # 1. calculate generator outputs
36
+ generated_speech, generated_f0 = self.generator(batch, device)
37
+ # 2. calculate discriminator outputs
38
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech)
39
+ # 3. calculate generator losses, feature loss, mel loss, tpr losses [Optional]
40
+ loss_gen, _ = generator_loss(y_d_gs)
41
+ loss_fm = feature_loss(fmap_rs, fmap_gs)
42
+ loss_mel = mel_loss(real_speech, generated_speech, self.mel_spec_transform)
43
+ if self.tpr_loss_weight != 0:
44
+ loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau)
45
+ else:
46
+ loss_tpr = torch.zeros(1).to(device)
47
+ loss_f0 = F.l1_loss(generated_f0, pitch_feat)
48
+ loss = loss_gen + self.feat_match_loss_weight * loss_fm + \
49
+ self.multi_mel_spectral_recon_loss_weight * loss_mel + \
50
+ self.tpr_loss_weight * loss_tpr + loss_f0
51
+ return {'loss': loss, 'loss_gen': loss_gen, 'loss_fm': loss_fm, 'loss_mel': loss_mel, 'loss_tpr': loss_tpr, 'loss_f0': loss_f0}
52
+
53
+ def forward_discriminator(self, batch, device):
54
+ real_speech = batch['speech'].to(device)
55
+ # 1. calculate generator outputs
56
+ with torch.no_grad():
57
+ generated_speech, generated_f0 = self.generator(batch, device)
58
+ # 2. calculate discriminator outputs
59
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech)
60
+ # 3. calculate discriminator losses, tpr losses [Optional]
61
+ loss_disc, _, _ = discriminator_loss(y_d_rs, y_d_gs)
62
+ if self.tpr_loss_weight != 0:
63
+ loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau)
64
+ else:
65
+ loss_tpr = torch.zeros(1).to(device)
66
+ loss = loss_disc + self.tpr_loss_weight * loss_tpr
67
+ return {'loss': loss, 'loss_disc': loss_disc, 'loss_tpr': loss_tpr}
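Editor's note (not part of the commit): to make the generator objective above concrete, here is the weighting with the default coefficients and made-up loss values; only the relative magnitudes matter (the mel-reconstruction term dominates by design).

loss_gen, loss_fm, loss_mel, loss_tpr, loss_f0 = 2.1, 0.9, 0.35, 0.02, 0.12   # hypothetical values
feat_match_loss_weight = 2.0
multi_mel_spectral_recon_loss_weight = 45
tpr_loss_weight = 1.0
loss = (loss_gen
        + feat_match_loss_weight * loss_fm
        + multi_mel_spectral_recon_loss_weight * loss_mel
        + tpr_loss_weight * loss_tpr
        + loss_f0)
print(round(loss, 2))   # 19.79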
cosyvoice/llm/__pycache__/llm.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
cosyvoice/utils/class_utils.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright [2023-11-28] <sxc19@mails.tsinghua.edu.cn, Xingchen Song>
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import torch
16
+
17
+ from cosyvoice.transformer.activation import Swish
18
+ from cosyvoice.transformer.subsampling import (
19
+ LinearNoSubsampling,
20
+ EmbedinigNoSubsampling,
21
+ Conv1dSubsampling2,
22
+ Conv2dSubsampling4,
23
+ Conv2dSubsampling6,
24
+ Conv2dSubsampling8,
25
+ )
26
+ from cosyvoice.transformer.embedding import (PositionalEncoding,
27
+ RelPositionalEncoding,
28
+ WhisperPositionalEncoding,
29
+ LearnablePositionalEncoding,
30
+ NoPositionalEncoding)
31
+ from cosyvoice.transformer.attention import (MultiHeadedAttention,
32
+ RelPositionMultiHeadedAttention)
33
+ from cosyvoice.transformer.embedding import EspnetRelPositionalEncoding
34
+ from cosyvoice.transformer.subsampling import LegacyLinearNoSubsampling
35
+ from cosyvoice.llm.llm import TransformerLM, Qwen2LM
36
+ from cosyvoice.flow.flow import MaskedDiffWithXvec, CausalMaskedDiffWithXvec
37
+ from cosyvoice.hifigan.generator import HiFTGenerator
38
+ from cosyvoice.cli.model import CosyVoiceModel, CosyVoice2Model
39
+
40
+
41
+ COSYVOICE_ACTIVATION_CLASSES = {
42
+ "hardtanh": torch.nn.Hardtanh,
43
+ "tanh": torch.nn.Tanh,
44
+ "relu": torch.nn.ReLU,
45
+ "selu": torch.nn.SELU,
46
+ "swish": getattr(torch.nn, "SiLU", Swish),
47
+ "gelu": torch.nn.GELU,
48
+ }
49
+
50
+ COSYVOICE_SUBSAMPLE_CLASSES = {
51
+ "linear": LinearNoSubsampling,
52
+ "linear_legacy": LegacyLinearNoSubsampling,
53
+ "embed": EmbedinigNoSubsampling,
54
+ "conv1d2": Conv1dSubsampling2,
55
+ "conv2d": Conv2dSubsampling4,
56
+ "conv2d6": Conv2dSubsampling6,
57
+ "conv2d8": Conv2dSubsampling8,
58
+ 'paraformer_dummy': torch.nn.Identity
59
+ }
60
+
61
+ COSYVOICE_EMB_CLASSES = {
62
+ "embed": PositionalEncoding,
63
+ "abs_pos": PositionalEncoding,
64
+ "rel_pos": RelPositionalEncoding,
65
+ "rel_pos_espnet": EspnetRelPositionalEncoding,
66
+ "no_pos": NoPositionalEncoding,
67
+ "abs_pos_whisper": WhisperPositionalEncoding,
68
+ "embed_learnable_pe": LearnablePositionalEncoding,
69
+ }
70
+
71
+ COSYVOICE_ATTENTION_CLASSES = {
72
+ "selfattn": MultiHeadedAttention,
73
+ "rel_selfattn": RelPositionMultiHeadedAttention,
74
+ }
75
+
76
+
77
+ def get_model_type(configs):
78
+ # NOTE CosyVoice2Model inherits CosyVoiceModel
79
+ if isinstance(configs['llm'], TransformerLM) and isinstance(configs['flow'], MaskedDiffWithXvec) and isinstance(configs['hift'], HiFTGenerator):
80
+ return CosyVoiceModel
81
+ if isinstance(configs['llm'], Qwen2LM) and isinstance(configs['flow'], CausalMaskedDiffWithXvec) and isinstance(configs['hift'], HiFTGenerator):
82
+ return CosyVoice2Model
83
+ raise TypeError('No valid model type found!')
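Editor's note (not part of the commit): a minimal usage sketch for the registries above, assuming the cosyvoice package and its dependencies from this commit are importable — YAML configs name components by string, and these dicts resolve the strings to classes.

import torch
from cosyvoice.utils.class_utils import COSYVOICE_ACTIVATION_CLASSES

act = COSYVOICE_ACTIVATION_CLASSES['swish']()   # resolves to torch.nn.SiLU on recent torch
print(act(torch.tensor([1.0])))                 # tensor([0.7311])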
cosyvoice/utils/common.py ADDED
@@ -0,0 +1,166 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Unility functions for Transformer."""
17
+
18
+ import random
19
+ from typing import List
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+ IGNORE_ID = -1
25
+
26
+
27
+ def pad_list(xs: List[torch.Tensor], pad_value: int):
28
+ """Perform padding for the list of tensors.
29
+
30
+ Args:
31
+ xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
32
+ pad_value (float): Value for padding.
33
+
34
+ Returns:
35
+ Tensor: Padded tensor (B, Tmax, `*`).
36
+
37
+ Examples:
38
+ >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
39
+ >>> x
40
+ [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
41
+ >>> pad_list(x, 0)
42
+ tensor([[1., 1., 1., 1.],
43
+ [1., 1., 0., 0.],
44
+ [1., 0., 0., 0.]])
45
+
46
+ """
47
+ max_len = max([len(item) for item in xs])
48
+ batchs = len(xs)
49
+ ndim = xs[0].ndim
50
+ if ndim == 1:
51
+ pad_res = torch.zeros(batchs,
52
+ max_len,
53
+ dtype=xs[0].dtype,
54
+ device=xs[0].device)
55
+ elif ndim == 2:
56
+ pad_res = torch.zeros(batchs,
57
+ max_len,
58
+ xs[0].shape[1],
59
+ dtype=xs[0].dtype,
60
+ device=xs[0].device)
61
+ elif ndim == 3:
62
+ pad_res = torch.zeros(batchs,
63
+ max_len,
64
+ xs[0].shape[1],
65
+ xs[0].shape[2],
66
+ dtype=xs[0].dtype,
67
+ device=xs[0].device)
68
+ else:
69
+ raise ValueError(f"Unsupported ndim: {ndim}")
70
+ pad_res.fill_(pad_value)
71
+ for i in range(batchs):
72
+ pad_res[i, :len(xs[i])] = xs[i]
73
+ return pad_res
74
+
75
+
76
+ def th_accuracy(pad_outputs: torch.Tensor, pad_targets: torch.Tensor,
77
+ ignore_label: int) -> torch.Tensor:
78
+ """Calculate accuracy.
79
+
80
+ Args:
81
+ pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
82
+ pad_targets (LongTensor): Target label tensors (B, Lmax).
83
+ ignore_label (int): Ignore label id.
84
+
85
+ Returns:
86
+ torch.Tensor: Accuracy value (0.0 - 1.0).
87
+
88
+ """
89
+ pad_pred = pad_outputs.view(pad_targets.size(0), pad_targets.size(1),
90
+ pad_outputs.size(1)).argmax(2)
91
+ mask = pad_targets != ignore_label
92
+ numerator = torch.sum(
93
+ pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
94
+ denominator = torch.sum(mask)
95
+ return (numerator / denominator).detach()
96
+
97
+
98
+ def get_padding(kernel_size, dilation=1):
99
+ return int((kernel_size * dilation - dilation) / 2)
100
+
101
+
102
+ def init_weights(m, mean=0.0, std=0.01):
103
+ classname = m.__class__.__name__
104
+ if classname.find("Conv") != -1:
105
+ m.weight.data.normal_(mean, std)
106
+
107
+
108
+ # Repetition Aware Sampling in VALL-E 2
109
+ def ras_sampling(weighted_scores, decoded_tokens, sampling, top_p=0.8, top_k=25, win_size=10, tau_r=0.1):
110
+ top_ids = nucleus_sampling(weighted_scores, top_p=top_p, top_k=top_k)
111
+ rep_num = (torch.tensor(decoded_tokens[-win_size:]).to(weighted_scores.device) == top_ids).sum().item()
112
+ if rep_num >= win_size * tau_r:
113
+ top_ids = random_sampling(weighted_scores, decoded_tokens, sampling)
114
+ return top_ids
115
+
116
+
117
+ def nucleus_sampling(weighted_scores, top_p=0.8, top_k=25):
118
+ prob, indices = [], []
119
+ cum_prob = 0.0
120
+ sorted_value, sorted_idx = weighted_scores.softmax(dim=0).sort(descending=True, stable=True)
121
+ for i in range(len(sorted_idx)):
122
+ # sample from both the top-p probability mass and at most top_k candidates.
123
+ if cum_prob < top_p and len(prob) < top_k:
124
+ cum_prob += sorted_value[i]
125
+ prob.append(sorted_value[i])
126
+ indices.append(sorted_idx[i])
127
+ else:
128
+ break
129
+ prob = torch.tensor(prob).to(weighted_scores)
130
+ indices = torch.tensor(indices, dtype=torch.long).to(weighted_scores.device)
131
+ top_ids = indices[prob.multinomial(1, replacement=True)]
132
+ return top_ids
133
+
134
+
135
+ def random_sampling(weighted_scores, decoded_tokens, sampling):
136
+ top_ids = weighted_scores.softmax(dim=0).multinomial(1, replacement=True)
137
+ return top_ids
138
+
139
+
140
+ def fade_in_out(fade_in_mel, fade_out_mel, window):
141
+ device = fade_in_mel.device
142
+ fade_in_mel, fade_out_mel = fade_in_mel.cpu(), fade_out_mel.cpu()
143
+ mel_overlap_len = int(window.shape[0] / 2)
144
+ if fade_in_mel.device == torch.device('cpu'):
145
+ fade_in_mel = fade_in_mel.clone()
146
+ fade_in_mel[..., :mel_overlap_len] = fade_in_mel[..., :mel_overlap_len] * window[:mel_overlap_len] + \
147
+ fade_out_mel[..., -mel_overlap_len:] * window[mel_overlap_len:]
148
+ return fade_in_mel.to(device)
149
+
150
+
151
+ def set_all_random_seed(seed):
152
+ random.seed(seed)
153
+ np.random.seed(seed)
154
+ torch.manual_seed(seed)
155
+ torch.cuda.manual_seed_all(seed)
156
+
157
+
158
+ def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
159
+ assert mask.dtype == torch.bool
160
+ assert dtype in [torch.float32, torch.bfloat16, torch.float16]
161
+ mask = mask.to(dtype)
162
+ # attention mask bias
163
+ # NOTE(Mddct): torch.finfo jit issues
164
+ # chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
165
+ mask = (1.0 - mask) * -1.0e+10
166
+ return mask
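A minimal usage sketch of the padding and accuracy helpers above (illustrative only; the module path cosyvoice.utils.common is an assumption here, and the tensors are toy data):

import torch
from cosyvoice.utils.common import pad_list, th_accuracy, IGNORE_ID  # assumed import path

# Three variable-length label sequences padded into one (B, Tmax) batch.
labels = [torch.tensor([5, 7, 2]), torch.tensor([3, 1]), torch.tensor([9])]
padded = pad_list(labels, IGNORE_ID)          # shape (3, 3), padded with -1

# Toy logits over a 10-class vocabulary, flattened to (B * Tmax, D) as th_accuracy expects.
logits = torch.randn(padded.numel(), 10)
acc = th_accuracy(logits, padded, ignore_label=IGNORE_ID)
print(acc)                                    # token accuracy over non-padding positions only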
cosyvoice/utils/executor.py ADDED
@@ -0,0 +1,172 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import logging
17
+ from contextlib import nullcontext
18
+ import os
19
+
20
+ import torch
21
+ import torch.distributed as dist
22
+
23
+ from cosyvoice.utils.train_utils import update_parameter_and_lr, log_per_step, log_per_save, batch_forward, batch_backward, save_model, cosyvoice_join
24
+
25
+
26
+ class Executor:
27
+
28
+ def __init__(self, gan: bool = False):
29
+ self.gan = gan
30
+ self.step = 0
31
+ self.epoch = 0
32
+ self.rank = int(os.environ.get('RANK', 0))
33
+ self.device = torch.device('cuda:{}'.format(self.rank))
34
+
35
+ def train_one_epoc(self, model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, scaler, group_join):
36
+ ''' Train one epoch
37
+ '''
38
+
39
+ lr = optimizer.param_groups[0]['lr']
40
+ logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
41
+ logging.info('using accumulate grad, new batch size is {} times'
42
+ ' larger than before'.format(info_dict['accum_grad']))
43
+ # A context manager to be used in conjunction with an instance of
44
+ # torch.nn.parallel.DistributedDataParallel to be able to train
45
+ # with uneven inputs across participating processes.
46
+ model.train()
47
+ model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
48
+ with model_context():
49
+ for batch_idx, batch_dict in enumerate(train_data_loader):
50
+ info_dict["tag"] = "TRAIN"
51
+ info_dict["step"] = self.step
52
+ info_dict["epoch"] = self.epoch
53
+ info_dict["batch_idx"] = batch_idx
54
+ if cosyvoice_join(group_join, info_dict):
55
+ break
56
+
57
+ # Disable gradient synchronizations across DDP processes.
58
+ # Within this context, gradients will be accumulated on module
59
+ # variables, which will later be synchronized.
60
+ if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
61
+ context = model.no_sync
62
+ # Used for single gpu training and DDP gradient synchronization
63
+ # processes.
64
+ else:
65
+ context = nullcontext
66
+
67
+ with context():
68
+ info_dict = batch_forward(model, batch_dict, scaler, info_dict)
69
+ info_dict = batch_backward(model, scaler, info_dict)
70
+
71
+ info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
72
+ log_per_step(writer, info_dict)
73
+ # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
74
+ if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and \
75
+ (batch_idx + 1) % info_dict["accum_grad"] == 0:
76
+ dist.barrier()
77
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
78
+ model.train()
79
+ if (batch_idx + 1) % info_dict["accum_grad"] == 0:
80
+ self.step += 1
81
+ dist.barrier()
82
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)
83
+
84
+ def train_one_epoc_gan(self, model, optimizer, scheduler, optimizer_d, scheduler_d, train_data_loader, cv_data_loader,
85
+ writer, info_dict, scaler, group_join):
86
+ ''' Train one epoch
87
+ '''
88
+
89
+ lr = optimizer.param_groups[0]['lr']
90
+ logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
91
+ logging.info('using accumulate grad, new batch size is {} times'
92
+ ' larger than before'.format(info_dict['accum_grad']))
93
+ # A context manager to be used in conjunction with an instance of
94
+ # torch.nn.parallel.DistributedDataParallel to be able to train
95
+ # with uneven inputs across participating processes.
96
+ model.train()
97
+ model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
98
+ with model_context():
99
+ for batch_idx, batch_dict in enumerate(train_data_loader):
100
+ info_dict["tag"] = "TRAIN"
101
+ info_dict["step"] = self.step
102
+ info_dict["epoch"] = self.epoch
103
+ info_dict["batch_idx"] = batch_idx
104
+ if cosyvoice_join(group_join, info_dict):
105
+ break
106
+
107
+ # Disable gradient synchronizations across DDP processes.
108
+ # Within this context, gradients will be accumulated on module
109
+ # variables, which will later be synchronized.
110
+ if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
111
+ context = model.no_sync
112
+ # Used for single gpu training and DDP gradient synchronization
113
+ # processes.
114
+ else:
115
+ context = nullcontext
116
+
117
+ with context():
118
+ batch_dict['turn'] = 'discriminator'
119
+ info_dict = batch_forward(model, batch_dict, scaler, info_dict)
120
+ info_dict = batch_backward(model, scaler, info_dict)
121
+ info_dict = update_parameter_and_lr(model, optimizer_d, scheduler_d, scaler, info_dict)
122
+ optimizer.zero_grad()
123
+ log_per_step(writer, info_dict)
124
+ with context():
125
+ batch_dict['turn'] = 'generator'
126
+ info_dict = batch_forward(model, batch_dict, scaler, info_dict)
127
+ info_dict = batch_backward(model, scaler, info_dict)
128
+ info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
129
+ optimizer_d.zero_grad()
130
+ log_per_step(writer, info_dict)
131
+ # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
132
+ if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and \
133
+ (batch_idx + 1) % info_dict["accum_grad"] == 0:
134
+ dist.barrier()
135
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
136
+ model.train()
137
+ if (batch_idx + 1) % info_dict["accum_grad"] == 0:
138
+ self.step += 1
139
+ dist.barrier()
140
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)
141
+
142
+ @torch.inference_mode()
143
+ def cv(self, model, cv_data_loader, writer, info_dict, on_batch_end=True):
144
+ ''' Cross validation on the cv dataset
145
+ '''
146
+ logging.info('Epoch {} Step {} on_batch_end {} CV rank {}'.format(self.epoch, self.step + 1, on_batch_end, self.rank))
147
+ model.eval()
148
+ total_num_utts, total_loss_dict = 0, {} # avoid division by 0
149
+ for batch_idx, batch_dict in enumerate(cv_data_loader):
150
+ info_dict["tag"] = "CV"
151
+ info_dict["step"] = self.step
152
+ info_dict["epoch"] = self.epoch
153
+ info_dict["batch_idx"] = batch_idx
154
+
155
+ num_utts = len(batch_dict["utts"])
156
+ total_num_utts += num_utts
157
+
158
+ if self.gan is True:
159
+ batch_dict['turn'] = 'generator'
160
+ info_dict = batch_forward(model, batch_dict, None, info_dict)
161
+
162
+ for k, v in info_dict['loss_dict'].items():
163
+ if k not in total_loss_dict:
164
+ total_loss_dict[k] = []
165
+ total_loss_dict[k].append(v.item() * num_utts)
166
+ log_per_step(None, info_dict)
167
+ for k, v in total_loss_dict.items():
168
+ total_loss_dict[k] = sum(v) / total_num_utts
169
+ info_dict['loss_dict'] = total_loss_dict
170
+ log_per_save(writer, info_dict)
171
+ model_name = 'epoch_{}_whole'.format(self.epoch) if on_batch_end else 'epoch_{}_step_{}'.format(self.epoch, self.step + 1)
172
+ save_model(model, model_name, info_dict)
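The gradient-accumulation / no_sync pattern used by train_one_epoc above, reduced to a self-contained single-process sketch (illustrative only; in the real Executor the context is model.no_sync under DDP and parameter updates go through update_parameter_and_lr):

from contextlib import nullcontext
import torch
import torch.nn.functional as F

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum_grad = 4

for batch_idx in range(16):
    x, y = torch.randn(2, 8), torch.randn(2, 1)
    # Under DDP, intermediate micro-batches would run inside model.no_sync so gradients
    # are only all-reduced once per accum_grad steps; nullcontext stands in here.
    context = nullcontext
    with context():
        loss = F.mse_loss(model(x), y) / accum_grad
        loss.backward()
    if (batch_idx + 1) % accum_grad == 0:   # optimizer step only on accumulation boundaries
        optimizer.step()
        optimizer.zero_grad()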
cosyvoice/utils/frontend_utils.py ADDED
@@ -0,0 +1,136 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ import regex
17
+ chinese_char_pattern = re.compile(r'[\u4e00-\u9fff]+')
18
+
19
+
20
+ # whether contain chinese character
21
+ def contains_chinese(text):
22
+ return bool(chinese_char_pattern.search(text))
23
+
24
+
25
+ # replace special symbol
26
+ def replace_corner_mark(text):
27
+ text = text.replace('²', '平方')
28
+ text = text.replace('³', '立方')
29
+ return text
30
+
31
+
32
+ # remove meaningless symbol
33
+ def remove_bracket(text):
34
+ text = text.replace('(', '').replace(')', '')
35
+ text = text.replace('【', '').replace('】', '')
36
+ text = text.replace('`', '').replace('`', '')
37
+ text = text.replace("——", " ")
38
+ return text
39
+
40
+
41
+ # spell Arabic numerals
42
+ def spell_out_number(text: str, inflect_parser):
43
+ new_text = []
44
+ st = None
45
+ for i, c in enumerate(text):
46
+ if not c.isdigit():
47
+ if st is not None:
48
+ num_str = inflect_parser.number_to_words(text[st: i])
49
+ new_text.append(num_str)
50
+ st = None
51
+ new_text.append(c)
52
+ else:
53
+ if st is None:
54
+ st = i
55
+ if st is not None and st < len(text):
56
+ num_str = inflect_parser.number_to_words(text[st:])
57
+ new_text.append(num_str)
58
+ return ''.join(new_text)
59
+
60
+
61
+ # split paragraph logic:
63
+ # 1. per sentence max len token_max_n, min len token_min_n; merge if the last sentence is shorter than merge_len
64
+ # 2. calculate sentence length according to lang
65
+ # 3. split sentence according to punctuation
65
+ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=60, merge_len=20, comma_split=False):
66
+ def calc_utt_length(_text: str):
67
+ if lang == "zh":
68
+ return len(_text)
69
+ else:
70
+ return len(tokenize(_text))
71
+
72
+ def should_merge(_text: str):
73
+ if lang == "zh":
74
+ return len(_text) < merge_len
75
+ else:
76
+ return len(tokenize(_text)) < merge_len
77
+
78
+ if lang == "zh":
79
+ pounc = ['。', '?', '!', ';', ':', '、', '.', '?', '!', ';']
80
+ else:
81
+ pounc = ['.', '?', '!', ';', ':']
82
+ if comma_split:
83
+ pounc.extend([',', ','])
84
+
85
+ if text[-1] not in pounc:
86
+ if lang == "zh":
87
+ text += "。"
88
+ else:
89
+ text += "."
90
+
91
+ st = 0
92
+ utts = []
93
+ for i, c in enumerate(text):
94
+ if c in pounc:
95
+ if len(text[st: i]) > 0:
96
+ utts.append(text[st: i] + c)
97
+ if i + 1 < len(text) and text[i + 1] in ['"', '”']:
98
+ tmp = utts.pop(-1)
99
+ utts.append(tmp + text[i + 1])
100
+ st = i + 2
101
+ else:
102
+ st = i + 1
103
+
104
+ final_utts = []
105
+ cur_utt = ""
106
+ for utt in utts:
107
+ if calc_utt_length(cur_utt + utt) > token_max_n and calc_utt_length(cur_utt) > token_min_n:
108
+ final_utts.append(cur_utt)
109
+ cur_utt = ""
110
+ cur_utt = cur_utt + utt
111
+ if len(cur_utt) > 0:
112
+ if should_merge(cur_utt) and len(final_utts) != 0:
113
+ final_utts[-1] = final_utts[-1] + cur_utt
114
+ else:
115
+ final_utts.append(cur_utt)
116
+
117
+ return final_utts
118
+
119
+
120
+ # remove blank between chinese character
121
+ def replace_blank(text: str):
122
+ out_str = []
123
+ for i, c in enumerate(text):
124
+ if c == " ":
125
+ if ((text[i + 1].isascii() and text[i + 1] != " ") and
126
+ (text[i - 1].isascii() and text[i - 1] != " ")):
127
+ out_str.append(c)
128
+ else:
129
+ out_str.append(c)
130
+ return "".join(out_str)
131
+
132
+
133
+ def is_only_punctuation(text):
134
+ # Regular expression: Match strings that consist only of punctuation marks or are empty.
135
+ punctuation_pattern = r'^[\p{P}\p{S}]*$'
136
+ return bool(regex.fullmatch(punctuation_pattern, text))
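A small sketch of split_paragraph in English mode (illustrative only; the thresholds are toy values and a whitespace tokenizer stands in for the real one):

from cosyvoice.utils.frontend_utils import split_paragraph, contains_chinese

text = "This is the first sentence. Here is a second one! And a very short tail"
pieces = split_paragraph(text, tokenize=lambda s: s.split(), lang="en",
                         token_max_n=10, token_min_n=4, merge_len=2)
print(pieces)                   # sentence chunks, each ending with punctuation
print(contains_chinese(text))   # False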
cosyvoice/utils/mask.py ADDED
@@ -0,0 +1,267 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import torch
18
+ from cosyvoice.utils.file_utils import logging
19
+ '''
20
+ def subsequent_mask(
21
+ size: int,
22
+ device: torch.device = torch.device("cpu"),
23
+ ) -> torch.Tensor:
24
+ """Create mask for subsequent steps (size, size).
25
+
26
+ This mask is used only in decoder which works in an auto-regressive mode.
27
+ This means the current step could only do attention with its left steps.
28
+
29
+ In encoder, fully attention is used when streaming is not necessary and
30
+ the sequence is not long. In this case, no attention mask is needed.
31
+
32
+ When streaming is need, chunk-based attention is used in encoder. See
33
+ subsequent_chunk_mask for the chunk-based attention mask.
34
+
35
+ Args:
36
+ size (int): size of mask
37
+ str device (str): "cpu" or "cuda" or torch.Tensor.device
38
+ dtype (torch.device): result dtype
39
+
40
+ Returns:
41
+ torch.Tensor: mask
42
+
43
+ Examples:
44
+ >>> subsequent_mask(3)
45
+ [[1, 0, 0],
46
+ [1, 1, 0],
47
+ [1, 1, 1]]
48
+ """
49
+ ret = torch.ones(size, size, device=device, dtype=torch.bool)
50
+ return torch.tril(ret)
51
+ '''
52
+
53
+
54
+ def subsequent_mask(
55
+ size: int,
56
+ device: torch.device = torch.device("cpu"),
57
+ ) -> torch.Tensor:
58
+ """Create mask for subsequent steps (size, size).
59
+
60
+ This mask is used only in decoder which works in an auto-regressive mode.
61
+ This means the current step could only do attention with its left steps.
62
+
63
+ In encoder, full attention is used when streaming is not necessary and
64
+ the sequence is not long. In this case, no attention mask is needed.
65
+
66
+ When streaming is needed, chunk-based attention is used in encoder. See
67
+ subsequent_chunk_mask for the chunk-based attention mask.
68
+
69
+ Args:
70
+ size (int): size of mask
71
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
72
+ dtype (torch.device): result dtype
73
+
74
+ Returns:
75
+ torch.Tensor: mask
76
+
77
+ Examples:
78
+ >>> subsequent_mask(3)
79
+ [[1, 0, 0],
80
+ [1, 1, 0],
81
+ [1, 1, 1]]
82
+ """
83
+ arange = torch.arange(size, device=device)
84
+ mask = arange.expand(size, size)
85
+ arange = arange.unsqueeze(-1)
86
+ mask = mask <= arange
87
+ return mask
88
+
89
+
90
+ def subsequent_chunk_mask_deprecated(
91
+ size: int,
92
+ chunk_size: int,
93
+ num_left_chunks: int = -1,
94
+ device: torch.device = torch.device("cpu"),
95
+ ) -> torch.Tensor:
96
+ """Create mask for subsequent steps (size, size) with chunk size,
97
+ this is for streaming encoder
98
+
99
+ Args:
100
+ size (int): size of mask
101
+ chunk_size (int): size of chunk
102
+ num_left_chunks (int): number of left chunks
103
+ <0: use full chunk
104
+ >=0: use num_left_chunks
105
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
106
+
107
+ Returns:
108
+ torch.Tensor: mask
109
+
110
+ Examples:
111
+ >>> subsequent_chunk_mask(4, 2)
112
+ [[1, 1, 0, 0],
113
+ [1, 1, 0, 0],
114
+ [1, 1, 1, 1],
115
+ [1, 1, 1, 1]]
116
+ """
117
+ ret = torch.zeros(size, size, device=device, dtype=torch.bool)
118
+ for i in range(size):
119
+ if num_left_chunks < 0:
120
+ start = 0
121
+ else:
122
+ start = max((i // chunk_size - num_left_chunks) * chunk_size, 0)
123
+ ending = min((i // chunk_size + 1) * chunk_size, size)
124
+ ret[i, start:ending] = True
125
+ return ret
126
+
127
+
128
+ def subsequent_chunk_mask(
129
+ size: int,
130
+ chunk_size: int,
131
+ num_left_chunks: int = -1,
132
+ device: torch.device = torch.device("cpu"),
133
+ ) -> torch.Tensor:
134
+ """Create mask for subsequent steps (size, size) with chunk size,
135
+ this is for streaming encoder
136
+
137
+ Args:
138
+ size (int): size of mask
139
+ chunk_size (int): size of chunk
140
+ num_left_chunks (int): number of left chunks
141
+ <0: use full chunk
142
+ >=0: use num_left_chunks
143
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
144
+
145
+ Returns:
146
+ torch.Tensor: mask
147
+
148
+ Examples:
149
+ >>> subsequent_chunk_mask(4, 2)
150
+ [[1, 1, 0, 0],
151
+ [1, 1, 0, 0],
152
+ [1, 1, 1, 1],
153
+ [1, 1, 1, 1]]
154
+ """
155
+ # NOTE this modified implementation meets onnx export requirements, but it doesn't support num_left_chunks
156
+ # actually this is not needed after we have inference cache implemented, will remove it later
157
+ pos_idx = torch.arange(size, device=device)
158
+ block_value = (torch.div(pos_idx, chunk_size, rounding_mode='trunc') + 1) * chunk_size
159
+ ret = pos_idx.unsqueeze(0) < block_value.unsqueeze(1)
160
+ return ret
161
+
162
+
163
+ def add_optional_chunk_mask(xs: torch.Tensor,
164
+ masks: torch.Tensor,
165
+ use_dynamic_chunk: bool,
166
+ use_dynamic_left_chunk: bool,
167
+ decoding_chunk_size: int,
168
+ static_chunk_size: int,
169
+ num_decoding_left_chunks: int,
170
+ enable_full_context: bool = True):
171
+ """ Apply optional mask for encoder.
172
+
173
+ Args:
174
+ xs (torch.Tensor): padded input, (B, L, D), L for max length
175
+ mask (torch.Tensor): mask for xs, (B, 1, L)
176
+ use_dynamic_chunk (bool): whether to use dynamic chunk or not
177
+ use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
178
+ training.
179
+ decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
180
+ 0: default for training, use random dynamic chunk.
181
+ <0: for decoding, use full chunk.
182
+ >0: for decoding, use fixed chunk size as set.
183
+ static_chunk_size (int): chunk size for static chunk training/decoding
184
+ if it's greater than 0, if use_dynamic_chunk is true,
185
+ this parameter will be ignored
186
+ num_decoding_left_chunks: number of left chunks, this is for decoding,
187
+ the chunk size is decoding_chunk_size.
188
+ >=0: use num_decoding_left_chunks
189
+ <0: use all left chunks
190
+ enable_full_context (bool):
191
+ True: chunk size is either [1, 25] or full context(max_len)
192
+ False: chunk size ~ U[1, 25]
193
+
194
+ Returns:
195
+ torch.Tensor: chunk mask of the input xs.
196
+ """
197
+ # Whether to use chunk mask or not
198
+ if use_dynamic_chunk:
199
+ max_len = xs.size(1)
200
+ if decoding_chunk_size < 0:
201
+ chunk_size = max_len
202
+ num_left_chunks = -1
203
+ elif decoding_chunk_size > 0:
204
+ chunk_size = decoding_chunk_size
205
+ num_left_chunks = num_decoding_left_chunks
206
+ else:
207
+ # chunk size is either [1, 25] or full context(max_len).
208
+ # Since we use 4 times subsampling and allow up to 1s(100 frames)
209
+ # delay, the maximum frame is 100 / 4 = 25.
210
+ chunk_size = torch.randint(1, max_len, (1, )).item()
211
+ num_left_chunks = -1
212
+ if chunk_size > max_len // 2 and enable_full_context:
213
+ chunk_size = max_len
214
+ else:
215
+ chunk_size = chunk_size % 25 + 1
216
+ if use_dynamic_left_chunk:
217
+ max_left_chunks = (max_len - 1) // chunk_size
218
+ num_left_chunks = torch.randint(0, max_left_chunks,
219
+ (1, )).item()
220
+ chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
221
+ num_left_chunks,
222
+ xs.device) # (L, L)
223
+ chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)
224
+ chunk_masks = masks & chunk_masks # (B, L, L)
225
+ elif static_chunk_size > 0:
226
+ num_left_chunks = num_decoding_left_chunks
227
+ chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
228
+ num_left_chunks,
229
+ xs.device) # (L, L)
230
+ chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)
231
+ chunk_masks = masks & chunk_masks # (B, L, L)
232
+ else:
233
+ chunk_masks = masks
234
+ assert chunk_masks.dtype == torch.bool
235
+ if (chunk_masks.sum(dim=-1) == 0).sum().item() != 0:
236
+ logging.warning('get chunk_masks all false at some timestep, force set to true, make sure they are masked in future computation!')
237
+ chunk_masks[chunk_masks.sum(dim=-1) == 0] = True
238
+ return chunk_masks
239
+
240
+
241
+ def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
242
+ """Make mask tensor containing indices of padded part.
243
+
244
+ See description of make_non_pad_mask.
245
+
246
+ Args:
247
+ lengths (torch.Tensor): Batch of lengths (B,).
248
+ Returns:
249
+ torch.Tensor: Mask tensor containing indices of padded part.
250
+
251
+ Examples:
252
+ >>> lengths = [5, 3, 2]
253
+ >>> make_pad_mask(lengths)
254
+ masks = [[0, 0, 0, 0 ,0],
255
+ [0, 0, 0, 1, 1],
256
+ [0, 0, 1, 1, 1]]
257
+ """
258
+ batch_size = lengths.size(0)
259
+ max_len = max_len if max_len > 0 else lengths.max().item()
260
+ seq_range = torch.arange(0,
261
+ max_len,
262
+ dtype=torch.int64,
263
+ device=lengths.device)
264
+ seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
265
+ seq_length_expand = lengths.unsqueeze(-1)
266
+ mask = seq_range_expand >= seq_length_expand
267
+ return mask
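A quick sketch of the two mask helpers most callers need (illustrative only; shapes and lengths are toy values):

import torch
from cosyvoice.utils.mask import make_pad_mask, subsequent_chunk_mask

lengths = torch.tensor([5, 3, 2])
pad_mask = make_pad_mask(lengths)          # (3, 5) bool, True at padded positions
attn_mask = ~pad_mask.unsqueeze(1)         # (B, 1, T), True where attention is allowed
chunk_mask = subsequent_chunk_mask(4, 2)   # (4, 4) bool chunk mask for streaming
print(pad_mask.int())
print(chunk_mask.int())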
cosyvoice/utils/scheduler.py ADDED
@@ -0,0 +1,738 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2022 Ximalaya Inc (Yuguang Yang)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ # Modified from ESPnet(https://github.com/espnet/espnet)
17
+ # NeMo(https://github.com/NVIDIA/NeMo)
18
+
19
+ from typing import Union
20
+
21
+ import math
22
+ import warnings
23
+ import torch
24
+ from torch.optim.lr_scheduler import _LRScheduler
25
+
26
+
27
+ class WarmupLR(_LRScheduler):
28
+ """The WarmupLR scheduler
29
+
30
+ This scheduler is almost same as NoamLR Scheduler except for following
31
+ difference:
32
+
33
+ NoamLR:
34
+ lr = optimizer.lr * model_size ** -0.5
35
+ * min(step ** -0.5, step * warmup_step ** -1.5)
36
+ WarmupLR:
37
+ lr = optimizer.lr * warmup_step ** 0.5
38
+ * min(step ** -0.5, step * warmup_step ** -1.5)
39
+
40
+ Note that the maximum lr equals to optimizer.lr in this scheduler.
41
+
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ optimizer: torch.optim.Optimizer,
47
+ warmup_steps: Union[int, float] = 25000,
48
+ last_epoch: int = -1,
49
+ ):
50
+ self.warmup_steps = warmup_steps
51
+
52
+ # __init__() must be invoked before setting field
53
+ # because step() is also invoked in __init__()
54
+ super().__init__(optimizer, last_epoch)
55
+
56
+ def __repr__(self):
57
+ return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"
58
+
59
+ def get_lr(self):
60
+ step_num = self.last_epoch + 1
61
+ if self.warmup_steps == 0:
62
+ return [lr * step_num**-0.5 for lr in self.base_lrs]
63
+ else:
64
+ return [
65
+ lr * self.warmup_steps**0.5 *
66
+ min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
67
+ for lr in self.base_lrs
68
+ ]
69
+
70
+ def set_step(self, step: int):
71
+ self.last_epoch = step
72
+
73
+
74
+ class WarmupPolicy(_LRScheduler):
75
+ """Adds warmup kwargs and warmup logic to lr policy.
76
+ All arguments should be passed as kwargs for clarity,
77
+ Args:
78
+ warmup_steps: Number of training steps in warmup stage
79
+ warmup_ratio: Ratio of warmup steps to total steps
80
+ max_steps: Total number of steps while training or `None` for
81
+ infinite training
82
+ """
83
+
84
+ def __init__(self,
85
+ optimizer,
86
+ *,
87
+ warmup_steps=None,
88
+ warmup_ratio=None,
89
+ max_steps=None,
90
+ min_lr=0.0,
91
+ last_epoch=-1):
92
+ assert not (warmup_steps is not None and warmup_ratio is not None),\
93
+ "Either use particular number of step or ratio"
94
+ assert warmup_ratio is None or max_steps is not None, \
95
+ "If there is a ratio, there should be a total steps"
96
+
97
+ # It is necessary to assign all attributes *before* __init__,
98
+ # as class is wrapped by an inner class.
99
+ self.max_steps = max_steps
100
+ if warmup_steps is not None:
101
+ self.warmup_steps = warmup_steps
102
+ elif warmup_ratio is not None:
103
+ self.warmup_steps = int(warmup_ratio * max_steps)
104
+ else:
105
+ self.warmup_steps = 0
106
+
107
+ self.min_lr = min_lr
108
+ super().__init__(optimizer, last_epoch)
109
+
110
+ def get_lr(self):
111
+ if not self._get_lr_called_within_step:
112
+ warnings.warn(
113
+ "To get the last learning rate computed "
114
+ "by the scheduler, please use `get_last_lr()`.",
115
+ UserWarning,
116
+ stacklevel=2)
117
+
118
+ step = self.last_epoch
119
+
120
+ if step <= self.warmup_steps and self.warmup_steps > 0:
121
+ return self._get_warmup_lr(step)
122
+
123
+ if step > self.max_steps:
124
+ return [self.min_lr for _ in self.base_lrs]
125
+
126
+ return self._get_lr(step)
127
+
128
+ def _get_warmup_lr(self, step):
129
+ lr_val = (step + 1) / (self.warmup_steps + 1)
130
+ return [initial_lr * lr_val for initial_lr in self.base_lrs]
131
+
132
+ def _get_lr(self, step):
133
+ """Simple const lr policy"""
134
+ return self.base_lrs
135
+
136
+
137
+ class SquareRootConstantPolicy(_LRScheduler):
138
+ """Adds warmup kwargs and warmup logic to lr policy.
139
+ All arguments should be passed as kwargs for clarity,
140
+ Args:
141
+ warmup_steps: Number of training steps in warmup stage
142
+ warmup_ratio: Ratio of warmup steps to total steps
143
+ max_steps: Total number of steps while training or `None` for
144
+ infinite training
145
+ """
146
+
147
+ def __init__(self,
148
+ optimizer,
149
+ *,
150
+ constant_steps=None,
151
+ constant_ratio=None,
152
+ max_steps=None,
153
+ min_lr=0.0,
154
+ last_epoch=-1):
155
+ assert not (constant_steps is not None
156
+ and constant_ratio is not None), \
157
+ "Either use particular number of step or ratio"
158
+ assert constant_ratio is None or max_steps is not None, \
159
+ "If there is a ratio, there should be a total steps"
160
+
161
+ # It is necessary to assign all attributes *before* __init__,
162
+ # as class is wrapped by an inner class.
163
+ self.max_steps = max_steps
164
+ if constant_steps is not None:
165
+ self.constant_steps = constant_steps
166
+ elif constant_ratio is not None:
167
+ self.constant_steps = int(constant_ratio * max_steps)
168
+ else:
169
+ self.constant_steps = 0
170
+
171
+ self.constant_lr = 1 / (constant_steps**0.5)
172
+ self.min_lr = min_lr
173
+ super().__init__(optimizer, last_epoch)
174
+
175
+ def get_lr(self):
176
+ if not self._get_lr_called_within_step:
177
+ warnings.warn(
178
+ "To get the last learning rate computed "
179
+ "by the scheduler, please use `get_last_lr()`.",
180
+ UserWarning,
181
+ stacklevel=2)
182
+
183
+ step = self.last_epoch
184
+
185
+ if step <= self.constant_steps:
186
+ return [self.constant_lr for _ in self.base_lrs]
187
+
188
+ if step > self.max_steps:
189
+ return [self.min_lr for _ in self.base_lrs]
190
+
191
+ return self._get_lr(step)
192
+
193
+ def _get_lr(self, step):
194
+ """Simple const lr policy"""
195
+ return self.base_lrs
196
+
197
+
198
+ class WarmupHoldPolicy(WarmupPolicy):
199
+ """Variant of WarmupPolicy which maintains high
200
+ learning rate for a defined number of steps.
201
+ All arguments should be passed as kwargs for clarity,
202
+ Args:
203
+ warmup_steps: Number of training steps in warmup stage
204
+ warmup_ratio: Ratio of warmup steps to total steps
205
+ hold_steps: Number of training steps to
206
+ hold the learning rate after warm up
207
+ hold_ratio: Ratio of hold steps to total steps
208
+ max_steps: Total number of steps while training or `None` for
209
+ infinite training
210
+ """
211
+
212
+ def __init__(
213
+ self,
214
+ optimizer,
215
+ *,
216
+ warmup_steps=None,
217
+ warmup_ratio=None,
218
+ hold_steps=None,
219
+ hold_ratio=None,
220
+ max_steps=None,
221
+ min_lr=0.0,
222
+ last_epoch=-1,
223
+ ):
224
+ assert not (hold_steps is not None and hold_ratio is not None), \
225
+ "Either use particular number of step or ratio"
226
+ assert hold_ratio is None or max_steps is not None, \
227
+ "If there is a ratio, there should be a total steps"
228
+
229
+ self.min_lr = min_lr
230
+ self._last_warmup_lr = 0.0
231
+
232
+ # Necessary to duplicate as class attributes are hidden in inner class
233
+ self.max_steps = max_steps
234
+ if warmup_steps is not None:
235
+ self.warmup_steps = warmup_steps
236
+ elif warmup_ratio is not None:
237
+ self.warmup_steps = int(warmup_ratio * max_steps)
238
+ else:
239
+ self.warmup_steps = 0
240
+
241
+ if hold_steps is not None:
242
+ self.hold_steps = hold_steps + self.warmup_steps
243
+ elif hold_ratio is not None:
244
+ self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
245
+ else:
246
+ self.hold_steps = 0
247
+
248
+ super().__init__(
249
+ optimizer,
250
+ warmup_steps=warmup_steps,
251
+ warmup_ratio=warmup_ratio,
252
+ max_steps=max_steps,
253
+ last_epoch=last_epoch,
254
+ min_lr=min_lr,
255
+ )
256
+
257
+ def get_lr(self):
258
+ if not self._get_lr_called_within_step:
259
+ warnings.warn(
260
+ "To get the last learning rate computed by the scheduler,"
261
+ " "
262
+ "please use `get_last_lr()`.",
263
+ UserWarning,
264
+ stacklevel=2)
265
+
266
+ step = self.last_epoch
267
+
268
+ # Warmup phase
269
+ if step <= self.warmup_steps and self.warmup_steps > 0:
270
+ return self._get_warmup_lr(step)
271
+
272
+ # Hold phase
273
+ if (step >= self.warmup_steps) and (step < self.hold_steps):
274
+ return self.base_lrs
275
+
276
+ if step > self.max_steps:
277
+ return [self.min_lr for _ in self.base_lrs]
278
+
279
+ return self._get_lr(step)
280
+
281
+
282
+ class WarmupAnnealHoldPolicy(_LRScheduler):
283
+ """Adds warmup kwargs and warmup logic to lr policy.
284
+ All arguments should be passed as kwargs for clarity,
285
+ Args:
286
+ warmup_steps: Number of training steps in warmup stage
287
+ warmup_ratio: Ratio of warmup steps to total steps
288
+ max_steps: Total number of steps while training or `None` for
289
+ infinite training
290
+ min_lr: Minimum lr to hold the learning rate after decay at.
291
+ constant_steps: Number of steps to keep lr constant at.
292
+ constant_ratio: Ratio of steps to keep lr constant.
293
+ """
294
+
295
+ def __init__(
296
+ self,
297
+ optimizer,
298
+ *,
299
+ warmup_steps=None,
300
+ warmup_ratio=None,
301
+ constant_steps=None,
302
+ constant_ratio=None,
303
+ max_steps=None,
304
+ min_lr=0.0,
305
+ last_epoch=-1,
306
+ ):
307
+ assert not (warmup_steps is not None
308
+ and warmup_ratio is not None), \
309
+ "Either use particular number of step or ratio"
310
+ assert not (constant_steps is not None
311
+ and constant_ratio is not None), \
312
+ "Either use constant_steps or constant_ratio"
313
+ assert warmup_ratio is None or max_steps is not None, \
314
+ "If there is a ratio, there should be a total steps"
315
+
316
+ # It is necessary to assign all attributes *before* __init__,
317
+ # as class is wrapped by an inner class.
318
+ self.max_steps = max_steps
319
+
320
+ if warmup_steps is not None:
321
+ self.warmup_steps = warmup_steps
322
+ elif warmup_ratio is not None:
323
+ self.warmup_steps = int(warmup_ratio * max_steps)
324
+ else:
325
+ self.warmup_steps = 0
326
+
327
+ if constant_steps is not None:
328
+ self.constant_steps = constant_steps
329
+ elif constant_ratio is not None:
330
+ self.constant_steps = int(constant_ratio * max_steps)
331
+ else:
332
+ self.constant_steps = 0
333
+
334
+ self.decay_steps = max_steps - (self.constant_steps +
335
+ self.warmup_steps)
336
+
337
+ self.min_lr = min_lr
338
+ super().__init__(optimizer, last_epoch)
339
+
340
+ def get_lr(self):
341
+ if not self._get_lr_called_within_step:
342
+ warnings.warn(
343
+ "To get the last learning rate computed "
344
+ "by the scheduler, please use `get_last_lr()`.",
345
+ UserWarning,
346
+ stacklevel=2)
347
+
348
+ step = self.last_epoch
349
+
350
+ # Warmup steps
351
+ if self.warmup_steps > 0 and step <= self.warmup_steps:
352
+ return self._get_warmup_lr(step)
353
+
354
+ # Constant steps after warmup and decay
355
+ if self.constant_steps > 0 and (
356
+ self.warmup_steps + self.decay_steps) < step <= self.max_steps:
357
+ return self._get_constant_lr(step)
358
+
359
+ # Min lr after max steps of updates
360
+ if step > self.max_steps:
361
+ return [self.min_lr for _ in self.base_lrs]
362
+
363
+ return self._get_lr(step)
364
+
365
+ def _get_warmup_lr(self, step):
366
+ lr_val = (step + 1) / (self.warmup_steps + 1)
367
+ return [initial_lr * lr_val for initial_lr in self.base_lrs]
368
+
369
+ def _get_constant_lr(self, step):
370
+ return [self.min_lr for _ in self.base_lrs]
371
+
372
+ def _get_lr(self, step):
373
+ """Simple const lr policy"""
374
+ return self.base_lrs
375
+
376
+
377
+ def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
378
+ mult = ((max_steps - step) / max_steps)**0.5
379
+ out_lr = initial_lr * mult
380
+ out_lr = max(out_lr, min_lr)
381
+ return out_lr
382
+
383
+
384
+ def _square_annealing(initial_lr, step, max_steps, min_lr):
385
+ mult = ((max_steps - step) / max_steps)**2
386
+ out_lr = initial_lr * mult
387
+ out_lr = max(out_lr, min_lr)
388
+ return out_lr
389
+
390
+
391
+ def _cosine_annealing(initial_lr, step, max_steps, min_lr):
392
+ mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
393
+ out_lr = (initial_lr - min_lr) * mult + min_lr
394
+ return out_lr
395
+
396
+
397
+ def _linear_warmup_with_cosine_annealing(max_lr, warmup_steps, step,
398
+ decay_steps, min_lr):
399
+ assert max_lr > min_lr
400
+ # Use linear warmup for the initial part.
401
+ if warmup_steps > 0 and step <= warmup_steps:
402
+ return max_lr * float(step) / float(warmup_steps)
403
+
404
+ # For any steps larger than `decay_steps`, use `min_lr`.
405
+ if step > warmup_steps + decay_steps:
406
+ return min_lr
407
+
408
+ # If we are done with the warmup period, use the decay style.
409
+ num_steps_ = step - warmup_steps
410
+ decay_steps_ = decay_steps
411
+ decay_ratio = float(num_steps_) / float(decay_steps_)
412
+ assert decay_ratio >= 0.0
413
+ assert decay_ratio <= 1.0
414
+ delta_lr = max_lr - min_lr
415
+
416
+ coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
417
+
418
+ return min_lr + coeff * delta_lr
419
+
420
+
421
+ def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
422
+ if cycle:
423
+ multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
424
+ decay_steps *= multiplier
425
+ else:
426
+ step = min(step, decay_steps)
427
+ p = step / decay_steps
428
+ lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
429
+ lr += min_lr
430
+ return lr
431
+
432
+
433
+ def _noam_hold_annealing(initial_lr, step, warmup_steps, hold_steps,
434
+ decay_rate, min_lr):
435
+ # hold_steps = total number of steps
436
+ # to hold the LR, not the warmup + hold steps.
437
+ T_warmup_decay = max(1, warmup_steps**decay_rate)
438
+ T_hold_decay = max(1, (step - hold_steps)**decay_rate)
439
+ lr = (initial_lr * T_warmup_decay) / T_hold_decay
440
+ lr = max(lr, min_lr)
441
+ return lr
442
+
443
+
444
+ class SquareAnnealing(WarmupPolicy):
445
+
446
+ def __init__(self,
447
+ optimizer,
448
+ *,
449
+ max_steps,
450
+ min_lr=1e-5,
451
+ last_epoch=-1,
452
+ **kwargs):
453
+ super().__init__(optimizer=optimizer,
454
+ max_steps=max_steps,
455
+ last_epoch=last_epoch,
456
+ min_lr=min_lr,
457
+ **kwargs)
458
+
459
+ def _get_lr(self, step):
460
+ new_lrs = [
461
+ _square_annealing(
462
+ initial_lr=initial_lr,
463
+ step=step - self.warmup_steps,
464
+ max_steps=self.max_steps - self.warmup_steps,
465
+ min_lr=self.min_lr,
466
+ ) for initial_lr in self.base_lrs
467
+ ]
468
+ return new_lrs
469
+
470
+
471
+ class SquareRootAnnealing(WarmupPolicy):
472
+
473
+ def __init__(self,
474
+ optimizer,
475
+ *,
476
+ max_steps,
477
+ min_lr=0,
478
+ last_epoch=-1,
479
+ **kwargs):
480
+ super().__init__(optimizer=optimizer,
481
+ max_steps=max_steps,
482
+ last_epoch=last_epoch,
483
+ min_lr=min_lr,
484
+ **kwargs)
485
+
486
+ def _get_lr(self, step):
487
+ new_lrs = [
488
+ _squareroot_annealing(initial_lr=initial_lr,
489
+ step=step,
490
+ max_steps=self.max_steps,
491
+ min_lr=self.min_lr)
492
+ for initial_lr in self.base_lrs
493
+ ]
494
+ return new_lrs
495
+
496
+
497
+ class CosineAnnealing(WarmupAnnealHoldPolicy):
498
+
499
+ def __init__(self,
500
+ optimizer,
501
+ *,
502
+ max_steps,
503
+ min_lr=0,
504
+ last_epoch=-1,
505
+ **kwargs):
506
+ super().__init__(optimizer=optimizer,
507
+ max_steps=max_steps,
508
+ last_epoch=last_epoch,
509
+ min_lr=min_lr,
510
+ **kwargs)
511
+
512
+ def _get_lr(self, step):
513
+ for initial_lr in self.base_lrs:
514
+ if initial_lr < self.min_lr:
515
+ raise ValueError(
516
+ f"{self} received an initial learning rate "
517
+ f"that was lower than the minimum learning rate.")
518
+
519
+ if self.constant_steps is None or self.constant_steps == 0:
520
+ new_lrs = [
521
+ _cosine_annealing(
522
+ initial_lr=initial_lr,
523
+ step=step - self.warmup_steps,
524
+ max_steps=self.max_steps - self.warmup_steps,
525
+ min_lr=self.min_lr,
526
+ ) for initial_lr in self.base_lrs
527
+ ]
528
+ else:
529
+ new_lrs = self._get_linear_warmup_with_cosine_annealing_lr(step)
530
+ return new_lrs
531
+
532
+ def _get_warmup_lr(self, step):
533
+ if self.constant_steps is None or self.constant_steps == 0:
534
+ return super()._get_warmup_lr(step)
535
+ else:
536
+ # Use linear warmup for the initial part.
537
+ return self._get_linear_warmup_with_cosine_annealing_lr(step)
538
+
539
+ def _get_constant_lr(self, step):
540
+ # Only called when `constant_steps` > 0.
541
+ return self._get_linear_warmup_with_cosine_annealing_lr(step)
542
+
543
+ def _get_linear_warmup_with_cosine_annealing_lr(self, step):
544
+ # Cosine Schedule for Megatron LM,
545
+ # slightly different warmup schedule + constant LR at the end.
546
+ new_lrs = [
547
+ _linear_warmup_with_cosine_annealing(
548
+ max_lr=self.base_lrs[0],
549
+ warmup_steps=self.warmup_steps,
550
+ step=step,
551
+ decay_steps=self.decay_steps,
552
+ min_lr=self.min_lr,
553
+ ) for _ in self.base_lrs
554
+ ]
555
+ return new_lrs
556
+
557
+
558
+ class NoamAnnealing(_LRScheduler):
559
+
560
+ def __init__(self,
561
+ optimizer,
562
+ *,
563
+ d_model,
564
+ warmup_steps=None,
565
+ warmup_ratio=None,
566
+ max_steps=None,
567
+ min_lr=0.0,
568
+ last_epoch=-1):
569
+ self._normalize = d_model**(-0.5)
570
+ assert not (warmup_steps is not None and warmup_ratio is not None), \
571
+ "Either use particular number of step or ratio"
572
+ assert warmup_ratio is None or max_steps is not None, \
573
+ "If there is a ratio, there should be a total steps"
574
+
575
+ # It is necessary to assign all attributes *before* __init__,
576
+ # as class is wrapped by an inner class.
577
+ self.max_steps = max_steps
578
+ if warmup_steps is not None:
579
+ self.warmup_steps = warmup_steps
580
+ elif warmup_ratio is not None:
581
+ self.warmup_steps = int(warmup_ratio * max_steps)
582
+ else:
583
+ self.warmup_steps = 0
584
+
585
+ self.min_lr = min_lr
586
+ super().__init__(optimizer, last_epoch)
587
+
588
+ def get_lr(self):
589
+ if not self._get_lr_called_within_step:
590
+ warnings.warn(
591
+ "To get the last learning rate computed "
592
+ "by the scheduler, please use `get_last_lr()`.",
593
+ UserWarning,
594
+ stacklevel=2)
595
+
596
+ step = max(1, self.last_epoch)
597
+
598
+ for initial_lr in self.base_lrs:
599
+ if initial_lr < self.min_lr:
600
+ raise ValueError(
601
+ f"{self} received an initial learning rate "
602
+ f"that was lower than the minimum learning rate.")
603
+
604
+ new_lrs = [
605
+ self._noam_annealing(initial_lr=initial_lr, step=step)
606
+ for initial_lr in self.base_lrs
607
+ ]
608
+ return new_lrs
609
+
610
+ def _noam_annealing(self, initial_lr, step):
611
+ if self.warmup_steps > 0:
612
+ mult = self._normalize * min(step**(-0.5),
613
+ step * (self.warmup_steps**(-1.5)))
614
+ else:
615
+ mult = self._normalize * step**(-0.5)
616
+
617
+ out_lr = initial_lr * mult
618
+ if step > self.warmup_steps:
619
+ out_lr = max(out_lr, self.min_lr)
620
+ return out_lr
621
+
622
+
623
+ class NoamHoldAnnealing(WarmupHoldPolicy):
624
+
625
+ def __init__(self,
626
+ optimizer,
627
+ *,
628
+ max_steps,
629
+ decay_rate=0.5,
630
+ min_lr=0.0,
631
+ last_epoch=-1,
632
+ **kwargs):
633
+ """
634
+ From Nemo:
635
+ Implementation of the Noam Hold Annealing policy
636
+ from the SqueezeFormer paper.
637
+
638
+ Unlike NoamAnnealing, the peak learning rate
639
+ can be explicitly set for this scheduler.
640
+ The schedule first performs linear warmup,
641
+ then holds the peak LR, then decays with some schedule for
642
+ the remainder of the steps.
643
+ Therefore the min-lr is still dependent
644
+ on the hyper parameters selected.
645
+
646
+ It's schedule is determined by three factors-
647
+
648
+ Warmup Steps: Initial stage, where linear warmup
649
+ occurs uptil the peak LR is reached. Unlike NoamAnnealing,
650
+ the peak LR is explicitly stated here instead of a scaling factor.
651
+
652
+ Hold Steps: Intermediate stage, where the peak LR
653
+ is maintained for some number of steps. In this region,
654
+ the high peak LR allows the model to converge faster
655
+ if training is stable. However the high LR
656
+ may also cause instability during training.
657
+ Should usually be a significant fraction of training
658
+ steps (around 30-40% of the entire training steps).
659
+
660
+ Decay Steps: Final stage, where the LR rapidly decays
661
+ with some scaling rate (set by decay rate).
662
+ To attain Noam decay, use 0.5,
663
+ for Squeezeformer recommended decay, use 1.0.
664
+ The fast decay after prolonged high LR during
665
+ hold phase allows for rapid convergence.
666
+
667
+ References:
668
+ - [Squeezeformer:
669
+ An Efficient Transformer for Automatic Speech Recognition]
670
+ (https://arxiv.org/abs/2206.00888)
671
+
672
+ Args:
673
+ optimizer: Pytorch compatible Optimizer object.
674
+ warmup_steps: Number of training steps in warmup stage
675
+ warmup_ratio: Ratio of warmup steps to total steps
676
+ hold_steps: Number of training steps to
677
+ hold the learning rate after warm up
678
+ hold_ratio: Ratio of hold steps to total steps
679
+ max_steps: Total number of steps while training or `None` for
680
+ infinite training
681
+ decay_rate: Float value describing the polynomial decay
682
+ after the hold period. Default value
683
+ of 0.5 corresponds to Noam decay.
684
+ min_lr: Minimum learning rate.
685
+ """
686
+ self.decay_rate = decay_rate
687
+ super().__init__(optimizer=optimizer,
688
+ max_steps=max_steps,
689
+ last_epoch=last_epoch,
690
+ min_lr=min_lr,
691
+ **kwargs)
692
+
693
+ def _get_lr(self, step):
694
+ if self.warmup_steps is None or self.warmup_steps == 0:
695
+ raise ValueError(
696
+ "Noam scheduler cannot be used without warmup steps")
697
+
698
+ if self.hold_steps > 0:
699
+ hold_steps = self.hold_steps - self.warmup_steps
700
+ else:
701
+ hold_steps = 0
702
+
703
+ new_lrs = [
704
+ _noam_hold_annealing(
705
+ initial_lr,
706
+ step=step,
707
+ warmup_steps=self.warmup_steps,
708
+ hold_steps=hold_steps,
709
+ decay_rate=self.decay_rate,
710
+ min_lr=self.min_lr,
711
+ ) for initial_lr in self.base_lrs
712
+ ]
713
+ return new_lrs
714
+
715
+ def set_step(self, step: int):
716
+ self.last_epoch = step
717
+
718
+
719
+ class ConstantLR(_LRScheduler):
720
+ """The ConstantLR scheduler
721
+
722
+ This scheduler keeps a constant lr
723
+
724
+ """
725
+
726
+ def __init__(
727
+ self,
728
+ optimizer: torch.optim.Optimizer,
729
+ ):
730
+ # __init__() must be invoked before setting field
731
+ # because step() is also invoked in __init__()
732
+ super().__init__(optimizer)
733
+
734
+ def get_lr(self):
735
+ return self.base_lrs
736
+
737
+ def set_step(self, step: int):
738
+ self.last_epoch = step
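A minimal sketch of driving one of the schedulers above (illustrative only; the optimizer, learning rate, and step count are toy values):

import torch
from cosyvoice.utils.scheduler import WarmupLR

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = WarmupLR(optimizer, warmup_steps=1000)

for _ in range(5):
    optimizer.step()       # lr rises roughly linearly during warmup,
    scheduler.step()       # then decays proportionally to step ** -0.5
print(scheduler.get_last_lr())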
cosyvoice/utils/train_utils.py ADDED
@@ -0,0 +1,345 @@
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
2
+ # 2023 Horizon Inc. (authors: Xingchen Song)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ import os
19
+ import torch
20
+ import json
21
+ import re
22
+ import datetime
23
+ import yaml
24
+
25
+ import deepspeed
26
+ import torch.optim as optim
27
+ import torch.distributed as dist
28
+
29
+ from torch.utils.tensorboard import SummaryWriter
30
+ from torch.utils.data import DataLoader
31
+ from torch.nn.utils import clip_grad_norm_
32
+
33
+ from deepspeed.runtime.zero.stage_1_and_2 import estimate_zero2_model_states_mem_needs_all_live
34
+
35
+ from cosyvoice.dataset.dataset import Dataset
36
+ from cosyvoice.utils.scheduler import WarmupLR, NoamHoldAnnealing, ConstantLR
37
+
38
+
39
+ def init_distributed(args):
40
+ world_size = int(os.environ.get('WORLD_SIZE', 1))
41
+ local_rank = int(os.environ.get('LOCAL_RANK', 0))
42
+ rank = int(os.environ.get('RANK', 0))
43
+ logging.info('training on multiple gpus, this gpu {}'.format(local_rank) +
44
+ ', rank {}, world_size {}'.format(rank, world_size))
45
+ if args.train_engine == 'torch_ddp':
46
+ torch.cuda.set_device(local_rank)
47
+ dist.init_process_group(args.dist_backend)
48
+ else:
49
+ deepspeed.init_distributed(dist_backend=args.dist_backend)
50
+ return world_size, local_rank, rank
51
+
52
+
53
+ def init_dataset_and_dataloader(args, configs, gan):
54
+ data_pipeline = configs['data_pipeline_gan'] if gan is True else configs['data_pipeline']
55
+ train_dataset = Dataset(args.train_data, data_pipeline=data_pipeline, mode='train', gan=gan, shuffle=True, partition=True)
56
+ cv_dataset = Dataset(args.cv_data, data_pipeline=data_pipeline, mode='train', gan=gan, shuffle=False, partition=False)
57
+
58
+ # do not use persistent_workers=True, as whisper tokenizer opens tiktoken file each time when the for loop starts
59
+ train_data_loader = DataLoader(train_dataset,
60
+ batch_size=None,
61
+ pin_memory=args.pin_memory,
62
+ num_workers=args.num_workers,
63
+ prefetch_factor=args.prefetch)
64
+ cv_data_loader = DataLoader(cv_dataset,
65
+ batch_size=None,
66
+ pin_memory=args.pin_memory,
67
+ num_workers=args.num_workers,
68
+ prefetch_factor=args.prefetch)
69
+ return train_dataset, cv_dataset, train_data_loader, cv_data_loader
70
+
71
+
72
+ def check_modify_and_save_config(args, configs):
73
+ if args.train_engine == "torch_ddp":
74
+ configs['train_conf']["dtype"] = 'fp32'
75
+ else:
76
+ with open(args.deepspeed_config, 'r') as fin:
77
+ ds_configs = json.load(fin)
78
+ if "fp16" in ds_configs and ds_configs["fp16"]["enabled"]:
79
+ configs['train_conf']["dtype"] = "fp16"
80
+ elif "bf16" in ds_configs and ds_configs["bf16"]["enabled"]:
81
+ configs['train_conf']["dtype"] = "bf16"
82
+ else:
83
+ configs['train_conf']["dtype"] = "fp32"
84
+ assert ds_configs["train_micro_batch_size_per_gpu"] == 1
85
+ # if use deepspeed, override ddp config
86
+ configs['train_conf']['save_per_step'] = int(configs['train_conf']['save_per_step'] *
87
+ configs['train_conf']['accum_grad'] / ds_configs["gradient_accumulation_steps"])
88
+ configs['train_conf']['accum_grad'] = ds_configs["gradient_accumulation_steps"]
89
+ configs['train_conf']['grad_clip'] = ds_configs["gradient_clipping"]
90
+ configs['train_conf']['log_interval'] = ds_configs["steps_per_print"]
91
+ return configs
92
+
93
+
94
+ def wrap_cuda_model(args, model):
95
+ local_world_size = int(os.environ.get('LOCAL_WORLD_SIZE', 1))
96
+ world_size = int(os.environ.get('WORLD_SIZE', 1))
97
+ if args.train_engine == "torch_ddp": # native pytorch ddp
98
+ assert (torch.cuda.is_available())
99
+ model.cuda()
100
+ model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
101
+ else:
102
+ if int(os.environ.get('RANK', 0)) == 0:
103
+ logging.info("Estimating model states memory needs (zero2)...")
104
+ estimate_zero2_model_states_mem_needs_all_live(
105
+ model,
106
+ num_gpus_per_node=local_world_size,
107
+ num_nodes=world_size // local_world_size)
108
+ return model
109
+
110
+
111
+ def init_optimizer_and_scheduler(args, configs, model, gan):
112
+ if gan is False:
113
+ if configs['train_conf']['optim'] == 'adam':
114
+ optimizer = optim.Adam(model.parameters(), **configs['train_conf']['optim_conf'])
115
+ elif configs['train_conf']['optim'] == 'adamw':
116
+ optimizer = optim.AdamW(model.parameters(), **configs['train_conf']['optim_conf'])
117
+ else:
118
+ raise ValueError("unknown optimizer: " + configs['train_conf'])
119
+
120
+ if configs['train_conf']['scheduler'] == 'warmuplr':
121
+ scheduler_type = WarmupLR
122
+ scheduler = WarmupLR(optimizer, **configs['train_conf']['scheduler_conf'])
123
+ elif configs['train_conf']['scheduler'] == 'NoamHoldAnnealing':
124
+ scheduler_type = NoamHoldAnnealing
125
+ scheduler = NoamHoldAnnealing(optimizer, **configs['train_conf']['scheduler_conf'])
126
+ elif configs['train_conf']['scheduler'] == 'constantlr':
127
+ scheduler_type = ConstantLR
128
+ scheduler = ConstantLR(optimizer)
129
+ else:
130
+ raise ValueError("unknown scheduler: " + configs['train_conf'])
131
+
132
+ # use deepspeed optimizer for speedup
133
+ if args.train_engine == "deepspeed":
134
+ def scheduler(opt):
135
+ return scheduler_type(opt, **configs['train_conf']['scheduler_conf'])
136
+ model, optimizer, _, scheduler = deepspeed.initialize(
137
+ args=args,
138
+ model=model,
139
+ optimizer=None,
140
+ lr_scheduler=scheduler,
141
+ model_parameters=model.parameters())
142
+
143
+ optimizer_d, scheduler_d = None, None
144
+
145
+ else:
146
+ # currently we wrap generator and discriminator in one model, so we cannot use deepspeed
147
+ if configs['train_conf']['optim'] == 'adam':
148
+ optimizer = optim.Adam(model.module.generator.parameters(), **configs['train_conf']['optim_conf'])
149
+ elif configs['train_conf']['optim'] == 'adamw':
150
+ optimizer = optim.AdamW(model.module.generator.parameters(), **configs['train_conf']['optim_conf'])
151
+ else:
152
+ raise ValueError("unknown optimizer: " + configs['train_conf'])
153
+
154
+ if configs['train_conf']['scheduler'] == 'warmuplr':
155
+ scheduler_type = WarmupLR
156
+ scheduler = WarmupLR(optimizer, **configs['train_conf']['scheduler_conf'])
157
+ elif configs['train_conf']['scheduler'] == 'NoamHoldAnnealing':
158
+ scheduler_type = NoamHoldAnnealing
159
+ scheduler = NoamHoldAnnealing(optimizer, **configs['train_conf']['scheduler_conf'])
160
+ elif configs['train_conf']['scheduler'] == 'constantlr':
161
+ scheduler_type = ConstantLR
162
+ scheduler = ConstantLR(optimizer)
163
+ else:
164
+ raise ValueError("unknown scheduler: " + configs['train_conf'])
165
+
166
+ if configs['train_conf']['optim_d'] == 'adam':
167
+ optimizer_d = optim.Adam(model.module.discriminator.parameters(), **configs['train_conf']['optim_conf'])
168
+ elif configs['train_conf']['optim_d'] == 'adamw':
169
+ optimizer_d = optim.AdamW(model.module.discriminator.parameters(), **configs['train_conf']['optim_conf'])
170
+ else:
171
+ raise ValueError("unknown optimizer: " + configs['train_conf'])
172
+
173
+ if configs['train_conf']['scheduler_d'] == 'warmuplr':
174
+ scheduler_type = WarmupLR
175
+ scheduler_d = WarmupLR(optimizer_d, **configs['train_conf']['scheduler_conf'])
176
+ elif configs['train_conf']['scheduler_d'] == 'NoamHoldAnnealing':
177
+ scheduler_type = NoamHoldAnnealing
178
+ scheduler_d = NoamHoldAnnealing(optimizer_d, **configs['train_conf']['scheduler_conf'])
179
+ elif configs['train_conf']['scheduler_d'] == 'constantlr':
180
+ scheduler_type = ConstantLR
181
+ scheduler_d = ConstantLR(optimizer_d)
182
+ else:
183
+ raise ValueError("unknown scheduler: " + configs['train_conf'])
184
+ return model, optimizer, scheduler, optimizer_d, scheduler_d
185
+
186
+
187
+ def init_summarywriter(args):
188
+ writer = None
189
+ if int(os.environ.get('RANK', 0)) == 0:
190
+ os.makedirs(args.model_dir, exist_ok=True)
191
+ writer = SummaryWriter(args.tensorboard_dir)
192
+ return writer
193
+
194
+
195
+ def save_model(model, model_name, info_dict):
196
+ rank = int(os.environ.get('RANK', 0))
197
+ model_dir = info_dict["model_dir"]
198
+ save_model_path = os.path.join(model_dir, '{}.pt'.format(model_name))
199
+
200
+ if info_dict["train_engine"] == "torch_ddp":
201
+ if rank == 0:
202
+ torch.save({**model.module.state_dict(), 'epoch': info_dict['epoch'], 'step': info_dict['step']}, save_model_path)
203
+ else:
204
+ with torch.no_grad():
205
+ model.save_checkpoint(save_dir=model_dir,
206
+ tag=model_name,
207
+ client_state=info_dict)
208
+ if rank == 0:
209
+ info_path = re.sub('.pt$', '.yaml', save_model_path)
210
+ info_dict['save_time'] = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')
211
+ with open(info_path, 'w') as fout:
212
+ data = yaml.dump(info_dict)
213
+ fout.write(data)
214
+ logging.info('[Rank {}] Checkpoint: save to checkpoint {}'.format(rank, save_model_path))
215
+
216
+
217
+ def cosyvoice_join(group_join, info_dict):
218
+ world_size = int(os.environ.get('WORLD_SIZE', 1))
219
+ local_rank = int(os.environ.get('LOCAL_RANK', 0))
220
+ rank = int(os.environ.get('RANK', 0))
221
+
222
+ if info_dict["batch_idx"] != 0:
223
+ # we try to join all rank in both ddp and deepspeed mode, in case different rank has different lr
224
+ try:
225
+ dist.monitored_barrier(group=group_join,
226
+ timeout=group_join.options._timeout)
227
+ return False
228
+ except RuntimeError as e:
229
+ logging.info("Detected uneven workload distribution: {}\n".format(e) +
230
+ "Break current worker to manually join all workers, " +
231
+ "world_size {}, current rank {}, current local_rank {}\n".
232
+ format(world_size, rank, local_rank))
233
+ return True
234
+ else:
235
+ return False
236
+
237
+
238
+ def batch_forward(model, batch, scaler, info_dict):
239
+ device = int(os.environ.get('LOCAL_RANK', 0))
240
+
241
+ dtype = info_dict["dtype"]
242
+ if dtype == "fp16":
243
+ dtype = torch.float16
244
+ elif dtype == "bf16":
245
+ dtype = torch.bfloat16
246
+ else: # fp32
247
+ dtype = torch.float32
248
+
249
+ if info_dict['train_engine'] == 'torch_ddp':
250
+ autocast = torch.cuda.amp.autocast(enabled=scaler is not None)
251
+ else:
252
+ autocast = torch.cuda.amp.autocast(enabled=True, dtype=dtype, cache_enabled=False)
253
+
254
+ with autocast:
255
+ info_dict['loss_dict'] = model(batch, device)
256
+ return info_dict
257
+
258
+
259
+ def batch_backward(model, scaler, info_dict):
260
+ if info_dict["train_engine"] == "deepspeed":
261
+ scaled_loss = model.backward(info_dict['loss_dict']['loss'])
262
+ else:
263
+ scaled_loss = info_dict['loss_dict']['loss'] / info_dict['accum_grad']
264
+ if scaler is not None:
265
+ scaler.scale(scaled_loss).backward()
266
+ else:
267
+ scaled_loss.backward()
268
+
269
+ info_dict['loss_dict']['loss'] = scaled_loss
270
+ return info_dict
271
+
272
+
273
+ def update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict):
274
+ grad_norm = 0.0
275
+ if info_dict['train_engine'] == "deepspeed":
276
+ info_dict["is_gradient_accumulation_boundary"] = model.is_gradient_accumulation_boundary()
277
+ model.step()
278
+ grad_norm = model.get_global_grad_norm()
279
+ elif (info_dict['batch_idx'] + 1) % info_dict["accum_grad"] == 0:
280
+ # Use mixed precision training
281
+ if scaler is not None:
282
+ scaler.unscale_(optimizer)
283
+ grad_norm = clip_grad_norm_(model.parameters(), info_dict['grad_clip'])
284
+ # We don't check grad here since that if the gradient
285
+ # has inf/nan values, scaler.step will skip
286
+ # optimizer.step().
287
+ if torch.isfinite(grad_norm):
288
+ scaler.step(optimizer)
289
+ scaler.update()
290
+ else:
291
+ grad_norm = clip_grad_norm_(model.parameters(), info_dict['grad_clip'])
292
+ if torch.isfinite(grad_norm):
293
+ optimizer.step()
294
+ optimizer.zero_grad()
295
+ scheduler.step()
296
+ info_dict["lr"] = optimizer.param_groups[0]['lr']
297
+ info_dict["grad_norm"] = grad_norm
298
+ return info_dict
299
+
300
+
301
+ def log_per_step(writer, info_dict):
302
+ tag = info_dict["tag"]
303
+ epoch = info_dict.get('epoch', 0)
304
+ step = info_dict["step"]
305
+ batch_idx = info_dict["batch_idx"]
306
+ loss_dict = info_dict['loss_dict']
307
+ rank = int(os.environ.get('RANK', 0))
308
+
309
+ # only rank 0 write to tensorboard to avoid multi-process write
310
+ if writer is not None:
311
+ if (info_dict['train_engine'] == 'deepspeed' and info_dict['is_gradient_accumulation_boundary'] is True) or \
312
+ (info_dict['train_engine'] == 'torch_ddp' and (info_dict['batch_idx'] + 1) % info_dict['accum_grad'] == 0):
313
+ for k in ['epoch', 'lr', 'grad_norm']:
314
+ writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1)
315
+ for k, v in loss_dict.items():
316
+ writer.add_scalar('{}/{}'.format(tag, k), v, step + 1)
317
+
318
+ # TRAIN & CV, Shell log (stdout)
319
+ if (info_dict['batch_idx'] + 1) % info_dict['log_interval'] == 0:
320
+ log_str = '{} Batch {}/{} '.format(tag, epoch, batch_idx + 1)
321
+ for name, value in loss_dict.items():
322
+ log_str += '{} {:.6f} '.format(name, value)
323
+ if tag == "TRAIN":
324
+ log_str += 'lr {:.8f} grad_norm {:.6f}'.format(
325
+ info_dict["lr"], info_dict['grad_norm'])
326
+ log_str += ' rank {}'.format(rank)
327
+ logging.debug(log_str)
328
+
329
+
330
+ def log_per_save(writer, info_dict):
331
+ tag = info_dict["tag"]
332
+ epoch = info_dict["epoch"]
333
+ step = info_dict["step"]
334
+ loss_dict = info_dict["loss_dict"]
335
+ lr = info_dict['lr']
336
+ rank = int(os.environ.get('RANK', 0))
337
+ logging.info(
338
+ 'Epoch {} Step {} CV info lr {} {} rank {}'.format(
339
+ epoch, step + 1, lr, ' '.join(['{}_{}'.format(k, v) for k, v in loss_dict.items()]), rank))
340
+
341
+ if writer is not None:
342
+ for k in ['epoch', 'lr']:
343
+ writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1)
344
+ for k, v in loss_dict.items():
345
+ writer.add_scalar('{}/{}'.format(tag, k), v, step + 1)
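The helpers above (batch_forward, batch_backward, update_parameter_and_lr, log_per_step) are meant to be chained once per batch. The loop below is only a minimal sketch of that contract for readers skimming this diff; the exact info_dict keys, the dataloader output, and the repo's real Executor logic are assumptions, not code from this commit.

# Minimal illustration of how the train_utils helpers chain together per batch.
# Assumes it runs in the same namespace as the functions defined above.
def train_one_epoch(model, optimizer, scheduler, scaler, dataloader, writer, info_dict):
    for batch_idx, batch in enumerate(dataloader):
        info_dict['tag'] = 'TRAIN'
        info_dict['batch_idx'] = batch_idx
        info_dict = batch_forward(model, batch, scaler, info_dict)   # forward + loss under autocast
        info_dict = batch_backward(model, scaler, info_dict)         # scaled / accumulated backward
        info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
        log_per_step(writer, info_dict)
        if (batch_idx + 1) % info_dict['accum_grad'] == 0:           # count optimizer steps, not batches
            info_dict['step'] += 1
    return info_dict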
examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml ADDED
@@ -0,0 +1,257 @@
1
+ # set random seed, so that you may reproduce your result.
2
+ __set_seed1: !apply:random.seed [1986]
3
+ __set_seed2: !apply:numpy.random.seed [1986]
4
+ __set_seed3: !apply:torch.manual_seed [1986]
5
+ __set_seed4: !apply:torch.cuda.manual_seed_all [1986]
6
+
7
+ # fixed params
8
+ sample_rate: 22050
9
+ text_encoder_input_size: 512
10
+ llm_input_size: 1024
11
+ llm_output_size: 1024
12
+ spk_embed_dim: 192
13
+
14
+ # model params
15
+ # for all classes/functions included in this repo, we use !<name> or !<new> for initialization, so that users can find every corresponding class/function from one single yaml.
16
+ # for system/third_party class/function, we do not require this.
17
+ llm: !new:cosyvoice.llm.llm.TransformerLM
18
+ text_encoder_input_size: !ref <text_encoder_input_size>
19
+ llm_input_size: !ref <llm_input_size>
20
+ llm_output_size: !ref <llm_output_size>
21
+ text_token_size: 51866 # change to 60515 if you want to train with CosyVoice-300M-25Hz recipe
22
+ speech_token_size: 4096
23
+ length_normalized_loss: True
24
+ lsm_weight: 0
25
+ spk_embed_dim: !ref <spk_embed_dim>
26
+ text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
27
+ input_size: !ref <text_encoder_input_size>
28
+ output_size: 1024
29
+ attention_heads: 8
30
+ linear_units: 2048
31
+ num_blocks: 3
32
+ dropout_rate: 0.1
33
+ positional_dropout_rate: 0.1
34
+ attention_dropout_rate: 0.0
35
+ normalize_before: True
36
+ input_layer: 'linear'
37
+ pos_enc_layer_type: 'rel_pos_espnet'
38
+ selfattention_layer_type: 'rel_selfattn'
39
+ use_cnn_module: False
40
+ macaron_style: False
41
+ use_dynamic_chunk: False
42
+ use_dynamic_left_chunk: False
43
+ static_chunk_size: 1
44
+ llm: !new:cosyvoice.transformer.encoder.TransformerEncoder
45
+ input_size: !ref <llm_input_size>
46
+ output_size: !ref <llm_output_size>
47
+ attention_heads: 8
48
+ linear_units: 2048
49
+ num_blocks: 7
50
+ dropout_rate: 0.1
51
+ positional_dropout_rate: 0.1
52
+ attention_dropout_rate: 0.0
53
+ input_layer: 'linear_legacy'
54
+ pos_enc_layer_type: 'rel_pos_espnet'
55
+ selfattention_layer_type: 'rel_selfattn'
56
+ static_chunk_size: 1
57
+ sampling: !name:cosyvoice.utils.common.ras_sampling
58
+ top_p: 0.8
59
+ top_k: 25
60
+ win_size: 10
61
+ tau_r: 0.1
62
+
63
+ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
64
+ input_size: 512
65
+ output_size: 80
66
+ spk_embed_dim: !ref <spk_embed_dim>
67
+ output_type: 'mel'
68
+ vocab_size: 4096
69
+ input_frame_rate: 50 # change to 25 if you want to train with CosyVoice-300M-25Hz recipe
70
+ only_mask_loss: True
71
+ encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
72
+ output_size: 512
73
+ attention_heads: 4
74
+ linear_units: 1024
75
+ num_blocks: 3
76
+ dropout_rate: 0.1
77
+ positional_dropout_rate: 0.1
78
+ attention_dropout_rate: 0.1
79
+ normalize_before: True
80
+ input_layer: 'linear'
81
+ pos_enc_layer_type: 'rel_pos_espnet'
82
+ selfattention_layer_type: 'rel_selfattn'
83
+ input_size: 512
84
+ use_cnn_module: False
85
+ macaron_style: False
86
+ length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator
87
+ channels: 80
88
+ sampling_ratios: [1, 1, 1, 1]
89
+ decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM
90
+ in_channels: 240
91
+ n_spks: 1
92
+ spk_emb_dim: 80
93
+ cfm_params: !new:omegaconf.DictConfig
94
+ content:
95
+ sigma_min: 1e-06
96
+ solver: 'euler'
97
+ t_scheduler: 'cosine'
98
+ training_cfg_rate: 0.2
99
+ inference_cfg_rate: 0.7
100
+ reg_loss_type: 'l1'
101
+ estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder
102
+ in_channels: 320
103
+ out_channels: 80
104
+ channels: [256, 256]
105
+ dropout: 0.0
106
+ attention_head_dim: 64
107
+ n_blocks: 4
108
+ num_mid_blocks: 8
109
+ num_heads: 8
110
+ act_fn: 'gelu'
111
+
112
+ hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
113
+ in_channels: 80
114
+ base_channels: 512
115
+ nb_harmonics: 8
116
+ sampling_rate: !ref <sample_rate>
117
+ nsf_alpha: 0.1
118
+ nsf_sigma: 0.003
119
+ nsf_voiced_threshold: 10
120
+ upsample_rates: [8, 8]
121
+ upsample_kernel_sizes: [16, 16]
122
+ istft_params:
123
+ n_fft: 16
124
+ hop_len: 4
125
+ resblock_kernel_sizes: [3, 7, 11]
126
+ resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
127
+ source_resblock_kernel_sizes: [7, 11]
128
+ source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]]
129
+ lrelu_slope: 0.1
130
+ audio_limit: 0.99
131
+ f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
132
+ num_class: 1
133
+ in_channels: 80
134
+ cond_channels: 512
135
+
136
+ # gan related module
137
+ mel_spec_transform1: !name:matcha.utils.audio.mel_spectrogram
138
+ n_fft: 1024
139
+ num_mels: 80
140
+ sampling_rate: !ref <sample_rate>
141
+ hop_size: 256
142
+ win_size: 1024
143
+ fmin: 0
144
+ fmax: null
145
+ center: False
146
+ hifigan: !new:cosyvoice.hifigan.hifigan.HiFiGan
147
+ generator: !ref <hift>
148
+ discriminator: !new:cosyvoice.hifigan.discriminator.MultipleDiscriminator
149
+ mpd: !new:matcha.hifigan.models.MultiPeriodDiscriminator
150
+ mrd: !new:cosyvoice.hifigan.discriminator.MultiResolutionDiscriminator
151
+ mel_spec_transform: [
152
+ !ref <mel_spec_transform1>
153
+ ]
154
+
155
+ # processor functions
156
+ parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
157
+ get_tokenizer: !name:whisper.tokenizer.get_tokenizer # change to !name:cosyvoice.tokenizer.tokenizer.get_tokenizer if you want to train with CosyVoice-300M-25Hz recipe
158
+ multilingual: True
159
+ num_languages: 100
160
+ language: 'en'
161
+ task: 'transcribe'
162
+ allowed_special: 'all'
163
+ tokenize: !name:cosyvoice.dataset.processor.tokenize
164
+ get_tokenizer: !ref <get_tokenizer>
165
+ allowed_special: !ref <allowed_special>
166
+ filter: !name:cosyvoice.dataset.processor.filter
167
+ max_length: 40960
168
+ min_length: 0
169
+ token_max_length: 200
170
+ token_min_length: 1
171
+ resample: !name:cosyvoice.dataset.processor.resample
172
+ resample_rate: !ref <sample_rate>
173
+ truncate: !name:cosyvoice.dataset.processor.truncate
174
+ truncate_length: 24576 # must be a multiple of hop_size
175
+ feat_extractor: !name:matcha.utils.audio.mel_spectrogram
176
+ n_fft: 1024
177
+ num_mels: 80
178
+ sampling_rate: !ref <sample_rate>
179
+ hop_size: 256
180
+ win_size: 1024
181
+ fmin: 0
182
+ fmax: 8000
183
+ center: False
184
+ compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
185
+ feat_extractor: !ref <feat_extractor>
186
+ compute_f0: !name:cosyvoice.dataset.processor.compute_f0
187
+ sample_rate: !ref <sample_rate>
188
+ hop_size: 256
189
+ parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
190
+ normalize: True
191
+ shuffle: !name:cosyvoice.dataset.processor.shuffle
192
+ shuffle_size: 1000
193
+ sort: !name:cosyvoice.dataset.processor.sort
194
+ sort_size: 500 # sort_size should be less than shuffle_size
195
+ batch: !name:cosyvoice.dataset.processor.batch
196
+ batch_type: 'dynamic'
197
+ max_frames_in_batch: 12000
198
+ padding: !name:cosyvoice.dataset.processor.padding
199
+ use_spk_embedding: False # change to True during sft
200
+
201
+ # dataset processor pipeline
202
+ data_pipeline: [
203
+ !ref <parquet_opener>,
204
+ !ref <tokenize>,
205
+ !ref <filter>,
206
+ !ref <resample>,
207
+ !ref <compute_fbank>,
208
+ !ref <parse_embedding>,
209
+ !ref <shuffle>,
210
+ !ref <sort>,
211
+ !ref <batch>,
212
+ !ref <padding>,
213
+ ]
214
+ data_pipeline_gan: [
215
+ !ref <parquet_opener>,
216
+ !ref <tokenize>,
217
+ !ref <filter>,
218
+ !ref <resample>,
219
+ !ref <truncate>,
220
+ !ref <compute_fbank>,
221
+ !ref <compute_f0>,
222
+ !ref <parse_embedding>,
223
+ !ref <shuffle>,
224
+ !ref <sort>,
225
+ !ref <batch>,
226
+ !ref <padding>,
227
+ ]
228
+
229
+ # llm flow train conf
230
+ train_conf:
231
+ optim: adam
232
+ optim_conf:
233
+ lr: 0.002 # change to 0.001 if you want to train flow from scratch
234
+ scheduler: warmuplr
235
+ scheduler_conf:
236
+ warmup_steps: 25000
237
+ max_epoch: 200
238
+ grad_clip: 5
239
+ accum_grad: 2
240
+ log_interval: 100
241
+ save_per_step: -1
242
+
243
+ # gan train conf
244
+ train_conf_gan:
245
+ optim: adam
246
+ optim_conf:
247
+ lr: 0.0002 # use small lr for gan training
248
+ scheduler: constantlr
249
+ optim_d: adam
250
+ optim_conf_d:
251
+ lr: 0.0002 # use small lr for gan training
252
+ scheduler_d: constantlr
253
+ max_epoch: 200
254
+ grad_clip: 5
255
+ accum_grad: 1 # in gan training, accum_grad must be 1
256
+ log_interval: 100
257
+ save_per_step: -1
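The !new/!name/!ref tags in the yaml above are HyperPyYAML syntax, so loading the file already instantiates the modules and the processor partials. A rough sketch of how such a recipe config is typically consumed is shown below; the exact flow inside cosyvoice/bin/train.py (overrides, model selection, etc.) is assumed, not copied from this commit.

# Sketch: loading the recipe config; HyperPyYAML tag resolution builds the objects directly.
from hyperpyyaml import load_hyperpyyaml

with open('conf/cosyvoice.fromscratch.yaml', 'r') as f:
    configs = load_hyperpyyaml(f)

llm = configs['llm']                      # cosyvoice.llm.llm.TransformerLM instance
flow = configs['flow']                    # cosyvoice.flow.flow.MaskedDiffWithXvec instance
data_pipeline = configs['data_pipeline']  # list of partially-applied processor functions
print(type(llm), len(data_pipeline))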
examples/libritts/cosyvoice/conf/cosyvoice.yaml ADDED
@@ -0,0 +1,257 @@
1
+ # set random seed, so that you may reproduce your result.
2
+ __set_seed1: !apply:random.seed [1986]
3
+ __set_seed2: !apply:numpy.random.seed [1986]
4
+ __set_seed3: !apply:torch.manual_seed [1986]
5
+ __set_seed4: !apply:torch.cuda.manual_seed_all [1986]
6
+
7
+ # fixed params
8
+ sample_rate: 22050
9
+ text_encoder_input_size: 512
10
+ llm_input_size: 1024
11
+ llm_output_size: 1024
12
+ spk_embed_dim: 192
13
+
14
+ # model params
15
+ # for all classes/functions included in this repo, we use !<name> or !<new> for initialization, so that users can find every corresponding class/function from one single yaml.
16
+ # for system/third_party class/function, we do not require this.
17
+ llm: !new:cosyvoice.llm.llm.TransformerLM
18
+ text_encoder_input_size: !ref <text_encoder_input_size>
19
+ llm_input_size: !ref <llm_input_size>
20
+ llm_output_size: !ref <llm_output_size>
21
+ text_token_size: 51866 # change to 60515 if you want to train with CosyVoice-300M-25Hz recipe
22
+ speech_token_size: 4096
23
+ length_normalized_loss: True
24
+ lsm_weight: 0
25
+ spk_embed_dim: !ref <spk_embed_dim>
26
+ text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
27
+ input_size: !ref <text_encoder_input_size>
28
+ output_size: 1024
29
+ attention_heads: 16
30
+ linear_units: 4096
31
+ num_blocks: 6
32
+ dropout_rate: 0.1
33
+ positional_dropout_rate: 0.1
34
+ attention_dropout_rate: 0.0
35
+ normalize_before: True
36
+ input_layer: 'linear'
37
+ pos_enc_layer_type: 'rel_pos_espnet'
38
+ selfattention_layer_type: 'rel_selfattn'
39
+ use_cnn_module: False
40
+ macaron_style: False
41
+ use_dynamic_chunk: False
42
+ use_dynamic_left_chunk: False
43
+ static_chunk_size: 1
44
+ llm: !new:cosyvoice.transformer.encoder.TransformerEncoder
45
+ input_size: !ref <llm_input_size>
46
+ output_size: !ref <llm_output_size>
47
+ attention_heads: 16
48
+ linear_units: 4096
49
+ num_blocks: 14
50
+ dropout_rate: 0.1
51
+ positional_dropout_rate: 0.1
52
+ attention_dropout_rate: 0.0
53
+ input_layer: 'linear_legacy'
54
+ pos_enc_layer_type: 'rel_pos_espnet'
55
+ selfattention_layer_type: 'rel_selfattn'
56
+ static_chunk_size: 1
57
+ sampling: !name:cosyvoice.utils.common.ras_sampling
58
+ top_p: 0.8
59
+ top_k: 25
60
+ win_size: 10
61
+ tau_r: 0.1
62
+
63
+ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
64
+ input_size: 512
65
+ output_size: 80
66
+ spk_embed_dim: !ref <spk_embed_dim>
67
+ output_type: 'mel'
68
+ vocab_size: 4096
69
+ input_frame_rate: 50 # change to 25 if you want to train with CosyVoice-300M-25Hz recipe
70
+ only_mask_loss: True
71
+ encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
72
+ output_size: 512
73
+ attention_heads: 8
74
+ linear_units: 2048
75
+ num_blocks: 6
76
+ dropout_rate: 0.1
77
+ positional_dropout_rate: 0.1
78
+ attention_dropout_rate: 0.1
79
+ normalize_before: True
80
+ input_layer: 'linear'
81
+ pos_enc_layer_type: 'rel_pos_espnet'
82
+ selfattention_layer_type: 'rel_selfattn'
83
+ input_size: 512
84
+ use_cnn_module: False
85
+ macaron_style: False
86
+ length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator
87
+ channels: 80
88
+ sampling_ratios: [1, 1, 1, 1]
89
+ decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM
90
+ in_channels: 240
91
+ n_spks: 1
92
+ spk_emb_dim: 80
93
+ cfm_params: !new:omegaconf.DictConfig
94
+ content:
95
+ sigma_min: 1e-06
96
+ solver: 'euler'
97
+ t_scheduler: 'cosine'
98
+ training_cfg_rate: 0.2
99
+ inference_cfg_rate: 0.7
100
+ reg_loss_type: 'l1'
101
+ estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder
102
+ in_channels: 320
103
+ out_channels: 80
104
+ channels: [256, 256]
105
+ dropout: 0.0
106
+ attention_head_dim: 64
107
+ n_blocks: 4
108
+ num_mid_blocks: 12
109
+ num_heads: 8
110
+ act_fn: 'gelu'
111
+
112
+ hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
113
+ in_channels: 80
114
+ base_channels: 512
115
+ nb_harmonics: 8
116
+ sampling_rate: !ref <sample_rate>
117
+ nsf_alpha: 0.1
118
+ nsf_sigma: 0.003
119
+ nsf_voiced_threshold: 10
120
+ upsample_rates: [8, 8]
121
+ upsample_kernel_sizes: [16, 16]
122
+ istft_params:
123
+ n_fft: 16
124
+ hop_len: 4
125
+ resblock_kernel_sizes: [3, 7, 11]
126
+ resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
127
+ source_resblock_kernel_sizes: [7, 11]
128
+ source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]]
129
+ lrelu_slope: 0.1
130
+ audio_limit: 0.99
131
+ f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
132
+ num_class: 1
133
+ in_channels: 80
134
+ cond_channels: 512
135
+
136
+ # gan related module
137
+ mel_spec_transform1: !name:matcha.utils.audio.mel_spectrogram
138
+ n_fft: 1024
139
+ num_mels: 80
140
+ sampling_rate: !ref <sample_rate>
141
+ hop_size: 256
142
+ win_size: 1024
143
+ fmin: 0
144
+ fmax: null
145
+ center: False
146
+ hifigan: !new:cosyvoice.hifigan.hifigan.HiFiGan
147
+ generator: !ref <hift>
148
+ discriminator: !new:cosyvoice.hifigan.discriminator.MultipleDiscriminator
149
+ mpd: !new:matcha.hifigan.models.MultiPeriodDiscriminator
150
+ mrd: !new:cosyvoice.hifigan.discriminator.MultiResolutionDiscriminator
151
+ mel_spec_transform: [
152
+ !ref <mel_spec_transform1>
153
+ ]
154
+
155
+ # processor functions
156
+ parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
157
+ get_tokenizer: !name:whisper.tokenizer.get_tokenizer # change to !name:cosyvoice.tokenizer.tokenizer.get_tokenizer if you want to train with CosyVoice-300M-25Hz recipe
158
+ multilingual: True
159
+ num_languages: 100
160
+ language: 'en'
161
+ task: 'transcribe'
162
+ allowed_special: 'all'
163
+ tokenize: !name:cosyvoice.dataset.processor.tokenize
164
+ get_tokenizer: !ref <get_tokenizer>
165
+ allowed_special: !ref <allowed_special>
166
+ filter: !name:cosyvoice.dataset.processor.filter
167
+ max_length: 40960
168
+ min_length: 0
169
+ token_max_length: 200
170
+ token_min_length: 1
171
+ resample: !name:cosyvoice.dataset.processor.resample
172
+ resample_rate: !ref <sample_rate>
173
+ truncate: !name:cosyvoice.dataset.processor.truncate
174
+ truncate_length: 24576 # must be a multiple of hop_size
175
+ feat_extractor: !name:matcha.utils.audio.mel_spectrogram
176
+ n_fft: 1024
177
+ num_mels: 80
178
+ sampling_rate: !ref <sample_rate>
179
+ hop_size: 256
180
+ win_size: 1024
181
+ fmin: 0
182
+ fmax: 8000
183
+ center: False
184
+ compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
185
+ feat_extractor: !ref <feat_extractor>
186
+ compute_f0: !name:cosyvoice.dataset.processor.compute_f0
187
+ sample_rate: !ref <sample_rate>
188
+ hop_size: 256
189
+ parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
190
+ normalize: True
191
+ shuffle: !name:cosyvoice.dataset.processor.shuffle
192
+ shuffle_size: 1000
193
+ sort: !name:cosyvoice.dataset.processor.sort
194
+ sort_size: 500 # sort_size should be less than shuffle_size
195
+ batch: !name:cosyvoice.dataset.processor.batch
196
+ batch_type: 'dynamic'
197
+ max_frames_in_batch: 2000 # change to 1400 in gan train on v100 16g
198
+ padding: !name:cosyvoice.dataset.processor.padding
199
+ use_spk_embedding: False # change to True during sft
200
+
201
+ # dataset processor pipeline
202
+ data_pipeline: [
203
+ !ref <parquet_opener>,
204
+ !ref <tokenize>,
205
+ !ref <filter>,
206
+ !ref <resample>,
207
+ !ref <compute_fbank>,
208
+ !ref <parse_embedding>,
209
+ !ref <shuffle>,
210
+ !ref <sort>,
211
+ !ref <batch>,
212
+ !ref <padding>,
213
+ ]
214
+ data_pipeline_gan: [
215
+ !ref <parquet_opener>,
216
+ !ref <tokenize>,
217
+ !ref <filter>,
218
+ !ref <resample>,
219
+ !ref <truncate>,
220
+ !ref <compute_fbank>,
221
+ !ref <compute_f0>,
222
+ !ref <parse_embedding>,
223
+ !ref <shuffle>,
224
+ !ref <sort>,
225
+ !ref <batch>,
226
+ !ref <padding>,
227
+ ]
228
+
229
+ # llm flow train conf
230
+ train_conf:
231
+ optim: adam
232
+ optim_conf:
233
+ lr: 0.001 # change to 1e-5 during sft
234
+ scheduler: warmuplr # change to constantlr during sft
235
+ scheduler_conf:
236
+ warmup_steps: 2500
237
+ max_epoch: 200
238
+ grad_clip: 5
239
+ accum_grad: 2
240
+ log_interval: 100
241
+ save_per_step: -1
242
+
243
+ # gan train conf
244
+ train_conf_gan:
245
+ optim: adam
246
+ optim_conf:
247
+ lr: 0.0002 # use small lr for gan training
248
+ scheduler: constantlr
249
+ optim_d: adam
250
+ optim_conf_d:
251
+ lr: 0.0002 # use small lr for gan training
252
+ scheduler_d: constantlr
253
+ max_epoch: 200
254
+ grad_clip: 5
255
+ accum_grad: 1 # in gan training, accum_grad must be 1
256
+ log_interval: 100
257
+ save_per_step: -1
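A quick arithmetic check on the dynamic batching settings in this yaml (this is just a computation on the values above, not repo code): with hop_size 256 at 22050 Hz, max_frames_in_batch 2000 is roughly 23 seconds of audio per batch, the 1400 suggested for gan training on a 16 GB V100 is about 16 seconds, and the 12000 used in the from-scratch recipe is about 139 seconds.

# Rough batch-size arithmetic using values from this yaml.
sample_rate = 22050
hop_size = 256
for max_frames_in_batch in (2000, 1400, 12000):
    seconds = max_frames_in_batch * hop_size / sample_rate
    print(max_frames_in_batch, 'frames ->', round(seconds, 1), 's of audio per dynamic batch')
# 2000 -> 23.2 s, 1400 -> 16.3 s, 12000 -> 139.3 s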
examples/libritts/cosyvoice/conf/ds_stage2.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "train_micro_batch_size_per_gpu": 1,
3
+ "gradient_accumulation_steps": 1,
4
+ "steps_per_print": 100,
5
+ "gradient_clipping": 5,
6
+ "fp16": {
7
+ "enabled": false,
8
+ "auto_cast": false,
9
+ "loss_scale": 0,
10
+ "initial_scale_power": 16,
11
+ "loss_scale_window": 256,
12
+ "hysteresis": 2,
13
+ "consecutive_hysteresis": false,
14
+ "min_loss_scale": 1
15
+ },
16
+ "bf16": {
17
+ "enabled": false
18
+ },
19
+ "zero_force_ds_cpu_optimizer": false,
20
+ "zero_optimization": {
21
+ "stage": 2,
22
+ "offload_optimizer": {
23
+ "device": "none",
24
+ "pin_memory": true
25
+ },
26
+ "allgather_partitions": true,
27
+ "allgather_bucket_size": 5e8,
28
+ "overlap_comm": false,
29
+ "reduce_scatter": true,
30
+ "reduce_bucket_size": 5e8,
31
+ "contiguous_gradients" : true
32
+ },
33
+ "optimizer": {
34
+ "type": "AdamW",
35
+ "params": {
36
+ "lr": 0.001,
37
+ "weight_decay": 0.0001,
38
+ "torch_adam": true,
39
+ "adam_w_mode": true
40
+ }
41
+ }
42
+ }
examples/libritts/cosyvoice/local/download_and_untar.sh ADDED
@@ -0,0 +1,97 @@
1
+ #!/bin/bash
2
+
3
+ # Copyright 2014 Johns Hopkins University (author: Daniel Povey)
4
+ # Apache 2.0
5
+
6
+ remove_archive=false
7
+
8
+ if [ "$1" == --remove-archive ]; then
9
+ remove_archive=true
10
+ shift
11
+ fi
12
+
13
+ if [ $# -ne 3 ]; then
14
+ echo "Usage: $0 [--remove-archive] <data-base> <url-base> <corpus-part>"
15
+ echo "e.g.: $0 /export/a15/vpanayotov/data www.openslr.org/resources/11 dev-clean"
16
+ echo "With --remove-archive it will remove the archive after successfully un-tarring it."
17
+ echo "<corpus-part> can be one of: dev-clean, test-clean, dev-other, test-other,"
18
+ echo " train-clean-100, train-clean-360, train-other-500."
19
+ exit 1
20
+ fi
21
+
22
+ data=$1
23
+ url=$2
24
+ part=$3
25
+
26
+ if [ ! -d "$data" ]; then
27
+ echo "$0: no such directory $data"
28
+ exit 1
29
+ fi
30
+
31
+ part_ok=false
32
+ list="dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500"
33
+ for x in $list; do
34
+ if [ "$part" == $x ]; then part_ok=true; fi
35
+ done
36
+ if ! $part_ok; then
37
+ echo "$0: expected <corpus-part> to be one of $list, but got '$part'"
38
+ exit 1
39
+ fi
40
+
41
+ if [ -z "$url" ]; then
42
+ echo "$0: empty URL base."
43
+ exit 1
44
+ fi
45
+
46
+ if [ -f $data/LibriTTS/$part/.complete ]; then
47
+ echo "$0: data part $part was already successfully extracted, nothing to do."
48
+ exit 0
49
+ fi
50
+
51
+
52
+ # sizes of the archive files in bytes. These are from some older versions.
53
+ sizes_old="371012589 347390293 379743611 361838298 6420417880 23082659865 30626749128"
54
+ # sizes_new is the archive file sizes of the final release. Some of these sizes are of
55
+ # things we probably won't download.
56
+ sizes_new="337926286 314305928 695964615 297279345 87960560420 33373768 346663984 328757843 6387309499 23049477885 30593501606"
57
+
58
+ if [ -f $data/$part.tar.gz ]; then
59
+ size=$(/bin/ls -l $data/$part.tar.gz | awk '{print $5}')
60
+ size_ok=false
61
+ for s in $sizes_old $sizes_new; do if [ $s == $size ]; then size_ok=true; fi; done
62
+ if ! $size_ok; then
63
+ echo "$0: removing existing file $data/$part.tar.gz because its size in bytes $size"
64
+ echo "does not equal the size of one of the archives."
65
+ rm $data/$part.tar.gz
66
+ else
67
+ echo "$data/$part.tar.gz exists and appears to be complete."
68
+ fi
69
+ fi
70
+
71
+ if [ ! -f $data/$part.tar.gz ]; then
72
+ if ! which wget >/dev/null; then
73
+ echo "$0: wget is not installed."
74
+ exit 1
75
+ fi
76
+ full_url=$url/$part.tar.gz
77
+ echo "$0: downloading data from $full_url. This may take some time, please be patient."
78
+
79
+ if ! wget -P $data --no-check-certificate $full_url; then
80
+ echo "$0: error executing wget $full_url"
81
+ exit 1
82
+ fi
83
+ fi
84
+
85
+ if ! tar -C $data -xvzf $data/$part.tar.gz; then
86
+ echo "$0: error un-tarring archive $data/$part.tar.gz"
87
+ exit 1
88
+ fi
89
+
90
+ touch $data/LibriTTS/$part/.complete
91
+
92
+ echo "$0: Successfully downloaded and un-tarred $data/$part.tar.gz"
93
+
94
+ if $remove_archive; then
95
+ echo "$0: removing $data/$part.tar.gz file since --remove-archive option was supplied."
96
+ rm $data/$part.tar.gz
97
+ fi
examples/libritts/cosyvoice/local/prepare_data.py ADDED
@@ -0,0 +1,53 @@
1
+ import argparse
2
+ import logging
3
+ import glob
4
+ import os
5
+ from tqdm import tqdm
6
+
7
+
8
+ logger = logging.getLogger()
9
+
10
+
11
+ def main():
12
+ wavs = list(glob.glob('{}/*/*/*wav'.format(args.src_dir)))
13
+
14
+ utt2wav, utt2text, utt2spk, spk2utt = {}, {}, {}, {}
15
+ for wav in tqdm(wavs):
16
+ txt = wav.replace('.wav', '.normalized.txt')
17
+ if not os.path.exists(txt):
18
+ logger.warning('{} does not exist'.format(txt))
19
+ continue
20
+ with open(txt) as f:
21
+ content = ' '.join(l.strip() for l in f.readlines())
22
+ utt = os.path.basename(wav).replace('.wav', '')
23
+ spk = utt.split('_')[0]
24
+ utt2wav[utt] = wav
25
+ utt2text[utt] = content
26
+ utt2spk[utt] = spk
27
+ if spk not in spk2utt:
28
+ spk2utt[spk] = []
29
+ spk2utt[spk].append(utt)
30
+
31
+ with open('{}/wav.scp'.format(args.des_dir), 'w') as f:
32
+ for k, v in utt2wav.items():
33
+ f.write('{} {}\n'.format(k, v))
34
+ with open('{}/text'.format(args.des_dir), 'w') as f:
35
+ for k, v in utt2text.items():
36
+ f.write('{} {}\n'.format(k, v))
37
+ with open('{}/utt2spk'.format(args.des_dir), 'w') as f:
38
+ for k, v in utt2spk.items():
39
+ f.write('{} {}\n'.format(k, v))
40
+ with open('{}/spk2utt'.format(args.des_dir), 'w') as f:
41
+ for k, v in spk2utt.items():
42
+ f.write('{} {}\n'.format(k, ' '.join(v)))
43
+ return
44
+
45
+
46
+ if __name__ == "__main__":
47
+ parser = argparse.ArgumentParser()
48
+ parser.add_argument('--src_dir',
49
+ type=str)
50
+ parser.add_argument('--des_dir',
51
+ type=str)
52
+ args = parser.parse_args()
53
+ main()
examples/libritts/cosyvoice/path.sh ADDED
@@ -0,0 +1,3 @@
+ # NOTE(kan-bayashi): Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
+ export PYTHONIOENCODING=UTF-8
+ export PYTHONPATH=../../../:../../../third_party/Matcha-TTS:$PYTHONPATH
examples/libritts/cosyvoice/run.sh ADDED
@@ -0,0 +1,126 @@
1
+ #!/bin/bash
2
+ # Copyright 2024 Alibaba Inc. All Rights Reserved.
3
+ . ./path.sh || exit 1;
4
+
5
+ stage=-1
6
+ stop_stage=3
7
+
8
+ data_url=www.openslr.org/resources/60
9
+ data_dir=/mnt/lyuxiang.lx/data/tts/openslr/libritts
10
+ pretrained_model_dir=../../../pretrained_models/CosyVoice-300M
11
+
12
+ if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
13
+ echo "Data Download"
14
+ for part in dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500; do
15
+ local/download_and_untar.sh ${data_dir} ${data_url} ${part}
16
+ done
17
+ fi
18
+
19
+ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
20
+ echo "Data preparation, prepare wav.scp/text/utt2spk/spk2utt"
21
+ for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
22
+ mkdir -p data/$x
23
+ python local/prepare_data.py --src_dir $data_dir/LibriTTS/$x --des_dir data/$x
24
+ done
25
+ fi
26
+
27
+ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
28
+ echo "Extract campplus speaker embedding, you will get spk2embedding.pt and utt2embedding.pt in data/$x dir"
29
+ for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
30
+ tools/extract_embedding.py --dir data/$x \
31
+ --onnx_path $pretrained_model_dir/campplus.onnx
32
+ done
33
+ fi
34
+
35
+ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
36
+ echo "Extract discrete speech token, you will get utt2speech_token.pt in data/$x dir"
37
+ for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
38
+ tools/extract_speech_token.py --dir data/$x \
39
+ --onnx_path $pretrained_model_dir/speech_tokenizer_v1.onnx
40
+ done
41
+ fi
42
+
43
+ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
44
+ echo "Prepare required parquet format data, you should have prepared wav.scp/text/utt2spk/spk2utt/utt2embedding.pt/spk2embedding.pt/utt2speech_token.pt"
45
+ for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
46
+ mkdir -p data/$x/parquet
47
+ tools/make_parquet_list.py --num_utts_per_parquet 1000 \
48
+ --num_processes 10 \
49
+ --src_dir data/$x \
50
+ --des_dir data/$x/parquet
51
+ done
52
+ fi
53
+
54
+ # inference
55
+ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
56
+ echo "Run inference. Please make sure utt in tts_text is in prompt_data"
57
+ for mode in sft zero_shot; do
58
+ python cosyvoice/bin/inference.py --mode $mode \
59
+ --gpu 0 \
60
+ --config conf/cosyvoice.yaml \
61
+ --prompt_data data/test-clean/parquet/data.list \
62
+ --prompt_utt2data data/test-clean/parquet/utt2data.list \
63
+ --tts_text `pwd`/tts_text.json \
64
+ --llm_model $pretrained_model_dir/llm.pt \
65
+ --flow_model $pretrained_model_dir/flow.pt \
66
+ --hifigan_model $pretrained_model_dir/hift.pt \
67
+ --result_dir `pwd`/exp/cosyvoice/test-clean/$mode
68
+ done
69
+ fi
70
+
71
+ # train llm
72
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
73
+ num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
74
+ job_id=1986
75
+ dist_backend="nccl"
76
+ num_workers=2
77
+ prefetch=100
78
+ train_engine=torch_ddp
79
+ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
80
+ echo "Run train. We only support llm traning for now. If your want to train from scratch, please use conf/cosyvoice.fromscratch.yaml"
81
+ if [ $train_engine == 'deepspeed' ]; then
82
+ echo "Notice deepspeed has its own optimizer config. Modify conf/ds_stage2.json if necessary"
83
+ fi
84
+ cat data/{train-clean-100,train-clean-360,train-other-500}/parquet/data.list > data/train.data.list
85
+ cat data/{dev-clean,dev-other}/parquet/data.list > data/dev.data.list
86
+ for model in llm flow hifigan; do
87
+ torchrun --nnodes=1 --nproc_per_node=$num_gpus \
88
+ --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint="localhost:1234" \
89
+ cosyvoice/bin/train.py \
90
+ --train_engine $train_engine \
91
+ --config conf/cosyvoice.yaml \
92
+ --train_data data/train.data.list \
93
+ --cv_data data/dev.data.list \
94
+ --model $model \
95
+ --checkpoint $pretrained_model_dir/$model.pt \
96
+ --model_dir `pwd`/exp/cosyvoice/$model/$train_engine \
97
+ --tensorboard_dir `pwd`/tensorboard/cosyvoice/$model/$train_engine \
98
+ --ddp.dist_backend $dist_backend \
99
+ --num_workers ${num_workers} \
100
+ --prefetch ${prefetch} \
101
+ --pin_memory \
102
+ --use_amp \
103
+ --deepspeed_config ./conf/ds_stage2.json \
104
+ --deepspeed.save_states model+optimizer
105
+ done
106
+ fi
107
+
108
+ # average model
109
+ average_num=5
110
+ if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
111
+ for model in llm flow hifigan; do
112
+ decode_checkpoint=`pwd`/exp/cosyvoice/$model/$train_engine/${model}.pt
113
+ echo "do model average and final checkpoint is $decode_checkpoint"
114
+ python cosyvoice/bin/average_model.py \
115
+ --dst_model $decode_checkpoint \
116
+ --src_path `pwd`/exp/cosyvoice/$model/$train_engine \
117
+ --num ${average_num} \
118
+ --val_best
119
+ done
120
+ fi
121
+
122
+ if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
123
+ echo "Export your model for inference speedup. Remember copy your llm or flow model to model_dir"
124
+ python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
125
+ python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
126
+ fi
examples/libritts/cosyvoice/tts_text.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "1089_134686_000002_000000": [
+ "hello, my name is Jack. What is your name?"
+ ]
+ }
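tts_text.json maps a prompt utterance id (which must also appear in the --prompt_data parquet list used at inference stage 4 of run.sh) to a list of sentences to synthesize. A small sketch of reading it is shown below; how cosyvoice/bin/inference.py actually iterates over it is assumed from the stage-4 comment, not shown in this excerpt.

# Sketch: iterate over the tts_text.json payload used at the inference stage.
import json

with open('tts_text.json') as f:
    tts_text = json.load(f)

for utt, texts in tts_text.items():      # key: prompt utterance id
    for i, text in enumerate(texts):     # value: one or more target sentences
        print('{}_{}'.format(utt, i), text)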
examples/magicdata-read/cosyvoice/path.sh ADDED
@@ -0,0 +1,3 @@
+ # NOTE(kan-bayashi): Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
+ export PYTHONIOENCODING=UTF-8
+ export PYTHONPATH=../../../:../../../third_party/Matcha-TTS:$PYTHONPATH
examples/magicdata-read/cosyvoice/run.sh ADDED
@@ -0,0 +1,111 @@
1
+ #!/bin/bash
2
+ # Copyright 2024 Alibaba Inc. All Rights Reserved.
3
+ . ./path.sh || exit 1;
4
+
5
+ stage=-1
6
+ stop_stage=3
7
+
8
+ data_url=www.openslr.org/resources/68
9
+ data_dir=/mnt/hengwu.zty/data/tts/openslr/magicdata-read
10
+ pretrained_model_dir=../../../pretrained_models/CosyVoice-300M
11
+
12
+ if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
13
+ echo "Data Download"
14
+ for part in dev_set test_set train_set; do
15
+ local/download_and_untar.sh ${data_dir} ${data_url} ${part}
16
+ done
17
+ fi
18
+
19
+ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
20
+ echo "Data preparation, prepare wav.scp/text/utt2spk/spk2utt"
21
+ for x in dev test train; do
22
+ mkdir -p data/$x
23
+ python local/prepare_data.py --src_dir $data_dir/$x --des_dir data/$x
24
+ done
25
+ fi
26
+
27
+ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
28
+ echo "Extract campplus speaker embedding, you will get spk2embedding.pt and utt2embedding.pt in data/$x dir"
29
+ for x in dev test train; do
30
+ tools/extract_embedding.py --dir data/$x \
31
+ --onnx_path $pretrained_model_dir/campplus.onnx
32
+ done
33
+ fi
34
+
35
+ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
36
+ echo "Extract discrete speech token, you will get utt2speech_token.pt in data/$x dir"
37
+ for x in dev test train; do
38
+ tools/extract_speech_token.py --dir data/$x \
39
+ --onnx_path $pretrained_model_dir/speech_tokenizer_v1.onnx
40
+ done
41
+ fi
42
+
43
+ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
44
+ echo "Prepare required parquet format data, you should have prepared wav.scp/text/utt2spk/spk2utt/utt2embedding.pt/spk2embedding.pt/utt2speech_token.pt"
45
+ for x in dev test train; do
46
+ mkdir -p data/$x/parquet
47
+ tools/make_parquet_list.py --num_utts_per_parquet 1000 \
48
+ --num_processes 10 \
49
+ --src_dir data/$x \
50
+ --des_dir data/$x/parquet
51
+ done
52
+ fi
53
+
54
+ # inference
55
+ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
56
+ echo "Run inference. Please make sure utt in tts_text is in prompt_data"
57
+ for mode in sft zero_shot; do
58
+ python cosyvoice/bin/inference.py --mode $mode \
59
+ --gpu 0 \
60
+ --config conf/cosyvoice.yaml \
61
+ --prompt_data data/test/parquet/data.list \
62
+ --prompt_utt2data data/test/parquet/utt2data.list \
63
+ --tts_text `pwd`/tts_text.json \
64
+ --llm_model $pretrained_model_dir/llm.pt \
65
+ --flow_model $pretrained_model_dir/flow.pt \
66
+ --hifigan_model $pretrained_model_dir/hift.pt \
67
+ --result_dir `pwd`/exp/cosyvoice/test/$mode
68
+ done
69
+ fi
70
+
71
+ # train llm
72
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
73
+ num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
74
+ job_id=1986
75
+ dist_backend="nccl"
76
+ num_workers=2
77
+ prefetch=100
78
+ train_engine=torch_ddp
79
+ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
80
+ echo "Run train. We only support llm traning for now. If your want to train from scratch, please use conf/cosyvoice.fromscratch.yaml"
81
+ if [ $train_engine == 'deepspeed' ]; then
82
+ echo "Notice deepspeed has its own optimizer config. Modify conf/ds_stage2.json if necessary"
83
+ fi
84
+ cp data/train/parquet/data.list data/train.data.list
85
+ cp data/dev/parquet/data.list data/dev.data.list
86
+ for model in llm flow; do
87
+ torchrun --nnodes=1 --nproc_per_node=$num_gpus \
88
+ --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint="localhost:0" \
89
+ cosyvoice/bin/train.py \
90
+ --train_engine $train_engine \
91
+ --config conf/cosyvoice.yaml \
92
+ --train_data data/train.data.list \
93
+ --cv_data data/dev.data.list \
94
+ --model $model \
95
+ --checkpoint $pretrained_model_dir/$model.pt \
96
+ --model_dir `pwd`/exp/cosyvoice/$model/$train_engine \
97
+ --tensorboard_dir `pwd`/tensorboard/cosyvoice/$model/$train_engine \
98
+ --ddp.dist_backend $dist_backend \
99
+ --num_workers ${num_workers} \
100
+ --prefetch ${prefetch} \
101
+ --pin_memory \
102
+ --deepspeed_config ./conf/ds_stage2.json \
103
+ --deepspeed.save_states model+optimizer
104
+ done
105
+ fi
106
+
107
+ if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
108
+ echo "Export your model for inference speedup. Remember copy your llm or flow model to model_dir"
109
+ python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
110
+ python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
111
+ fi
runtime/python/grpc/.ipynb_checkpoints/client-checkpoint.py ADDED
@@ -0,0 +1,106 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ import sys
16
+ ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
17
+ sys.path.append('{}/../../..'.format(ROOT_DIR))
18
+ sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
19
+ import logging
20
+ import argparse
21
+ import torchaudio
22
+ import cosyvoice_pb2
23
+ import cosyvoice_pb2_grpc
24
+ import grpc
25
+ import torch
26
+ import numpy as np
27
+ from cosyvoice.utils.file_utils import load_wav
28
+
29
+
30
+ def main():
31
+ with grpc.insecure_channel("{}:{}".format(args.host, args.port)) as channel:
32
+ stub = cosyvoice_pb2_grpc.CosyVoiceStub(channel)
33
+ request = cosyvoice_pb2.Request()
34
+ if args.mode == 'sft':
35
+ logging.info('send sft request')
36
+ sft_request = cosyvoice_pb2.sftRequest()
37
+ sft_request.spk_id = args.spk_id
38
+ sft_request.tts_text = args.tts_text
39
+ request.sft_request.CopyFrom(sft_request)
40
+ elif args.mode == 'zero_shot':
41
+ logging.info('send zero_shot request')
42
+ zero_shot_request = cosyvoice_pb2.zeroshotRequest()
43
+ zero_shot_request.tts_text = args.tts_text
44
+ zero_shot_request.prompt_text = args.prompt_text
45
+ prompt_speech = load_wav(args.prompt_wav, 16000)
46
+ zero_shot_request.prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
47
+ request.zero_shot_request.CopyFrom(zero_shot_request)
48
+ elif args.mode == 'cross_lingual':
49
+ logging.info('send cross_lingual request')
50
+ cross_lingual_request = cosyvoice_pb2.crosslingualRequest()
51
+ cross_lingual_request.tts_text = args.tts_text
52
+ prompt_speech = load_wav(args.prompt_wav, 16000)
53
+ cross_lingual_request.prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
54
+ request.cross_lingual_request.CopyFrom(cross_lingual_request)
55
+ else:
56
+ logging.info('send instruct request')
57
+ instruct_request = cosyvoice_pb2.instructRequest()
58
+ instruct_request.tts_text = args.tts_text
59
+ instruct_request.spk_id = args.spk_id
60
+ instruct_request.instruct_text = args.instruct_text
61
+ request.instruct_request.CopyFrom(instruct_request)
62
+
63
+ response = stub.Inference(request)
64
+ tts_audio = b''
65
+ for r in response:
66
+ tts_audio += r.tts_audio
67
+ tts_speech = torch.from_numpy(np.array(np.frombuffer(tts_audio, dtype=np.int16))).unsqueeze(dim=0)
68
+ logging.info('save response to {}'.format(args.tts_wav))
69
+ torchaudio.save(args.tts_wav, tts_speech, target_sr)
70
+ logging.info('get response')
71
+
72
+
73
+ if __name__ == "__main__":
74
+ parser = argparse.ArgumentParser()
75
+ parser.add_argument('--host',
76
+ type=str,
77
+ default='0.0.0.0')
78
+ parser.add_argument('--port',
79
+ type=int,
80
+ default='50000')
81
+ parser.add_argument('--mode',
82
+ default='sft',
83
+ choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
84
+ help='request mode')
85
+ parser.add_argument('--tts_text',
86
+ type=str,
87
+ default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?')
88
+ parser.add_argument('--spk_id',
89
+ type=str,
90
+ default='中文女')
91
+ parser.add_argument('--prompt_text',
92
+ type=str,
93
+ default='希望你以后能够做的比我还好呦。')
94
+ parser.add_argument('--prompt_wav',
95
+ type=str,
96
+ default='../../../asset/zero_shot_prompt.wav')
97
+ parser.add_argument('--instruct_text',
98
+ type=str,
99
+ default='Theo \'Crimson\', is a fiery, passionate rebel leader. \
100
+ Fights with fervor for justice, but struggles with impulsiveness.')
101
+ parser.add_argument('--tts_wav',
102
+ type=str,
103
+ default='demo.wav')
104
+ args = parser.parse_args()
105
+ prompt_sr, target_sr = 16000, 22050
106
+ main()
runtime/python/grpc/.ipynb_checkpoints/cosyvoice_pb2-checkpoint.py ADDED
@@ -0,0 +1,39 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: cosyvoice.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf import descriptor as _descriptor
6
+ from google.protobuf import descriptor_pool as _descriptor_pool
7
+ from google.protobuf import symbol_database as _symbol_database
8
+ from google.protobuf.internal import builder as _builder
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+
15
+
16
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0f\x63osyvoice.proto\x12\tcosyvoice\"\xfb\x01\n\x07Request\x12,\n\x0bsft_request\x18\x01 \x01(\x0b\x32\x15.cosyvoice.sftRequestH\x00\x12\x37\n\x11zero_shot_request\x18\x02 \x01(\x0b\x32\x1a.cosyvoice.zeroshotRequestH\x00\x12?\n\x15\x63ross_lingual_request\x18\x03 \x01(\x0b\x32\x1e.cosyvoice.crosslingualRequestH\x00\x12\x36\n\x10instruct_request\x18\x04 \x01(\x0b\x32\x1a.cosyvoice.instructRequestH\x00\x42\x10\n\x0eRequestPayload\".\n\nsftRequest\x12\x0e\n\x06spk_id\x18\x01 \x01(\t\x12\x10\n\x08tts_text\x18\x02 \x01(\t\"N\n\x0fzeroshotRequest\x12\x10\n\x08tts_text\x18\x01 \x01(\t\x12\x13\n\x0bprompt_text\x18\x02 \x01(\t\x12\x14\n\x0cprompt_audio\x18\x03 \x01(\x0c\"=\n\x13\x63rosslingualRequest\x12\x10\n\x08tts_text\x18\x01 \x01(\t\x12\x14\n\x0cprompt_audio\x18\x02 \x01(\x0c\"J\n\x0finstructRequest\x12\x10\n\x08tts_text\x18\x01 \x01(\t\x12\x0e\n\x06spk_id\x18\x02 \x01(\t\x12\x15\n\rinstruct_text\x18\x03 \x01(\t\"\x1d\n\x08Response\x12\x11\n\ttts_audio\x18\x01 \x01(\x0c\x32\x45\n\tCosyVoice\x12\x38\n\tInference\x12\x12.cosyvoice.Request\x1a\x13.cosyvoice.Response\"\x00\x30\x01\x42\tZ\x07protos/b\x06proto3')
17
+
18
+ _globals = globals()
19
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
20
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'cosyvoice_pb2', _globals)
21
+ if _descriptor._USE_C_DESCRIPTORS == False:
22
+
23
+ DESCRIPTOR._options = None
24
+ DESCRIPTOR._serialized_options = b'Z\007protos/'
25
+ _globals['_REQUEST']._serialized_start=31
26
+ _globals['_REQUEST']._serialized_end=282
27
+ _globals['_SFTREQUEST']._serialized_start=284
28
+ _globals['_SFTREQUEST']._serialized_end=330
29
+ _globals['_ZEROSHOTREQUEST']._serialized_start=332
30
+ _globals['_ZEROSHOTREQUEST']._serialized_end=410
31
+ _globals['_CROSSLINGUALREQUEST']._serialized_start=412
32
+ _globals['_CROSSLINGUALREQUEST']._serialized_end=473
33
+ _globals['_INSTRUCTREQUEST']._serialized_start=475
34
+ _globals['_INSTRUCTREQUEST']._serialized_end=549
35
+ _globals['_RESPONSE']._serialized_start=551
36
+ _globals['_RESPONSE']._serialized_end=580
37
+ _globals['_COSYVOICE']._serialized_start=582
38
+ _globals['_COSYVOICE']._serialized_end=651
39
+ # @@protoc_insertion_point(module_scope)
runtime/python/grpc/.ipynb_checkpoints/cosyvoice_pb2_grpc-checkpoint.py ADDED
@@ -0,0 +1,66 @@
1
+ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2
+ """Client and server classes corresponding to protobuf-defined services."""
3
+ import grpc
4
+
5
+ import cosyvoice_pb2 as cosyvoice__pb2
6
+
7
+
8
+ class CosyVoiceStub(object):
9
+ """Missing associated documentation comment in .proto file."""
10
+
11
+ def __init__(self, channel):
12
+ """Constructor.
13
+
14
+ Args:
15
+ channel: A grpc.Channel.
16
+ """
17
+ self.Inference = channel.unary_stream(
18
+ '/cosyvoice.CosyVoice/Inference',
19
+ request_serializer=cosyvoice__pb2.Request.SerializeToString,
20
+ response_deserializer=cosyvoice__pb2.Response.FromString,
21
+ )
22
+
23
+
24
+ class CosyVoiceServicer(object):
25
+ """Missing associated documentation comment in .proto file."""
26
+
27
+ def Inference(self, request, context):
28
+ """Missing associated documentation comment in .proto file."""
29
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
30
+ context.set_details('Method not implemented!')
31
+ raise NotImplementedError('Method not implemented!')
32
+
33
+
34
+ def add_CosyVoiceServicer_to_server(servicer, server):
35
+ rpc_method_handlers = {
36
+ 'Inference': grpc.unary_stream_rpc_method_handler(
37
+ servicer.Inference,
38
+ request_deserializer=cosyvoice__pb2.Request.FromString,
39
+ response_serializer=cosyvoice__pb2.Response.SerializeToString,
40
+ ),
41
+ }
42
+ generic_handler = grpc.method_handlers_generic_handler(
43
+ 'cosyvoice.CosyVoice', rpc_method_handlers)
44
+ server.add_generic_rpc_handlers((generic_handler,))
45
+
46
+
47
+ # This class is part of an EXPERIMENTAL API.
48
+ class CosyVoice(object):
49
+ """Missing associated documentation comment in .proto file."""
50
+
51
+ @staticmethod
52
+ def Inference(request,
53
+ target,
54
+ options=(),
55
+ channel_credentials=None,
56
+ call_credentials=None,
57
+ insecure=False,
58
+ compression=None,
59
+ wait_for_ready=None,
60
+ timeout=None,
61
+ metadata=None):
62
+ return grpc.experimental.unary_stream(request, target, '/cosyvoice.CosyVoice/Inference',
63
+ cosyvoice__pb2.Request.SerializeToString,
64
+ cosyvoice__pb2.Response.FromString,
65
+ options, channel_credentials,
66
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
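The generated stub above exposes add_CosyVoiceServicer_to_server and a streaming Inference RPC, which the client script earlier in this commit consumes. Below is a minimal server-side sketch; the real runtime/python/grpc/server.py is not part of this excerpt, so the body of Inference (running CosyVoice and chunking int16 audio) is only illustrative.

# Minimal illustrative server for the streaming Inference RPC defined in cosyvoice.proto.
from concurrent import futures
import grpc
import cosyvoice_pb2
import cosyvoice_pb2_grpc


class CosyVoiceServicer(cosyvoice_pb2_grpc.CosyVoiceServicer):
    def Inference(self, request, context):
        # A real servicer would dispatch on request.WhichOneof('RequestPayload'),
        # run CosyVoice, and stream int16 PCM chunks; here we just yield silence.
        for _ in range(3):
            yield cosyvoice_pb2.Response(tts_audio=b'\x00\x00' * 1024)


def serve(port=50000):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    cosyvoice_pb2_grpc.add_CosyVoiceServicer_to_server(CosyVoiceServicer(), server)
    server.add_insecure_port('0.0.0.0:{}'.format(port))
    server.start()
    server.wait_for_termination()


if __name__ == '__main__':
    serve()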
runtime/python/grpc/__pycache__/cosyvoice_pb2.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
third_party/Matcha-TTS/configs/callbacks/model_checkpoint.yaml ADDED
@@ -0,0 +1,17 @@
+ # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html
+
+ model_checkpoint:
+ _target_: lightning.pytorch.callbacks.ModelCheckpoint
+ dirpath: ${paths.output_dir}/checkpoints # directory to save the model file
+ filename: checkpoint_{epoch:03d} # checkpoint filename
+ monitor: epoch # name of the logged metric which determines when model is improving
+ verbose: False # verbosity mode
+ save_last: true # additionally always save an exact copy of the last checkpoint to a file last.ckpt
+ save_top_k: 10 # save k best models (determined by above metric)
+ mode: "max" # "max" means higher metric value is better, can be also "min"
+ auto_insert_metric_name: True # when True, the checkpoints filenames will contain the metric name
+ save_weights_only: False # if True, then only the model’s weights will be saved
+ every_n_train_steps: null # number of training steps between checkpoints
+ train_time_interval: null # checkpoints are monitored at the specified time interval
+ every_n_epochs: 100 # number of epochs between checkpoints
+ save_on_train_epoch_end: null # whether to run checkpointing at the end of the training epoch or the end of validation
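These Matcha-TTS callback and logger yamls follow the Hydra _target_ convention, so the training entry point turns each node into a live object via hydra.utils.instantiate. The snippet below is a sketch under that assumption, loaded outside the full Hydra run; the ${paths.output_dir} interpolation is overridden because the rest of the config tree is not present here.

# Sketch: instantiating the ModelCheckpoint callback config standalone.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load('configs/callbacks/model_checkpoint.yaml')
# dirpath override replaces the ${paths.output_dir} interpolation for standalone use
checkpoint_cb = instantiate(cfg.model_checkpoint, dirpath='logs/checkpoints')
print(type(checkpoint_cb))  # lightning.pytorch.callbacks.ModelCheckpoint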
third_party/Matcha-TTS/configs/callbacks/model_summary.yaml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
+ # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html
+
+ model_summary:
+ _target_: lightning.pytorch.callbacks.RichModelSummary
+ max_depth: 3 # the maximum depth of layer nesting that the summary will include
third_party/Matcha-TTS/configs/logger/comet.yaml ADDED
@@ -0,0 +1,12 @@
+ # https://www.comet.ml
+
+ comet:
+ _target_: lightning.pytorch.loggers.comet.CometLogger
+ api_key: ${oc.env:COMET_API_TOKEN} # api key is loaded from environment variable
+ save_dir: "${paths.output_dir}"
+ project_name: "lightning-hydra-template"
+ rest_api_key: null
+ # experiment_name: ""
+ experiment_key: null # set to resume experiment
+ offline: False
+ prefix: ""
third_party/Matcha-TTS/configs/logger/csv.yaml ADDED
@@ -0,0 +1,7 @@
+ # csv logger built in lightning
+
+ csv:
+ _target_: lightning.pytorch.loggers.csv_logs.CSVLogger
+ save_dir: "${paths.output_dir}"
+ name: "csv/"
+ prefix: ""
third_party/Matcha-TTS/configs/logger/many_loggers.yaml ADDED
@@ -0,0 +1,9 @@
+ # train with many loggers at once
+
+ defaults:
+ # - comet
+ - csv
+ # - mlflow
+ # - neptune
+ - tensorboard
+ - wandb
third_party/Matcha-TTS/configs/logger/mlflow.yaml ADDED
@@ -0,0 +1,12 @@
+ # https://mlflow.org
+
+ mlflow:
+ _target_: lightning.pytorch.loggers.mlflow.MLFlowLogger
+ # experiment_name: ""
+ # run_name: ""
+ tracking_uri: ${paths.log_dir}/mlflow/mlruns # run `mlflow ui` command inside the `logs/mlflow/` dir to open the UI
+ tags: null
+ # save_dir: "./mlruns"
+ prefix: ""
+ artifact_location: null
+ # run_id: ""