From 886029845cb1a797ceeb2942c70a8ce3691063da Mon Sep 17 00:00:00 2001 From: "T.X. Xie" <93981430+imxtx@users.noreply.github.com> Date: Wed, 30 Oct 2024 23:33:48 +0800 Subject: [PATCH] Fix device mismatch when there are multiple GPUs. The cuda() function creates the tensor on the default CUDA device, which may differ from the device of the tensor 'inverse_transform'. --- PyTorch/SpeechSynthesis/FastPitch/common/stft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PyTorch/SpeechSynthesis/FastPitch/common/stft.py b/PyTorch/SpeechSynthesis/FastPitch/common/stft.py index bc140c142..6a6cfc567 100644 --- a/PyTorch/SpeechSynthesis/FastPitch/common/stft.py +++ b/PyTorch/SpeechSynthesis/FastPitch/common/stft.py @@ -123,7 +123,7 @@ def inverse(self, magnitude, phase): np.where(window_sum > tiny(window_sum))[0]) window_sum = torch.autograd.Variable( torch.from_numpy(window_sum), requires_grad=False) - window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum + window_sum = window_sum.cuda(inverse_transform.get_device()) if magnitude.is_cuda else window_sum inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices] # scale by hop ratio