# Block until every dispatched task has finished and gather the results.
predictions = ray.get(prediction_futures)
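ray.get blocks until every future has resolved. If results should be consumed as they complete instead, ray.wait hands back finished futures one at a time; a minimal sketch (note this loses the positional alignment with ground_truths, so the blocking ray.get above is simpler when order matters):

# Sketch: drain results as tasks finish rather than blocking on all of them.
predictions, remaining = [], list(prediction_futures)
while remaining:
    done, remaining = ray.wait(remaining, num_returns=1)  # pop one finished ref
    predictions.extend(ray.get(done))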
# Dispatch one Ray task per batch; each .remote() call returns a future
# immediately, so the batches are processed in parallel.
prediction_futures, ground_truths = [], []
for i, batch in enumerate(data_loader):
    prediction_future = remote_process_data_sample.remote(batch, model_id, decoder_id, target_dict)
    prediction_futures.append(prediction_future)
    ground_truths.append(batch[2][0])
# Wrapping the existing single-sample function in @ray.remote turns each
# call into an asynchronous task that Ray schedules on a worker process.
@ray.remote
def remote_process_data_sample(batch, model, generator, target_dict):
    return process_data_sample(batch, model, generator, target_dict)
# Start a local Ray runtime; by default it sizes the worker pool to the
# number of available CPU cores.
import ray

ray.init()
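The model_id and decoder_id handles passed into the remote call above are presumably Ray object references. A minimal sketch of producing them with ray.put, assuming model and generator hold the loaded wav2vec 2.0 model and decoder (those two names are placeholders, not from the original):

# Sketch: stage the heavyweight objects in Ray's shared object store once,
# so every task reads them without a per-call copy.
model_id = ray.put(model)        # ObjectRef to the loaded wav2vec 2.0 model
decoder_id = ray.put(generator)  # ObjectRef to the decoder / generator
# Ray resolves ObjectRef arguments before the task body runs, so the remote
# function receives the actual objects, not the references.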
def decode(self, emissions):
    B, T, N = emissions.size()  # batch, time steps, vocabulary size
    hypos = []
    if self.asg_transitions is None:
        transitions = torch.FloatTensor(N, N).zero_()
    else:
        transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
    viterbi_path = torch.IntTensor(B, T)
    # Remainder follows fairseq's W2lViterbiDecoder: allocate a scratch
    # buffer, then run the C++ Viterbi kernel, which fills viterbi_path.
    workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
    CpuViterbiPath.compute(B, T, N, get_data_ptr_as_bytes(emissions),
                           get_data_ptr_as_bytes(transitions),
                           get_data_ptr_as_bytes(viterbi_path),
                           get_data_ptr_as_bytes(workspace))
    return [[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
            for b in range(B)]
# C++ Viterbi kernel and raw-pointer helper from the wav2letter Python bindings.
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
# Decode the emission matrix into token sequences with the Viterbi decoder.
decoder_out = decoder.decode(emissions)
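How emissions is produced is not shown in these snippets; with a fairseq-style wav2vec 2.0 CTC model it would look roughly like the sketch below (model and sample are assumed placeholders for the loaded model and a fairseq batch):

import torch

with torch.no_grad():
    net_output = model(**sample["net_input"])  # acoustic model forward pass
    emissions = model.get_normalized_probs(net_output, log_probs=True)
    # fairseq emits (T, B, N); the Viterbi decoder expects (B, T, N) on CPU.
    emissions = emissions.transpose(0, 1).float().cpu().contiguous()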
# Dynamically quantize every nn.Linear in the model to int8 weights;
# activations are quantized on the fly at inference time.
quantized_model = torch.quantization.quantize_dynamic(
    pt_wav2vec2, {torch.nn.Linear}, dtype=torch.qint8, inplace=True
)
quantized_model.prepare_for_inference_after_quantization()
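A quick sanity check on what dynamic quantization buys, using only standard PyTorch: serialize the state dict and look at its size on disk. With int8 weights in the Linear layers it should come out at roughly a quarter of the float32 size.

import os

import torch

def serialized_size_mb(model, path="tmp_model.pt"):
    # Save the state dict and report its on-disk size in megabytes.
    torch.save(model.state_dict(), path)
    size_mb = os.path.getsize(path) / 1e6
    os.remove(path)
    return size_mb

print(f"quantized model: {serialized_size_mb(quantized_model):.1f} MB")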
def prepare_for_inference_after_quantization(self):
    # Quantized nn.Linear modules expose weight()/bias() as methods; copy the
    # values back onto plain attributes that the attention forward pass reads.
    dequantizer = torch.nn.quantized.DeQuantize()
    for trans_layer in self.encoder.layers:
        attn = trans_layer.self_attn
        attn.q_proj_bias = attn.q_proj.bias()
        attn.k_proj_bias = attn.k_proj.bias()
        attn.v_proj_bias = attn.v_proj.bias()
        attn.in_proj_bias = torch.cat(
            (attn.q_proj_bias, attn.k_proj_bias, attn.v_proj_bias))
        attn.out_proj_bias = attn.out_proj.bias()
        attn.out_proj_weight = dequantizer(attn.out_proj.weight())
        attn.q_proj_weight = dequantizer(attn.q_proj.weight())
        # (assumed continuation: k/v weights dequantized like q_proj above)
        attn.k_proj_weight = dequantizer(attn.k_proj.weight())
        attn.v_proj_weight = dequantizer(attn.v_proj.weight())
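And a rough latency check, assuming the model keeps fairseq's forward signature (source, padding_mask); the 10-second 16 kHz clip is a made-up input, not from the original:

import time

import torch

quantized_model.eval()
dummy_audio = torch.randn(1, 160000)  # hypothetical 10 s of 16 kHz audio

with torch.no_grad():
    start = time.perf_counter()
    for _ in range(10):
        quantized_model(source=dummy_audio, padding_mask=None)
    mean_s = (time.perf_counter() - start) / 10
print(f"mean forward pass: {mean_s:.3f} s")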