AB739 committed on
Commit
bc54a04
·
verified ·
1 Parent(s): d159905

Update tasks/audio.py

Browse files
Files changed (1) hide show
  1. tasks/audio.py +4 -6
tasks/audio.py CHANGED
@@ -84,7 +84,8 @@ async def evaluate_audio(request: AudioEvaluationRequest):
84
  TensorDataset(waveforms, labels),
85
  batch_size=64,
86
  shuffle=False,
87
- pin_memory=True
 
88
  )
89
 
90
  # Example Usage
@@ -98,14 +99,11 @@ async def evaluate_audio(request: AudioEvaluationRequest):
98
 
99
 
100
  int8_model = QuantizedBlazeFaceModel(model_fp32)
101
- torch.quantization.prepare(int8_model, inplace=True)
102
- torch.quantization.convert(int8_model, inplace=True)
103
- #int8_model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
104
 
105
  # Load the state dictionary
106
  int8_model.load_state_dict(torch.load(quantized_model_path, map_location=torch.device('cpu'), weights_only=True))
107
- int8_model.eval() # Set to evaluation mode
108
- #model.load_state_dict(torch.load("./best_blazeface_model_second.pth", map_location=torch.device('cpu'), weights_only=True))
109
 
110
  #model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
111
 
 
84
  TensorDataset(waveforms, labels),
85
  batch_size=64,
86
  shuffle=False,
87
+ pin_memory=True,
88
+ num_workers=4
89
  )
90
 
91
  # Example Usage
 
99
 
100
 
101
  int8_model = QuantizedBlazeFaceModel(model_fp32)
102
+ int8_model.eval()
 
 
103
 
104
  # Load the state dictionary
105
  int8_model.load_state_dict(torch.load(quantized_model_path, map_location=torch.device('cpu'), weights_only=True))
106
+ int8_model.eval()
 
107
 
108
  #model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
109