Update tasks/audio.py

tasks/audio.py  CHANGED  (+3 -2)
@@ -84,7 +84,7 @@ async def evaluate_audio(request: AudioEvaluationRequest):
         batch_size=128,
         shuffle=False,
         pin_memory=True,
-        num_workers=
+        num_workers=2
     )
 
     scripted_model = torch.jit.load("./optimized_qat_blazeface_model.pt", map_location=torch.device('cpu'))
@@ -101,7 +101,8 @@ async def evaluate_audio(request: AudioEvaluationRequest):
     predictions = []
     with torch.no_grad():
         #with autocast():
-        with torch.amp.autocast(device_type='cpu'):
+        #with torch.amp.autocast(device_type='cpu'):
+        with torch.autocast(device_type='cpu'):
            for data, target in test_loader:
                outputs = scripted_model(data)
                _, predicted = torch.max(outputs, 1)
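For context, a minimal sketch of how the inference path in evaluate_audio plausibly reads after this commit: the DataLoader now gets num_workers=2, and CPU autocast is entered via torch.autocast(device_type='cpu') in place of the torch.amp.autocast call, which is left commented out. Everything outside the two hunks (the test_dataset variable, the run_inference wrapper, and the prediction-collection step) is assumed for illustration and not taken from the actual file.

# Minimal sketch, assuming a `test_dataset` prepared earlier in
# evaluate_audio(); the wrapper function and prediction collection below
# are illustrative and not taken from the actual file.
import torch
from torch.utils.data import DataLoader


def run_inference(test_dataset):
    test_loader = DataLoader(
        test_dataset,
        batch_size=128,
        shuffle=False,
        pin_memory=True,
        num_workers=2,  # value added by this commit (was left blank before)
    )

    # Load the TorchScript model on CPU, as in the diff.
    scripted_model = torch.jit.load(
        "./optimized_qat_blazeface_model.pt", map_location=torch.device("cpu")
    )

    predictions = []
    with torch.no_grad():
        # torch.autocast(device_type='cpu') runs the forward pass in reduced
        # precision (bfloat16 by default on CPU); it replaces the earlier
        # torch.amp.autocast call, now commented out in the file.
        with torch.autocast(device_type="cpu"):
            for data, target in test_loader:
                outputs = scripted_model(data)
                _, predicted = torch.max(outputs, 1)
                predictions.extend(predicted.tolist())  # assumed collection step
    return predictions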