Upload su_id_asr.py with huggingface_hub
su_id_asr.py +13 -13

su_id_asr.py CHANGED
@@ -4,14 +4,14 @@ from typing import Dict, List
 
 import datasets
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                        DEFAULT_SOURCE_VIEW_NAME, Tasks)
 
 _DATASETNAME = "su_id_asr"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
-_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
 
 _LANGUAGES = ["sun"]
 _LOCAL = False
@@ -44,25 +44,25 @@ _URLs = {
 _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
 
 _SOURCE_VERSION = "1.0.0"
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 
 class SuIdASR(datasets.GeneratorBasedBuilder):
     """su_id contains ~220K utterances for Sundanese ASR training data."""
 
     BUILDER_CONFIGS = [
-        NusantaraConfig(
+        SEACrowdConfig(
             name="su_id_asr_source",
             version=datasets.Version(_SOURCE_VERSION),
             description="SU_ID_ASR source schema",
             schema="source",
             subset_id="su_id_asr",
         ),
-        NusantaraConfig(
-            name="su_id_asr_nusantara_sptext",
-            version=datasets.Version(_NUSANTARA_VERSION),
+        SEACrowdConfig(
+            name="su_id_asr_seacrowd_sptext",
+            version=datasets.Version(_SEACROWD_VERSION),
             description="SU_ID_ASR Nusantara schema",
-            schema="nusantara_sptext",
+            schema="seacrowd_sptext",
             subset_id="su_id_asr",
         ),
     ]
@@ -80,7 +80,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
                     "text": datasets.Value("string"),
                 }
             )
-        elif self.config.schema == "nusantara_sptext":
+        elif self.config.schema == "seacrowd_sptext":
             features = schemas.speech_text_features
 
         return datasets.DatasetInfo(
@@ -107,7 +107,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, filepath: Dict):
 
-        if self.config.schema == "source" or self.config.schema == "nusantara_sptext":
+        if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
 
             for key, each_filepath in filepath.items():
 
@@ -131,7 +131,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
                     "text": transcription_text,
                 }
                 yield audio_id, ex
-            elif self.config.schema == "nusantara_sptext":
+            elif self.config.schema == "seacrowd_sptext":
                 ex = {
                     "id": audio_id,
                     "speaker_id": speaker_id,
|