Dataset Open Access

The Makerere Radio Speech Corpus: A Luganda Radio Corpus for Automatic Speech Recognition

Mukiibi, Jonathan; Hussein, Ali; Meyer, Joshua; Katumba, Andrew; Nakatumba-Nabende, Joyce


DataCite XML Export

<?xml version='1.0' encoding='utf-8'?>
<resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://datacite.org/schema/kernel-4" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.1/metadata.xsd">
  <identifier identifierType="DOI">10.5281/zenodo.5855017</identifier>
  <creators>
    <creator>
      <creatorName>Mukiibi, Jonathan</creatorName>
      <givenName>Jonathan</givenName>
      <familyName>Mukiibi</familyName>
      <affiliation>Makerere University</affiliation>
    </creator>
    <creator>
      <creatorName>Hussein, Ali</creatorName>
      <givenName>Ali</givenName>
      <familyName>Hussein</familyName>
      <affiliation>Ronin Institute</affiliation>
    </creator>
    <creator>
      <creatorName>Meyer, Joshua</creatorName>
      <givenName>Joshua</givenName>
      <familyName>Meyer</familyName>
      <affiliation>Coqui</affiliation>
    </creator>
    <creator>
      <creatorName>Katumba, Andrew</creatorName>
      <givenName>Andrew</givenName>
      <familyName>Katumba</familyName>
      <affiliation>Makerere University</affiliation>
    </creator>
    <creator>
      <creatorName>Nakatumba-Nabende, Joyce</creatorName>
      <givenName>Joyce</givenName>
      <familyName>Nakatumba-Nabende</familyName>
      <affiliation>Makerere University</affiliation>
    </creator>
  </creators>
  <titles>
    <title>The Makerere Radio Speech Corpus: A Luganda Radio Corpus for Automatic Speech Recognition</title>
  </titles>
  <publisher>Zenodo</publisher>
  <publicationYear>2022</publicationYear>
  <subjects>
    <subject>Luganda</subject>
    <subject>radio speech corpus</subject>
    <subject>automatic speech recognition</subject>
  </subjects>
  <dates>
    <date dateType="Issued">2022-01-15</date>
  </dates>
  <resourceType resourceTypeGeneral="Dataset"/>
  <alternateIdentifiers>
    <alternateIdentifier alternateIdentifierType="url">https://zenodo.org/record/5855017</alternateIdentifier>
  </alternateIdentifiers>
  <relatedIdentifiers>
    <relatedIdentifier relatedIdentifierType="DOI" relationType="IsVersionOf">10.5281/zenodo.5855016</relatedIdentifier>
  </relatedIdentifiers>
  <version>Version 1.0</version>
  <rightsList>
    <rights rightsURI="https://creativecommons.org/licenses/by/4.0/legalcode">Creative Commons Attribution 4.0 International</rights>
    <rights rightsURI="info:eu-repo/semantics/openAccess">Open Access</rights>
  </rightsList>
  <descriptions>
    <description descriptionType="Abstract">&lt;p&gt;The Makerere AI Lab has built an end-to-end CTC Luganda ASR model using radio data. Having encountered data challenges in working with low resource languages, we take the initiative together with our partners to release the first radio corpus for Luganda.&lt;/p&gt;

&lt;p&gt;The corpus of 155&amp;nbsp;hours is publicly available online under the Creative Commons BY-NC-ND 4.0 license.&amp;nbsp;The dataset release is comprised of the following:&lt;/p&gt;

&lt;ol&gt;
	&lt;li&gt;20 hours of human-transcribed radio speech. The audio is 16 kHz, mono channel, 16-bit.&lt;/li&gt;
	&lt;li&gt;Two CSV files for the 20-hour human-transcribed dataset - cleaned.csv contains cleaned transcripts and uncleaned.csv contains uncleaned transcripts. The uncleaned transcripts contain extra speech details included in tags like [laughter] for laughter and [um] for filler pauses, and indicate which speaker is talking, where each speaker is assigned an identifier A or B.&lt;/li&gt;
	&lt;li&gt;The transcription guide used to transcribe the radio dataset.&lt;/li&gt;
	&lt;li&gt;A multi-speaker untranscribed dataset of 6 hours of radio data: 1.4 hours of women's voices and 4.6 hours of men's voices. Each audio is a ten-second clip with a single speaker.&lt;/li&gt;
	&lt;li&gt;135&amp;nbsp;hours of multi-speaker untranscribed radio data.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;strong&gt;NOTE: You can read and cite our paper published in the&amp;nbsp;&lt;/strong&gt;&lt;a href="http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.208.pdf"&gt;&lt;strong&gt;Proceedings of the 13th Conference on Language Resources and Evaluation (LREC 2022)&lt;/strong&gt;&lt;/a&gt;&lt;/p&gt;</description>
  </descriptions>
</resource>
204 views
29 downloads
                   All versions   This version
Views                       204            204
Downloads                    29             29
Data volume            156.8 GB       156.8 GB
Unique views                162            162
Unique downloads             25             25

Share

Cite as