Dataset Open Access

Flute audio labelled database for Automatic Music Transcription

Elena Agulló Cantos


DCAT Export

<?xml version='1.0' encoding='utf-8'?>
<!-- DCAT/RDF metadata export for Zenodo record 1408985
     ("Flute audio labelled database for Automatic Music Transcription"). -->
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:adms="http://www.w3.org/ns/adms#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dct="http://purl.org/dc/terms/" xmlns:dctype="http://purl.org/dc/dcmitype/" xmlns:dcat="http://www.w3.org/ns/dcat#" xmlns:duv="http://www.w3.org/ns/duv#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:frapo="http://purl.org/cerif/frapo/" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gsp="http://www.opengis.net/ont/geosparql#" xmlns:locn="http://www.w3.org/ns/locn#" xmlns:org="http://www.w3.org/ns/org#" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:prov="http://www.w3.org/ns/prov#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:schema="http://schema.org/" xmlns:skos="http://www.w3.org/2004/02/skos/core#" xmlns:vcard="http://www.w3.org/2006/vcard/ns#" xmlns:wdrs="http://www.w3.org/2007/05/powder-s#">
  <!-- The dataset is identified by its version DOI. -->
  <rdf:Description rdf:about="https://doi.org/10.5281/zenodo.1408985">
    <rdf:type rdf:resource="http://www.w3.org/ns/dcat#Dataset"/>
    <dct:type rdf:resource="http://purl.org/dc/dcmitype/Dataset"/>
    <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://doi.org/10.5281/zenodo.1408985</dct:identifier>
    <foaf:page rdf:resource="https://doi.org/10.5281/zenodo.1408985"/>
    <!-- Creator, identified by ORCID iD. -->
    <dct:creator>
      <rdf:Description rdf:about="http://orcid.org/0000-0003-3334-5075">
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#string">0000-0003-3334-5075</dct:identifier>
        <foaf:name>Elena Agulló Cantos</foaf:name>
      </rdf:Description>
    </dct:creator>
    <dct:title>Flute audio labelled database for Automatic Music Transcription</dct:title>
    <dct:publisher>
      <foaf:Agent>
        <foaf:name>Zenodo</foaf:name>
      </foaf:Agent>
    </dct:publisher>
    <!-- NOTE(review): dct:issued appears twice (publication year as xsd:gYear here,
         full publication date as xsd:date below); both are kept as exported by Zenodo. -->
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#gYear">2018</dct:issued>
    <dcat:keyword>Automatic Music Transcription</dcat:keyword>
    <dcat:keyword>Music Information Retrieval</dcat:keyword>
    <dcat:keyword>Sound analysis</dcat:keyword>
    <dcat:keyword>Music</dcat:keyword>
    <dcat:keyword>Flute</dcat:keyword>
    <dcat:keyword>Digital audio</dcat:keyword>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#date">2018-09-04</dct:issued>
    <dct:language rdf:resource="http://publications.europa.eu/resource/authority/language/SPA"/>
    <!-- The Zenodo record landing page denotes the same resource as the DOI. -->
    <owl:sameAs rdf:resource="https://zenodo.org/record/1408985"/>
    <adms:identifier>
      <adms:Identifier>
        <skos:notation rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/1408985</skos:notation>
        <adms:schemeAgency>url</adms:schemeAgency>
      </adms:Identifier>
    </adms:identifier>
    <!-- Version relations: concept DOI and Zenodo community collection. -->
    <dct:isVersionOf rdf:resource="https://doi.org/10.5281/zenodo.1408984"/>
    <dct:isPartOf rdf:resource="https://zenodo.org/communities/mir"/>
    <dct:description>&lt;p&gt;Automatic Music Transcription (ATM) is a well-known task in the Music Information Retrieval (MIR) domain and consists on the computation of a symbolic music representation from an audio recording. In this work, our focus is to adapt algorithms that extract musical information from an audio file for a particular instrument. The main objective is to study the automatic transcription of digitized music support systems. Currently, these techniques are applied to a generic sound timbre, to sounds to any instrument for further analysis and conversion to a digital music encoding and final score format. The results of this project add new knowledge in this automatic transcription field, since traverse flute has been selected as the instrument on which to focus all the process and, until now, there is no database of flute sounds for this purpose.&lt;/p&gt; &lt;p&gt;For so, we have recorded some sounds, both monophonic and polyphonic music. These audio files have been processed by the chosen transcription algorithm and converted to a digital music encoding format for its posterior alignment with the original recordings. Once all these data have been converted to text, the resulting labeled database its constituted by the initial audios and final aligned files.&lt;/p&gt; &lt;p&gt;Furthermore, after this process and from the obtained data, an evaluation of the transcriptor behavior has been made based on two main techniques: note and frame level.&lt;/p&gt; &lt;p&gt;This database includes the original audio files (.wav),&amp;nbsp;transcribed MIDI files (.mid), aligned MIDI files (.mid), aligned text files (.txt) and evaluation files (.csv).&lt;/p&gt;</dct:description>
    <dct:accessRights rdf:resource="http://publications.europa.eu/resource/authority/access-right/PUBLIC"/>
    <dct:accessRights>
      <dct:RightsStatement rdf:about="info:eu-repo/semantics/openAccess">
        <rdfs:label>Open Access</rdfs:label>
      </dct:RightsStatement>
    </dct:accessRights>
    <!-- Distribution 1: license + access URL (the DOI landing page). -->
    <dcat:distribution>
      <dcat:Distribution>
        <dct:license rdf:resource="https://creativecommons.org/licenses/by/4.0/legalcode"/>
        <dcat:accessURL rdf:resource="https://doi.org/10.5281/zenodo.1408985"/>
      </dcat:Distribution>
    </dcat:distribution>
    <!-- Distribution 2: the downloadable archive file.
         Fixed: dcat:accessURL / dcat:downloadURL have range rdfs:Resource in DCAT,
         so they must be rdf:resource references (matching distribution 1 above),
         not plain literals; dcat:byteSize is typed xsd:decimal per DCAT. -->
    <dcat:distribution>
      <dcat:Distribution>
        <dcat:accessURL rdf:resource="https://doi.org/10.5281/zenodo.1408985"/>
        <dcat:byteSize rdf:datatype="http://www.w3.org/2001/XMLSchema#decimal">114892266</dcat:byteSize>
        <dcat:downloadURL rdf:resource="https://zenodo.org/record/1408985/files/flute-audio-labelled-database-AMT.zip"/>
        <dcat:mediaType>application/zip</dcat:mediaType>
      </dcat:Distribution>
    </dcat:distribution>
  </rdf:Description>
</rdf:RDF>
473 views
83 downloads
                 All versions | This version
Views:                   473 | 473
Downloads:                83 | 83
Data volume:          9.5 GB | 9.5 GB
Unique views:            408 | 408
Unique downloads:         62 | 62

Share

Cite as