Report Open Access

Cross-Dataset Music Emotion Recognition: an End-to-End Approach

Pandrea, Ana Gabriela; Gómez-Cañón, Juan Sebastián; Herrera, Perfecto


DataCite XML Export

<?xml version='1.0' encoding='utf-8'?>
<resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://datacite.org/schema/kernel-4" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.1/metadata.xsd">
  <identifier identifierType="DOI">10.5281/zenodo.4076772</identifier>
  <creators>
    <creator>
      <creatorName>Pandrea, Ana Gabriela</creatorName>
      <givenName>Ana Gabriela</givenName>
      <familyName>Pandrea</familyName>
      <affiliation>Universitat Pompeu Fabra</affiliation>
    </creator>
    <creator>
      <creatorName>Gómez-Cañón, Juan Sebastián</creatorName>
      <givenName>Juan Sebastián</givenName>
      <familyName>Gómez-Cañón</familyName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="http://orcid.org/">0000-0002-2544-6311</nameIdentifier>
      <affiliation>Universitat Pompeu Fabra</affiliation>
    </creator>
    <creator>
      <creatorName>Herrera, Perfecto</creatorName>
      <givenName>Perfecto</givenName>
      <familyName>Herrera</familyName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="http://orcid.org/">0000-0003-2799-7675</nameIdentifier>
      <affiliation>Universitat Pompeu Fabra</affiliation>
    </creator>
  </creators>
  <titles>
    <title>Cross-Dataset Music Emotion Recognition: an End-to-End Approach</title>
  </titles>
  <publisher>Zenodo</publisher>
  <publicationYear>2020</publicationYear>
  <subjects>
    <subject>cross-dataset</subject>
    <subject>cross-cultural</subject>
    <subject>music emotion recognition</subject>
    <subject>end-to-end model</subject>
  </subjects>
  <dates>
    <date dateType="Issued">2020-10-12</date>
  </dates>
  <language>en</language>
  <resourceType resourceTypeGeneral="Text">Report</resourceType>
  <alternateIdentifiers>
    <alternateIdentifier alternateIdentifierType="url">https://zenodo.org/record/4076772</alternateIdentifier>
  </alternateIdentifiers>
  <relatedIdentifiers>
    <relatedIdentifier relatedIdentifierType="DOI" relationType="IsVersionOf">10.5281/zenodo.4076771</relatedIdentifier>
  </relatedIdentifiers>
  <rightsList>
    <rights rightsURI="https://creativecommons.org/licenses/by/4.0/legalcode">Creative Commons Attribution 4.0 International</rights>
    <rights rightsURI="info:eu-repo/semantics/openAccess">Open Access</rights>
  </rightsList>
  <descriptions>
    <description descriptionType="Abstract">&lt;p&gt;The topic of Music Emotion Recognition (MER) evolved&amp;nbsp;as music is a fascinating expression of emotions, yet it faces challenges given its subjectivity. Because each language has its particularities in terms of sound and intonation, and implicitly associations made upon them, we hypothesize perceived emotions might vary in different cultures. To address this issue, we test a novel approach towards emotion detection and propose a language sensitive end-to-end model that learns to tag emotions from music with lyrics in English, Mandarin and Turkish.&lt;/p&gt;</description>
  </descriptions>
</resource>
52 views
29 downloads
                  All versions | This version
Views                       52 | 52
Downloads                   29 | 29
Data volume             2.2 MB | 2.2 MB
Unique views                40 | 40
Unique downloads            24 | 24

Share

Cite as