Conference paper · Open Access

Attention-enhanced Sensorimotor Object Recognition

Thermos, S; Papadopoulos, GT; Daras, P; Potamianos, G


DCAT Export

<?xml version='1.0' encoding='utf-8'?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:adms="http://www.w3.org/ns/adms#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dct="http://purl.org/dc/terms/" xmlns:dctype="http://purl.org/dc/dcmitype/" xmlns:dcat="http://www.w3.org/ns/dcat#" xmlns:duv="http://www.w3.org/ns/duv#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:frapo="http://purl.org/cerif/frapo/" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gsp="http://www.opengis.net/ont/geosparql#" xmlns:locn="http://www.w3.org/ns/locn#" xmlns:org="http://www.w3.org/ns/org#" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:prov="http://www.w3.org/ns/prov#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:schema="http://schema.org/" xmlns:skos="http://www.w3.org/2004/02/skos/core#" xmlns:vcard="http://www.w3.org/2006/vcard/ns#" xmlns:wdrs="http://www.w3.org/2007/05/powder-s#">
  <rdf:Description rdf:about="https://zenodo.org/record/3727849">
    <rdf:type rdf:resource="http://www.w3.org/ns/dcat#Dataset"/>
    <dct:type rdf:resource="http://purl.org/dc/dcmitype/Text"/>
    <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/3727849</dct:identifier>
    <foaf:page rdf:resource="https://zenodo.org/record/3727849"/>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Thermos, S</foaf:name>
        <foaf:givenName>S</foaf:givenName>
        <foaf:familyName>Thermos</foaf:familyName>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Papadopoulos, GT</foaf:name>
        <foaf:givenName>GT</foaf:givenName>
        <foaf:familyName>Papadopoulos</foaf:familyName>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Daras, P</foaf:name>
        <foaf:givenName>P</foaf:givenName>
        <foaf:familyName>Daras</foaf:familyName>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Potamianos, G</foaf:name>
        <foaf:givenName>G</foaf:givenName>
        <foaf:familyName>Potamianos</foaf:familyName>
      </rdf:Description>
    </dct:creator>
    <dct:title>Attention-enhanced Sensorimotor Object Recognition</dct:title>
    <dct:publisher>
      <foaf:Agent>
        <foaf:name>Zenodo</foaf:name>
      </foaf:Agent>
    </dct:publisher>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#gYear">2018</dct:issued>
    <dcat:keyword>Sensorimotor object recognition, attention mechanism, stream fusion, deep neural networks</dcat:keyword>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#date">2018-10-10</dct:issued>
    <dct:language rdf:resource="http://publications.europa.eu/resource/authority/language/ENG"/>
    <owl:sameAs rdf:resource="https://zenodo.org/record/3727849"/>
    <adms:identifier>
      <adms:Identifier>
        <skos:notation rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/3727849</skos:notation>
        <adms:schemeAgency>url</adms:schemeAgency>
      </adms:Identifier>
    </adms:identifier>
    <owl:sameAs rdf:resource="https://doi.org/10.1109/ICIP.2018.8451158"/>
    <dct:isPartOf rdf:resource="https://zenodo.org/communities/vrtogether-h2020"/>
    <owl:versionInfo>pre-print</owl:versionInfo>
    <dct:description>&lt;p&gt;Sensorimotor learning, namely the process of understanding the physical world by combining visual and motor information, has been recently investigated, achieving promising results for the task of 2D/3D object recognition. Following the recent trend in computer vision, powerful deep neural networks (NNs) have been used to model the &amp;ldquo;sensory&amp;rdquo; and &amp;ldquo;motor&amp;rdquo; information, namely the object appearance and affordance. However, the existing implementations cannot efficiently address the spatio-temporal nature of the human-object interaction. Inspired by recent work on attention-based learning, this paper introduces an attention-enhanced NN-based model that learns to selectively focus on parts of the physical interaction where the object appearance is corrupted by occlusions and deformations. The model&amp;rsquo;s attention mechanism relies on the confidence of classifying an object based solely on its appearance. Three metrics are used to measure the latter, namely the prediction entropy, the average N-best likelihood difference, and the N-best likelihood dispersion. Evaluation of the attention-enhanced model on the SOR3D dataset reports 33% and 26% relative improvement over the appearance-only and the spatio-temporal fusion baseline models, respectively.&lt;/p&gt;</dct:description>
    <dct:accessRights rdf:resource="http://publications.europa.eu/resource/authority/access-right/PUBLIC"/>
    <dct:accessRights>
      <dct:RightsStatement rdf:about="info:eu-repo/semantics/openAccess">
        <rdfs:label>Open Access</rdfs:label>
      </dct:RightsStatement>
    </dct:accessRights>
    <dcat:distribution>
      <dcat:Distribution>
        <dct:license rdf:resource="https://creativecommons.org/licenses/by/4.0/legalcode"/>
        <dcat:accessURL rdf:resource="https://zenodo.org/record/3727849"/>
      </dcat:Distribution>
    </dcat:distribution>
    <dcat:distribution>
      <dcat:Distribution>
        <dcat:accessURL rdf:resource="https://doi.org/10.1109/ICIP.2018.8451158"/>
        <dcat:byteSize>777718</dcat:byteSize>
        <dcat:downloadURL>https://zenodo.org/record/3727849/files/06_CERTH_ICIP_2018.pdf</dcat:downloadURL>
        <dcat:mediaType>application/pdf</dcat:mediaType>
      </dcat:Distribution>
    </dcat:distribution>
  </rdf:Description>
</rdf:RDF>
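
The DCAT export above is standard RDF/XML, so it can be consumed with any RDF toolkit. Below is a minimal sketch using Python's rdflib that extracts the title and creator names; the local filename record_3727849_dcat.xml is an assumption (save the export above under any name).

from rdflib import Graph
from rdflib.namespace import DCTERMS, FOAF

g = Graph()
# Parse the DCAT export saved from this record (hypothetical filename).
g.parse("record_3727849_dcat.xml", format="xml")

# The record is the one subject carrying a dct:title.
record = next(g.subjects(DCTERMS.title, None))
print("Title:", g.value(record, DCTERMS.title))

# Creators are blank nodes; each carries a foaf:name literal.
for creator in g.objects(record, DCTERMS.creator):
    print("Creator:", g.value(creator, FOAF.name))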
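The abstract states that the attention mechanism is driven by three appearance-confidence metrics: the prediction entropy, the average N-best likelihood difference, and the N-best likelihood dispersion. The paper's exact formulas are not reproduced in this record, so the sketch below uses common definitions from the confidence-estimation literature; the top-1-minus-rest gap and the mean pairwise gap among the N best posteriors are assumptions, not the authors' verified equations.

import numpy as np

def prediction_entropy(p, eps=1e-12):
    # Shannon entropy of the class posterior; low entropy = confident prediction.
    p = np.asarray(p, dtype=float)
    return float(-np.sum(p * np.log(p + eps)))

def avg_nbest_likelihood_difference(p, n=5):
    # Mean gap between the best posterior and each of the next n-1 best
    # (assumed definition; large gap = confident prediction).
    top = np.sort(np.asarray(p, dtype=float))[::-1][:n]
    return float(np.mean(top[0] - top[1:]))

def nbest_likelihood_dispersion(p, n=5):
    # Mean pairwise gap among the n best posteriors (assumed definition).
    top = np.sort(np.asarray(p, dtype=float))[::-1][:n]
    gaps = [top[i] - top[j] for i in range(len(top)) for j in range(i + 1, len(top))]
    return float(np.mean(gaps))

# Example: a peaked posterior scores as confident on all three metrics
# (low entropy, large difference, large dispersion).
p = np.array([0.85, 0.05, 0.04, 0.03, 0.02, 0.01])
print(prediction_entropy(p), avg_nbest_likelihood_difference(p), nbest_likelihood_dispersion(p))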
Views 24
Downloads 96
Data volume 74.7 MB
Unique views 21
Unique downloads 95
