Journal article Open Access

Deep Motifs and Motion Signatures

Aristidou Andreas; Cohen-Or Daniel; Hodgins Jessica K; Chrysanthou Yiorgos; Shamir Ariel


DCAT Export

<?xml version='1.0' encoding='utf-8'?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:adms="http://www.w3.org/ns/adms#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dct="http://purl.org/dc/terms/" xmlns:dctype="http://purl.org/dc/dcmitype/" xmlns:dcat="http://www.w3.org/ns/dcat#" xmlns:duv="http://www.w3.org/ns/duv#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:frapo="http://purl.org/cerif/frapo/" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gsp="http://www.opengis.net/ont/geosparql#" xmlns:locn="http://www.w3.org/ns/locn#" xmlns:org="http://www.w3.org/ns/org#" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:prov="http://www.w3.org/ns/prov#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:schema="http://schema.org/" xmlns:skos="http://www.w3.org/2004/02/skos/core#" xmlns:vcard="http://www.w3.org/2006/vcard/ns#" xmlns:wdrs="http://www.w3.org/2007/05/powder-s#">
  <rdf:Description rdf:about="https://zenodo.org/record/2658798">
    <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/2658798</dct:identifier>
    <foaf:page rdf:resource="https://zenodo.org/record/2658798"/>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Aristidou Andreas</foaf:name>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>The Interdisciplinary Centre</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Cohen-Or Daniel</foaf:name>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>Tel-Aviv University</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Hodgins Jessica K</foaf:name>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>Carnegie Mellon University</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Chrysanthou Yiorgos</foaf:name>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>Research Centre on Interactive Media Smart Systems and Emerging Technologies</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description>
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <foaf:name>Shamir Ariel</foaf:name>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>The Interdisciplinary Centre</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:title>Deep Motifs and Motion Signatures</dct:title>
    <dct:publisher>
      <foaf:Agent>
        <foaf:name>Zenodo</foaf:name>
      </foaf:Agent>
    </dct:publisher>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#gYear">2018</dct:issued>
    <dcat:keyword>Motion capture</dcat:keyword>
    <dcat:keyword>Motion processing</dcat:keyword>
    <dcat:keyword>Animation</dcat:keyword>
    <dcat:keyword>Motion Word</dcat:keyword>
    <dcat:keyword>Motif</dcat:keyword>
    <dcat:keyword>Motion Signature</dcat:keyword>
    <dcat:keyword>Convolutional Network</dcat:keyword>
    <dcat:keyword>Triplet Loss</dcat:keyword>
    <frapo:isFundedBy rdf:resource="info:eu-repo/grantAgreement/EC/H2020/739578/"/>
    <schema:funder>
      <foaf:Organization>
        <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#string">10.13039/501100000780</dct:identifier>
        <foaf:name>European Commission</foaf:name>
      </foaf:Organization>
    </schema:funder>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#date">2018-11-01</dct:issued>
    <dct:language rdf:resource="http://publications.europa.eu/resource/authority/language/ENG"/>
    <owl:sameAs rdf:resource="https://zenodo.org/record/2658798"/>
    <adms:identifier>
      <adms:Identifier>
        <skos:notation rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/2658798</skos:notation>
        <adms:schemeAgency>url</adms:schemeAgency>
      </adms:Identifier>
    </adms:identifier>
    <owl:sameAs rdf:resource="https://doi.org/10.1145/3272127.3275038"/>
    <dct:isPartOf rdf:resource="https://zenodo.org/communities/rise-teaming-cyprus"/>
    <owl:versionInfo>Published</owl:versionInfo>
    <dct:description>&lt;p&gt;Many analysis tasks for human motion rely on high-level similarity between sequences of motions that are not exact matches in joint angles, timing, or ordering of actions. Even the same movements performed by the same person can vary in duration and speed. Similar motions are characterized by similar sets of actions that appear frequently. In this paper we introduce motion motifs and motion signatures, which are a succinct but descriptive representation of motion sequences. We first break the motion sequences into short-term movements called motion words, and then cluster the words in a high-dimensional feature space to find motifs. Hence, motifs are words that are both common and descriptive, and their distribution represents the motion sequence. To cluster words and find motifs, the challenge is to define an effective feature space, where the distances among motion words are semantically meaningful, and where variations in speed and duration are handled. To this end, we use a deep neural network to embed the motion words into feature space using a triplet loss function. To define a signature, we choose a finite set of motion motifs, creating a bag-of-motifs representation for the sequence. Motion signatures are agnostic to movement order, speed, or duration variations, and can distinguish fine-grained differences between motions of the same class. We illustrate examples of characterizing motion sequences by motifs, and demonstrate the use of motion signatures in a number of applications.&lt;/p&gt;</dct:description>
    <dct:description>This work has received funding from the European Union's Horizon 2020 Research and Innovation Programme under Grant Agreement No 739578 and the Government of the Republic of Cyprus through the Directorate General for European Programmes, Coordination and Development. © 2018 ACM. Licensed under CC-BY-NC-ND 4.0 (http://creativecommons.org/licenses/by-nc-nd/4.0/). Citation: Andreas Aristidou, Daniel Cohen-Or, Jessica K. Hodgins, Yiorgos Chrysanthou, and Ariel Shamir. 2018. Deep motifs and motion signatures. ACM Trans. Graph. 37, 6, Article 187 (December 2018), 13 pages. DOI: https://doi.org/10.1145/3272127.3275038</dct:description>
    <dct:accessRights rdf:resource="http://publications.europa.eu/resource/authority/access-right/PUBLIC"/>
    <dct:accessRights>
      <dct:RightsStatement rdf:about="info:eu-repo/semantics/openAccess">
        <rdfs:label>Open Access</rdfs:label>
      </dct:RightsStatement>
    </dct:accessRights>
    <dct:license rdf:resource="https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode"/>
    <dcat:distribution>
      <dcat:Distribution>
        <dcat:accessURL rdf:resource="https://doi.org/10.1145/3272127.3275038">https://doi.org/10.1145/3272127.3275038</dcat:accessURL>
        <dcat:byteSize>25304177</dcat:byteSize>
        <dcat:downloadURL rdf:resource="https://zenodo.org/record/2658798/files/DeepSignatures.pdf">https://zenodo.org/record/2658798/files/DeepSignatures.pdf</dcat:downloadURL>
        <dcat:mediaType>application/pdf</dcat:mediaType>
      </dcat:Distribution>
    </dcat:distribution>
  </rdf:Description>
  <foaf:Project rdf:about="info:eu-repo/grantAgreement/EC/H2020/739578/">
    <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#string">739578</dct:identifier>
    <dct:title>Research Center on Interactive Media, Smart System and Emerging Technologies</dct:title>
    <frapo:isAwardedBy>
      <foaf:Organization>
        <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#string">10.13039/501100000780</dct:identifier>
        <foaf:name>European Commission</foaf:name>
      </foaf:Organization>
    </frapo:isAwardedBy>
  </foaf:Project>
</rdf:RDF>
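
The abstract in the record above outlines a concrete pipeline: split each motion sequence into short motion words, embed the words with a deep network trained with a triplet loss, cluster the embeddings into motifs, and summarize a sequence as a bag-of-motifs histogram. The Python sketch below illustrates that pipeline at a small scale. It is not the authors' implementation; the window length, network architecture, number of motifs, and all function names are illustrative assumptions.

# Minimal sketch (illustrative, not the paper's code) of the pipeline the
# abstract describes: motion "words" are short fixed-length windows of a pose
# sequence, a small network embeds them with a triplet loss, the embedded
# words are clustered into "motifs", and a sequence is summarized by a
# normalized bag-of-motifs histogram.
import numpy as np
import torch
import torch.nn as nn
from sklearn.cluster import KMeans

WINDOW = 16          # assumed motion-word length (frames)
POSE_DIM = 63        # assumed pose dimension (e.g., 21 joints x 3)
EMBED_DIM = 32       # assumed embedding size

def motion_words(sequence, window=WINDOW, stride=8):
    """Split a (frames x pose_dim) sequence into overlapping motion words."""
    return np.stack([sequence[i:i + window]
                     for i in range(0, len(sequence) - window + 1, stride)])

class WordEncoder(nn.Module):
    """Tiny 1-D convolutional encoder standing in for the deep embedding."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(POSE_DIM, 64, kernel_size=3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1), nn.Flatten(),
            nn.Linear(64, EMBED_DIM))
    def forward(self, x):                      # x: (batch, window, pose_dim)
        z = self.net(x.transpose(1, 2))        # convolve over time
        return nn.functional.normalize(z, dim=1)

def train_step(encoder, opt, anchor, positive, negative):
    """One triplet-loss update; anchor/positive are words from similar movements."""
    loss_fn = nn.TripletMarginLoss(margin=0.2)
    loss = loss_fn(encoder(anchor), encoder(positive), encoder(negative))
    opt.zero_grad(); loss.backward(); opt.step()
    return loss.item()

def signature(encoder, sequence, kmeans):
    """Bag-of-motifs: histogram of motif assignments, agnostic to word order."""
    words = torch.tensor(motion_words(sequence), dtype=torch.float32)
    with torch.no_grad():
        z = encoder(words).numpy()
    counts = np.bincount(kmeans.predict(z), minlength=kmeans.n_clusters)
    return counts / counts.sum()

# Usage sketch with stand-in data: embed all training words, cluster them into
# motifs, then compare two sequences by the distance between their signatures.
encoder = WordEncoder()
train_seqs = [np.random.randn(200, POSE_DIM) for _ in range(4)]
all_words = torch.tensor(np.concatenate([motion_words(s) for s in train_seqs]),
                         dtype=torch.float32)
with torch.no_grad():
    kmeans = KMeans(n_clusters=8, n_init=10).fit(encoder(all_words).numpy())
s1 = signature(encoder, train_seqs[0], kmeans)
s2 = signature(encoder, train_seqs[1], kmeans)
print("signature distance:", np.linalg.norm(s1 - s2))

Because the signature is a normalized histogram over motif assignments, it is indifferent to the order and duration in which the motifs occur, which is the order-, speed-, and duration-agnostic property the abstract highlights.
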
Views 200
Downloads 170
Data volume 4.3 GB
Unique views 175
Unique downloads 166
