Conference paper Open Access

Visual Data Simulation for Deep Learning in Robot Manipulation Tasks

Surák, Miroslav; Košnar, Karel; Kulich, Miroslav; Přeučil, Libor


DataCite XML Export

<?xml version='1.0' encoding='utf-8'?>
<resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://datacite.org/schema/kernel-4" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.1/metadata.xsd">
  <identifier identifierType="URL">https://zenodo.org/record/4352739</identifier>
  <creators>
    <creator>
      <creatorName>Surák, Miroslav</creatorName>
      <givenName>Miroslav</givenName>
      <familyName>Surák</familyName>
      <affiliation>Faculty of Electrical Engineering, Czech Technical University in Prague, Prague, Czech Republic</affiliation>
    </creator>
    <creator>
      <creatorName>Košnar, Karel</creatorName>
      <givenName>Karel</givenName>
      <familyName>Košnar</familyName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="http://orcid.org/">0000-0002-6362-4254</nameIdentifier>
      <affiliation>Czech Institute of Informatics, Robotics and Cybernetics, Czech Technical University in Prague, Prague, Czech Republic</affiliation>
    </creator>
    <creator>
      <creatorName>Kulich, Miroslav</creatorName>
      <givenName>Miroslav</givenName>
      <familyName>Kulich</familyName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="http://orcid.org/">0000-0002-0997-5889</nameIdentifier>
      <affiliation>Czech Institute of Informatics, Robotics and Cybernetics, Czech Technical University in Prague, Prague, Czech Republic</affiliation>
    </creator>
    <creator>
      <creatorName>Přeučil, Libor</creatorName>
      <givenName>Libor</givenName>
      <familyName>Přeučil</familyName>
      <affiliation>Czech Institute of Informatics, Robotics and Cybernetics, Czech Technical University in Prague, Prague, Czech Republic</affiliation>
    </creator>
  </creators>
  <titles>
    <title>Visual Data Simulation for Deep Learning in Robot Manipulation Tasks</title>
  </titles>
  <publisher>Zenodo</publisher>
  <publicationYear>2018</publicationYear>
  <dates>
    <date dateType="Issued">2018-10-17</date>
  </dates>
  <resourceType resourceTypeGeneral="ConferencePaper"/>
  <alternateIdentifiers>
    <alternateIdentifier alternateIdentifierType="url">https://zenodo.org/record/4352739</alternateIdentifier>
  </alternateIdentifiers>
  <relatedIdentifiers>
    <relatedIdentifier relatedIdentifierType="DOI" relationType="IsIdenticalTo">10.1007/978-3-030-14984-0_29</relatedIdentifier>
    <relatedIdentifier relatedIdentifierType="URL" relationType="IsPartOf">https://zenodo.org/communities/safelog</relatedIdentifier>
  </relatedIdentifiers>
  <rightsList>
    <rights rightsURI="https://creativecommons.org/licenses/by/4.0/legalcode">Creative Commons Attribution 4.0 International</rights>
    <rights rightsURI="info:eu-repo/semantics/openAccess">Open Access</rights>
  </rightsList>
  <descriptions>
    <description descriptionType="Abstract">&lt;p&gt;This paper introduces the usage of simulated images for training convolutional neural networks for object recognition and localization in the task of random bin picking. For machine learning applications, the limited amount of real-world image data that can be captured and labeled for training and testing purposes is a big issue. In this paper, we focus on the use of realistic simulation of image data for training convolutional neural networks to be able to estimate the pose of an object. We can systematically generate varying camera viewpoint datasets with various poses of an object and lighting conditions. After successfully training and testing the neural network, we compare the performance of the network trained on simulated images and on images from a real camera capturing the physical object. The usage of the simulated data can speed up the complex and time-consuming task of gathering training data as well as increase the robustness of object recognition by generating a bigger amount of data.&lt;/p&gt;</description>
  </descriptions>
  <fundingReferences>
    <fundingReference>
      <funderName>European Commission</funderName>
      <funderIdentifier funderIdentifierType="Crossref Funder ID">10.13039/100010661</funderIdentifier>
      <awardNumber awardURI="info:eu-repo/grantAgreement/EC/H2020/688117/">688117</awardNumber>
      <awardTitle>Safe human-robot interaction in logistic applications for highly flexible warehouses</awardTitle>
    </fundingReference>
  </fundingReferences>
</resource>
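
As a loose illustration of the approach described in the abstract above (systematic generation of simulated camera viewpoints, object poses and lighting conditions for training a pose-estimation network), the following minimal Python sketch enumerates such a parameter grid. All ranges, step counts and field names are hypothetical, the rendering of the actual images is omitted, and this is not the code used in the paper.

import itertools
import numpy as np

def viewpoint_grid(n_azimuth=12, n_elevation=4, n_object_yaw=8, n_light=3):
    """Enumerate one parameter dictionary per simulated image (hypothetical ranges)."""
    azimuths = np.linspace(0.0, 360.0, n_azimuth, endpoint=False)       # camera azimuth [deg]
    elevations = np.linspace(15.0, 75.0, n_elevation)                    # camera elevation [deg]
    object_yaws = np.linspace(0.0, 360.0, n_object_yaw, endpoint=False)  # object rotation, used as the pose label [deg]
    light_levels = np.linspace(0.3, 1.0, n_light)                        # relative light intensity

    samples = []
    for az, el, yaw, light in itertools.product(azimuths, elevations, object_yaws, light_levels):
        samples.append({
            "camera_azimuth_deg": float(az),
            "camera_elevation_deg": float(el),
            "object_yaw_deg": float(yaw),
            "light_intensity": float(light),
        })
    return samples

# 12 * 4 * 8 * 3 = 1152 simulated image configurations with this grid
print(len(viewpoint_grid()))
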
Views 34
Downloads 67
Data volume 121.0 MB
Unique views 28
Unique downloads 66
