Conference paper | Open Access

Comparison of Fine-tuning and Extension Strategies for Deep Convolutional Neural Networks

Pittaras, Nikiforos; Markatopoulou, Foteini; Mezaris, Vasileios; Patras, Ioannis


DataCite XML Export

<?xml version='1.0' encoding='utf-8'?>
<resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://datacite.org/schema/kernel-4" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.1/metadata.xsd">
  <identifier identifierType="URL">https://zenodo.org/record/240853</identifier>
  <creators>
    <creator>
      <creatorName>Pittaras, Nikiforos</creatorName>
      <givenName>Nikiforos</givenName>
      <familyName>Pittaras</familyName>
      <affiliation>Information Technologies Institute (ITI), Centre for Research and Technology Hellas (CERTH), Thessaloniki, Greece</affiliation>
    </creator>
    <creator>
      <creatorName>Markatopoulou, Foteini</creatorName>
      <givenName>Foteini</givenName>
      <familyName>Markatopoulou</familyName>
      <affiliation>Information Technologies Institute (ITI), Centre for Research and Technology Hellas (CERTH), Thessaloniki, Greece</affiliation>
    </creator>
    <creator>
      <creatorName>Mezaris, Vasileios</creatorName>
      <givenName>Vasileios</givenName>
      <familyName>Mezaris</familyName>
      <affiliation>Information Technologies Institute (ITI), Centre for Research and Technology Hellas (CERTH), Thessaloniki, Greece</affiliation>
    </creator>
    <creator>
      <creatorName>Patras, Ioannis</creatorName>
      <givenName>Ioannis</givenName>
      <familyName>Patras</familyName>
      <affiliation>Queen Mary University of London, Mile End Campus, UK</affiliation>
    </creator>
  </creators>
  <titles>
    <title>Comparison of Fine-tuning and Extension Strategies for Deep Convolutional Neural Networks</title>
  </titles>
  <publisher>Zenodo</publisher>
  <publicationYear>2016</publicationYear>
  <dates>
    <date dateType="Issued">2016-12-31</date>
  </dates>
  <resourceType resourceTypeGeneral="Text">Conference paper</resourceType>
  <alternateIdentifiers>
    <alternateIdentifier alternateIdentifierType="url">https://zenodo.org/record/240853</alternateIdentifier>
  </alternateIdentifiers>
  <relatedIdentifiers>
    <relatedIdentifier relatedIdentifierType="DOI" relationType="IsIdenticalTo">10.1007/978-3-319-51811-4_9</relatedIdentifier>
    <relatedIdentifier relatedIdentifierType="URL" relationType="IsPartOf">https://zenodo.org/communities/ecfunded</relatedIdentifier>
    <relatedIdentifier relatedIdentifierType="URL" relationType="IsPartOf">https://zenodo.org/communities/invid-h2020</relatedIdentifier>
  </relatedIdentifiers>
  <rightsList>
    <rights rightsURI="http://creativecommons.org/licenses/by/4.0/legalcode">Creative Commons Attribution 4.0 International</rights>
    <rights rightsURI="info:eu-repo/semantics/openAccess">Open Access</rights>
  </rightsList>
  <descriptions>
    <description descriptionType="Abstract">&lt;p&gt;In this study we compare three different fine-tuning strategies in order to investigate the best way to transfer the parameters of popular deep convolutional neural networks that were trained for a visual annotation task on one dataset, to a new, considerably different dataset. We focus on the concept-based image/video annotation problem and use ImageNet as the source dataset, while the TRECVID SIN 2013 and PASCAL VOC-2012 classification datasets are used as the target datasets. A large set of experiments examines the effectiveness of three fine-tuning strategies on each of three different pre-trained DCNNs and each target dataset. The reported results give rise to guidelines for effectively fine-tuning a DCNN for concept-based visual annotation.&lt;/p&gt;</description>
  </descriptions>
  <fundingReferences>
    <fundingReference>
      <funderName>European Commission</funderName>
      <funderIdentifier funderIdentifierType="Crossref Funder ID">10.13039/501100000780</funderIdentifier>
      <awardNumber awardURI="info:eu-repo/grantAgreement/EC/H2020/687786/">687786</awardNumber>
      <awardTitle>In Video Veritas – Verification of Social Media Video Content for the News Industry</awardTitle>
    </fundingReference>
  </fundingReferences>
</resource>
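
For illustration only, below is a minimal sketch of one common fine-tuning strategy of the kind the abstract describes at a high level: loading a network pre-trained on ImageNet, replacing its final classification layer to match the target concepts, and training only the new layer. The choice of PyTorch/torchvision, the ResNet-50 architecture, and the placeholder concept count are assumptions made for this sketch; the paper compares its own three strategies across three different pre-trained DCNNs and does not prescribe this toolkit.

    # Sketch of a basic fine-tuning setup (illustrative; not the paper's exact method).
    import torch
    import torch.nn as nn
    from torchvision import models

    num_target_concepts = 20  # e.g. PASCAL VOC-2012 has 20 classes; placeholder value

    # Load a network pre-trained on ImageNet (ResNet-50 chosen only for illustration).
    model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)

    # Freeze the pre-trained layers so only the new classification head is updated.
    for param in model.parameters():
        param.requires_grad = False

    # Replace the 1000-way ImageNet classifier with a layer sized for the target dataset.
    model.fc = nn.Linear(model.fc.in_features, num_target_concepts)

    # Optimize only the parameters of the new layer.
    optimizer = torch.optim.SGD(model.fc.parameters(), lr=1e-3, momentum=0.9)
    criterion = nn.BCEWithLogitsLoss()  # multi-label annotation setting

In a full experiment one would then run a standard training loop over the target dataset (e.g. TRECVID SIN 2013 or PASCAL VOC-2012), computing criterion(model(images), labels) and stepping the optimizer; unfreezing deeper layers or extending the network are variations of the same idea.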
Views: 30
Downloads: 33
Data volume: 8.4 MB
Unique views: 30
Unique downloads: 31
