Journal article Open Access

Efficient Evaluation of Image Quality via Deep-Learning Approximation of Perceptual Metrics

Artusi Alessandro; Banterle Francesco; Carrara Fabio; Moreo Alejandro


MARC21 XML Export

<?xml version='1.0' encoding='UTF-8'?>
<record xmlns="http://www.loc.gov/MARC21/slim">
  <leader>00000nam##2200000uu#4500</leader>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Convolutional Neural Networks (CNNs)</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Objective Metrics</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Image Evaluation</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Human Visual System</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">JPEG-XT</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">HDR Imaging</subfield>
  </datafield>
  <controlfield tag="005">20200730130352.0</controlfield>
  <datafield tag="500" ind1=" " ind2=" ">
    <subfield code="a">This work has received funding from the European Union's Horizon 2020 Research and Innovation Programme under Grant Agreement  No 739578 and the Government of the Republic of Cyprus through the Directorate General for European Programmes, Coordination and Development.</subfield>
  </datafield>
  <controlfield tag="001">3522907</controlfield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="u">ISTI CNR, Italy</subfield>
    <subfield code="a">Banterle Francesco</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="u">ISTI CNR, Italy</subfield>
    <subfield code="a">Carrara Fabio</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="u">ISTI CNR, Italy</subfield>
    <subfield code="a">Moreo Alejandro</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">21640416</subfield>
    <subfield code="z">md5:5876f0ae8d402aae845b9c8db6c32989</subfield>
    <subfield code="u">https://zenodo.org/record/3522907/files/TIP2944079.pdf</subfield>
  </datafield>
  <datafield tag="542" ind1=" " ind2=" ">
    <subfield code="l">open</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2019-10-07</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="O">
    <subfield code="p">openaire</subfield>
    <subfield code="p">user-rise-teaming-cyprus</subfield>
    <subfield code="o">oai:zenodo.org:3522907</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="4">
    <subfield code="c">1-14</subfield>
    <subfield code="p">IEEE Transactions on Image Processing</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="u">MRG DeepCamera Group, RISE Ltd</subfield>
    <subfield code="0">(orcid)0000-0002-4502-663X</subfield>
    <subfield code="a">Artusi Alessandro</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">Efficient Evaluation of Image Quality via Deep-Learning Approximation of Perceptual Metrics</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">user-rise-teaming-cyprus</subfield>
  </datafield>
  <datafield tag="536" ind1=" " ind2=" ">
    <subfield code="c">739578</subfield>
    <subfield code="a">Research Center on Interactive Media, Smart System and Emerging Technologies</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
    <subfield code="u">https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode</subfield>
    <subfield code="a">Creative Commons Attribution Non Commercial No Derivatives 4.0 International</subfield>
  </datafield>
  <datafield tag="650" ind1="1" ind2="7">
    <subfield code="a">cc-by</subfield>
    <subfield code="2">opendefinition.org</subfield>
  </datafield>
  <datafield tag="520" ind1=" " ind2=" ">
    <subfield code="a">&lt;p&gt;mage metrics based on Human Visual System&amp;nbsp;(HVS) play a remarkable role in the evaluation of complex image&amp;nbsp;processing&amp;nbsp; algorithms. However, mimicking the HVS is known&amp;nbsp;to be complex and computationally expensive (both in terms&amp;nbsp;of time and memory), and its usage is thus limited to a few&amp;nbsp;applications and to small input data. All of this makes such&amp;nbsp;metrics not fully attractive in real-world scenarios. To address&amp;nbsp;these issues, we propose Deep Image Quality Metric (DIQM), a&amp;nbsp;deep-learning approach to learn the global image quality feature&amp;nbsp;(mean-opinion-score). DIQM can emulate existing visual metrics&amp;nbsp;efficiently, reducing the computational costs by more than an&lt;/p&gt;</subfield>
  </datafield>
  <datafield tag="024" ind1=" " ind2=" ">
    <subfield code="a">10.1109/TIP.2019.2944079</subfield>
    <subfield code="2">doi</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">publication</subfield>
    <subfield code="b">article</subfield>
  </datafield>
</record>
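
The record above is plain MARC21/slim XML, so its bibliographic fields can be extracted with any XML parser. The following is a minimal sketch in Python, assuming the export has been saved locally under the hypothetical filename record.xml; the tag/subfield mapping (245$a title, 024$a DOI, 653$a keywords, 520$a abstract) follows the fields that appear in this record.

import xml.etree.ElementTree as ET

# Namespace declared on the <record> element above.
NS = {"marc": "http://www.loc.gov/MARC21/slim"}

def field_values(record, tag, code):
    """Return all subfield values for a given datafield tag / subfield code."""
    xpath = f"marc:datafield[@tag='{tag}']/marc:subfield[@code='{code}']"
    return [sf.text for sf in record.findall(xpath, NS)]

# "record.xml" is an assumed local copy of the export shown above.
record = ET.parse("record.xml").getroot()

title    = field_values(record, "245", "a")[0]   # article title
doi      = field_values(record, "024", "a")[0]   # DOI
keywords = field_values(record, "653", "a")      # subject keywords
abstract = field_values(record, "520", "a")[0]   # abstract (contains HTML markup)

print(title)
print(doi)
print(", ".join(keywords))

ElementTree unescapes the XML entities on parsing, so the abstract string comes back with its literal HTML tags, which can then be stripped or rendered as needed.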
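
The abstract (field 520) describes DIQM as a deep-learning model trained to reproduce the score of an expensive perceptual metric. The sketch below is not the architecture from the paper; it is only a minimal illustration of that general idea, using an assumed small convolutional regressor in PyTorch that maps an image to a scalar quality score and is fitted against scores produced by a slow reference metric.

import torch
import torch.nn as nn

class TinyQualityNet(nn.Module):
    """Toy regressor: maps an RGB image to a single quality score."""
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(1),   # global average pooling: one 128-d vector per image
        )
        self.head = nn.Linear(128, 1)  # scalar output, e.g. a mean-opinion-score proxy

    def forward(self, x):
        z = self.features(x).flatten(1)
        return self.head(z).squeeze(1)

# One optimisation step, regressing scores produced by a slow reference metric.
model = TinyQualityNet()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = nn.MSELoss()

images = torch.rand(8, 3, 256, 256)   # placeholder batch of distorted images
targets = torch.rand(8)               # placeholder scores from the expensive perceptual metric
optimizer.zero_grad()
loss = loss_fn(model(images), targets)
loss.backward()
optimizer.step()

Once trained, only the forward pass of the small network is needed at evaluation time, which is where the computational savings over the original HVS-based metric come from.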