{ "access": { "embargo": { "active": false, "reason": null }, "files": "restricted", "record": "public", "status": "restricted" }, "created": "2023-01-24T15:58:08.092627+00:00", "custom_fields": { "journal:journal": { "issue": "11", "pages": "7144-7153", "title": "IEEE TRANSACTIONS ON IMAGE PROCESSING", "volume": "32" } }, "deletion_status": { "is_deleted": false, "status": "P" }, "files": { "enabled": true }, "id": "7565996", "is_draft": false, "is_published": true, "links": { "access": "https://zenodo.org/api/records/7565996/access", "access_links": "https://zenodo.org/api/records/7565996/access/links", "access_request": "https://zenodo.org/api/records/7565996/access/request", "access_users": "https://zenodo.org/api/records/7565996/access/users", "archive": "https://zenodo.org/api/records/7565996/files-archive", "archive_media": "https://zenodo.org/api/records/7565996/media-files-archive", "communities": "https://zenodo.org/api/records/7565996/communities", "communities-suggestions": "https://zenodo.org/api/records/7565996/communities-suggestions", "doi": "https://doi.org/10.1109/TIP.2022.3220058", "draft": "https://zenodo.org/api/records/7565996/draft", "files": "https://zenodo.org/api/records/7565996/files", "latest": "https://zenodo.org/api/records/7565996/versions/latest", "latest_html": "https://zenodo.org/records/7565996/latest", "media_files": "https://zenodo.org/api/records/7565996/media-files", "parent": "https://zenodo.org/api/records/7565995", "parent_doi": "https://zenodo.org/doi/", "parent_html": "https://zenodo.org/records/7565995", "requests": "https://zenodo.org/api/records/7565996/requests", "reserve_doi": "https://zenodo.org/api/records/7565996/draft/pids/doi", "self": "https://zenodo.org/api/records/7565996", "self_doi": "https://zenodo.org/doi/10.1109/TIP.2022.3220058", "self_html": "https://zenodo.org/records/7565996", "self_iiif_manifest": "https://zenodo.org/api/iiif/record:7565996/manifest", "self_iiif_sequence": 
"https://zenodo.org/api/iiif/record:7565996/sequence/default", "versions": "https://zenodo.org/api/records/7565996/versions" }, "media_files": { "enabled": false }, "metadata": { "creators": [ { "affiliations": [ { "name": "UNITN" } ], "person_or_org": { "family_name": "Yue Song", "name": "Yue Song", "type": "personal" } }, { "affiliations": [ { "name": "ETH" } ], "person_or_org": { "family_name": "Hao Tang", "name": "Hao Tang", "type": "personal" } }, { "affiliations": [ { "name": "Beihang University" } ], "person_or_org": { "family_name": "Mengyi Zhao", "name": "Mengyi Zhao", "type": "personal" } }, { "affiliations": [ { "name": "UNITN" } ], "person_or_org": { "family_name": "Nicu Sebe", "name": "Nicu Sebe", "type": "personal" } }, { "affiliations": [ { "name": "UNITN" } ], "person_or_org": { "family_name": "Wei Wang", "name": "Wei Wang", "type": "personal" } } ], "description": "Modern saliency detection models are based on the\nencoder-decoder framework and they use different strategies to\nfuse the multi-level features between the encoder and decoder\nto boost representation power. Motivated by recent work in\nimplicit modelling, we propose to introduce an implicit function\nto simulate the equilibrium state of the feature pyramid at infinite\ndepths. We question the existence of the ideal equilibrium and\nthus propose a quasi-equilibrium model by taking the first-order\nderivative into the black-box root solver using Taylor expansion.\nIt models more realistic convergence states and significantly\nimproves the network performance. We also propose a differentiable\nedge extractor that directly extracts edges from the\nsaliency masks. By optimizing the extracted edges, the generated\nsaliency masks are naturally optimized on contour constraints\nand the non-deterministic predictions are removed. We evaluate\nthe proposed methodology on five public datasets and extensive\nexperiments show that our method achieves new state-of-the-art\nperformances on six metrics across datasets.This community will accept all uploads that have been generated by partners of the AI4Media consortium that received funding from the project.\r\n", "description": "Horizon H2020 EC-funded project AI4Media: A European Excellence Centre for Media, Society and Democracy", "page": "Motivated by the challenges, risks and opportunities that the widespread use of artificial intelligence (AI) has brought to the media, society and politics, the EU-funded AI4Media project aspires to establish a centre of excellence and a wide network of researchers across Europe and beyond. Its focus will be on delivering the next generation of core AI advances to serve the key sector of media, making sure that European values surrounding ethical and trustworthy AI are embedded in future AI deployments. The project will be supplemented by a funding framework, a PhD programme and a set of use cases to demonstrate the impact of the actions taken on the media sector.\r\n", "title": "AI4Media H2020 Project" }, "revision_id": 0, "slug": "ai4media", "updated": "2020-10-22T12:35:38.584014+00:00" } ], "ids": [ "8538384b-cd84-4cc6-a77c-c259e9568dfd" ] }, "id": "7565995", "pids": {} }, "pids": { "doi": { "identifier": "10.1109/TIP.2022.3220058", "provider": "external" }, "oai": { "identifier": "oai:zenodo.org:7565996", "provider": "oai" } }, "revision_id": 2, "stats": { "all_versions": { "data_volume": 48402460.0, "downloads": 2, "unique_downloads": 2, "unique_views": 14, "views": 14 }, "this_version": { "data_volume": 48402460.0, "downloads": 2, "unique_downloads": 2, "unique_views": 14, "views": 14 } }, "status": "published", "updated": "2023-01-24T16:14:46.047660+00:00", "versions": { "index": 1, "is_latest": true } }