Conference paper — Open Access

User-space I/O for μs-level storage devices

Anastasios Papagiannis; Giorgos Saloustros; Manolis Marazakis; Angelos Bilas


JSON Export

{
  "conceptrecid": "803969", 
  "created": "2017-06-07T12:29:52.164594+00:00", 
  "doi": "10.1007/978-3-319-46079-6_44", 
  "id": 803970, 
  "links": {
    "badge": "https://zenodo.org/badge/doi/10.1007/978-3-319-46079-6_44.svg", 
    "doi": "https://doi.org/10.1007/978-3-319-46079-6_44"
  }, 
  "metadata": {
    "access_right": "open", 
    "access_right_category": "success", 
    "creators": [
      {
        "affiliation": "Institute of Computer Science, FORTH (ICS) and Department of Computer Science, University of Crete", 
        "name": "Anastasios Papagiannis"
      }, 
      {
        "affiliation": "Institute of Computer Science, FORTH (ICS)", 
        "name": "Giorgos Saloustros"
      }, 
      {
        "affiliation": "Institute of Computer Science, FORTH (ICS)", 
        "name": "Manolis Marazakis"
      }, 
      {
        "affiliation": "Institute of Computer Science, FORTH (ICS) and Department of Computer Science, University of Crete", 
        "name": "Angelos Bilas"
      }
    ], 
    "description": "<p>System software overheads in the I/O path, including VFS and file system code, become more pronounced with emerging low-latency storage devices. Currently, these overheads constitute the main bottleneck in the I/O path and they limit efficiency of modern storage systems. In this paper we present Iris, a new I/O path for applications, that minimizes overheads from system software in the common I/O path. The main idea is the separation of the control and data planes. The control plane consists of an unmodified Linux kernel and is responsible for handling data plane initialization and the normal processing path through the kernel for non-file related operations. The data plane is a lightweight mechanism to provide direct access to storage devices with minimum<br>\noverheads and without sacrificing strong protection semantics. Iris requires neither hardware support from the storage devices nor changes in user applications. We evaluate our early prototype and we find that it achieves on a single core up to 1.7\u00d7 and 2.2\u00d7 better read and write random IOPS, respectively, compared to the xfs and ext4 file systems. It also scales with the number of cores; using 4 cores Iris achieves 1.84\u00d7 and 1.96\u00d7 better read and write random IOPS, respectively.</p>", 
    "doi": "10.1007/978-3-319-46079-6_44", 
    "embargo_date": "2017-10-07", 
    "grants": [
      {
        "acronym": "ExaNeSt", 
        "code": "671553", 
        "funder": {
          "acronyms": [
            "EC"
          ], 
          "doi": "10.13039/501100000780", 
          "name": "European Commission"
        }, 
        "program": "H2020", 
        "title": "European Exascale System Interconnect and Storage"
      }
    ], 
    "keywords": [
      "NVM", 
      "I/O", 
      "storage systems", 
      "low latency", 
      "protection", 
      "European Union", 
      "Horizon 2020", 
      "Euratom", 
      "Euratom research & training programme 2014-2018"
    ], 
    "license": {
      "id": "cc-by-nc-nd-4.0"
    }, 
    "meeting": {
      "acronym": "WOPSSS 2016", 
      "dates": "19\u201323 June 2016", 
      "place": "Frankfurt, Germany", 
      "title": "ISC High Performance 2016 International Workshops ExaComm, E-MuCoCoS, HPC-IODC, IXPUG, IWOPH, P3MA, VHPC, WOPSSS", 
      "url": "https://link.springer.com/chapter/10.1007/978-3-319-46079-6_44"
    }, 
    "notes": "This paper has been presented at WOPSSS 2016: Workshop On Performance and Scalability of Storage Systems. An extended version of it appeared in ACM SIGOPS Operating Systems Review, Volume 50, Issue 3, December 2016,\nPages 3-11.", 
    "publication_date": "2016-10-06", 
    "related_identifiers": [
      {
        "identifier": "10.1145/3041710.3041713", 
        "relation": "isPreviousVersionOf", 
        "scheme": "doi"
      }
    ], 
    "relations": {
      "version": [
        {
          "count": 1, 
          "index": 0, 
          "is_last": true, 
          "last_child": {
            "pid_type": "recid", 
            "pid_value": "803970"
          }, 
          "parent": {
            "pid_type": "recid", 
            "pid_value": "803969"
          }
        }
      ]
    }, 
    "resource_type": {
      "subtype": "conferencepaper", 
      "title": "Conference paper", 
      "type": "publication"
    }, 
    "title": "User-space I/O for \u03bcs-level storage devices"
  }, 
  "owners": [
    32233
  ], 
  "revision": 9, 
  "updated": "2017-10-07T00:52:22.030807+00:00"
}

Share

Cite as