Conference paper · Open Access

Transformers without Tears: Improving the Normalization of Self-Attention

Nguyen, Toan Q.; Salazar, Julian


JSON Export

{
  "files": [
    {
      "links": {
        "self": "https://zenodo.org/api/files/af91a16c-9d1c-4a50-bfe9-be29a2d488cf/IWSLT2019_paper_26.pdf"
      }, 
      "checksum": "md5:5a6c18ef21719ddacdab79deec9a4b39", 
      "bucket": "af91a16c-9d1c-4a50-bfe9-be29a2d488cf", 
      "key": "IWSLT2019_paper_26.pdf", 
      "type": "pdf", 
      "size": 345932
    }
  ], 
  "owners": [
    50447
  ], 
  "doi": "10.5281/zenodo.3525484", 
  "stats": {
    "version_unique_downloads": 576.0, 
    "unique_views": 888.0, 
    "views": 1037.0, 
    "version_views": 1037.0, 
    "unique_downloads": 576.0, 
    "version_unique_views": 888.0, 
    "volume": 222780208.0, 
    "version_downloads": 644.0, 
    "downloads": 644.0, 
    "version_volume": 222780208.0
  }, 
  "links": {
    "doi": "https://doi.org/10.5281/zenodo.3525484", 
    "conceptdoi": "https://doi.org/10.5281/zenodo.3525483", 
    "bucket": "https://zenodo.org/api/files/af91a16c-9d1c-4a50-bfe9-be29a2d488cf", 
    "conceptbadge": "https://zenodo.org/badge/doi/10.5281/zenodo.3525483.svg", 
    "html": "https://zenodo.org/record/3525484", 
    "latest_html": "https://zenodo.org/record/3525484", 
    "badge": "https://zenodo.org/badge/doi/10.5281/zenodo.3525484.svg", 
    "latest": "https://zenodo.org/api/records/3525484"
  }, 
  "conceptdoi": "10.5281/zenodo.3525483", 
  "created": "2019-11-02T01:13:18.324800+00:00", 
  "updated": "2020-01-20T17:44:54.821195+00:00", 
  "conceptrecid": "3525483", 
  "revision": 2, 
  "id": 3525484, 
  "metadata": {
    "access_right_category": "success", 
    "doi": "10.5281/zenodo.3525484", 
    "description": "<p>We evaluate three simple, normalization-centric changes to improve Transformer training. First, we show that pre-norm residual connections (PRENORM) and smaller initializations enable warmup-free, validation-based training with large learning rates. Second, we propose&nbsp;l2&nbsp;normalization with a single scale parameter (SCALENORM) for faster training and better performance. Finally, we reaffirm the effectiveness of normalizing word embeddings to a fixed length (FIXNORM). On five low-resource translation pairs from TED Talks-based corpora, these changes always converge, giving an average +1.1 BLEU over state-of-the-art bilingual baselines and a new 32.8 BLEU on IWSLT &#39;15 English-Vietnamese. We ob- serve sharper performance curves, more consistent gradient norms, and a linear relationship between activation scaling and decoder depth. Surprisingly, in the high-resource setting (WMT &#39;14 English-German), SCALENORM&nbsp;and FIXNORM&nbsp;remain competitive but PRENORM&nbsp;degrades performance.</p>", 
    "language": "eng", 
    "title": "Transformers without Tears: Improving the Normalization of Self-Attention", 
    "license": {
      "id": "CC-BY-4.0"
    }, 
    "relations": {
      "version": [
        {
          "count": 1, 
          "index": 0, 
          "parent": {
            "pid_type": "recid", 
            "pid_value": "3525483"
          }, 
          "is_last": true, 
          "last_child": {
            "pid_type": "recid", 
            "pid_value": "3525484"
          }
        }
      ]
    }, 
    "communities": [
      {
        "id": "iwslt2019"
      }
    ], 
    "publication_date": "2019-11-02", 
    "creators": [
      {
        "affiliation": "University of Notre Dame", 
        "name": "Nguyen, Toan Q."
      }, 
      {
        "affiliation": "Amazon AWS AI", 
        "name": "Salazar, Julian"
      }
    ], 
    "access_right": "open", 
    "resource_type": {
      "subtype": "conferencepaper", 
      "type": "publication", 
      "title": "Conference paper"
    }, 
    "related_identifiers": [
      {
        "scheme": "doi", 
        "identifier": "10.5281/zenodo.3525483", 
        "relation": "isVersionOf"
      }
    ]
  }
}
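
The abstract in the record above describes SCALENORM as l2 normalization with a single scale parameter and FIXNORM as normalizing word embeddings to a fixed length. The sketch below is a minimal PyTorch-style illustration of those two ideas; the class and function names, the eps guard, and the sqrt(d_model) initialization are assumptions for illustration, not taken from the authors' released code.

    import torch
    import torch.nn as nn


    class ScaleNorm(nn.Module):
        """SCALENORM: l2-normalize activations, then rescale by a single learned scalar g."""

        def __init__(self, scale: float, eps: float = 1e-5):
            super().__init__()
            self.g = nn.Parameter(torch.tensor(scale))  # one scalar; sqrt(d_model) init is an assumption
            self.eps = eps

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # g * x / ||x||_2, normalizing along the feature dimension
            return self.g * x / x.norm(dim=-1, keepdim=True).clamp(min=self.eps)


    def fixnorm(embedding: torch.Tensor, length: float = 1.0, eps: float = 1e-5) -> torch.Tensor:
        """FIXNORM: constrain each word-embedding row to a fixed l2 length (length value assumed)."""
        return length * embedding / embedding.norm(dim=-1, keepdim=True).clamp(min=eps)


    # Usage sketch: drop ScaleNorm in where a Transformer layer would otherwise use LayerNorm.
    d_model = 512
    norm = ScaleNorm(scale=d_model ** 0.5)
    x = torch.randn(2, 10, d_model)  # (batch, sequence length, d_model)
    print(norm(x).shape)             # torch.Size([2, 10, 512])
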
Statistics          All versions    This version
Views               1,037           1,037
Downloads           644             644
Data volume         222.8 MB        222.8 MB
Unique views        888             888
Unique downloads    576             576
