Journal article · Open Access

A collection of Swedish diachronic word embedding models trained on historical newspaper data

Hengchen, Simon; Tahmasebi, Nina


DCAT Export

<?xml version='1.0' encoding='utf-8'?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:adms="http://www.w3.org/ns/adms#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dct="http://purl.org/dc/terms/" xmlns:dctype="http://purl.org/dc/dcmitype/" xmlns:dcat="http://www.w3.org/ns/dcat#" xmlns:duv="http://www.w3.org/ns/duv#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:frapo="http://purl.org/cerif/frapo/" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gsp="http://www.opengis.net/ont/geosparql#" xmlns:locn="http://www.w3.org/ns/locn#" xmlns:org="http://www.w3.org/ns/org#" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:prov="http://www.w3.org/ns/prov#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:schema="http://schema.org/" xmlns:skos="http://www.w3.org/2004/02/skos/core#" xmlns:vcard="http://www.w3.org/2006/vcard/ns#" xmlns:wdrs="http://www.w3.org/2007/05/powder-s#">
  <rdf:Description rdf:about="https://doi.org/10.5281/zenodo.4301658">
    <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://doi.org/10.5281/zenodo.4301658</dct:identifier>
    <foaf:page rdf:resource="https://doi.org/10.5281/zenodo.4301658"/>
    <dct:creator>
      <rdf:Description rdf:about="http://orcid.org/0000-0002-8453-7221">
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#string">0000-0002-8453-7221</dct:identifier>
        <foaf:name>Hengchen, Simon</foaf:name>
        <foaf:givenName>Simon</foaf:givenName>
        <foaf:familyName>Hengchen</foaf:familyName>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>University of Gothenburg</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:creator>
      <rdf:Description rdf:about="http://orcid.org/0000-0003-1688-1845">
        <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Agent"/>
        <dct:identifier rdf:datatype="http://www.w3.org/2001/XMLSchema#string">0000-0003-1688-1845</dct:identifier>
        <foaf:name>Tahmasebi, Nina</foaf:name>
        <foaf:givenName>Nina</foaf:givenName>
        <foaf:familyName>Tahmasebi</foaf:familyName>
        <org:memberOf>
          <foaf:Organization>
            <foaf:name>University of Gothenburg</foaf:name>
          </foaf:Organization>
        </org:memberOf>
      </rdf:Description>
    </dct:creator>
    <dct:title>A collection of Swedish diachronic word embedding models trained on historical newspaper data</dct:title>
    <dct:publisher>
      <foaf:Agent>
        <foaf:name>Zenodo</foaf:name>
      </foaf:Agent>
    </dct:publisher>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#gYear">2020</dct:issued>
    <dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#date">2020-12-01</dct:issued>
    <dct:language rdf:resource="http://publications.europa.eu/resource/authority/language/SWE"/>
    <owl:sameAs rdf:resource="https://zenodo.org/record/4301658"/>
    <adms:identifier>
      <adms:Identifier>
        <skos:notation rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI">https://zenodo.org/record/4301658</skos:notation>
        <adms:schemeAgency>url</adms:schemeAgency>
      </adms:Identifier>
    </adms:identifier>
    <dct:isVersionOf rdf:resource="https://doi.org/10.5281/zenodo.4274481"/>
    <dct:description>&lt;p&gt;&lt;em&gt;&lt;strong&gt;A collection of Swedish diachronic word embedding models trained on historical newspaper data&lt;/strong&gt;&lt;/em&gt;&lt;/p&gt; &lt;p&gt;Simon Hengchen, Nina Tahmasebi&lt;/p&gt; &lt;p&gt;&lt;em&gt;NOTE: this README.md&amp;nbsp;is a summary. For all details, see the paper at&amp;nbsp;&lt;a href="https://doi.org/10.5334/johd.22"&gt;https://doi.org/10.5334/johd.22&lt;/a&gt;&lt;/em&gt;&lt;/p&gt; &lt;p&gt;&lt;em&gt;NOTE: this data release is available on Zenodo at&amp;nbsp;&lt;a href="https://zenodo.org/record/4301658"&gt;https://zenodo.org/record/4301658&lt;/a&gt;&lt;/em&gt;&lt;/p&gt; &lt;p&gt;&lt;strong&gt;Description&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;This is the data release accompanying the Journal of Open Humanities Data paper &amp;quot;A collection of Swedish diachronic word embedding models trained on historical newspaper data.&amp;quot; This paper describes the creation of several word embedding models based on a large collection of diachronic Swedish newspaper material available through Spr&amp;aring;kbanken Text, the Swedish language bank. This data was produced in the context of Spr&amp;aring;kbanken Text&amp;#39;s continued mission to collaborate with humanities and natural language processing researchers and to provide freely available language resources for the development of state-of-the-art NLP methods and tools.&lt;/p&gt; &lt;p&gt;&lt;strong&gt;Bibtex&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;If you use the models or the code provided in this paper, please cite the following:&lt;/p&gt; &lt;pre&gt;&lt;code&gt;@article{hengchen-tahmasebi-2021-collection,
  title = "A collection of {S}wedish diachronic word embedding models trained on historical newspaper data",
  author = "Hengchen, Simon and Tahmasebi, Nina",
  journal = "Journal of Open Humanities Data",
  year = "2021",
  pages = {1--7},
  volume = {7},
  number = {2},
  doi = {10.5334/johd.22}
}
&lt;/code&gt;&lt;/pre&gt; &lt;p&gt;&lt;strong&gt;Overview&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;We release diachronic word2vec and fastText models in their skip-gram with negative sampling (SGNS) architecture. The models are trained on 20-year time bins, with two temporal alignment strategies: independently-trained models for post-hoc alignment, and incremental training. The independently-trained models are NOT aligned, leaving the&amp;nbsp;&lt;a href="https://github.com/Garrafao/LSCDetection/tree/master/alignment"&gt;choice of alignment&lt;/a&gt;&amp;nbsp;to the end user. We provide code to reproduce our pipeline, and code examples to load and use the models.&lt;/p&gt; &lt;p&gt;&lt;strong&gt;Data&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;The entirety of the Kungliga bibliotekets historiska tidningar (Kubhist 2) corpus was used. The original data was scanned and OCRed by the National Library of Sweden. It consists of Swedish newspapers from all parts of Sweden. It has since been run through the Sparv annotation pipeline by Martin Hammarstedt at Spr&amp;aring;kbanken Text.&lt;/p&gt; &lt;p&gt;&lt;strong&gt;Preprocessing&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;The text was retrieved from the original XML. 
The processing steps prior to training the models are:&lt;/p&gt; &lt;ul&gt; &lt;li&gt;lowercasing&lt;/li&gt; &lt;li&gt;removal of digits&lt;/li&gt; &lt;li&gt;removal of all characters not belonging to the Swedish alphabet (a-z&amp;auml;&amp;aring;&amp;ouml;)&lt;/li&gt; &lt;li&gt;removal of tokens of two characters or fewer&lt;/li&gt; &lt;li&gt;merging of all texts pertaining to the same double decade (1740-1759; 1760-1779; ...)&lt;/li&gt; &lt;/ul&gt; &lt;p&gt;&lt;strong&gt;Quality control&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;All models have been queried for some control analogies by a native speaker of Swedish. A reviewer (a non-native speaker of Swedish), whom we thank, also performed checks on the local neighbourhoods of selected terms, performed vector arithmetic, and confirmed the models behaved as expected.&lt;/p&gt; &lt;p&gt;&lt;strong&gt;Structure&lt;/strong&gt;&lt;/p&gt; &lt;pre&gt;&lt;code&gt;ROOT/
  README.md
  code/
    *.py files
    requirements.txt
  fasttext/
    incremental/
      *.ft files
      *.npy files
    indep/
      *.ft files
      *.npy files
  word2vec/
    incremental/
      *.w2v files
      *.npy files
    indep/
      *.w2v files
      *.npy files
&lt;/code&gt;&lt;/pre&gt; &lt;p&gt;Regarding the code:&lt;/p&gt; &lt;ul&gt; &lt;li&gt;&lt;code&gt;kubhist_XML_to_gensim.py&lt;/code&gt;&amp;nbsp;will transform the XML into &amp;quot;LineSentence&amp;quot;, &amp;quot;clean&amp;quot; corpora&lt;/li&gt; &lt;li&gt;&lt;code&gt;train_w2v-ft.py&lt;/code&gt;&amp;nbsp;will train models&lt;/li&gt; &lt;li&gt;&lt;code&gt;load_run_models.py&lt;/code&gt;&amp;nbsp;will print some examples of what can be done with embeddings&lt;/li&gt; &lt;li&gt;&lt;code&gt;utils.py&lt;/code&gt;&amp;nbsp;contains the functions called by the scripts above&lt;/li&gt; &lt;li&gt;&lt;code&gt;requirements.txt&lt;/code&gt;&amp;nbsp;contains the output of&amp;nbsp;&lt;code&gt;pip freeze &amp;gt; requirements.txt&lt;/code&gt;, i.e. the Python libraries needed to run the scripts above&lt;/li&gt; &lt;/ul&gt; &lt;p&gt;&lt;strong&gt;Funding&lt;/strong&gt;&lt;/p&gt; &lt;p&gt;This work has been funded in part by the project&amp;nbsp;&lt;a href="https://languagechange.org/"&gt;&lt;em&gt;Towards Computational Lexical Semantic Change Detection&lt;/em&gt;&lt;/a&gt;,&amp;nbsp;supported by the Swedish Research Council (2019--2022; dnr 2018-01184), and by&amp;nbsp;&lt;em&gt;Nationella Spr&amp;aring;kbanken&lt;/em&gt;&amp;nbsp;(the Swedish National Language Bank) -- jointly funded by the Swedish Research Council (2018--2024; dnr 2017-00626) and its 10 partner institutions, to NT.&lt;/p&gt;</dct:description>
    <dct:accessRights rdf:resource="http://publications.europa.eu/resource/authority/access-right/PUBLIC"/>
    <dct:accessRights>
      <dct:RightsStatement rdf:about="info:eu-repo/semantics/openAccess">
        <rdfs:label>Open Access</rdfs:label>
      </dct:RightsStatement>
    </dct:accessRights>
    <dct:license rdf:resource="https://creativecommons.org/licenses/by/4.0/legalcode"/>
    <dcat:distribution>
      <dcat:Distribution>
        <dcat:accessURL rdf:resource="https://doi.org/10.5281/zenodo.4301658"/>
        <dcat:byteSize>16249535451</dcat:byteSize>
        <dcat:downloadURL rdf:resource="https://zenodo.org/record/4301658/files/HENGCHEN-TAHMASEBI_-_2020_-_Kubhist2_diachronic_embeddings.zip"/>
        <dcat:mediaType>application/zip</dcat:mediaType>
      </dcat:Distribution>
    </dcat:distribution>
  </rdf:Description>
</rdf:RDF>
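
The README embedded in the description above lists the preprocessing steps and the scripts shipped with the release. The following is a minimal sketch of the listed text normalisation only; it is not the release's kubhist_XML_to_gensim.py (which also extracts text from the Kubhist 2 XML and merges it per double decade), and the function name and example sentence are made up for illustration.

    import re

    # Hedged re-implementation of the preprocessing steps listed in the README,
    # for illustration only: lowercase, drop digits, keep only a-zäåö,
    # and drop tokens of two characters or fewer.
    def clean_line(line):
        line = line.lower()
        line = re.sub(r"[0-9]", " ", line)
        line = re.sub(r"[^a-zäåö ]", " ", line)
        return [token for token in line.split() if len(token) > 2]

    print(clean_line("Åbo Tidningar, den 12 Maj 1798."))
    # -> ['åbo', 'tidningar', 'den', 'maj']

The README also points to load_run_models.py for examples of what can be done with the embeddings. Below is a rough sketch of that kind of usage with gensim, assuming the *.w2v files are saved gensim Word2Vec models; the file path and the query words are hypothetical, not taken from the release.

    from gensim.models import Word2Vec

    # Load one double-decade bin of the independently trained word2vec models
    # (hypothetical file name, following the directory layout in the README).
    model = Word2Vec.load("word2vec/indep/1800-1819.w2v")

    # Local neighbourhood of a query word, as in the quality-control checks.
    for word, similarity in model.wv.most_similar("tidning", topn=10):
        print(word, round(similarity, 3))

    # Simple analogy-style vector arithmetic.
    print(model.wv.most_similar(positive=["kung", "kvinna"], negative=["man"], topn=5))

Note that, as the Overview states, the independently trained bins are not aligned: vectors from different 20-year bins live in separate spaces, so cross-bin comparisons require an alignment step (for example the alignment code linked from the README) before similarities across time are meaningful.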
                   All versions   This version
Views              7,949          7,898
Downloads          3,350          3,350
Data volume        54.4 TB        54.4 TB
Unique views       7,713          7,688
Unique downloads   3,331          3,331
