There is a newer version of this record available.

Dataset · Open Access

How We Type: Movement Strategies and Performance in Everyday Typing

Feit, Anna Maria; Weir, Daryl; Oulasvirta, Antti


MARC21 XML Export

<?xml version='1.0' encoding='UTF-8'?>
<record xmlns="http://www.loc.gov/MARC21/slim">
  <leader>00000nmm##2200000uu#4500</leader>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Text entry</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Typing</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Touch typing</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Keyboard</subfield>
  </datafield>
  <datafield tag="653" ind1=" " ind2=" ">
    <subfield code="a">Human-Computer Interaction</subfield>
  </datafield>
  <controlfield tag="005">20200924073553.0</controlfield>
  <controlfield tag="001">4034268</controlfield>
  <datafield tag="711" ind1=" " ind2=" ">
    <subfield code="d">May, 2016</subfield>
    <subfield code="g">CHI</subfield>
    <subfield code="a">Conference on Human Factors in Computing Systems</subfield>
    <subfield code="c">Seoul, South Korea</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="u">Aalto University</subfield>
    <subfield code="a">Weir, Daryl</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="u">Aalto University</subfield>
    <subfield code="0">(orcid)0000-0002-2498-7837</subfield>
    <subfield code="a">Oulasvirta, Antti</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">16224</subfield>
    <subfield code="z">md5:ebcfd13a312aaf203ebca0d09526eed7</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/Background.xlsx</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">15457687855</subfield>
    <subfield code="z">md5:dd6e89b3b694a3ee552fa1b06f4e37e5</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/Eye tracking.zip</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">903405</subfield>
    <subfield code="z">md5:7511817b97668250315a3007c9ae499b</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/hand_markers.png</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">1594</subfield>
    <subfield code="z">md5:39c8e67a4a01debe06d388f07fd05a39</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/keyboard_flat_coordinates.csv</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">981077704</subfield>
    <subfield code="z">md5:03e27cb33b946b66217c93915babd68f</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/Motion Capture.zip</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">6827</subfield>
    <subfield code="z">md5:58b5e068625bbfe06af1551d8fa3d85f</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/Readme.txt</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">29292760284</subfield>
    <subfield code="z">md5:193daa9779eb1a862b809dd6772314be</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/Reference video.zip</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">733527</subfield>
    <subfield code="z">md5:c3d020b88d41069172814721481f2752</subfield>
    <subfield code="u">https://zenodo.org/record/4034268/files/Typing.zip</subfield>
  </datafield>
  <datafield tag="542" ind1=" " ind2=" ">
    <subfield code="l">open</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2016-05-05</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="O">
    <subfield code="p">openaire_data</subfield>
    <subfield code="o">oai:zenodo.org:4034268</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="u">Aalto University</subfield>
    <subfield code="0">(orcid)0000-0003-4168-6099</subfield>
    <subfield code="a">Feit, Anna Maria</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">How We Type: Movement Strategies and Performance in Everyday Typing</subfield>
  </datafield>
  <datafield tag="536" ind1=" " ind2=" ">
    <subfield code="c">637991</subfield>
    <subfield code="a">Computational User Interface Design</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
    <subfield code="u">https://creativecommons.org/licenses/by-nc/4.0/legalcode</subfield>
    <subfield code="a">Creative Commons Attribution Non Commercial 4.0 International</subfield>
  </datafield>
  <datafield tag="650" ind1="1" ind2="7">
    <subfield code="a">cc-by</subfield>
    <subfield code="2">opendefinition.org</subfield>
  </datafield>
  <datafield tag="520" ind1=" " ind2=" ">
    <subfield code="a">&lt;p&gt;Tihs dataset contains motion capture, keylog, eye tracking, and video data of 30 participants, transcribing regular sentences. It is part of the following publication:&lt;/p&gt;

&lt;p&gt;Anna Maria Feit, Daryl Weir, Antti Oulasvirta. 2016.How We Type: Movement Strategies and Performance in Everyday Typing.In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI &amp;#39;16). ACM, New York, NY, USA, 4262-4273&lt;/p&gt;

&lt;p&gt;The paper revisits the present understanding of typing, which originates mostly from studies of trained typists using the tenfinger touch typing system. Our goal was&amp;nbsp;to characterise the majority of present-day users who are untrained and employ diverse, self-taught techniques. In a transcription task, we compared self-taught typists and those that took a touch typing course. We reported several differences in performance, gaze deployment and movement strategies. The most surprising finding was&amp;nbsp;that self-taught typists can achieve performance levels comparable with touch typists, even when using fewer fingers. Motion capture data exposed&amp;nbsp;3 predictors of high performance: 1) unambiguous mapping (a letter is consistently pressed by the same finger), 2) active preparation of upcoming keystrokes, and 3) minimal global hand motion.&amp;nbsp;&lt;/p&gt;

&lt;p&gt;The dataset is free for non-commercial use. Please cite the above work.&amp;nbsp;&lt;/p&gt;

&lt;p&gt;Note that participants wrote in either Finnish or English.&amp;nbsp;&lt;/p&gt;</subfield>
  </datafield>
  <datafield tag="773" ind1=" " ind2=" ">
    <subfield code="n">doi</subfield>
    <subfield code="i">isPartOf</subfield>
    <subfield code="a">10.1145/2858036.2858233</subfield>
  </datafield>
  <datafield tag="773" ind1=" " ind2=" ">
    <subfield code="n">doi</subfield>
    <subfield code="i">isVersionOf</subfield>
    <subfield code="a">10.5281/zenodo.4034267</subfield>
  </datafield>
  <datafield tag="024" ind1=" " ind2=" ">
    <subfield code="a">10.5281/zenodo.4034268</subfield>
    <subfield code="2">doi</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">dataset</subfield>
  </datafield>
</record>
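
For reuse, each 856 datafield above pairs a file URL (subfield u) with its size in bytes (subfield s) and an MD5 checksum (subfield z). Below is a minimal Python sketch for pulling those triples out of the export and verifying a downloaded file against its checksum. The file name record.xml and the helper names list_files and md5_matches are illustrative assumptions, not part of any Zenodo tooling.

import hashlib
import xml.etree.ElementTree as ET

# Namespace of the MARC21/slim record shown above.
NS = {"marc": "http://www.loc.gov/MARC21/slim"}

def list_files(record_path):
    """Yield (url, size_in_bytes, md5_hex) for every 856 datafield."""
    root = ET.parse(record_path).getroot()
    for field in root.iterfind('marc:datafield[@tag="856"]', NS):
        sub = {s.get("code"): s.text for s in field.iterfind("marc:subfield", NS)}
        # Subfield z stores the checksum as "md5:<hex>"; subfield s is the size.
        yield sub["u"], int(sub["s"]), sub["z"].split(":", 1)[1]

def md5_matches(local_path, expected_hex, chunk_size=1 << 20):
    """Hash a downloaded file in chunks and compare it to the record's MD5."""
    digest = hashlib.md5()
    with open(local_path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest() == expected_hex

if __name__ == "__main__":
    for url, size, md5 in list_files("record.xml"):
        print(f"{size:>14,} B  md5:{md5}  {url}")

Chunked hashing keeps memory flat even for the multi-gigabyte archives in this record (for example, Reference video.zip at roughly 29 GB).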
Views and downloads

                    All versions   This version
Views                        174            148
Downloads                     81             59
Data volume             412.9 GB       199.7 GB
Unique views                 159            141
Unique downloads              35             29

Cite as

Feit, Anna Maria; Weir, Daryl; Oulasvirta, Antti. (2016). How We Type: Movement Strategies and Performance in Everyday Typing [Data set]. Zenodo. https://doi.org/10.5281/zenodo.4034268