-
Notifications
You must be signed in to change notification settings - Fork 17
/
Copy pathjs.bib
151 lines (139 loc) · 11.1 KB
/
js.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
@comment{jabref-meta: databaseType:bibtex;}
@misc{denard2009london,
  title         = {The {London} {Charter} for the Computer-Based Visualisation of Cultural Heritage},
  author        = {Denard, Hugh and others},
  month         = feb,
  year          = {2009},
  pages         = {1--13},
  internal-note = {NOTE(review): was @article with journal = "no. February" -- Google-Scholar export junk. The work is a charter/guidelines document, not a journal article; month recovered from the junk field. TODO: confirm draft version and add url (londoncharter.org).},
}
@book{gitelman_raw_2013,
  address   = {Cambridge, Massachusetts ; London, England},
  title     = {Raw {Data} {Is} an {Oxymoron}},
  isbn      = {978-0-262-51828-4},
  abstract  = {Episodes in the history of data, from early modern math problems to today's inescapable "dataveillance," that demonstrate the dependence of data on culture.We live in the era of Big Data, with storage and transmission capacity measured not just in terabytes but in petabytes (where peta- denotes a quadrillion, or a thousand trillion). Data collection is constant and even insidious, with every click and every "like" stored somewhere for something. This book reminds us that data is anything but "raw," that we shouldn't think of data as a natural resource but as a cultural one that needs to be generated, protected, and interpreted. The book's essays describe eight episodes in the history of data from the predigital to the digital. Together they address such issues as the ways that different kinds of data and different domains of inquiry are mutually defining; how data are variously "cooked" in the processes of their collection and use; and conflicts over what can―or can't―be "reduced" to data. Contributors discuss the intellectual history of data as a concept; describe early financial modeling and some unusual sources for astronomical data; discover the prehistory of the database in newspaper clippings and index cards; and consider contemporary "dataveillance" of our online habits as well as the complexity of scientific data curation. Essay AuthorsGeoffrey C. Bowker, Kevin R. Brine, Ellen Gruber Garvey, Lisa Gitelman, Steven J. Jackson, Virginia Jackson, Markus Krajewski, Mary Poovey, Rita Raley, David Ribes, Daniel Rosenberg, Matthew Stanley, Travis D. Williams},
  language  = {English},
  publisher = {The MIT Press},
  editor    = {Gitelman, Lisa},
  month     = jan,
  year      = {2013},
}
@misc{university_of_york_department_of_archaeology_heritage_2017,
  title        = {The {Heritage} {Jam}},
  url          = {http://www.heritagejam.org/home2/},
  language     = {en-GB},
  urldate      = {2018-08-15},
  howpublished = {The Heritage Jam},
  author       = {{University of York Department of Archaeology}},
  year         = {2017},
  file         = {Snapshot:/home/jolene/Zotero/storage/ZLL69L4B/home2.html:text/html},
}
@article{marwick_computational_2016,
  author     = {Marwick, Ben},
  title      = {Computational reproducibility in archaeological research: basic principles and a case study of their implementation},
  shorttitle = {Computational reproducibility in archaeological research},
  journal    = {Journal of Archaeological Method and Theory},
  pages      = {1--27},
  year       = {2016},
  doi        = {10.1007/s10816-015-9272-9},
  url        = {http://link.springer.com/article/10.1007/s10816-015-9272-9},
  urldate    = {2017-02-16},
  keywords   = {data publication},
  file       = {Snapshot:/Users/shawngraham/Library/Application Support/Zotero/Profiles/rcpe5jts.default/zotero/storage/NCT4Q4ZA/s10816-015-9272-9.html:text/html},
}
@article{broman_data_2018,
  title    = {Data {Organization} in {Spreadsheets}},
  volume   = {72},
  issn     = {0003-1305},
  url      = {https://doi.org/10.1080/00031305.2017.1375989},
  doi      = {10.1080/00031305.2017.1375989},
  abstract = {Spreadsheets are widely used software tools for data entry, storage, analysis, and visualization. Focusing on the data entry and storage aspects, this article offers practical recommendations for organizing spreadsheet data to reduce errors and ease later analyses. The basic principles are: be consistent, write dates like YYYY-MM-DD, do not leave any cells empty, put just one thing in a cell, organize the data as a single rectangle (with subjects as rows and variables as columns, and with a single header row), create a data dictionary, do not include calculations in the raw data files, do not use font color or highlighting as data, choose good names for things, make backups, use data validation to avoid data entry errors, and save the data in plain text files.},
  number   = {1},
  urldate  = {2018-08-19},
  journal  = {The American Statistician},
  author   = {Broman, Karl W. and Woo, Kara H.},
  month    = jan,
  year     = {2018},
  keywords = {Data management, Data organization, Microsoft Excel, Spreadsheets},
  pages    = {2--10},
  file     = {Full Text PDF:/home/jolene/Zotero/storage/L6BGHTHI/Broman and Woo - 2018 - Data Organization in Spreadsheets.pdf:application/pdf;Snapshot:/home/jolene/Zotero/storage/TZHQYSM3/00031305.2017.html:text/html},
}
@article{ziemann_gene_2016,
  title    = {Gene name errors are widespread in the scientific literature},
  volume   = {17},
  issn     = {1474-760X},
  url      = {https://doi.org/10.1186/s13059-016-1044-7},
  doi      = {10.1186/s13059-016-1044-7},
  abstract = {The spreadsheet software Microsoft Excel, when used with default settings, is known to convert gene names to dates and floating-point numbers. A programmatic scan of leading genomics journals reveals that approximately one-fifth of papers with supplementary Excel gene lists contain erroneous gene name conversions.},
  number   = {1},
  urldate  = {2018-09-01},
  journal  = {Genome Biology},
  author   = {Ziemann, Mark and Eren, Yotam and El-Osta, Assam},
  month    = aug,
  year     = {2016},
  pages    = {177},
  file     = {Full Text PDF:/home/jolene/Zotero/storage/NLPNCF4I/Ziemann et al. - 2016 - Gene name errors are widespread in the scientific .pdf:application/pdf;Snapshot:/home/jolene/Zotero/storage/Q6SJITHN/s13059-016-1044-7.html:text/html},
}
@article{wilson_good_2017,
  title    = {Good enough practices in scientific computing},
  volume   = {13},
  issn     = {1553-7358},
  url      = {http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510},
  doi      = {10.1371/journal.pcbi.1005510},
  abstract = {Author summary Computers are now essential in all branches of science, but most researchers are never taught the equivalent of basic lab skills for research computing. As a result, data can get lost, analyses can take much longer than necessary, and researchers are limited in how effectively they can work with software and data. Computing workflows need to follow the same practices as lab projects and notebooks, with organized data, documented steps, and the project structured for reproducibility, but researchers new to computing often don't know where to start. This paper presents a set of good computing practices that every researcher can adopt, regardless of their current level of computational skill. These practices, which encompass data management, programming, collaborating with colleagues, organizing projects, tracking work, and writing manuscripts, are drawn from a wide variety of published sources from our daily lives and from our work with volunteer organizations that have delivered workshops to over 11,000 people since 2010.},
  language = {en},
  number   = {6},
  urldate  = {2018-04-09},
  journal  = {PLOS Computational Biology},
  author   = {Wilson, Greg and Bryan, Jennifer and Cranston, Karen and Kitzes, Justin and Nederbragt, Lex and Teal, Tracy K.},
  month    = jun,
  year     = {2017},
  keywords = {Data management, Computer software, Control systems, Data processing, Programming languages, Reproducibility, Software tools, Source code},
  pages    = {e1005510},
  file     = {Full Text PDF:/home/jolene/Zotero/storage/Z6K4Y8BF/Wilson et al. - 2017 - Good enough practices in scientific computing.pdf:application/pdf;Snapshot:/home/jolene/Zotero/storage/88W58PN9/article.html:text/html},
}
@book{owens_theory_2018,
  title     = {The {Theory} and {Craft} of {Digital} {Preservation}},
  isbn      = {978-1-4214-2697-6},
  abstract  = {Many people believe that what is on the Internet will be around forever. At the same time, warnings of an impending "digital dark age"—where records of the recent past become completely lost or inaccessible—appear with regular frequency in the popular press. It's as if we need a system to safeguard our digital records for future scholars and researchers. Digital preservation experts, however, suggest that this is an illusory dream not worth chasing. Ensuring long-term access to digital information is not that straightforward; it is a complex issue with a significant ethical dimension. It is a vocation.In The Theory and Craft of Digital Preservation, librarian Trevor Owens establishes a baseline for practice in this field. In the first section of the book, Owens synthesizes work on the history of preservation in a range of areas (archives, manuscripts, recorded sound, etc.) and sets that history in dialogue with work in new media studies, platform studies, and media archeology. In later chapters, Owens builds from this theoretical framework and maps out a more deliberate and intentional approach to digital preservation. A basic introduction to the issues and practices of digital preservation, the book is anchored in an understanding of the traditions of preservation and the nature of digital objects and media. Based on extensive reading, research, and writing on digital preservation, Owens's work will prove an invaluable reference for archivists, librarians, and museum professionals, as well as scholars and researchers in the digital humanities.},
  language  = {en},
  publisher = {JHU Press},
  author    = {Owens, Trevor},
  month     = dec,
  year      = {2018},
  note      = {Google-Books-ID: 4\_R0DwAAQBAJ},
  keywords  = {Literary Criticism / Semiotics \& Theory},
}
@misc{science_daily_preservation_2017,
  title        = {Preservation for the (digital) ages: {Digital} archivists collaborate with classicists to improve database preservation methods},
  shorttitle   = {Preservation for the (digital) ages},
  url          = {https://www.sciencedaily.com/releases/2017/10/171017124345.htm},
  abstract     = {Researchers working with classicists and computer scientists have developed a method to preserve digital humanities databases. The preservation strategy allows scholars to re-launch a database application in a variety of environments -- from individual computers, to virtual machines, to future web servers -- without compromising its interactive features.},
  language     = {en},
  urldate      = {2018-02-07},
  howpublished = {ScienceDaily},
  author       = {{Science Daily}},
  month        = oct,
  year         = {2017},
  file         = {Snapshot:/home/jolene/Zotero/storage/5YBIWQI8/171017124345.html:text/html},
}
@misc{archaeology_data_service_archaeology_2014,
  title        = {Archaeology {Data} {Service} {Collections} {Policy}},
  url          = {http://archaeologydataservice.ac.uk/advice/collectionsPolicy.xhtml},
  urldate      = {2018-12-06},
  howpublished = {Archaeology Data Service Collections Policy},
  author       = {{Archaeology Data Service}},
  month        = apr,
  year         = {2014},
  file         = {Archaeology Data Service:/home/jolene/Zotero/storage/EAW643AR/collectionsPolicy.html:text/html},
}
@article{hadley_wickham_tidy_2014,
  title    = {Tidy {Data}},
  volume   = {59},
  url      = {https://www.jstatsoft.org/article/view/v059i10},
  doi      = {10.18637/jss.v059.i10},
  language = {en-US},
  urldate  = {2018-10-03},
  journal  = {Journal of Statistical Software},
  author   = {Wickham, Hadley},
  month    = sep,
  year     = {2014},
  file     = {Snapshot:/home/jolene/Zotero/storage/XTRIT6TE/v059i10.html:text/html},
}