@book{andrienko_exploratory_2006, address = {Berlin Heidelberg}, title = {Exploratory {Analysis} of {Spatial} and {Temporal} {Data}: {A} {Systematic} {Approach}}, isbn = {978-3-540-25994-7}, shorttitle = {Exploratory {Analysis} of {Spatial} and {Temporal} {Data}}, url = {https://www.springer.com/de/book/9783540259947}, abstract = {Exploratory data analysis (EDA) is about detecting and describing patterns, trends, and relations in data, motivated by certain purposes of investigation. As something relevant is detected in data, new questions arise, causing specific parts to be viewed in more detail. So EDA has a significant appeal: it involves hypothesis generation rather than mere hypothesis testing. The authors describe in detail and systemize approaches, techniques, and methods for exploring spatial and temporal data in particular. They start by developing a general view of data structures and characteristics and then build on top of this a general task typology, distinguishing between elementary and synoptic tasks. This typology is then applied to the description of existing approaches and technologies, resulting not just in recommendations for choosing methods but in a set of generic procedures for data exploration. Professionals practicing analysis will profit from tested solutions – illustrated in many examples – for reuse in the catalogue of techniques presented. Students and researchers will appreciate the detailed description and classification of exploration techniques, which are not limited to spatial data only. 
In addition, the general principles and approaches described will be useful for designers of new methods for EDA.}, language = {en}, publisher = {Springer-Verlag}, author = {Andrienko, Natalia and Andrienko, Gennady}, year = {2006}, doi = {10.1007/3-540-31190-4}, } @book{card_readings_1999, address = {San Francisco, Calif.}, edition = {Nachdr.}, series = {The {Morgan} {Kaufmann} series in interactive technologies}, title = {Readings in information visualization: using vision to think}, isbn = {978-1-55860-533-6}, shorttitle = {Readings in information visualization}, language = {en}, publisher = {Morgan Kaufmann}, author = {Card, Stuart K. and Mackinlay, Jock D. and Shneiderman, Ben}, year = {1999}, } @incollection{earnshaw_what_1992, address = {Berlin Heidelberg}, title = {What {Scientific} {Visualization} {Can} {Do}!}, isbn = {978-3-642-63470-3}, url = {https://www.springer.com/de/book/9783642634703}, abstract = {Scientific visualization is concerned with exploring data and information insuch a way as to gain understanding and insight into the data. This is a fundamental objective of much scientific investigation. To achieve this goal, scientific visualization utilises aspects in the areas of computergraphics, user-interface methodology, image processing, system design, and signal processing. This volume is intended for readers new to the field and who require a quick and easy-to-read summary of what scientific visualization is and what it can do. Written in a popular andjournalistic style with many illustrations it will enable readers to appreciate the benefits of scientific visualization and how current tools can be exploited in many application areas. 
This volume is indispensible for scientists and research workers who have never used computer graphics or other visual tools before, and who wish to find out the benefitsand advantages of the new approaches.}, language = {en}, booktitle = {An {Introductory} {Guide} to {Scientific} {Visualization}}, publisher = {Springer-Verlag}, author = {Earnshaw, Rae and Wiseman, Norman}, editor = {Earnshaw, Rae A. and Wiseman, Norman}, year = {1992}, doi = {10.1007/978-3-642-58101-4}, pages = {5--19}, } @article{gahegan_scatterplots_1998, title = {Scatterplots and scenes: visualisation techniques for exploratory spatial analysis}, volume = {22}, issn = {0198-9715}, shorttitle = {Scatterplots and scenes}, url = {http://www.sciencedirect.com/science/article/pii/S0198971598000180}, doi = {10.1016/S0198-9715(98)00018-0}, abstract = {A collection of geographic data from a particular region contains many explicit and implicit relationships which are difficult to display and communicate without resorting to statistical summarisation or fragmentation into themes. This paper addresses the problems of exploring interconnections between layers of data using state-of-the-art visualisation techniques and is based on the premise that visual exploratory data analysis is a useful tool for providing insight into the complex and subtle relationships that occur in geography. The tools and techniques described extend beyond the current capacity of commercial geographic information systems in terms of (a) the flexibility of the scene description, (b) the volume of data (particularly the number of distinct layers or themes that are viewable concurrently) and (c) the facilities by which the user may study relationships. The techniques proposed are justified from a psychometric standpoint and some important limitations with their use are described. 
As well as the pictures shown in the paper, further results are given in the form of high resolution colour images, VRML scenes and video clips which may be downloaded from an accompanying Web site.}, language = {en}, number = {1}, journal = {Computers, Environment and Urban Systems}, author = {Gahegan, Mark}, month = jan, year = {1998}, pages = {43--56}, } @book{aigner_visualization_2011, address = {London}, series = {Human–{Computer} {Interaction} {Series}}, title = {Visualization of {Time}-{Oriented} {Data}}, isbn = {978-0-85729-078-6}, url = {https://www.springer.com/de/book/9780857290786}, abstract = {Time is an exceptional dimension that is common to many application domains such as medicine, engineering, business, science, biography, history, planning, or project management. Understanding time-oriented data enables us to learn from the past in order to predict, plan, and build the future. Due to the distinct characteristics of time, appropriate visual and analytical methods are required to explore and analyze them.This book starts with an introduction to visualization and a number of historical examples of visual representations. At its core, the book presents and discusses a systematic view of the visualization of time-oriented data. This view is structured along three key questions. While the aspects of time and associated data describe what is being visualized, user tasks are related to the question why something is visualized. These characteristics and tasks determine how the visualization is to be designed. To support visual exploration, interaction techniques and analytical methods are required as well, which are discussed in separate chapters. The concepts explained in this book are illustrated with numerous examples.A large part of this book is devoted to a structured survey of existing techniques for visualizing time and time-oriented data. 
Overall, 101 different visualization techniques are presented on a per-page basis; each of these self-contained descriptions is accompanied by an illustration and corresponding references. This survey serves as a reference for scientists conducting related research as well as for practitioners seeking information on how their time-oriented data can best be visualized in order to gain valuable insights.}, language = {en}, publisher = {Springer-Verlag}, author = {Aigner, Wolfgang and Miksch, Silvia and Schumann, Heidrun and Tominski, Christian}, year = {2011}, doi = {10.1007/978-0-85729-079-3}, } @incollection{keim_information_2005, address = {Oxford}, series = {International {Cartographic} {Association}}, title = {Information {Visualization}: {Scope}, {Techniques} and {Opportunities} for {Geovisualization}}, isbn = {978-0-08-044531-1}, shorttitle = {Chapter 2 - {Information} {Visualization}}, url = {http://www.sciencedirect.com/science/article/pii/B9780080445311504206}, abstract = {This chapter provides an overview of Information Visualization and visual data mining techniques and illustrates them using a few examples. The application of Information Visualization methods provides new ways of analyzing geospatial data. Exploring and analyzing the vast volumes of data has become increasingly difficult. Information visualization and visual data mining can help to deal with this flood of information. Visualization techniques have been developed over the last two decades to support the exploration of large data sets. Visual data exploration usually follows a three-step process: overview first, zoom and filter, and then details-on-demand. The advantage of visual data exploration is that the user is directly involved in the data mining process. Visualization technology may be used for all three steps of the data exploration process. Visualization techniques are useful for showing an overview of visualization and allowing the user to identify interesting subsets. 
In this step, it is important to keep the overview visualization while focusing on the subset using another visualization technique. An alternative is to distort the overview visualization to focus on the interesting subsets. This can be performed by dedicating a larger percentage of the display to the interesting subsets while decreasing screen utilization for uninteresting data.}, language = {en}, booktitle = {Exploring {Geovisualization}}, publisher = {Elsevier}, author = {Keim, Daniel A.}, editor = {Dykes, Jason and MacEachren, Alan M. and Kraak, Menno-Jan}, month = jan, year = {2005}, doi = {10.1016/B978-008044531-1/50420-6}, pages = {21--52}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\B6DBAW8A\\Keim - 2005 - Chapter 2 - Information Visualization Scope, Tech.pdf:application/pdf}, } @article{mackinlay_automating_1986, title = {Automating the design of graphical presentations of relational information}, volume = {5}, issn = {0730-0301}, url = {https://doi.org/10.1145/22949.22950}, doi = {10.1145/22949.22950}, abstract = {The goal of the research described in this paper is to develop an application-independent presentation tool that automatically designs effective graphical presentations (such as bar charts, scatter plots, and connected graphs) of relational information. Two problems are raised by this goal: The codification of graphic design criteria in a form that can be used by the presentation tool, and the generation of a wide variety of designs so that the presentation tool can accommodate a wide variety of information. The approach described in this paper is based on the view that graphical presentations are sentences of graphical languages. The graphic design issues are codified as expressiveness and effectiveness criteria for graphical languages. Expressiveness criteria determine whether a graphical language can express the desired information. 
Effectiveness criteria determine whether a graphical language exploits the capabilities of the output medium and the human visual system. A wide variety of designs can be systematically generated by using a composition algebra that composes a small set of primitive graphical languages. Artificial intelligence techniques are used to implement a prototype presentation tool called APT (A Presentation Tool), which is based on the composition algebra and the graphic design criteria.}, number = {2}, journal = {ACM Transactions on Graphics}, author = {Mackinlay, Jock}, month = apr, year = {1986}, pages = {110--141}, } @book{nazemi_adaptive_2016, series = {Studies in {Computational} {Intelligence}}, title = {Adaptive {Semantics} {Visualization}}, isbn = {978-3-319-30815-9}, url = {https://www.springer.com/de/book/9783319308159}, abstract = {This book introduces a novel approach for intelligent visualizations that adapts the different visual variables and data processing to human’s behavior and given tasks. Thereby a number of new algorithms and methods are introduced to satisfy the human need of information and knowledge and enable a usable and attractive way of information acquisition. Each method and algorithm is illustrated in a replicable way to enable the reproduction of the entire “SemaVis” system or parts of it. The introduced evaluation is scientifically well-designed and performed with more than enough participants to validate the benefits of the methods. Beside the introduced new approaches and algorithms, readers may find a sophisticated literature review in Information Visualization and Visual Analytics, Semantics and information extraction, and intelligent and adaptive systems. 
This book is based on an awarded and distinguished doctoral thesis in computer science.}, language = {en}, publisher = {Springer International Publishing}, author = {Nazemi, Kawa}, year = {2016}, doi = {10.1007/978-3-319-30816-6}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\84ZF5BKQ\\Nazemi - 2016 - Adaptive Semantics Visualization.pdf:application/pdf}, } @inproceedings{nazemi_intelligent_2018, address = {Cham}, series = {Advances in {Intelligent} {Systems} and {Computing}}, title = {Intelligent {Visual} {Analytics} – a {Human}-{Adaptive} {Approach} for {Complex} and {Analytical} {Tasks}}, isbn = {978-3-319-73888-8}, doi = {10.1007/978-3-319-73888-8_29}, abstract = {Visual Analytics enables solving complex and analytical tasks by combining automated data analytics methods and interactive visualizations. The complexity of tasks, the huge amount of data and the complex visual representation may overstrain the users of such systems. Intelligent and adaptive visualizations system show already promising results to bridge the gap between human and the complex visualization. We introduce in this paper a revised version of layer-based visual adaptation model that considers the human perception and cognition abilities. 
The model is then used to enhance the most popular Visual Analytics model to enable the development of Intelligent Visual Analytics systems.}, language = {en}, booktitle = {Intelligent {Human} {Systems} {Integration}}, publisher = {Springer International Publishing}, author = {Nazemi, Kawa}, editor = {Karwowski, Waldemar and Ahram, Tareq}, year = {2018}, pages = {180--190}, } @article{xu_ensemblelens_2019, title = {{EnsembleLens}: {Ensemble}-based {Visual} {Exploration} of {Anomaly} {Detection} {Algorithms} with {Multidimensional} {Data}}, volume = {25}, issn = {1941-0506}, shorttitle = {{EnsembleLens}}, doi = {10.1109/TVCG.2018.2864825}, abstract = {The results of anomaly detection are sensitive to the choice of detection algorithms as they are specialized for different properties of data, especially for multidimensional data. Thus, it is vital to select the algorithm appropriately. To systematically select the algorithms, ensemble analysis techniques have been developed to support the assembly and comparison of heterogeneous algorithms. However, challenges remain due to the absence of the ground truth, interpretation, or evaluation of these anomaly detectors. In this paper, we present a visual analytics system named EnsembleLens that evaluates anomaly detection algorithms based on the ensemble analysis process. The system visualizes the ensemble processes and results by a set of novel visual designs and multiple coordinated contextual views to meet the requirements of correlation analysis, assessment and reasoning of anomaly detection algorithms. We also introduce an interactive analysis workflow that dynamically produces contextualized and interpretable data summaries that allow further refinements of exploration results based on user feedback. 
We demonstrate the effectiveness of EnsembleLens through a quantitative evaluation, three case studies with real-world data and interviews with two domain experts.}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {Xu, Ke and Xia, Meng and Mu, Xing and Wang, Yun and Cao, Nan}, year = {2019}, pages = {109--119}, } @article{stevens_theory_1946, title = {On the {Theory} of {Scales} of {Measurement}}, volume = {103}, copyright = {© 1946}, issn = {0036-8075, 1095-9203}, url = {https://science.sciencemag.org/content/103/2684/677}, doi = {10.1126/science.103.2684.677}, language = {en}, number = {2684}, journal = {Science}, author = {Stevens, S. S.}, month = jun, year = {1946}, pages = {677--680}, } @article{west_images_1999, title = {Images and reversals: {James} {Clerk} {Maxwell}, working in wet clay}, volume = {33}, issn = {0097-8930}, shorttitle = {Images and reversals}, url = {https://doi.org/10.1145/563666.563671}, doi = {10.1145/563666.563671}, language = {en}, number = {1}, journal = {ACM SIGGRAPH Computer Graphics}, author = {West, Thomas G.}, month = feb, year = {1999}, pages = {15--17}, } @inproceedings{shneiderman_eyes_1996, title = {The eyes have it: a task by data type taxonomy for information visualizations}, shorttitle = {The eyes have it}, doi = {10.1109/VL.1996.545307}, abstract = {A useful starting point for designing advanced graphical user interfaces is the visual information seeking Mantra: overview first, zoom and filter, then details on demand. But this is only a starting point in trying to understand the rich and varied set of information visualizations that have been proposed in recent years. 
The paper offers a task by data type taxonomy with seven data types (one, two, three dimensional data, temporal and multi dimensional data, and tree and network data) and seven tasks (overview, zoom, filter, details-on-demand, relate, history, and extracts).}, booktitle = {Proceedings 1996 {IEEE} {Symposium} on {Visual} {Languages}}, author = {Shneiderman, Ben}, year = {1996}, pages = {336--343}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\4UVUQYAU\\Shneiderman - 1996 - The eyes have it a task by data type taxonomy for.pdf:application/pdf}, } @article{xu_vidx_2017, title = {{ViDX}: {Visual} {Diagnostics} of {Assembly} {Line} {Performance} in {Smart} {Factories}}, volume = {23}, issn = {1941-0506}, shorttitle = {{ViDX}}, doi = {10.1109/TVCG.2016.2598664}, abstract = {Visual analytics plays a key role in the era of connected industry (or industry 4.0, industrial internet) as modern machines and assembly lines generate large amounts of data and effective visual exploration techniques are needed for troubleshooting, process optimization, and decision making. However, developing effective visual analytics solutions for this application domain is a challenging task due to the sheer volume and the complexity of the data collected in the manufacturing processes. We report the design and implementation of a comprehensive visual analytics system, ViDX. It supports both real-time tracking of assembly line performance and historical data exploration to identify inefficiencies, locate anomalies, and form hypotheses about their causes and effects. The system is designed based on a set of requirements gathered through discussions with the managers and operators from manufacturing sites. It features interlinked views displaying data at different levels of detail. In particular, we apply and extend the Marey's graph by introducing a time-aware outlier-preserving visual aggregation technique to support effective troubleshooting in manufacturing processes. 
We also introduce two novel interaction techniques, namely the quantiles brush and samples brush, for the users to interactively steer the outlier detection algorithms. We evaluate the system with example use cases and an in-depth user interview, both conducted together with the managers and operators from manufacturing plants. The result demonstrates its effectiveness and reports a successful pilot application of visual analytics for manufacturing in smart factories.}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {Xu, Panpan and Mei, Honghui and Ren, Liu and Chen, Wei}, year = {2017}, pages = {291--300}, } @inproceedings{kaupp_outlier_2019, address = {Cham}, series = {Communications in {Computer} and {Information} {Science}}, title = {Outlier {Detection} in {Temporal} {Spatial} {Log} {Data} {Using} {Autoencoder} for {Industry} 4.0}, isbn = {978-3-030-20257-6}, doi = {10.1007/978-3-030-20257-6_5}, abstract = {Industry is changing rapidly under industry 4.0. The manufacturing process and its cyber-physical systems (CPSs) produce large amounts of data with many relationships and dependencies in the data. Outlier detection and problem solving is difficult in such an environment. We present an unsupervised outlier detection method to find outliers in temporal spatial log data without domain-specific knowledge. Our method is evaluated with real-world unlabeled CPS log data extracted from a quality glass inspection machine used in production. As a measurement metric for success, we set reasonable outlier areas in cooperation with a domain expert. Using our proposed method, we were able to find all known outlier areas. 
In addition, we found outliers that were not previously known and have been verified as outliers by a domain expert ex post.}, language = {en}, booktitle = {Engineering {Applications} of {Neural} {Networks}}, publisher = {Springer International Publishing}, author = {Kaupp, Lukas and Beez, Ulrich and Hülsmann, Jens and Humm, Bernhard G.}, editor = {Macintyre, John and Iliadis, Lazaros and Maglogiannis, Ilias and Jayne, Chrisina}, year = {2019}, pages = {55--65}, } @article{sacha_knowledge_2014, title = {Knowledge {Generation} {Model} for {Visual} {Analytics}}, volume = {20}, issn = {1941-0506}, doi = {10.1109/TVCG.2014.2346481}, abstract = {Visual analytics enables us to analyze huge information spaces in order to support complex decision making and data exploration. Humans play a central role in generating knowledge from the snippets of evidence emerging from visual data analysis. Although prior research provides frameworks that generalize this process, their scope is often narrowly focused so they do not encompass different perspectives at different levels. This paper proposes a knowledge generation model for visual analytics that ties together these diverse frameworks, yet retains previously developed models (e.g., KDD process) to describe individual segments of the overall visual analytic processes. To test its utility, a real world visual analytics system is compared against the model, demonstrating that the knowledge generation process model provides a useful guideline when developing and evaluating such systems. The model is used to effectively compare different data analysis systems. Furthermore, the model provides a common language and description of visual analytic processes, which can be used for communication between researchers. 
At the end, our model reflects areas of research that future researchers can embark on.}, number = {12}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {Sacha, Dominik and Stoffel, Andreas and Stoffel, Florian and Kwon, Bum Chul and Ellis, Geoffrey and Keim, Daniel A.}, year = {2014}, pages = {1604--1613}, } @inproceedings{card_structure_1997, address = {Los Alamitos}, title = {The structure of the information visualization design space}, isbn = {0-8186-8189-6}, doi = {10.1109/INFVIS.1997.636792}, abstract = {Research on information visualization has reached the point where a number of successful point designs have been proposed and a variety of techniques have been discovered. It is now appropriate to describe and analyze portions of the design space so as to understand the differences among designs and to suggest new possibilities. This paper proposes an organization of the information visualization literature and illustrates it with a series of examples. The result is a framework for designing new visualizations and augmenting existing designs.}, booktitle = {Proceedings of {VIZ} '97: {Visualization} {Conference}, {Information} {Visualization} {Symposium} and {Parallel} {Rendering} {Symposium}}, publisher = {IEEE Computer Society Press}, author = {Card, Stuart K. and Mackinlay, Jock D.}, year = {1997}, pages = {92--99}, } @article{viau_flowvizmenu_2010, title = {The {FlowVizMenu} and {Parallel} {Scatterplot} {Matrix}: {Hybrid} {Multidimensional} {Visualizations} for {Network} {Exploration}}, volume = {16}, issn = {1941-0506}, shorttitle = {The {FlowVizMenu} and {Parallel} {Scatterplot} {Matrix}}, doi = {10.1109/TVCG.2010.205}, abstract = {A standard approach for visualizing multivariate networks is to use one or more multidimensional views (for example, scatterplots) for selecting nodes by various metrics, possibly coordinated with a node-link view of the network. 
In this paper, we present three novel approaches for achieving a tighter integration of these views through hybrid techniques for multidimensional visualization, graph selection and layout. First, we present the FlowVizMenu, a radial menu containing a scatterplot that can be popped up transiently and manipulated with rapid, fluid gestures to select and modify the axes of its scatterplot. Second, the FlowVizMenu can be used to steer an attribute-driven layout of the network, causing certain nodes of a node-link diagram to move toward their corresponding positions in a scatterplot while others can be positioned manually or by force-directed layout. Third, we describe a novel hybrid approach that combines a scatterplot matrix (SPLOM) and parallel coordinates called the Parallel Scatterplot Matrix (P-SPLOM), which can be used to visualize and select features within the network. We also describe a novel arrangement of scatterplots called the Scatterplot Staircase (SPLOS) that requires less space than a traditional scatterplot matrix. Initial user feedback is reported.}, number = {6}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {Viau, Christophe and McGuffin, Michael J. and Chiricota, Yves and Jurisica, Igor}, year = {2010}, pages = {1100--1108}, } @article{kohlhammer_toward_2012, title = {Toward {Visualization} in {Policy} {Modeling}}, volume = {32}, issn = {1558-1756}, doi = {10.1109/MCG.2012.107}, abstract = {This article looks at the current and future roles of information visualization, semantics visualization, and visual analytics in policy modeling. 
Many experts believe that you can't overestimate visualization's role in this respect.}, number = {5}, journal = {IEEE Computer Graphics and Applications}, author = {Kohlhammer, Jörn and Nazemi, Kawa and Ruppert, Tobias and Burkhardt, Dirk}, year = {2012}, pages = {84--89}, } @inproceedings{nazemi_visual_2019, address = {Los Alamitos, CA.}, title = {Visual {Analytics} for {Analyzing} {Technological} {Trends} from {Text}}, doi = {10.1109/IV.2019.00041}, abstract = {The awareness of emerging technologies is essential for strategic decision making in enterprises. Emerging and decreasing technological trends could lead to strengthening the competitiveness and market positioning. The exploration, detection and identification of such trends can be essentially supported through information visualization, trend mining and in particular through the combination of those. Commonly, trends appear first in science and scientific documents. However, those documents do not provide sufficient information for analyzing and identifying emerging trends. It is necessary to enrich data, extract information from the integrated data, measure the gradient of trends over time and provide effective interactive visualizations. We introduce in this paper an approach for integrating, enriching, mining, analyzing, identifying and visualizing emerging trends from scientific documents. 
Our approach enhances the state of the art in visual trend analytics by investigating the entire analysis process and providing an approach for enabling human to explore undetected potentially emerging trends.}, booktitle = {Information {Visualisation}: {Biomedical} visualization and geometric modelling \& imaging}, publisher = {IEEE Computer Society, Conference Publishing Services}, author = {Nazemi, Kawa and Burkhardt, Dirk}, editor = {Banissi, E.}, year = {2019}, pages = {191--200}, } @article{velleman_nominal_1993, title = {Nominal, {Ordinal}, {Interval}, and {Ratio} {Typologies} are {Misleading}}, volume = {47}, issn = {0003-1305}, url = {https://www.tandfonline.com/doi/abs/10.1080/00031305.1993.10475938}, doi = {10.1080/00031305.1993.10475938}, abstract = {The psychophysicist S.S. Stevens developed a measurement scale typology that has dominated social statistics methodology for almost 50 years. During this period, it has generated considerable controversy among statisticians. Recently, there has been a renaissance in the use of Stevens's scale typology for guiding the design of statistical computer packages. The current use of Stevens's terminology fails to deal with the classical criticisms at the time it was proposed and ignores important developments in data analysis over the last several decades.}, number = {1}, journal = {The American Statistician}, author = {Velleman, Paul F. and Wilkinson, Leland}, month = feb, year = {1993}, pages = {65--72}, } @inproceedings{inselberg_parallel_1990, address = {Los Alamitos, CA}, series = {{VIS} '90}, title = {Parallel coordinates: a tool for visualizing multi-dimensional geometry}, isbn = {978-0-8186-2083-6}, shorttitle = {Parallel coordinates}, abstract = {A methodology for visualizing analytic and synthetic geometry in RN is presented. It is based on a system of parallel coordinates which induces a non-projective mapping between N-Dimensional and 2-Dimensional sets. 
Hypersurfaces are represented by their planar images which have some geometrical properties analogous to the properties of the hypersurface that they represent. A point ← → line duality when N = 2 generalizes to lines and hyperplanes enabling the representation of polyhedra in RN. The representation of a class of convex and non-convex hypersurfaces is discussed together with an algorithm for constructing and displaying any interior point. The display shows some local properties of the hypersurface and provides information on the point's proximity to the boundary. Applications to Air Traffic Control, Robotics, Computer Vision, Computational Geometry, Statistics, Instrumentation and other areas are discussed.}, booktitle = {Proceedings of the 1st conference on {Visualization} '90}, publisher = {IEEE Computer Society Press}, author = {Inselberg, Alfred and Dimsdale, Bernard}, month = oct, year = {1990}, pages = {361--378}, } @article{nazemi_semantics_2015, series = {2015 {International} {Conference} {Virtual} and {Augmented} {Reality} in {Education}}, title = {Semantics {Visualization} – {Definition}, {Approaches} and {Challenges}}, volume = {75}, issn = {1877-0509}, url = {http://www.sciencedirect.com/science/article/pii/S1877050915036777}, doi = {10.1016/j.procs.2015.12.216}, abstract = {The visualization of the simulation results must be done in conformity with beneficiaries perception and professional domain understanding. It means that right data must be identified before. Semantic technologies provide new ways for accessing data and acquiring knowledge. The underlying structures allow finding information easier, gathering meanings and associations of the data entities and associating the data to users’ knowledge. Even though the focus of the research in this area is more to provide “machine readable” data, human-centered systems benefit from the technologies too. 
Especially graphical representations of the semantically structured data play a key-role in today's research. The meaningful relations of data entities and the meaningful and labeled clustering of data in form of semantic concepts enable new ways to visualize data. With these new ways, various challenges are related with deploying semantics visualizations beyond analytical search and simulation. The goal is to give a common understanding of the term semantics as it is used in semantic web. This paper dealt with the general idea of semantics visualization. First a short introduction to semantic formalisms is given followed by a general definition. Subsequently approaches and techniques of existing semantics visualizations are presented, where-as a new classification is introduced to describe the techniques. The article concludes with future challenges in semantics visualization focusing on users, data and tasks.}, language = {en}, journal = {Procedia Computer Science}, author = {Nazemi, Kawa and Burkhardt, Dirk and Ginters, Egils and Kohlhammer, Jörn}, month = jan, year = {2015}, pages = {75--83}, file = {Nazemi et al. - 2015 - Semantics Visualization – Definition, Approaches a.pdf:C\:\\Users\\carst\\Zotero\\storage\\VUPCYJZI\\Nazemi et al. - 2015 - Semantics Visualization – Definition, Approaches a.pdf:application/pdf}, } @inproceedings{nazemi_visual_2015, address = {New York, NY, USA}, series = {i-{KNOW} '15}, title = {Visual trend analysis with digital libraries}, isbn = {978-1-4503-3721-2}, url = {https://doi.org/10.1145/2809563.2809569}, doi = {10.1145/2809563.2809569}, abstract = {The early awareness of new technologies and upcoming trends is essential for making strategic decisions in enterprises and research. Trends may signal that technologies or related topics might be of great interest in the future or obsolete for future directions. 
The identification of such trends premises analytical skills that can be supported through trend mining and visual analytics. Thus the earliest trends or signals commonly appear in science, the investigation of digital libraries in this context is inevitable. However, digital libraries do not provide sufficient information for analyzing trends. It is necessary to integrate data, extract information from the integrated data and provide effective interactive visual analysis tools. We introduce in this paper a model that investigates all stages from data integration to interactive visualization for identifying trends and analyzing the market situation through our visual trend analysis environment. Our approach improves the visual analysis of trends by investigating the entire transformation steps from raw and structured data to visual representations.}, booktitle = {Proceedings of the 15th {International} {Conference} on {Knowledge} {Technologies} and {Data}-driven {Business}}, publisher = {Association for Computing Machinery}, author = {Nazemi, Kawa and Retz, Reimond and Burkhardt, Dirk and Kuijper, Arjan and Kohlhammer, Jörn and Fellner, Dieter W.}, month = oct, year = {2015}, pages = {1--8}, } @book{bertin_semiology_1983, address = {Madison, Wis.}, title = {Semiology of graphics : diagrams, networks, maps}, isbn = {0-299-09060-4}, language = {en}, publisher = {University of Wisconsin Press}, author = {Bertin, Jacques}, translator = {Berg, William J.}, year = {1983}, } @article{may_towards_2008, title = {Towards closing the analysis gap: {Visual} generation of decision supporting schemes from raw data}, volume = {27}, copyright = {© 2008 The Author(s) Journal compilation © 2008 The Eurographics Association and Blackwell Publishing Ltd.}, issn = {1467-8659}, shorttitle = {Towards closing the analysis gap}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1467-8659.2008.01224.x}, doi = {10.1111/j.1467-8659.2008.01224.x}, abstract = {The derivation, 
manipulation and verification of analytical models from raw data is a process which requires a transformation of information across different levels of abstraction. We introduce a concept for the coupling of data classification and interactive visualization in order to make this transformation visible and steerable for the human user. Data classification techniques generate mappings that formally group data items into categories. Interactive visualization includes the user into an iterative refinement process. The user identifies and selects interesting patterns to define these categories. The following step is the transformation of a visible pattern into the formal definition of a classifier. In the last step the classifier is transformed back into a pattern that is blended with the original data in the same visual display. Our approach allows in intuitive assessment of a formal classifier and its model, the detection of outliers and the handling of noisy data using visual pattern-matching. We instantiated the concept using decision trees for classification and KVMaps as the visualization technique. The generation of a classifier from visual patterns and its verification is transformed from a cognitive to a mostly pre-cognitive task.}, language = {en}, number = {3}, journal = {Computer Graphics Forum}, author = {May, T. and Kohlhammer, J.}, year = {2008}, pages = {911--918}, } @book{munzner_visualization_2014, address = {Hoboken}, series = {A.{K}. 
{Peters} visualization series}, title = {Visualization analysis \& design}, isbn = {978-1-4665-0891-0}, publisher = {Taylor and Francis}, author = {Munzner, Tamara}, year = {2014}, } @inproceedings{curdt_metadata_2016, address = {Cham}, series = {Communications in {Computer} and {Information} {Science}}, title = {Metadata {Management} in an {Interdisciplinary}, {Project}-{Specific} {Data} {Repository}: {A} {Case} {Study} from {Earth} {Sciences}}, isbn = {978-3-319-49157-8}, shorttitle = {Metadata {Management} in an {Interdisciplinary}, {Project}-{Specific} {Data} {Repository}}, doi = {10.1007/978-3-319-49157-8_31}, abstract = {This paper presents an approach to manage metadata of (research) data from the interdisciplinary, long-term, DFG-funded, collaborative research project ‘Patterns in Soil-Vegetation-Atmosphere Systems: Monitoring, Modelling, and Data Assimilation’. In this framework, a data repository, the so-called TR32DB project database, was established in 2008 with the aim to manage the resulting data of the involved scientists. The data documentation with accurate, extensive metadata has been a key task. Consequently, a standardized, interoperable, multi-level metadata schema has been designed and implemented to ensure a proper documentation and publication of all project data (e.g. data, publication, reports), as well as to facilitate data search, exchange and re-use. 
A user-friendly web-interface was designed for a simple metadata input and search.}, language = {en}, booktitle = {Metadata and {Semantics} {Research}}, publisher = {Springer International Publishing}, author = {Curdt, Constanze}, editor = {Garoufallou, Emmanouel and Subirats Coll, Imma and Stellato, Armando and Greenberg, Jane}, year = {2016}, pages = {357--368}, } @article{fleischer_was_2020, title = {„{Was} vom {Tage} übrigblieb“ – ‚{SFB}-{Legacy}’ von {INF}-{Teilprojekten}}, copyright = {CC BY 4.0}, url = {https://bausteine-fdm.de/article/view/8090}, doi = {10.17192/bfdm.2020.1.8090}, abstract = {Sonderforschungsbereiche haben aus Sicht der Deutschen Forschungsgemeinschaft einen strukturellen Auftrag. Dieser Auftrag spiegelt sich am Stärksten in den Teilprojekten (INF) wieder. Im Rahmen einesWorkshops, der 2018 in Göttingen stattfand, wurde ein WorldCafé mit dem Thema Nachhaltigkeit von Ergebnissen/Produkten aus INFProjekten angeboten. Teilgenommen haben an diesemWorkshop Teilprojektleitungen (INF) und deren Personal sowie SFB-Projektkoordinationen. Die Erfahrungen der Beteiligten zeigen, dass nicht immer die Anbindung an Infrastruktureinrichtungen gegeben und daraus resultierend die Nachhaltigkeit des INF-Projektes ungeklärt ist. Dies kann verschiedene Ursachen haben, wie die wissenschaftliche Ausrichtung des Themas oder fehlende strategische Zielvorgaben. Generell waren sich die Teilnehmenden einig, dass eine frühzeitige Klärung dieser Anbindung für ihr Arbeitsumfeld und die Ergebnisse vorteilhaft waren. 
Wie eine solche Anbindung hergestellt werden könnte, ist – als Zusammenfassung der WorldCafé-Beiträge – Thema dieses Artikels.}, language = {de}, number = {1}, journal = {Bausteine Forschungsdatenmanagement}, author = {Fleischer, Dirk}, month = apr, year = {2020}, pages = {39--44}, file = {Fleischer - 2020 - „Was vom Tage übrigblieb“ – ‚SFB-Legacy’ von INF-T.pdf:C\:\\Users\\carst\\Zotero\\storage\\I87GIJZT\\Fleischer - 2020 - „Was vom Tage übrigblieb“ – ‚SFB-Legacy’ von INF-T.pdf:application/pdf}, } @article{stegemann_was_2020, title = {Was bleibt nach dem {Projekt}? - {Nachhaltigkeitsstrategien} für das {Forschungsdatenmangement} ({FDM}) entwickeln}, copyright = {CC BY 4.0}, shorttitle = {Was bleibt nach dem {Projekt}?}, url = {https://bausteine-fdm.de/article/view/8167}, doi = {10.17192/bfdm.2020.1.8167}, abstract = {Schon am Anfang eines Projektes sollte an das Ende gedacht werden. Um die Ergebnisse, das gesammelte Wissen und die Erfahrungen aus Projekten zum Aufbau von FDM-Services langfristig zu sichern und nachnutzbar zu machen, sollten als Teil des Projektmanagements frühzeitig Strategien und Konzepte zur Nachhaltigkeit entworfen werden. Der Beitrag stellt verschiedene Maßnahmen und Ziele für eine Nachhaltigkeitsstrategie vor. Anhand von konkreten Beispielen – u.a. den Schulungsmaterialien aus dem Projekt „FoDaKo“ und eines Konzeptentwurfs für die Vernetzung von Stakeholdern des FDM an Universitäten – wird aufgezeigt, wie Prinzipien der Nachhaltigkeit in der Praxis Anwendung finden können. Der Artikel basiert auf dem Vortrag "Was bleibt nach dem Projekt? - FDM langfristig an Universitäten etablieren", den die Autorin auf dem DINI/nestor-Workshop "Strukturen entwickeln: Organisation und Governance für lokale FDM-Services" am 4. 
April 2019 in Siegen gehalten hat.}, language = {de}, number = {1}, journal = {Bausteine Forschungsdatenmanagement}, author = {Stegemann, Jessica}, month = apr, year = {2020}, pages = {69--76}, file = {Stegemann - 2020 - Was bleibt nach dem Projekt - Nachhaltigkeitsstra.pdf:C\:\\Users\\carst\\Zotero\\storage\\UKBTD7QJ\\Stegemann - 2020 - Was bleibt nach dem Projekt - Nachhaltigkeitsstra.pdf:application/pdf}, } @article{arning_publisso_2016, title = {{PUBLISSO}: {Das} {Open}-{Access}-{Publikationsportal} für die {Lebenswissenschaften}}, volume = {16}, copyright = {This is an Open Access article distributed under the terms of the Creative Commons Attribution 4.0 License.}, issn = {1865-066X}, shorttitle = {{PUBLISSO}}, doi = {10.3205/mbi000370}, abstract = {Der Beitrag erläutert die Gründe für den Aufbau des ZB MED Publikationsportals PUBLISSO und stellt die Ziele, das Konzept und die einzelnen Services vor. Im Mittelpunkt steht dabei, den Open-Access-Gedanken zu verbreiten, dazu zu beraten und entsprechende Publikationsmöglichkeiten für alle Publikationsarten von Textpublikationen bis zu Forschungsdaten zur Verfügung zu stellen.}, language = {de}, number = {3}, journal = {GMS Medizin - Bibliothek - Information}, author = {Arning, Ursula and Lindstädt, Birte and Schmitz, Jasmin}, month = dec, year = {2016}, pages = {Doc15}, file = {Arning et al. - 2016 - PUBLISSO Das Open-Access-Publikationsportal für d.pdf:C\:\\Users\\carst\\Zotero\\storage\\XU5RM4E2\\Arning et al. 
- 2016 - PUBLISSO Das Open-Access-Publikationsportal für d.pdf:application/pdf}, } @article{curdt_research_2015, title = {Research data management services for a multidisciplinary, collaborative research project: {Design} and implementation of the {TR32DB} project database}, volume = {49}, issn = {0033-0337}, shorttitle = {Research data management services for a multidisciplinary, collaborative research project}, url = {https://doi.org/10.1108/PROG-02-2015-0016}, doi = {10.1108/PROG-02-2015-0016}, abstract = {Purpose Research data management (RDM) comprises all processes, which ensure that research data are well-organized, documented, stored, backed up, accessible, and reusable. RDM systems form the technical framework. The purpose of this paper is to present the design and implementation of a RDM system for an interdisciplinary, collaborative, long-term research project with focus on Soil-Vegetation-Atmosphere data. Design/methodology/approach The presented RDM system is based on a three-tier (client-server) architecture. This includes a file-based data storage, a database-based metadata storage, and a self-designed user-friendly web-interface. The system is designed in cooperation with the local computing centre, where it is also hosted. A self-designed interoperable, project-specific metadata schema ensures the accurate documentation of all data. Findings A RDM system has to be designed and implemented according to requirements of the project participants. General challenges and problems of RDM should be considered. Thus, a close cooperation with the scientists obtains the acceptance and usage of the system. Originality/value This paper provides evidence that the implementation of a RDM system in the provided and maintained infrastructure of a computing centre offers many advantages. Consequently, the designed system is independent of the project funding. In addition, access and re-use of all involved project data is ensured. 
A transferability of the presented approach to another interdisciplinary research project was already successful. Furthermore, the designed metadata schema can be expanded according to changing project requirements.}, number = {4}, journal = {Program: electronic library and information systems}, author = {Curdt, Constanze and Hoffmeister, Dirk}, editor = {Cox, Andrew}, month = jan, year = {2015}, pages = {494--512}, } @article{venters_software_2018, title = {Software sustainability: {Research} and practice from a software architecture viewpoint}, volume = {138}, issn = {0164-1212}, shorttitle = {Software sustainability}, url = {http://www.sciencedirect.com/science/article/pii/S0164121217303072}, doi = {10.1016/j.jss.2017.12.026}, abstract = {Context Modern societies are highly dependent on complex, large-scale, software-intensive systems that increasingly operate within an environment of continuous availability, which is challenging to maintain and evolve in response to the inevitable changes in stakeholder goals and requirements of the system. Software architectures are the foundation of any software system and provide a mechanism for reasoning about core software quality requirements. Their sustainability – the capacity to endure in changing environments – is a critical concern for software architecture research and practice. Problem Accidental software complexity accrues both naturally and gradually over time as part of the overall software design and development process. 
From a software architecture perspective, this allows several issues to overlap including, but not limited to: the accumulation of technical debt design decisions of individual components and systems leading to coupling and cohesion issues; the application of tacit architectural knowledge resulting in unsystematic and undocumented design decisions; architectural knowledge vaporisation of design choices and the continued ability of the organization to understand the architecture of its systems; sustainability debt and the broader cumulative effects of flawed architectural design choices over time resulting in code smells, architectural brittleness, erosion, and drift, which ultimately lead to decay and software death. Sustainable software architectures are required to evolve over the entire lifecycle of the system from initial design inception to end-of-life to achieve efficient and effective maintenance and evolutionary change. Method This article outlines general principles and perspectives on sustainability with regards to software systems to provide a context and terminology for framing the discourse on software architectures and sustainability. Focusing on the capacity of software architectures and architectural design choices to endure over time, it highlights some of the recent research trends and approaches with regards to explicitly addressing sustainability in the context of software architectures. Contribution The principal aim of this article is to provide a foundation and roadmap of emerging research themes in the area of sustainable software architectures highlighting recent trends, and open issues and research challenges.}, language = {en}, journal = {Journal of Systems and Software}, author = {Venters, Colin C. and Capilla, Rafael and Betz, Stefanie and Penzenstadler, Birgit and Crick, Tom and Crouch, Steve and Nakagawa, Elisa Yumi and Becker, Christoph and Carrillo, Carlos}, month = apr, year = {2018}, pages = {174--188}, file = {Venters et al. 
- 2018 - Software sustainability Research and practice fro.pdf:C\:\\Users\\carst\\Zotero\\storage\\KJPGDH4H\\Venters et al. - 2018 - Software sustainability Research and practice fro.pdf:application/pdf}, } @article{von_krogh_special_2003, series = {Open {Source} {Software} {Development}}, title = {Special issue on open source software development}, volume = {32}, issn = {0048-7333}, doi = {10.1016/S0048-7333(03)00054-4}, abstract = {This special issue of Research Policy is dedicated to new research on the phenomenon of open source software development. Open Source, because of its novel modes of operation and robust functioning in the marketplace, poses novel and fundamental questions for researchers in many fields, ranging from the economics of innovation to the principles by which productive work can best be organized. In this introduction to the special issue, we provide a general history and description of open source software and open source software development processes, plus an overview of the articles.}, language = {en}, number = {7}, journal = {Research Policy}, author = {von Krogh, Georg and von Hippel, Eric}, month = jun, year = {2003}, pages = {1149--1157}, file = {von Krogh und von Hippel - 2003 - Special issue on open source software development.pdf:C\:\\Users\\carst\\Zotero\\storage\\WB3VV37I\\von Krogh und von Hippel - 2003 - Special issue on open source software development.pdf:application/pdf}, } @article{crouch_software_2013, title = {The {Software} {Sustainability} {Institute}: {Changing} {Research} {Software} {Attitudes} and {Practices}}, volume = {15}, issn = {1558-366X}, shorttitle = {The {Software} {Sustainability} {Institute}}, doi = {10.1109/MCSE.2013.133}, abstract = {To effect change, the Software Sustainability Institute works with researchers, developers, funders, and infrastructure providers to identify and address key issues with research software.}, number = {6}, journal = {Computing in Science Engineering}, author = {Crouch, 
Stephen and Chue Hong, Neil and Hettrick, Simon and Jackson, Mike and Pawlik, Aleksandra and Sufi, Shoaib and Carr, Les and De Roure, David and Goble, Carole and Parsons, Mark}, year = {2013}, pages = {74--80}, file = {Crouch et al. - 2013 - The Software Sustainability Institute Changing Re.pdf:C\:\\Users\\carst\\Zotero\\storage\\P35JWV5C\\Crouch et al. - 2013 - The Software Sustainability Institute Changing Re.pdf:application/pdf}, } @article{anhalt-depies_tradeoffs_2019, title = {Tradeoffs and tools for data quality, privacy, transparency, and trust in citizen science}, volume = {238}, issn = {0006-3207}, url = {http://www.sciencedirect.com/science/article/pii/S0006320719301958}, doi = {10.1016/j.biocon.2019.108195}, abstract = {Emerging technologies make it increasingly straightforward for scientists to collect data that are fine in scale, broad in scope, and transparent with open access. However, the resulting datasets may contain sensitive information such as location information about endangered resources or private landowners. These tensions are particularly relevant for citizen science programs which engage the public in answering scientific questions. Citizen science programs are often promoted as being able to achieve multiple scientific (e.g. quality data and open sharing of data) and social goals (e.g. increased transparency and public trust), but likely tensions between these desired outcomes are less frequently discussed. We develop a conceptual framework for tensions in citizen science information and review the internal policies and practices currently used to navigate between data sharing and privacy protection. We also examine the case of Snapshot Wisconsin's wildlife camera traps on private land to understand how program managers balanced data production and sharing with protection of sensitive information and how citizen scientists perceived the project. 
We found that programs may be forced to make tradeoffs between data quality, privacy protection, resource security, transparency, and trust. In order to maximize conservation outcomes, we recommend that managers anticipate potential tradeoffs in advance of data collection, develop policies and practices to address these, and practice iterative evaluation that solicits feedback from participants.}, language = {en}, journal = {Biological Conservation}, author = {Anhalt-Depies, Christine and Stenglein, Jennifer L. and Zuckerberg, Benjamin and Townsend, Philip A. and Rissman, Adena R.}, month = oct, year = {2019}, pages = {108195}, } @article{curdt_tr32db_2014, title = {{TR32DB} {Metadata} {Schema} for the {Description} of {Research} {Data} in the {TR32DB}}, copyright = {Creative Commons Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0)}, url = {http://www.tr32db.uni-koeln.de/DOI/doi.php?doiID=50}, doi = {10.5880/TR32DB.10}, abstract = {The TR32DB Metadata Schema is a structured list of metadata properties chosen to describe all data in the TR32DB with accurate metadata properties and thus to improve their searchability. The entire data provided to the TR32DB can be described with a number of descriptive metadata properties (e.g. creator, title, abstract, keywords, etc.) and administrative or technical properties (e.g. file format, file type, rights statement, etc.). The stored data are organized in six main data type categories: Data, Geodata, Report, Picture, Presentation, and Publication. The TR32DB Metadata Schema is set up in two levels to describe the various types of data collected by the CRC/TR32 participants. The first level is the General level. This level includes metadata properties classified in seven categories: Identification, Responsible Party, Topic, File Details, Constraints, Geographic, and automatic generated Metadata Details. The second level is the Specific level and contains the data type specific metadata properties. 
Currently, six data types are included: Data, Geodata, Report, Picture, Presentation, and Publication. Publication takes a special position and is once again sub-divided into the sub-categories: Article, Book, Book Section, and Event Paper.}, language = {en}, author = {Curdt, Constanze}, year = {2014}, pages = {52}, } @article{david_introduction_2016, title = {An introduction to the special issue on {Geoscience} {Papers} of the {Future}}, volume = {3}, copyright = {©2016. The Authors.}, issn = {2333-5084}, url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1002/2016EA000201}, doi = {10.1002/2016EA000201}, abstract = {Advocates of enhanced quality for published scientific results are increasingly voicing the need for further transparency of data and software for scientific reproducibility. However, such advanced digital scholarship can appear perplexing to geoscientists that are seduced by the concept of open science yet wonder about the exact mechanics and implications of the associated efforts. This special issue of Earth and Space Science entitled “Geoscience Papers of the Future” includes a review of existing best practices for digital scholarship and bundles a set of example articles that share their digital research products and reflect on the process of opening their scientific approach in a common quest for reproducible science.}, language = {en}, number = {10}, journal = {Earth and Space Science}, author = {David, Cédric H. and Gil, Yolanda and Duffy, Christopher J. and Peckham, Scott D. and Venayagamoorthy, S. Karan}, year = {2016}, pages = {441--444}, file = {David et al. - 2016 - An introduction to the special issue on Geoscience.pdf:C\:\\Users\\carst\\Zotero\\storage\\TTR6LM4B\\David et al. 
- 2016 - An introduction to the special issue on Geoscience.pdf:application/pdf}, } @incollection{dyke_chapter_2020, address = {London}, title = {Chapter 2 - {Genomic} data access policy models}, isbn = {978-0-12-816197-5}, url = {http://www.sciencedirect.com/science/article/pii/B9780128161975000024}, abstract = {This chapter focuses on how shared genomic data are made available to researchers through different data access policy models that have evolved in response to developments in genomics research and data-sharing policy. Data access policy plays a critical role in both enabling broad use of shared data and respecting the privacy and consent of research participants. Three data access policy models from the genomics community are described: open access, controlled access, and registered access. The advantages and drawbacks of each model regarding the ethical–legal considerations pertaining to genomic data sharing are discussed.}, language = {en}, booktitle = {Responsible {Genomic} {Data} {Sharing}}, publisher = {Academic Press}, author = {Dyke, Stephanie O. M.}, editor = {Jiang, Xiaoqian and Tang, Haixu}, month = jan, year = {2020}, doi = {10.1016/B978-0-12-816197-5.00002-4}, pages = {19--32}, } @incollection{eaton_chapter_2020, address = {London}, title = {Chapter {Six} - {Protecting} the data: {Security} and privacy}, isbn = {978-0-12-816543-0}, shorttitle = {Chapter {Six} - {Protecting} the data}, url = {http://www.sciencedirect.com/science/article/pii/B9780128165430000066}, abstract = {Hearing the word “data” may cause individuals to think about a wide variety of topics, from how much data they have used on their phone plans this month to the data that their social media accounts store. Data are defined as “individual facts, statistics, or items of information.” In healthcare, the amount of data is increasing, and data that once were only recorded on paper have now shifted to digital records. 
With this increase in data comes an increased responsibility for the need to secure them safely. This chapter focuses on data security, which includes information about specific privacy and security rules of healthcare data in the United States. These include the Health Insurance Portability and Accountability Act (HIPAA) Privacy Rule and its requirements for protecting data, the HIPAA Security Rule along with the best practices for securing data, and the HIPAA Breach Notification Rule along with the steps to take if a data breach occurs. Additional considerations for data security on an international level will also be presented.}, language = {en}, booktitle = {Data for {Nurses}}, publisher = {Academic Press}, author = {Eaton, India and McNett, Molly}, editor = {McNett, Molly}, month = jan, year = {2020}, doi = {10.1016/B978-0-12-816543-0.00006-6}, pages = {87--99}, } @article{patel_research_2016, title = {Research data management: a conceptual framework}, volume = {65}, issn = {0024-2535}, shorttitle = {Research data management}, url = {https://doi.org/10.1108/LR-01-2016-0001}, doi = {10.1108/LR-01-2016-0001}, abstract = {Purpose Research data management (RDM) is gaining a lot of momentum in the present day and rightly so. Research data are the core of any research study. The findings and conclusions of a study are entirely dependent on the research data. Traditional publishing did not focus on the presentation of data, along with the publications such as research monographs and especially journal articles, probably because of the difficulties involved in managing the research data sets. The current day technology, however, has helped in making this task easier. The purpose of this paper is to present a conceptual framework for managing research data at the institutional level. Design/methodology/approach This paper discusses the significance and advantages of sharing research data. 
In the spirit of open access to publications, freeing research data and making it available openly, with minimal restrictions, will help in not only furthering research and development but also avoiding duplication of efforts. The issues and challenges involved in RDM at the institutional level are discussed. Findings A conceptual framework for RDM at the institutional level is presented. A model for a National Repository of Open Research Data (NRORD) is also proposed, and the workflow of the functioning of NRORD is also presented. Originality/value The framework clearly presents the workflow of the data life-cycle in its various phases right from its creation, storage, organization and sharing. It also attempts to address crucial issues in RDM such as data privacy, data security, copyright and licensing. The framework may help the institutions in managing the research data life-cycle in a more efficient and effective manner.}, number = {4/5}, journal = {Library Review}, author = {Patel, Dimple}, month = jan, year = {2016}, pages = {226--241}, } @article{abrahamsson_agile_2017, title = {Agile {Software} {Development} {Methods}: {Review} and {Analysis}}, shorttitle = {Agile {Software} {Development} {Methods}}, url = {http://arxiv.org/abs/1709.08439}, abstract = {Agile - denoting "the quality of being agile, readiness for motion, nimbleness, activity, dexterity in motion" - software development methods are attempting to offer an answer to the eager business community asking for lighter weight along with faster and nimbler software development processes. This is especially the case with the rapidly growing and volatile Internet software industry as well as for the emerging mobile application environment. The new agile methods have evoked substantial amount of literature and debates. However, academic research on the subject is still scarce, as most of existing publications are written by practitioners or consultants. 
The aim of this publication is to begin filling this gap by systematically reviewing the existing literature on agile software development methodologies. This publication has three purposes. First, it proposes a definition and a classification of agile software development approaches. Second, it analyses ten software development methods that can be characterized as being "agile" against the defined criterion. Third, it compares these methods and highlights their similarities and differences. Based on this analysis, future research needs are identified and discussed.}, eprint = {1709.08439}, eprinttype = {arXiv}, eprintclass = {cs}, author = {Abrahamsson, Pekka and Salo, Outi and Ronkainen, Jussi and Warsta, Juhani}, month = sep, year = {2017}, pages = {112}, file = {Abrahamsson et al. - 2017 - Agile Software Development Methods Review and Ana.pdf:C\:\\Users\\carst\\Zotero\\storage\\JT82MV65\\Abrahamsson et al. - 2017 - Agile Software Development Methods Review and Ana.pdf:application/pdf}, } @article{suhr_menoci_2020, title = {menoci: {Lightweight} {Extensible} {Web} {Portal} enabling {FAIR} {Data} {Management} for {Biomedical} {Research} {Projects}}, shorttitle = {menoci}, url = {http://arxiv.org/abs/2002.06161}, abstract = {Background: Biomedical research projects deal with data management requirements from multiple sources like funding agencies' guidelines, publisher policies, discipline best practices, and their own users' needs. We describe functional and quality requirements based on many years of experience implementing data management for the CRC 1002 and CRC 1190. A fully equipped data management software should improve documentation of experiments and materials, enable data storage and sharing according to the FAIR Guiding Principles while maximizing usability, information security, as well as software sustainability and reusability. 
Results: We introduce the modular web portal software menoci for data collection, experiment documentation, data publication, sharing, and preservation in biomedical research projects. Menoci modules are based on the Drupal content management system which enables lightweight deployment and setup, and creates the possibility to combine research data management with a customisable project home page or collaboration platform. Conclusions: Management of research data and digital research artefacts is transforming from individual researcher or groups best practices towards project- or organisation-wide service infrastructures. To enable and support this structural transformation process, a vital ecosystem of open source software tools is needed. Menoci is a contribution to this ecosystem of research data management tools that is specifically designed to support biomedical research projects.}, eprint = {2002.06161}, eprinttype = {arXiv}, eprintclass = {cs}, author = {Suhr, Markus and Lehmann, Christoph and Bauer, Christian Robert and Bender, Theresa and Knopp, Cornelius and Freckmann, Luca and Hansen, Björn Öst and Henke, Christian and Aschenbrandt, Georg and Kühlborn, Lea and Rheinländer, Sophia and Weber, Linus and Marzec, Bartlomiej and Hellkamp, Marcel and Wieder, Philipp and Kusch, Harald and Sax, Ulrich and Nussbeck, Sara Yasemin}, month = feb, year = {2020}, pages = {19}, file = {Suhr et al. - 2020 - menoci Lightweight Extensible Web Portal enabling.pdf:C\:\\Users\\carst\\Zotero\\storage\\RV2XFSQ2\\Suhr et al. 
- 2020 - menoci Lightweight Extensible Web Portal enabling.pdf:application/pdf}, } @incollection{de_oliveira_clouds_2017, address = {Cham}, series = {Computer {Communications} and {Networks}}, title = {Clouds and {Reproducibility}: {A} {Way} to {Go} to {Scientific} {Experiments}?}, isbn = {978-3-319-54645-2}, shorttitle = {Clouds and {Reproducibility}}, url = {https://doi.org/10.1007/978-3-319-54645-2_5}, abstract = {Scientific research is supported by computing techniques and tools that allow for gathering, management, analysis, visualization, sharing, and reproduction of scientific data and its experiments. The simulations performed in this type of research are called in silico experiments, and they are commonly composed of several applications that execute traditional algorithms and methods. Reproducibility plays a key role and gives the ability to make changes in the data and test environment of a scientific experiment to evaluate the robustness of the proposed scientific method. By verifying and validating generated results of these experiments, there is an increase in productivity and quality of scientific data analysis processes resulting in the improvement of science development and production of complex data in various scientific domains. There are many challenges to enable experimental reproducibility in in silico experiments. Many of these challenges are related to guaranteeing that simulation programs and data are still available when scientists need to reproduce an experiment. Clouds can play a key role by offering the infrastructure for long-term preserving programs and data. The goal of this chapter is to characterize terms and requirements related to scientific reproducibility and show how clouds can aid the development and selection of reproducibility approaches in science.}, language = {en}, booktitle = {Cloud {Computing}: {Principles}, {Systems} and {Applications}}, publisher = {Springer International Publishing}, author = {de Oliveira, Ary H. M. 
and de Oliveira, Daniel and Mattoso, Marta}, editor = {Antonopoulos, Nick and Gillam, Lee}, year = {2017}, doi = {10.1007/978-3-319-54645-2_5}, pages = {127--151}, } @article{rowbotham_does_2019, title = {Does citizen science have the capacity to transform population health science?}, volume = {29}, issn = {0958-1596}, url = {https://doi.org/10.1080/09581596.2017.1395393}, doi = {10.1080/09581596.2017.1395393}, abstract = {Citizen science engages members of the public in research design, data collection, and analysis – in asking and answering questions about the world around them. The United States, European Union, and Australia have placed citizen science at the forefront of national science policy. Journals such as Science, Nature and Bioscience regularly feature projects conducted by citizens. Citizen science engages millions of people worldwide. However, to date, population health science has not relied heavily on citizen contributions. Although community-based participatory action research remains a strong foundational method to engage those affected by public health problems, there is additional potential to mainstream population health through wider, less intensive opportunities to be involved in our science. If we are to tackle the complex challenges that face population health then new avenues are needed to capture the energy and attention of citizens who may not feel affected by public health problems, i.e. to engage the ‘by-standers’ in population health science. Particular types of citizen science methods have the potential to do this. But simply increasing the breadth and volume of scientific evidence will not be enough. Complex, intractable, macro-level problems in population health require change in how our journals and funding bodies respond to data generated by the public. Of course, democratisation of science and the potential decentralisation of scientific authority will bring deep challenges. 
But potentially it brings a future where population health science is better known, understood and respected, with benefits for the types of public policies that derive from this science.}, number = {1}, journal = {Critical Public Health}, author = {Rowbotham, Samantha and McKinnon, Merryn and Leach, Joan and Lamberts, Rod and Hawe, Penelope}, month = jan, year = {2019}, pages = {118--128}, } @incollection{torcivia-rodriguez_primer_2019, address = {New York, NY}, series = {Methods in {Molecular} {Biology}}, title = {A {Primer} for {Access} to {Repositories} of {Cancer}-{Related} {Genomic} {Big} {Data}}, isbn = {978-1-4939-8868-6}, url = {https://doi.org/10.1007/978-1-4939-8868-6_1}, abstract = {The use of large datasets has become ubiquitous in biomedical sciences. Researchers in the field of cancer genomics have, in recent years, generated large volumes of data from their experiments. Those responsible for production of this data often analyze a narrow subset of this data based on the research question they are trying to address: this is the case whether or not they are acting independently or in conjunction with a large-scale cancer genomics project. The reality of this situation creates the opportunity for other researchers to repurpose this data for different hypotheses if the data is made easily and freely available. New insights in biology resulting from more researchers having access to data they otherwise would be unable to generate on their own are a boon for the field. The following chapter reviews several cancer genomics-related databases and outlines the type of data they contain, as well as the methods required to access each database. 
While this list is not comprehensive, it should provide a basis for cancer researchers to begin exploring some of the many large datasets that are available to them.}, language = {en}, booktitle = {Cancer {Bioinformatics}}, publisher = {Springer}, author = {Torcivia-Rodriguez, John and Dingerdissen, Hayley and Chang, Ting-Chia and Mazumder, Raja}, editor = {Krasnitz, Alexander}, year = {2019}, doi = {10.1007/978-1-4939-8868-6_1}, pages = {1--37}, } @article{engelhardt_forschungsdatenmanagement_2020, title = {Forschungsdatenmanagement in {DFG}-{Sonderforschungsbereichen}:}, copyright = {CC BY 4.0}, shorttitle = {Forschungsdatenmanagement in {DFG}-{Sonderforschungsbereichen}}, url = {https://bausteine-fdm.de/article/view/8157}, doi = {10.17192/bfdm.2020.1.8157}, abstract = {Dieser Artikel stellt die Ergebnisse einer Befragung zum Forschungsdatenmanagement und zur Forschungsdateninfrastruktur in DFG-Sonderforschungsbereichen vor, die im Herbst 2018 im Vorfeld eines Workshops zum selben Thema durchgeführt wurde und an der sich 20 SFBs beteiligten. Die Befragung bestand aus vier offenen Fragen, die die Verortung von INF- und anderen Datenmanagementteilprojekten innerhalb der SFBs, ihre Aufgaben, Herausforderungen sowie Policies und Technologien, die eingesetzt werden, zum Thema hatten. 
Zum Schluss werden die Ergebnisse mit denjenigen aus einer ähnlich angelegten Untersuchung aus dem Jahr 2013 verglichen.}, language = {de}, number = {1}, journal = {Bausteine Forschungsdatenmanagement}, author = {Engelhardt, Claudia}, month = apr, year = {2020}, pages = {16--27}, file = {Engelhardt - 2020 - Forschungsdatenmanagement in DFG-Sonderforschungsb.pdf:C\:\\Users\\carst\\Zotero\\storage\\Z2VDCWKC\\Engelhardt - 2020 - Forschungsdatenmanagement in DFG-Sonderforschungsb.pdf:application/pdf}, } @incollection{lauber-ronsberg_data_2018, address = {Wiesbaden}, title = {Data {Protection} {Laws}, {Research} {Ethics} and {Social} {Sciences}}, isbn = {978-3-658-12909-5}, url = {https://doi.org/10.1007/978-3-658-12909-5_4}, abstract = {This paper examines fundamental principles of European data protection law and subsequently uses two case studies in order to illustrate some data protection issues arising within the context of research in social sciences.}, language = {en}, booktitle = {Research {Ethics} in the {Digital} {Age}: {Ethics} for the {Social} {Sciences} and {Humanities} in {Times} of {Mediatization} and {Digitization}}, publisher = {Springer Fachmedien}, author = {Lauber-Rönsberg, Anne}, editor = {Dobrick, Farina Madita and Fischer, Jana and Hagen, Lutz M.}, year = {2018}, doi = {10.1007/978-3-658-12909-5_4}, pages = {29--44}, } @inproceedings{freire_reproducibility_2016, address = {Dagstuhl}, title = {Reproducibility of {Data}-{Oriented} {Experiments} in e-{Science} ({Dagstuhl} {Seminar} 16041)}, copyright = {CC BY}, url = {http://drops.dagstuhl.de/opus/volltexte/2016/5817/}, doi = {10.4230/DAGREP.6.1.108}, abstract = {This report documents the program and the outcomes of Dagstuhl Seminar 16041 “Reproducibility of Data-Oriented Experiments in e-Science”. In many subfields of computer science, experiments play an important role. 
Besides theoretic properties of algorithms or methods, their effectiveness and performance often can only be validated via experimentation. In most of these cases, the experimental results depend on the input data, settings for input parameters, and potentially on characteristics of the computational environment where the experiments were designed and run. Unfortunately, most computational experiments are specified only informally in papers, where experimental results are briefly described in figure captions; the code that produced the results is seldom available. This has serious implications. Scientific discoveries do not happen in isolation. Important advances are often the result of sequences of smaller, less significant steps. In the absence of results that are fully documented, reproducible, and generalizable, it becomes hard to re-use and extend these results. Besides hindering the ability of others to leverage our work, and consequently limiting the impact of our field, the absence of reproducibility experiments also puts our reputation at stake, since reliability and validity of empiric results are basic scientific principles. This seminar brought together experts from various sub-fields of computer science to create a joint understanding of the problems of reproducibility of experiments, discussing existing solutions and impediments, and proposing ways to overcome current limitations.}, language = {en}, author = {Freire, Juliana and Fuhr, Norbert and Rauber, Andreas}, year = {2016}, pages = {52}, file = {Freire et al. - 2016 - Reproducibility of Data-Oriented Experiments in e-.pdf:C\:\\Users\\carst\\Zotero\\storage\\67ZFI9UU\\Freire et al. - 2016 - Reproducibility of Data-Oriented Experiments in e-.pdf:application/pdf}, } @book{ands_creating_2018, title = {Creating a data management framework}, url = {https://www.ands.org.au/guides/creating-a-data-management-framework}, abstract = {Discusses creating a data management framework for research institutions. 
The framework outlines the basic elements required within an institutional context to support effective data management. Institutions have a role in establishing and promulgating policies and procedures and providing the requisite support infrastructure and services for good data management.}, language = {en}, author = {{ANDS}}, month = mar, year = {2018}, note = {Publication Title: ANDS}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\FS5AMNRX\\creating-a-data-management-framework.html:text/html}, } @book{adam_eln-wegweiserelektronische_2019, address = {Köln}, title = {{ELN}-{WegweiserElektronische} {Laborbücher} im {Kontext} von {Forschungsdatenmanagement} und guter wissenschaftlicher {Praxis} - ein {Wegweiser} für die {Lebenswissenschaften}}, copyright = {CC BY 4.0}, url = {https://repository.publisso.de/resource/frl:6415715}, language = {de}, author = {Adam, Beatrix and Lindstädt, Birte}, editor = {{ZB MED-Informationszentrum Lebenswissenschaften}}, year = {2019}, doi = {10.4126/FRL01-006415715}, file = {Adam und Lindstädt - 2019 - ELN-WegweiserElektronische Laborbücher im Kontext .pdf:C\:\\Users\\carst\\Zotero\\storage\\NEGRMSAP\\Adam und Lindstädt - 2019 - ELN-WegweiserElektronische Laborbücher im Kontext .pdf:application/pdf}, } @inproceedings{ahmed_typology_2018, address = {Red Hook, NY}, title = {A {Typology} of {Virtual} {Research} {Environments}}, copyright = {Attribution-NonCommercial-NoDerivatives 4.0 International}, isbn = {978-0-9981331-1-9}, url = {http://scholarspace.manoa.hawaii.edu/handle/10125/49974}, doi = {10.24251/HICSS.2018.087}, abstract = {Virtual Research Environments (VREs) are online spaces that support communication and collaboration among scientists. Hundreds of VREs have been constructed using various configurations of research tools and information and communication technologies (ICTs) to serve many disciplines and interdisciplinary inquiry. 
This study characterizes a large sample of VREs in terms of the research and ICT resources they incorporate and derives a typology of VREs based on their particular ICT configurations. The four types are correlated with previous VRE typologies and disciplinary domains. Results indicate that there are correspondences, but that types of ICT configurations also exhibit complex relationships with function and discipline.}, language = {en}, booktitle = {Proceedings of the 51st {Hawaii} {International} {Conference} on {System} {Sciences}}, publisher = {Curran Associates, Inc}, author = {Ahmed, Iftekhar and Poole, Marshall and Trudeau, Ashley}, month = mar, year = {2018}, pages = {10}, file = {Ahmed et al. - 2018 - A Typology of Virtual Research Environments.pdf:C\:\\Users\\carst\\Zotero\\storage\\KNDRSHR7\\Ahmed et al. - 2018 - A Typology of Virtual Research Environments.pdf:application/pdf}, } @book{antonopoulos_cloud_2017, address = {Cham}, series = {Computer {Communications} and {Networks}}, title = {Cloud {Computing}: {Principles}, {Systems} and {Applications}}, isbn = {978-3-319-54644-5 978-3-319-54645-2}, shorttitle = {Cloud {Computing}}, url = {http://link.springer.com/10.1007/978-3-319-54645-2}, abstract = {This practically-focused text/reference presents a comprehensive overview of the present state of the art in Cloud Computing, and examines the potential for future Cloud and Cloud-related technologies to address specific industrial and research challenges. Compiled as a series of selected papers from leading Cloud researchers, this new edition recognizes the relative maturity of Cloud, as offers contrast to the first edition, and explores both established and emergent principles, techniques, protocols and algorithms involved with the design, development, and management of Cloud-based systems. 
The text reviews a range of applications and methods for linking Clouds, undertaking data management and scientific data analysis, and addressing requirements both of data analysis and also of management of large scale and complex systems. This new edition also extends into the emergent next generation of mobile telecommunications, relating network function virtualization and mobile edge Cloud Computing, as supports emergence of, for example, Smart Grids and Smart Cities. As with the first edition, emphasis continues to be placed on the four quality-of-service cornerstones of efficiency, scalability, robustness, and security.}, language = {en}, publisher = {Springer International Publishing}, editor = {Antonopoulos, Nick and Gillam, Lee}, year = {2017}, doi = {10.1007/978-3-319-54645-2}, } @article{anzt_environment_2020, title = {An environment for sustainable research software in {Germany} and beyond: current state, open challenges, and call for action}, volume = {9}, issn = {2046-1402}, shorttitle = {An environment for sustainable research software in {Germany} and beyond}, url = {https://f1000research.com/articles/9-295/v1}, doi = {10.12688/f1000research.23224.1}, abstract = {Research software has become a central asset in academic research. It optimizes existing and enables new research methods, implements and embeds research knowledge, and constitutes an essential research product in itself. Research software must be sustainable in order to understand, replicate, reproduce, and build upon existing research or conduct new research effectively. In other words, software must be available, discoverable, usable, and adaptable to new needs, both now and in the future. Research software therefore requires an environment that supports sustainability. Hence, a change is needed in the way research software development and maintenance are currently motivated, incentivized, funded, structurally and infrastructurally supported, and legally treated. 
Failing to do so will threaten the quality and validity of research. In this paper, we identify challenges for research software sustainability in Germany and beyond, in terms of motivation, selection, research software engineering personnel, funding, infrastructure, and legal aspects. Besides researchers, we specifically address political and academic decision-makers to increase awareness of the importance and needs of sustainable research software practices. In particular, we recommend strategies and measures to create an environment for sustainable research software, with the ultimate goal to ensure that software-driven research is valid, reproducible and sustainable, and that software is recognized as a first class citizen in research. This paper is the outcome of two workshops run in Germany in 2019, at deRSE19 - the first International Conference of Research Software Engineers in Germany - and a dedicated DFG-supported follow-up workshop in Berlin.}, language = {en}, journal = {F1000Research}, author = {Anzt, Hartwig and Bach, Felix and Druskat, Stephan and Löffler, Frank and Loewe, Axel and Renard, Bernhard Y. and Seemann, Gunnar and Struck, Alexander and Achhammer, Elke and Aggarwal, Piush and Appel, Franziska and Bader, Michael and Brusch, Lutz and Busse, Christian and Chourdakis, Gerasimos and Dabrowski, Piotr Wojciech and Ebert, Peter and Flemisch, Bernd and Friedl, Sven and Fritzsch, Bernadette and Funk, Maximilian D. and Gast, Volker and Goth, Florian and Grad, Jean-Noël and Hermann, Sibylle and Hohmann, Florian and Janosch, Stephan and Kutra, Dominik and Linxweiler, Jan and Muth, Thilo and Peters-Kottig, Wolfgang and Rack, Fabian and Raters, Fabian H.C. and Rave, Stephan and Reina, Guido and Reißig, Malte and Ropinski, Timo and Schaarschmidt, Joerg and Seibold, Heidi and Thiele, Jan P. and Uekermann, Benjamin and Unger, Stefan and Weeber, Rudolf}, month = apr, year = {2020}, pages = {295}, file = {Anzt et al. 
- 2020 - An environment for sustainable research software i.pdf:C\:\\Users\\carst\\Zotero\\storage\\VLUTKUZM\\Anzt et al. - 2020 - An environment for sustainable research software i.pdf:application/pdf}, } @techreport{bach_muster-richtlinie_2019, address = {Potsdam}, title = {Muster-{Richtlinie} {Nachhaltige} {Forschungssoftware} an den {Helmholtz}-{Zentren}}, url = {https://gfzpublic.gfz-potsdam.de/pubman/faces/ViewItemOverviewPage.jsp?itemId=item_4906899}, language = {de}, institution = {Helmholtz Open Science Office}, author = {Bach, Felix and Bertuch, Oliver and Busse, Christian and Castell, Wolfgang zu and Celo, Sabine and Denker, Michael and Dinkelacker, Stefan and Druskat, Stephan and Faber, Claas and Finke, Ants and Fritzsch, Bernadette and Hammitzsch, M. and Haseleu, Julia and Konrad, Uwe and Krupa, Jörn and Leifels, Yvonne and Mohns-Pöschke, Kerstin and Moravcikova, Martina and Nöller, Joachim and Möhl, Christoph and Nolden, Marco and Scheinert, Markus and Schelhaas, Ute and Scheliga, Katharina Sara and Schlauch, Tobias and Schnicke, Thomas and Scholz, Almut and Schwennsen, Florian and Seifarth, Jenny and Selzer, Michael and Shishatskiy, Sergey and Steglich, Dirk and Strohbach, Sandra and Terhorst, Dennis and Al-Turany, Mohammad and Vierkant, Paul and Wieser, Thomas and Witter, Ludwig and Wortmann, Daniel}, year = {2019}, doi = {10.2312/os.helmholtz.007}, pages = {12}, file = {Bach et al. - 2019 - Muster-Richtlinie Nachhaltige Forschungssoftware a.pdf:C\:\\Users\\carst\\Zotero\\storage\\ZD4W4FWH\\Bach et al. 
- 2019 - Muster-Richtlinie Nachhaltige Forschungssoftware a.pdf:application/pdf}, } @book{bauer_e-health_2018, address = {Wiesbaden}, title = {E-{Health}: {Datenschutz} und {Datensicherheit}}, isbn = {978-3-658-15090-7 978-3-658-15091-4}, shorttitle = {E-{Health}}, language = {de}, publisher = {Springer Fachmedien}, author = {Bauer, Christoph and Eickmeier, Frank and Eckard, Michael}, year = {2018}, doi = {10.1007/978-3-658-15091-4}, } @inproceedings{bender_fair_2018, address = {Düsseldorf}, title = {{FAIR} conform {ETL} processing in translational research}, copyright = {This is an Open Access article distributed under the terms of the Creative Commons Attribution 4.0 License.}, url = {https://www.egms.de/static/en/meetings/gmds2018/18gmds095.shtml}, doi = {10.3205/18gmds095}, language = {en}, publisher = {German Medical Science GMS Publishing House}, author = {Bender, Theresa and Bauer, Christian R and Parciak, Marcel and Lodahl, Robert and Sax, Ulrich}, month = aug, year = {2018}, pages = {DocAbstr. 254}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\BQ43KYP7\\18gmds095.html:text/html}, } @article{candela_virtual_2013, title = {Virtual {Research} {Environments}: {An} {Overview} and a {Research} {Agenda}}, volume = {12}, issn = {1683-1470}, shorttitle = {Virtual {Research} {Environments}}, url = {http://datascience.codata.org/articles/abstract/10.2481/dsj.GRDI-013/}, doi = {10.2481/dsj.GRDI-013}, abstract = {Virtual Research Environments are innovative, web-based, community-oriented, comprehensive, flexible, and secure working environments conceived to serve the needs of modern science. We overview the existing initiatives developing these environments by highlighting the major distinguishing features. 
We envisage a future where regardless of geographical location, scientists will be able to use their Web browsers to seamlessly access data, software, and processing resources that are managed by diverse systems in separate administration domains via Virtual Research Environments. We identify and discuss the major challenges that should be resolved to fully achieve the proposed vision, i.e., large-scale integration and interoperability, sustainability, and adoption.}, language = {en}, journal = {Data Science Journal}, author = {Candela, Leonardo and Castelli, Donatella and Pagano, Pasquale}, month = jul, year = {2013}, pages = {GRDI75--GRDI81}, file = {Candela et al. - 2013 - Virtual Research Environments An Overview and a R.pdf:C\:\\Users\\carst\\Zotero\\storage\\4Y4KZKAN\\Candela et al. - 2013 - Virtual Research Environments An Overview and a R.pdf:application/pdf}, } @book{noauthor_forschungsprojekt_nodate, title = {Forschungsprojekt {DataJus}}, url = {https://tu-dresden.de/gsw/jura/igetem/jfbimd13/forschung/forschungsprojekt-datajus?set_language=de}, abstract = {Rechtliche Rahmenbedingungen des Forschungsdatenmanagements Vom 01.06.2017 bis zum 30.09.2019 untersuchte das durch das BMBF geförderte Projekt DataJus die rechtlichen Rahmenbedingungen des …}, language = {de}, note = {Publication Title: TU Dresden Type: Document}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\GGJM8EAV\\forschungsprojekt-datajus.html:text/html}, } @book{noauthor_datenschutz-grundverordnung_nodate, title = {Datenschutz-{Grundverordnung}}, url = {https://dejure.org/gesetze/DSGVO/4.html}, language = {de}, note = {Section: Kapitel I - Allgemeine Bestimmungen (Art. 
1 - 4)}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\ZHBMXFMM\\4.html:text/html}, } @techreport{noauthor_verfahrensbeschreibung_2014, address = {Greifswald, Göttingen}, title = {Verfahrensbeschreibung und {Datenschutzkonzept} des {Zentralen} {Datenmanagements} des {DZHK}}, url = {https://dzhk.de/fileadmin/user_upload/Datenschutzkonzept_des_DZHK.pdf}, language = {de}, number = {Version 1.2}, institution = {Deutsches Zentrum für Herz-Kreislauf Forschung e.V.}, month = mar, year = {2014}, pages = {62}, file = {Universitätsmedizin Greifswald KdöR Institut für Community Medicine und Universitätsmedizin Göttingen Institut für Medizinische Informatik - 2014 - Verfahrensbeschreibung und Datenschutzkonzept des .pdf:C\:\\Users\\carst\\Zotero\\storage\\YTZ45JHK\\Universitätsmedizin Greifswald KdöR Institut für Community Medicine und Universitätsmedizin Göttingen Institut für Medizinische Informatik - 2014 - Verfahrensbeschreibung und Datenschutzkonzept des .pdf:application/pdf}, } @article{eitzel_citizen_2017, title = {Citizen {Science} {Terminology} {Matters}: {Exploring} {Key} {Terms}}, volume = {2}, issn = {2057-4991}, shorttitle = {Citizen {Science} {Terminology} {Matters}}, url = {http://theoryandpractice.citizenscienceassociation.org/article/10.5334/cstp.96/}, doi = {10.5334/cstp.96}, abstract = {Much can be at stake depending on the choice of words used to describe citizen science, because terminology impacts how knowledge is developed. Citizen science is a quickly evolving field that is mobilizing people’s involvement in information development, social action and justice, and large-scale information gathering. Currently, a wide variety of terms and expressions are being used to refer to the concept of ‘citizen science’ and its practitioners. Here, we explore these terms to help provide guidance for the future growth of this field. 
We do this by reviewing the theoretical, historical, geopolitical, and disciplinary context of citizen science terminology; discussing what citizen science is and reviewing related terms; and providing a collection of potential terms and definitions for ‘citizen science’ and people participating in citizen science projects. This collection of terms was generated primarily from the broad knowledge base and on-the-ground experience of the authors, by recognizing the potential issues associated with various terms. While our examples may not be systematic or exhaustive, they are intended to be suggestive and invitational of future consideration. In our collective experience with citizen science projects, no single term is appropriate for all contexts. In a given citizen science project, we suggest that terms should be chosen carefully and their usage explained; direct communication with participants about how terminology affects them and what they would prefer to be called also should occur. We further recommend that a more systematic study of terminology trends in citizen science be conducted.}, language = {en}, number = {1}, journal = {Citizen Science: Theory and Practice}, author = {Eitzel, M. V. and Cappadonna, Jessica L. and Santos-Lang, Chris and Duerr, Ruth Ellen and Virapongse, Arika and West, Sarah Elizabeth and Kyba, Christopher Conrad Maximillian and Bowser, Anne and Cooper, Caren Beth and Sforzi, Andrea and Metcalfe, Anya Nova and Harris, Edward S. and Thiel, Martin and Haklay, Mordechai and Ponciano, Lesandro and Roche, Joseph and Ceccaroni, Luigi and Shilling, Fraser Mark and Dörler, Daniel and Heigl, Florian and Kiessling, Tim and Davis, Brittany Y. and Jiang, Qijun}, month = jun, year = {2017}, pages = {1}, file = {Eitzel et al. - 2017 - Citizen Science Terminology Matters Exploring Key.pdf:C\:\\Users\\carst\\Zotero\\storage\\DY7ARNU5\\Eitzel et al. 
- 2017 - Citizen Science Terminology Matters Exploring Key.pdf:application/pdf}, } @article{engelhardt_forschungsdatenmanagement_2013, title = {Forschungsdatenmanagement in {DFG}-{SFBs}}, volume = {23}, url = {https://edoc.hu-berlin.de/handle/18452/9697}, doi = {10.18452/9045}, abstract = {Seit 2007 gibt es im Rahmen von DFG-Sonderforschungsbereichen die Möglichkeit, ein „Teilprojekt Informationsinfrastruktur“ (kurz: INF-Projekt) zu beantragen. Dieses dient dem Forschungsdatenmanagement innerhalb des Sonderforschungsbereiches, was sowohl die Konzeption des Datenmanagements als auch die Bereitstellung der erforderlichen Infrastruktur beinhalten kann. Das Konzept der INF-Projekte sieht dabei die Kooperation der Fachwissenschaft mit den Informationsinfrastruktureinrichtungen am Standort vor, bspw.beispielsweise mit der Bibliothek oder dem Rechenzentrum. In dieser Hinsicht können die INF-Projekte auch als ein spezifischer Anwendungsfall des von Embedded Librarianship betrachtet werden. In dieser Arbeit werden die Ergebnisse einer Befragung und eines Workshops mit Vertretern der INF-Projekte vorgestellt, die interessante Einblicke in die Arbeit derselben gewähren.}, language = {de}, journal = {LIBREAS Library Ideas}, author = {Engelhardt, Claudia}, month = oct, year = {2013}, file = {Engelhardt - 2013 - Forschungsdatenmanagement in DFG-SFBs.pdf:C\:\\Users\\carst\\Zotero\\storage\\36ISZ3Y2\\Engelhardt - 2013 - Forschungsdatenmanagement in DFG-SFBs.pdf:application/pdf}, } @book{noauthor_welche_nodate, title = {Welche personenbezogenen {Daten} gelten als sensibel?}, url = {https://ec.europa.eu/info/law/law-topic/data-protection/reform/rules-business-and-organisations/legal-grounds-processing-data/sensitive-data/what-personal-data-considered-sensitive_de}, abstract = {Daten zu Religion, Politik, Gesundheit usw. 
gelten gemäß EU-Datenschutzvorschriften als sensibel und stehen unter einem besonderen Schutz.}, language = {de}, note = {Publication Title: EU-Kommission - European Commission Type: Text}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\JTLIU45W\\what-personal-data-considered-sensitive_de.html:text/html}, } @book{noauthor_forschungslizenzen_nodate, title = {Forschungslizenzen}, url = {http://forschungslizenzen.de/}, abstract = {Lizenzierung von Forschungsdaten einfach gemacht. Rechte, Pflichten \& Möglichkeiten: Hier finden Sie Erklärungen, Use-Cases und Materialien für den Weg zu Open Science und Open Access.}, language = {de-DE}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\QH7RD7L9\\forschungslizenzen.de.html:text/html}, } @book{noauthor_home_nodate, title = {home {\textbackslash}textbar {European} {Genome}-phenome {Archive}}, url = {https://www.ebi.ac.uk/ega/home}, file = {home | European Genome-phenome Archive:C\:\\Users\\carst\\Zotero\\storage\\DQU4RF94\\home.html:text/html}, } @techreport{noauthor_micro-moments_2017, type = {Note to the researcher visiting {Eurostat}'s {Safe} {Center}}, title = {Micro-{Moments} {Dataset} linked micro-aggregated data on {ICT} usage, innovation and economic performance in enterprises}, url = {https://ec.europa.eu/eurostat/documents/203647/6867168/Safe+centre+rules+for+MMD.pdf/}, language = {en}, year = {2017}, pages = {[7]}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\33UBZIJX\\_.pdf:application/pdf}, } @book{noauthor_rechte_nodate, title = {Rechte und {Pflichten} {\textbackslash}textbar {Themen} {\textbackslash}textbar {Forschungsdaten} und {Forschungsdatenmanagement}}, url = {https://www.forschungsdaten.info/themen/rechte-und-pflichten/}, note = {Publication Title: forschungsdaten.info}, file = {Rechte und Pflichten | Themen | Forschungsdaten und Forschungsdatenmanagement:C\:\\Users\\carst\\Zotero\\storage\\8UFPVR2U\\rechte-und-pflichten.html:text/html}, } @book{noauthor_elektronische_nodate, title = 
{Elektronische {Laborbücher}}, url = {https://www.forschungsdaten.org/index.php/Elektronische_Laborb%C3%BCcher}, note = {Publication Title: forschungsdaten.org}, } @book{noauthor_zusammenarbeiten_nodate, title = {Zusammenarbeiten : {FZI} {Forschungszentrum} {Informatik}}, shorttitle = {Zusammenarbeiten}, url = {https://www.fzi.de/wir-fuer-sie/zusammenarbeiten/}, abstract = {Wie eine Zusammenarbeit mit uns aussehen kann, welche Kooperationsformen es gibt und wie wir arbeiten, stellen wir Ihnen hier vor.}, language = {de}, note = {Publication Title: FIZ}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\RKA8M7J8\\zusammenarbeiten.html:text/html}, } @book{noauthor_gdd_nodate, title = {{GDD} e.{V}.}, url = {https://www.gdd.de/}, file = {GDD e.V.:C\:\\Users\\carst\\Zotero\\storage\\3WKHTYUF\\www.gdd.de.html:text/html}, } @book{noauthor_gesis_nodate, title = {{GESIS} – {Leibniz}-{Institut} für {Sozialwissenschaften}}, url = {https://www.gesis.org/angebot/daten-analysieren/weitere-sekundaerdaten/secure-data-center-sdc}, note = {Publication Title: gesis.org}, } @article{hothorn_biometrical_2009, title = {Biometrical {Journal} and {Reproducible} {Research}}, volume = {51}, copyright = {Copyright © 2009 WILEY‐VCH Verlag GmbH \& Co. KGaA, Weinheim}, issn = {1521-4036}, doi = {10.1002/bimj.200900154}, language = {en}, number = {4}, journal = {Biometrical Journal}, author = {Hothorn, Torsten and Held, Leonhard and Friede, Tim}, year = {2009}, pages = {553--555}, file = {Hothorn et al. - 2009 - Biometrical Journal and Reproducible Research.pdf:C\:\\Users\\carst\\Zotero\\storage\\DL6TKQSC\\Hothorn et al. 
- 2009 - Biometrical Journal and Reproducible Research.pdf:application/pdf}, } @book{kalberg_datenschutz_2014, address = {Münster}, series = {Schriften zum {Informations}-, {Telekommunikations}- und {Medienrecht}}, title = {Datenschutz an {Hochschulen} : eine {Analyse} der {Rechtsgrundlagen} und ihrer {Umsetzung} in integriertem {Informationsmanagement} und {Forschung}}, isbn = {978-3-643-12155-4}, url = {http://www.gbv.de/dms/spk/sbb/toc/78478633x.pdf}, language = {de}, number = {46}, publisher = {LIT}, author = {Kalberg, Nadine}, year = {2014}, } @book{gkoulalas-divanis_medical_2015, address = {Cham}, title = {Medical {Data} {Privacy} {Handbook}}, isbn = {978-3-319-23632-2 978-3-319-23633-9}, abstract = {This handbook covers Electronic Medical Record (EMR) systems, which enable the storage, management, and sharing of massive amounts of demographic, diagnosis, medication, and genomic information. It presents privacy-preserving methods for medical data, ranging from laboratory test results to doctors’ comments. The reuse of EMR data can greatly benefit medical science and practice, but must be performed in a privacy-preserving way according to data sharing policies and regulations. Written by world-renowned leaders in this field, each chapter offers a survey of a research direction or a solution to problems in established and emerging research areas. The authors explore scenarios and techniques for facilitating the anonymization of different types of medical data, as well as various data mining tasks. Other chapters present methods for emerging data privacy applications and medical text de-identification, including detailed surveys of deployed systems. A part of the book is devoted to legislative and policy issues, reporting on the US and EU privacy legislation and the cost of privacy breaches in the healthcare domain. 
This reference is intended for professionals, researchers and advanced-level students interested in safeguarding medical data.}, language = {en}, publisher = {Springer International Publishing}, editor = {Gkoulalas-Divanis, Aris and Loukides, Grigorios}, year = {2015}, doi = {10.1007/978-3-319-23633-9}, } @article{hothorn_case_2011, title = {Case studies in reproducibility}, volume = {12}, issn = {1467-5463}, url = {https://academic.oup.com/bib/article/12/3/288/258098}, doi = {10.1093/bib/bbq084}, abstract = {Reproducible research is a concept of providing access to data and software along with published scientific findings. By means of some case studies from different disciplines, we will illustrate reasons why readers should be given the possibility to look at the data and software independently from the authors of the original publication. We report results of a survey comprising 100 papers recently published in Bioinformatics. The main finding is that authors of this journal share a culture of making data available. However, the number of papers where source code for simulation studies or analyzes is available is still rather limited.}, language = {en}, number = {3}, journal = {Briefings in Bioinformatics}, author = {Hothorn, Torsten and Leisch, Friedrich}, month = may, year = {2011}, pages = {288--300}, file = {Hothorn und Leisch - 2011 - Case studies in reproducibility.pdf:C\:\\Users\\carst\\Zotero\\storage\\PBK98WUB\\Hothorn und Leisch - 2011 - Case studies in reproducibility.pdf:application/pdf}, } @book{noauthor_agiles_nodate, title = {Agiles {Arbeiten} in verteilten {Teams}}, url = {https://www.it-agile.de/wissen/agile-teams/agiles-arbeiten-in-verteilten-teams/}, abstract = {Verteilte Teams wird es immer geben. Um den speziellen Herausforderungen gerecht zu werden, helfen die passenden Prinzipien und das Erlernen geeigeneter Skills. 
Agil in einem verteilten Team zu arbeiten, ist kein "Hexenwerk" und kann dem Unternehmen wertschöpfende Ergebnisse liefern.}, language = {de}, note = {Publication Title: it-agile.de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\A62HLI6H\\agiles-arbeiten-in-verteilten-teams.html:text/html}, }
@article{nurmi_privacy_2018,
  title      = {Privacy of {Clinical} {Research} {Subjects}: {An} {Integrative} {Literature} {Review}},
  volume     = {14},
  copyright  = {© The Author(s) 2018},
  shorttitle = {Privacy of {Clinical} {Research} {Subjects}},
  url        = {https://journals.sagepub.com/doi/10.1177/1556264618805643},
  doi        = {10.1177/1556264618805643},
  abstract   = {With changes in clinical research practice, the importance of a study-subject’s privacy and the confidentiality of their personal data is growing. However, the body of research is fragmented, and a synthesis of work in this area is lacking. Accordingly, an integrative review was performed, guided by Whittemore and Knafl’s work. Data from PubMed, Scopus, and CINAHL searches from January 2012 to February 2017 were analyzed via the constant comparison method. From 16 empirical and theoretical studies, six topical aspects were identified: the evolving nature of health data in clinical research, sharing of health data, the challenges of anonymizing data, collaboration among stakeholders, the complexity of regulation, and ethics-related tension between social benefits and privacy.
Study subjects’ privacy is an increasingly important ethics principle for clinical research, and privacy protection is rendered even more challenging by changing research practice.}, language = {en}, number = {1}, journal = {Journal of Empirical Research on Human Research Ethics}, author = {Nurmi, Sanna-Maria and Kangasniemi, Mari and Halkoaho, Arja and Pietilä, Anna-Maija}, month = oct, year = {2018}, pages = {33--48}, } @book{pommerening_leitfaden_2014, address = {Berlin}, title = {Leitfaden zum {Datenschutz} in medizinischen {Forschungsprojekten}: {Generische} {Lösungen} der {TMF} 2.0}, isbn = {978-3-95466-295-1}, url = {https://www.mwv-open.de/site/books/m/10.32745/9783954662951/}, abstract = {Das Vertrauen von Patienten und Probanden ist eine unverzichtbare Voraussetzung für den Erfolg medizinischer Forschungsprojekte, die ohne die Erhebung, langfristige Speicherung und Analyse von klinischen Daten und Proben nicht durchgeführt werden können. Medizinische Forschung arbeitet heute überwiegend vernetzt in zunehmend größeren Forschungsverbünden. Entsprechend nimmt auch die Bedeutung von Datenschutz und Datensicherheit immer weiter zu. Die TMF hat bereits 2003 erstmals generische Datenschutzkonzepte für medizinische Forschungsverbünde veröffentlicht. Auf dieser Basis konnten zahlreiche Forschungsprojekte ihre Datenschutzkonzepte schneller erarbeiten und abstimmen. Die dabei gewonnenen Erfahrungen sind in die grundlegende Überarbeitung der generischen Konzepte eingeflossen. So trägt das neue Konzept der Vielschichtigkeit medizinischer Forschungsprozesse durch einen modularen Aufbau Rechnung und wurde zudem in einen umfassenden Leitfaden eingebettet.}, language = {de}, publisher = {Medizinisch Wissenschaftliche Verlagsgesellschaft}, author = {Pommerening, Klaus and Drepper, Johannes and Helbing, Krister and Ganslandt, Thomas}, year = {2014}, doi = {10.32745/9783954662951}, file = {Pommerening et al. 
- 2014 - Leitfaden zum Datenschutz in medizinischen Forschu.pdf:C\:\\Users\\carst\\Zotero\\storage\\J4FFUXF2\\Pommerening et al. - 2014 - Leitfaden zum Datenschutz in medizinischen Forschu.pdf:application/pdf}, } @article{lechler_exploring_2017, title = {Exploring the {Role} of {Project} {Management} in the {Development} of the {Academic} {Agile} {Software} {Discourse}: {A} {Bibliometric} {Analysis}:}, volume = {48}, copyright = {© 2017 Project Management Institute, Inc}, shorttitle = {Exploring the {Role} of {Project} {Management} in the {Development} of the {Academic} {Agile} {Software} {Discourse}}, url = {https://journals.sagepub.com/doi/10.1177/875697281704800101}, doi = {10.1177/875697281704800101}, abstract = {The practical applications of agile methods and their impact on the productivity and efficiency of software development dominate the agile literature. We analyzed 827 academic articles with bibliometric techniques to explore the role project management research played in the development of the academic agile discourse. Bibliometric analyses over two time periods reveal that project management–related topics form a distinct stream of research in the second time period but not in the first. Furthermore, our results reveal that the academic agile discussion has been mainly unidirectional. This situation offers many opportunities for project management researchers to contribute to the agile discourse.}, language = {en}, number = {1}, journal = {Project Management Journal}, author = {Lechler, Thomas G. 
and Yang, Siwen}, month = feb, year = {2017}, pages = {3--18}, }
@misc{noauthor_informationsplattform_nodate,
  title = {Informationsplattform {Open} {Access}: {Lizenzen}},
  url   = {https://open-access.net/informationen-zu-open-access/rechtsfragen/rechtsfragen-in-deutschland/lizenzen},
  file  = {Informationsplattform Open Access\: Lizenzen:C\:\\Users\\carst\\Zotero\\storage\\WL9GFVXM\\lizenzen.html:text/html},
}
@misc{noauthor_lizenzen_nodate,
  title = {Lizenzen für {Forschungsdaten} {\textbar} {RADAR} - {Ein} {Repository} für die {Wissenschaft}},
  url   = {https://www.radar-service.eu/de/lizenzen-fuer-forschungsdaten},
  note  = {Publication Title: radar-service.eu},
  file  = {Lizenzen für Forschungsdaten | RADAR - Ein Repository für die Wissenschaft:C\:\\Users\\carst\\Zotero\\storage\\RCNRP8AD\\lizenzen-fuer-forschungsdaten.html:text/html},
}
@article{repschlager_cloud_2010,
  title      = {Cloud {Computing}: {Definitionen}, {Geschäftsmodelle} und {Entwicklungspotenziale}},
  volume     = {47},
  issn       = {2198-2775},
  shorttitle = {Cloud {Computing}},
  url        = {https://doi.org/10.1007/BF03340507},
  doi        = {10.1007/BF03340507},
  abstract   = {Architekturparadigmen der IT stehen stets im Spannungsfeld von Zentralisierung und Dezentralisierung. Im Rahmen des Cloud Computing vollzieht sich aktuell eine Hinwendung zu stärker zentralisierten Architekturen. Die Anwender von IT-Dienstleistungen werden dabei weitgehend von der kapitalintensiven Beschaffung der Hard- und Software sowie der Vorhaltung eigener Personalressourcen befreit. Im Modell des Cloud Computing werden IT-Dienstleistungen bedarfsorientiert auf Basis eines Mietmodells bezogen.
Cloud Computing ermöglicht es Unternehmen, auf Basis skalierbarer, lose gekoppelter IT-Ressourcen und variabler Kostenmodelle ein hohes Maβ an Flexibilität zu erreichen.}, language = {de}, number = {5}, journal = {HMD Praxis der Wirtschaftsinformatik}, author = {Repschläger, Jonas and Pannicke, Danny and Zarnekow, Rüdiger}, month = oct, year = {2010}, pages = {6--15}, } @techreport{noauthor_datentreuhandstellen_2020, address = {Göttingen}, title = {Datentreuhandstellen gestalten – zu {Erfahrungen} der {Wissenschaft}}, copyright = {Creative Commons Namensnennung –Weitergabe unter gleichen Bedingungen 4.0 International}, url = {http://www.rfii.de/?p=4259}, language = {de}, institution = {Rat für Informationsinfrastrukturen}, year = {2020}, pages = {8}, file = {2016 - Datentreuhandstellen gestalten – zu Erfahrungen de.pdf:C\:\\Users\\carst\\Zotero\\storage\\BP5WGLKU\\2016 - Datentreuhandstellen gestalten – zu Erfahrungen de.pdf:application/pdf}, } @article{shah_sharing_2019, title = {Sharing data for future research—engaging participants’ views about data governance beyond the original project: a {DIRECT} {Study}}, volume = {21}, copyright = {2018 American College of Medical Genetics and Genomics}, issn = {1530-0366}, shorttitle = {Sharing data for future research—engaging participants’ views about data governance beyond the original project}, url = {https://www.nature.com/articles/s41436-018-0299-7}, doi = {10.1038/s41436-018-0299-7}, abstract = {Biomedical data governance strategies should ensure that data are collected, stored, and used ethically and lawfully. However, research participants’ preferences for how data should be governed is least studied. The Diabetes Research on Patient Stratification (DIRECT) project collected substantial amounts of health and genetic information from patients at risk of, and with type II diabetes. We conducted a survey to understand participants’ future data governance preferences. 
Results will inform the postproject data governance strategy.}, language = {en}, number = {5}, journal = {Genetics in Medicine}, author = {Shah, Nisha and Coathup, Victoria and Teare, Harriet and Forgie, Ian and Giordano, Giuseppe Nicola and Hansen, Tue Haldor and Groeneveld, Lenka and Hudson, Michelle and Pearson, Ewan and Ruetten, Hartmut and Kaye, Jane}, year = {2019}, pages = {1131--1138}, file = {Shah et al. - 2019 - Sharing data for future research—engaging particip.pdf:C\:\\Users\\carst\\Zotero\\storage\\V4827NJD\\Shah et al. - 2019 - Sharing data for future research—engaging particip.pdf:application/pdf}, } @inproceedings{roertgen_posters_2019, address = {Göttingen}, title = {Posters presented at "{Workshop} zu {Forschungsdatenmanagement} und -infrastruktur in {DFG}-{Sonderforschungsbereichen}", 26./27. {November} 2018 in {Göttingen}}, url = {https://data.goettingen-research-online.de/dataset.xhtml?persistentId=doi:10.25625/22Y1WC}, doi = {10.25625/22Y1WC}, abstract = {Posters presented at "Workshop zu Forschungsdatenmanagement und -infrastruktur in DFG-Sonderforschungsbereichen", 26./27. 
November 2018 in Göttinge...}, language = {en}, author = {Roertgen, Steffen and Kusch, Harald and Engelhardt, Claudia and Bingert, Sven and Savin, Valeria and Kraus, Inga and Brand, Ortrun and Dierkes, Jens and Curdt, Constanze and Löschen, Christian and Vompras, Johanna and Paul-Stüve, Thilo and Schwandt, Silke}, year = {2019}, } @incollection{rottgen_rechtspositionen_2020, address = {Berlin}, title = {Rechtspositionen an {Daten}: {Die} {Rechtslage} im europäischen {Rechtsraum}}, isbn = {978-3-503-18782-9 3-503-18782-0}, language = {de}, booktitle = {Datenrecht in der {Digitalisierung}}, publisher = {Erich Schmidt Verlag}, author = {Röttgen, Charlotte}, editor = {Specht-Riemenschneider, Louisa and Werry, Nikola and Werry, Susanne}, year = {2020}, } @techreport{schallabock_orcid_2017, address = {Berlin}, type = {Gutachten}, title = {{ORCID} aus datenschutzrechtlicher {Sicht}: {Gutachten} im {Auftrag} des von der {Deutschen} {Forschungsgemeinschaft} ({DFG}) geförderten {Projektes} {ORCID} {DE} zur {Förderung} der {Open} {Researcher} and {Contributor} {ID} in {Deutschland}}, copyright = {CC BY}, url = {https://gfzpublic.gfz-potsdam.de/rest/items/item_2263903_5/component/file_2265895/content}, language = {de}, institution = {iRights.Law}, author = {Schallaböck, Jan and Grafenstein, Max von}, year = {2017}, pages = {51}, file = {Schallaböck und Grafenstein - 2017 - ORCID aus datenschutzrechtlicher Sicht Gutachten .pdf:C\:\\Users\\carst\\Zotero\\storage\\Y26H5NWV\\Schallaböck und Grafenstein - 2017 - ORCID aus datenschutzrechtlicher Sicht Gutachten .pdf:application/pdf}, } @incollection{schapke_collaborative_2018, address = {Cham}, title = {Collaborative {Data} {Management}}, isbn = {978-3-319-92862-3}, url = {https://doi.org/10.1007/978-3-319-92862-3_14}, abstract = {The design, construction and operation of buildings is a collaborative process involving numerous project participants who exchange information on an ongoing basis. 
Many of their working and communication processes can be significantly improved by using a uniformly structured building information model. A centralized approach to the administration of model information simplifies coordination between project participants and their communications and makes it possible to monitor the integrity of the information as well as to obtain an overview of project progress at any time. Depending on which model information from which project phases and/or sections need to be worked on by which partners, different forms and means of cooperation can be employed. This chapter presents different methodical approaches, practical techniques and available software systems for cooperative data administration. It discusses the different information resources and possible forms of cooperation for model-based collaboration and explains the underlying technical concepts, such as concurrency checking and versioning along with rights and permissions management. Several different software systems available for cooperative data administration are also presented. The chapter concludes with a brief look at future developments and the challenges still to be faced.}, language = {en}, booktitle = {Building {Information} {Modeling}: {Technology} {Foundations} and {Industry} {Practice}}, publisher = {Springer International Publishing}, author = {Schapke, Sven-Eric and Beetz, Jakob and König, Markus and Koch, Christian and Borrmann, André}, editor = {Borrmann, André and König, Markus and Koch, Christian and Beetz, Jakob}, year = {2018}, doi = {10.1007/978-3-319-92862-3_14}, pages = {251--277}, } @incollection{schefer_wissensbegriff_2012, address = {Bern}, title = {Der {Wissensbegriff} am {Limit}? – {Kollaborative} {Wissensgenerierung} im {Grossforschungsprojekt} {ATLAS} am {CERN}}, booktitle = {{MetaATLAS}. 
{Studien} zur {Generierung}, {Validierung} und {Kommunikation} von {Wissen} in einer modernen {Forschungskollaboration}}, publisher = {Bern Studies in the History and Philosophy of Science}, author = {Schefer, Maya}, editor = {Graßhoff, Gerd and Wüthrich, Adrian}, year = {2012}, pages = {83--108}, } @book{schmidt_datenschutz_2012, address = {Bonn}, series = {Schriftenreihe / {Bundeszentrale} für {Politische} {Bildung}}, title = {Datenschutz: {Grundlagen}, {Entwicklungen} und {Kontroversen}}, isbn = {978-3-8389-0190-9}, shorttitle = {Datenschutz}, abstract = {Eine Vielzahl technischer und medialer Innovationen sowie die Aufdeckung zahlreicher Datenpannen haben in den vergangenen Jahren das Themenfeld Datenschutz in den Fokus der breiten Öffentlichkeit gerückt. Ob beruflich oder privat, ob gegenüber Unternehmen oder Behörden, ob in den vernetzten Öffentlichkeiten des Internets oder in den öffentlichen Räumen unser Städte: Die elektronische Datenverarbeitung dringt in alle Lebensbereiche vor, überall werden Daten unterschiedlichster Art erhoben, gespeichert und miteinander verknüpft. Um das Grundrecht auf informationelle Selbstbestimmung wahrnehmen zu können und sich in aktuellen Datenschutz-Kontroversen eine eigene Meinung zu bilden, ist ein Basisverständnis für die Prinzipien und nationalen wie internationalen Rahmenbedingungen des Datenschutzes nötig. Der interdisziplinär angelegte Sammelband gibt einen allgemeinverständlichen Überblick zum aktuellen Stand von Recht, Technik und gesellschaftliche Debatten, zu Herausforderungen, Chancen und Risiken sowie zu möglichen Szenarien der zukünftigen Entwicklung. In fünf Abschnitten enthält der Band nicht nur eine Bestandsaufnahme der gegenwärtigen Datenschutzregelungen und ihres Anpassungsbedarfs an das digitale Zeitalter, sondern beleuchtet auch sozialwissenschaftliche, pädagogische, politische und psychologische Aspekte. 
Damit leistet er einen facettenreichen Beitrag zum Verständnis der gegenwärtigen Fragen des Datenschutzes, die sich angesichts des raschen Wandels der Informations- und Kommunikationstechnik in Zukunft immer drängender stellen werden. (bpb)}, language = {de}, number = {1190}, publisher = {Bundeszentrale für Politische Bildung}, editor = {Schmidt, Jan-Hinrik and Weichert, Thilo}, year = {2012}, file = {Schmidt und Weichert - 2012 - Datenschutz Grundlagen, Entwicklungen und Kontrov.pdf:C\:\\Users\\carst\\Zotero\\storage\\L7KGNZQM\\Schmidt und Weichert - 2012 - Datenschutz Grundlagen, Entwicklungen und Kontrov.pdf:application/pdf}, } @phdthesis{simons_robust_2020, address = {Berlin}, type = {Dissertation}, title = {A robust high-resolution hydrodynamic numerical model for surface water flow and transport processes within a flexible software framework}, copyright = {https://creativecommons.org/licenses/by/4.0/}, url = {https://depositonce.tu-berlin.de/handle/11303/10689}, abstract = {In water resources management computer simulations and numerical methods have become a more and more important tool for supporting decisions. They enable the understanding of complex relationships in the hydrological cycle and predictions about future development. This thesis describes the development of a numerical modeling software which allows the integrated simulation of surface water flow and transport processes as well as their interactions. To make use of the improvements in the methods to survey high-resolution topography information and to capture small-scale processes, numerical methods were developed which allow a robust and highly detailed simulation of surface water flow and transport processes in urban and natural environments. A robust numerical scheme for the solution of the shallow water equations based on the finite volume method was implemented, which can handle complex flow conditions, e. g. 
small water depths, wetting/drying and varying flow conditions including sub- and supercritical flows, hydraulic jumps and sharp water level gradients. In addition, the shallow water equations were augmented by the transport of contaminants and sediments and an infiltration model based on the Green-Ampt equation. A numerical framework was developed which provides the fundamental infrastructure for explicit high-order finite volume schemes, robust numerical methods and a flexible codebase which allows simple extension by new processes and numerical schemes. By means of several verification tests and case studies involving channel flow, rainfall-runoff, tracer transport, infiltration and sediment transport, the suitability of the software framework and the developed numerical schemes was demonstrated.}, language = {en}, school = {Technische Universität Berlin}, author = {Simons, Franz}, year = {2020}, doi = {10.14279/depositonce-9589}, file = {Simons - 2020 - A robust high-resolution hydrodynamic numerical mo.pdf:C\:\\Users\\carst\\Zotero\\storage\\MST3PH28\\Simons - 2020 - A robust high-resolution hydrodynamic numerical mo.pdf:application/pdf}, } @article{solove_taxonomy_2006, title = {A {Taxonomy} of {Privacy}}, volume = {154}, issn = {0041-9907}, url = {https://www.jstor.org/stable/40041279}, doi = {10.2307/40041279}, abstract = {Privacy is a concept in disarray. Nobody can articulate what it means. As one commentator has observed, privacy suffers from "an embarrassment of meanings." Privacy is far too vague a concept to guide adjudication and lawmaking, as abstract incantations of the importance of "privacy" do not fare well when pitted against more concretely stated countervailing interests. In I960, the famous torts scholar William Prosser attempted to make sense of the landscape of privacy law by identifying four different interests. 
But Prosser focused only on tort law, and the law of information privacy is significantly more vast and complex, extending to Fourth Amendment law, the constitutional right to information privacy, evidentiary privileges, dozens of federal privacy statutes, and hundreds of state statutes. Moreover, Prosser wrote over 40 years ago, and new technologies have given rise to a panoply of new privacy harms. A new taxonomy to understand privacy violations is thus sorely needed. This Article develops a taxonomy to identify privacy problems in a comprehensive and concrete manner. It endeavors to guide the law toward a more coherent understanding of privacy and to serve as a framework for the future development of the field of privacy law.}, number = {3}, journal = {University of Pennsylvania Law Review}, author = {Solove, Daniel J.}, year = {2006}, pages = {477--564}, } @article{stiles_research_2011, title = {Research and confidentiality: {Legal} issues and risk management strategies.}, volume = {17}, issn = {1939-1528, 1076-8971}, shorttitle = {Research and confidentiality}, url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/a0022507}, doi = {10.1037/a0022507}, language = {en}, number = {3}, journal = {Psychology, Public Policy, and Law}, author = {Stiles, Paul G. and Petrila, John}, year = {2011}, pages = {333--356}, } @book{noauthor_data_nodate, title = {Data {Preparation} – kostenloses {Datenaufbereitungstool} von {Talend}}, url = {https://www.talend.com/de/products/data-preparation/data-preparation-free-desktop/}, abstract = {Dieses kostenlose Datenaufbereitungstool automatisiert die Datenbereinigung. 
So können Sie ganz einfach bereinigte, brauchbare Daten in nahezu jede Unternehmens- oder Cloud-Anwendung integrieren.}, language = {de-DE}, note = {Publication Title: talend.com}, } @book{noauthor_standard_nodate, title = {Standard procedures, protocols and policies}, url = {https://www.ukdataservice.ac.uk/manage-data/collaboration/coordinate.aspx}, abstract = {Best practice guidance on coordinating data management for a research centre or organisation}, note = {Publication Title: UKDA}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\HU2Q3CXK\\coordinate.html:text/html}, } @book{noauthor_was_nodate, title = {Was versteht der {SNF} unter kollaborativer {Forschung}? - {SNF}}, url = {http://www.snf.ch/de/fokusForschung/faq/Seiten/faq-foerderinstrument-sinergia-reform-kollaborative-forschung.aspx}, note = {Publication Title: SNF}, file = {o) Was versteht der SNF unter kollaborativer Forschung? - SNF:C\:\\Users\\carst\\Zotero\\storage\\7NESRWNJ\\faq-foerderinstrument-sinergia-reform-kollaborative-forschung.html:text/html}, } @book{noauthor_data_nodate-1, title = {Data resources library}, url = {https://www.ukdataservice.ac.uk/manage-data/collaboration/resources-library.aspx}, abstract = {A data management resources library is a one-stop location where a research hub can centralise all relevant data management and sharing resources for researchers and staff in a single location: on an intranet site, website, wiki, shared network drive or within a virtual research environment.}, note = {Publication Title: UKDA}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\LGBZ5KF8\\resources-library.html:text/html}, } @book{noauthor_file_nodate, title = {File sharing}, url = {https://www.ukdataservice.ac.uk/manage-data/collaboration/file-sharing.aspx}, abstract = {Best practice guidance for file sharing}, note = {Publication Title: UKDA}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\X4A8X5TV\\file-sharing.html:text/html}, } @book{warkentin_kollaboratives_2020, 
title = {Kollaboratives {Arbeiten}: {Tipps} für {Teams}}, shorttitle = {Kollaboratives {Arbeiten}}, url = {https://karrierebibel.de/kollaboratives-arbeiten/}, abstract = {Kollaboratives Arbeiten ist nicht nur für international operierende Unternehmen unverzichtbar. Teams können dabei vor einigen Herausforderungen stehen...}, language = {de-DE}, author = {Warkentin, Nils}, month = may, year = {2020}, note = {Publication Title: karrierebibel.de}, } @inproceedings{stahl_data_2016, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Data {Quality} {Scores} for {Pricing} on {Data} {Marketplaces}}, isbn = {978-3-662-49381-6}, doi = {10.1007/978-3-662-49381-6_21}, abstract = {Data and data-related services are increasingly being traded on data marketplaces. However, value attribution of data is still not well-understood, in particular when two competing offers are to be compared. This paper discusses the role data quality can play in this context and suggests a weighted quality score that allows for ‘quality for money’ comparisons of different offerings.}, language = {en}, booktitle = {Intelligent {Information} and {Database} {Systems}}, publisher = {Springer}, author = {Stahl, Florian and Vossen, Gottfried}, editor = {Nguyen, Ngoc Thanh and Trawiński, Bogdan and Fujita, Hamido and Hong, Tzung-Pei}, year = {2016}, pages = {215--224}, } @techreport{stahl_data_2014, type = {{ERCIS} {Working} {Paper}}, title = {The data marketplace survey revisited}, url = {https://econpapers.repec.org/paper/zbwercisw/18.htm}, abstract = {Trading data as a commodity is increasingly popular. To get a better understanding of emerging data marketplaces, we have conducted two surveys to systematically gather and evaluate their characteristics. This paper is a continuation of a survey we conducted in 2012; it describes our findings from a second round done in 2013. 
Our study shows that the market is vivid with numerous exits and changes in its core business. We try to identify trends in this young field and explain them. Notably, there is a definite trend towards high quality data.}, number = {18}, institution = {University of Münster, European Research Center for Information Systems (ERCIS)}, author = {Stahl, Florian and Schomm, Fabian and Vossen, Gottfried}, year = {2014}, file = {Stahl et al. - 2014 - The data marketplace survey revisited.pdf:C\:\\Users\\carst\\Zotero\\storage\\7LR26RWP\\Stahl et al. - 2014 - The data marketplace survey revisited.pdf:application/pdf}, } @inproceedings{muschalle_pricing_2013, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Business} {Information} {Processing}}, title = {Pricing {Approaches} for {Data} {Markets}}, isbn = {978-3-642-39872-8}, doi = {10.1007/978-3-642-39872-8_10}, abstract = {Currently, multiple data vendors utilize the cloud-computing paradigm for trading raw data, associated analytical services, and analytic results as a commodity good. We observe that these vendors often move the functionality of data warehouses to cloud-based platforms. On such platforms, vendors provide services for integrating and analyzing data from public and commercial data sources. 
We present insights from interviews with seven established vendors about their key challenges with regard to pricing strategies in different market situations and derive associated research problems for the business intelligence community.}, language = {en}, booktitle = {Enabling {Real}-{Time} {Business} {Intelligence}}, publisher = {Springer}, author = {Muschalle, Alexander and Stahl, Florian and Löser, Alexander and Vossen, Gottfried}, editor = {Castellanos, Malu and Dayal, Umeshwar and Rundensteiner, Elke A.}, year = {2013}, pages = {129--144}, } @book{meisel_datenmarktplatze_2019, address = {Dortmund}, series = {{ISST}-{Bericht}}, title = {Datenmarktplätze - {Plattformen} für {Datenaustausch} und {Datenmonetarisierung} in der {Data} {Economy}}, language = {de}, publisher = {Fraunhofer Institut für Software- und Systemtechnik}, author = {Meisel, Lukas and Spiekermann, Markus}, editor = {Otto, Boris and Rehof, Jakob}, year = {2019}, file = {Meisel und Spiekermann - 2019 - Datenmarktplätze - Plattformen für Datenaustausch .pdf:C\:\\Users\\carst\\Zotero\\storage\\57G3MJU8\\Meisel und Spiekermann - 2019 - Datenmarktplätze - Plattformen für Datenaustausch .pdf:application/pdf}, } @inproceedings{chen_towards_2019, address = {New York}, series = {{SIGMOD} '19}, title = {Towards {Model}-{Based} {Pricing} for {Machine} {Learning} in a {Data} {Marketplace}}, isbn = {978-1-4503-5643-5}, url = {https://doi.org/10.1145/3299869.3300078}, doi = {10.1145/3299869.3300078}, abstract = {Data analytics using machine learning (ML) has become ubiquitous in science, business intelligence, journalism and many other domains. While a lot of work focuses on reducing the training cost, inference runtime and storage cost of ML models, little work studies how to reduce the cost of data acquisition, which potentially leads to a loss of sellers' revenue and buyers' affordability and efficiency. 
In this paper, we propose a model-based pricing (MBP) framework, which instead of pricing the data, directly prices ML model instances. We first formally describe the desired properties of the MBP framework, with a focus on avoiding arbitrage. Next, we show a concrete realization of the MBP framework via a noise injection approach, which provably satisfies the desired formal properties. Based on the proposed framework, we then provide algorithmic solutions on how the seller can assign prices to models under different market scenarios (such as to maximize revenue). Finally, we conduct extensive experiments, which validate that the MBP framework can provide high revenue to the seller, high affordability to the buyer, and also operate on low runtime cost.}, booktitle = {Proceedings of the 2019 {International} {Conference} on {Management} of {Data}}, publisher = {Association for Computing Machinery}, author = {Chen, Lingjiao and Koutris, Paraschos and Kumar, Arun}, year = {2019}, pages = {1535--1552}, } @inproceedings{attard_data_2016, title = {Data {Value} {Networks}: {Enabling} a {New} {Data} {Ecosystem}}, isbn = {978-1-5090-4470-2}, url = {https://ieeexplore.ieee.org/document/7817090}, doi = {10.1109/WI.2016.0073}, abstract = {With the increasing permeation of data into all dimensions of our information society, data is progressively becoming the basis for many products and services. It is hence becoming more and more vital to identify the means and methods how to exploit the value of this data. In this paper we provide our definition of the Data Value Network, where we specifically cater for non-tangible data products. We also propose a Demand and Supply Distribution Model with the aim of providing insight on how an entity can participate in the global data market by producing a data product, as well as a concrete implementation through the Demand and Supply as a Service. 
Through our contributions we project our vision of generating a new Economic Data Ecosystem that has the Web of Data as its core.},
  booktitle = {2016 {IEEE}/{WIC}/{ACM} {International} {Conference} on {Web} {Intelligence} ({WI})},
  author    = {Attard, Judie and Orlandi, Fabrizio and Auer, Sören},
  publisher = {IEEE},
  year      = {2016},
  pages     = {453--456},
}
@incollection{charalabidis_organizational_2018,
  address    = {Cham},
  series     = {Public {Administration} and {Information} {Technology}},
  title      = {Organizational {Issues}: {How} to {Open} {Up} {Government} {Data}?},
  isbn       = {978-3-319-90850-2},
  shorttitle = {Organizational {Issues}},
  url        = {https://doi.org/10.1007/978-3-319-90850-2_4},
  abstract   = {Governments create and collect enormous amounts of data, for instance concerning voting results, transport, energy, education, and employment. These datasets are often stored in an archive that is not accessible for others than the organization’s employees. To attain benefits such as transparency, engagement, and innovation, many governmental organizations are now also providing public access to this data. However, in opening up their data, these organizations face many issues, including the lack of standard procedures, the threat of privacy violations when releasing data, accidentally releasing policy-sensitive data, the risk of data misuse, challenges regarding the ownership of data and required changes at different organizational layers.
These issues often hinder the easy publication of government data.}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_4}, pages = {57--73}, } @article{dai_open_2018, title = {Open and inclusive collaboration in science: {A} framework}, copyright = {© OECD/OCDE 2018}, shorttitle = {Open and inclusive collaboration in science}, url = {https://www.oecd-ilibrary.org/industry-and-services/open-and-inclusive-collaboration-in-science_2dbff737-en}, doi = {10.1787/2dbff737-en}, abstract = {Digitalisation is fundamentally changing science and the paper lays out some of the opportunities, risks and major policy challenges associated with these changes. More specifically, the paper lays out a conceptual framework for open science. This framework incorporates access to data and information, as well as civil society engagement, in the different stages of the scientific research process. It is not meant to be prescriptive but should help different communities to decide on their own priorities within the open science space and to better visualise how these priorities link to different stage of the scientific process and to different actors. Such a framework can be useful also in considering how best to incentivise and measure different aspects of open science.}, language = {en}, journal = {OECD Science, Technology and Industry Working Papers 2018/07}, author = {Dai, Qian and Shin, Eunjung and Smith, Carthage}, month = mar, year = {2018}, pages = {29}, file = {Dai et al. 
- 2018 - Open and inclusive collaboration in science A fra.pdf:C\:\\Users\\carst\\Zotero\\storage\\ZTNIEET9\\Dai et al. - 2018 - Open and inclusive collaboration in science A fra.pdf:application/pdf}, } @incollection{ghosh_data_2018, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Data {Marketplace} as a {Platform} for {Sharing} {Scientific} {Data}}, isbn = {978-981-10-7515-5}, url = {https://doi.org/10.1007/978-981-10-7515-5_7}, abstract = {Data marketplace is an emerging service model to facilitate data exchange between its producers and consumers. While the service has been motivated by a business model for data and has established itself in the commercial sector over the last few years, it is possible to build a data sharing platform for the scientific community on this model. This article analyzes the motivational and technical challenges for scientific data exchange and proposes use of data marketplace service model to address them.}, language = {en}, booktitle = {Data {Science} {Landscape}: {Towards} {Research} {Standards} and {Protocols}}, publisher = {Springer}, author = {Ghosh, Hiranmay}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5_7}, pages = {99--105}, } @incollection{charalabidis_open_2018, address = {Cham}, series = {Public {Administration} and {Information} {Technology}}, title = {Open {Data} {Evaluation} {Models}: {Theory} and {Practice}}, isbn = {978-3-319-90850-2}, shorttitle = {Open {Data} {Evaluation} {Models}}, url = {https://doi.org/10.1007/978-3-319-90850-2_8}, abstract = {Evaluation of Open Data is a systematic determination of open data merit, worth and significance, using criteria governed by a set of standards (Farbey, Land, \& Targett, 1999). It is an essential procedure trying to ignite a learning and innovation process leading to a more effective data exploitation. 
Examples of questions to be answered by open data evaluation could be: what is the current status of published data against the best practices identified, how effectively these data are published or used, what are the most valuable data for users, what are the problems and barriers discouraging the publication and use of open data and in which extend these barriers affects users’ behaviour towards data usage. The answers on these questions will affect the next developments of an open data portal or initiative and the publication procedure.}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_8}, pages = {137--172}, } @incollection{charalabidis_open_2018-1, address = {Cham}, series = {Public {Administration} and {Information} {Technology}}, title = {Open {Data} {Value} and {Business} {Models}}, isbn = {978-3-319-90850-2}, url = {https://doi.org/10.1007/978-3-319-90850-2_7}, abstract = {The chapter focuses on innovation processes aspiring to generate value through a purposeful and effective exploitation of data released in an open format. On the one hand, such processes represent a great opportunity for private and public organizations while, on the other, they pose a number of challenges having to do with creating the technical, legal and procedural preconditions as well as identifying appropriate business models that may guarantee the long term financial viability of such activities. 
As a matter of fact, while information sharing is widely recognized as a value multiplier, the release of information in an open data format through creative common licenses generates information-based common goods characterized by nonrivalry and nonexcludability in fruition. An aspect posing significant challenges for the pursuit of sustainable competitive advantages.}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_7}, pages = {115--136}, } @book{munshi_data_2018, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Data {Science} {Landscape}}, volume = {38}, isbn = {978-981-10-7514-8 978-981-10-7515-5}, language = {en}, publisher = {Springer Singapore}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5}, } @book{ivanschitz_data_2018, title = {A {Data} {Market} with {Decentralized} {Repositories}}, url = {https://openreview.net/pdf?id=rkgzBg7yeX}, abstract = {In the current era of ever growing data volumes and increased commercialization of data, an interest for data markets is on the rise. When the participants in these markets need access to large amounts of data, as necessary for big data applications, a centralized approach becomes unfeasible. In this paper, we argue for a data market based on decentralized data repositories and outline an implementation approach currently being undertaken by the Data Market Austria project}, author = {Ivanschitz, Bernd-Peter and Lampoltshammer, Thomas J. 
and Mireles, Victor and Revenko, Artem and Schlarb, Sven and Thurnay, Lorinc}, year = {2018}, file = {Ivanschitz et al. - 2018 - A Data Market with Decentralized Repositories.pdf:C\:\\Users\\carst\\Zotero\\storage\\KPBJ2APD\\Ivanschitz et al. - 2018 - A Data Market with Decentralized Repositories.pdf:application/pdf}, } @incollection{munshi_data_2018-1, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Data {Science} {Landscape}: {Tracking} the {Ecosystem}}, isbn = {978-981-10-7515-5}, shorttitle = {Data {Science} {Landscape}}, url = {https://doi.org/10.1007/978-981-10-7515-5_1}, abstract = {The big data phenomenon is continuously evolving, so is its entire ecosystem. In the recent past due to the advancing technologies/resources cropping up on all fronts, we have moved from data deficit to data deluge. The real challenge is in deriving benefits from the data tsunami for public good. Thus, it is imperative to build infrastructure to store and process humongous data. It is equally important to evolve innovative mechanisms for data analytics to draw inferences that can facilitate smart research and good decision making landscape. The paper dwells on some of the core elements of the big data ecosystem and endeavors to present the current scenario by identifying and portraying various initiatives to address big data boom.}, language = {en}, booktitle = {Data {Science} {Landscape}: {Towards} {Research} {Standards} and {Protocols}}, publisher = {Springer}, author = {Munshi, Usha Mujoo}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5_1}, pages = {1--31}, } @book{dunger_forschungsethik_2018, address = {Bern}, edition = {2. 
Auflage}, title = {Forschungsethik: informieren - reflektieren - anwenden}, isbn = {978-3-456-85850-0 978-3-456-95850-7}, shorttitle = {Forschungsethik}, publisher = {Hogrefe}, author = {Dunger, Christine and Schnell, Martin}, year = {2018}, } @incollection{von_unger_forschungsethik_2014, address = {Wiesbaden}, title = {Forschungsethik in der qualitativen {Forschung}: {Grundsätze}, {Debatten} und offene {Fragen}}, isbn = {978-3-658-04289-9}, shorttitle = {Forschungsethik in der qualitativen {Forschung}}, url = {https://doi.org/10.1007/978-3-658-04289-9_2}, abstract = {Forschungsethische Grundsätze für die Sozialwissenschaften wurden unter anderem im Ethik-Kodex der Soziolog*innen formuliert. Einige dieser Grundsätze, wie das Konzept der Objektivität, das Informierte Einverständnis und Verfahren der Anonymisierung werfen in der qualitativen Forschung Fragen auf und stellen die Forschenden vor besondere Herausforderungen. Zwar bietet der hohe Stellenwert der Selbstreflexivität im interpretativen Paradigma grundsätzlich gute Voraussetzungen für ethisch reflektiertes Forschungshandeln, es ist jedoch unklar, ob und wie dieses Potenzial genutzt wird. Bislang werden forschungsethische Fragen in der deutschsprachigen qualitativen Methoden-Debatte nur selten explizit diskutiert. 
Dabei besteht ein Diskussionsbedarf, unter anderem weil bestimmte Grundsätze nicht realisierbar sind und neue Technologien im Zeitalter des Internets neue Fragen aufwerfen.}, language = {de}, booktitle = {Forschungsethik in der qualitativen {Forschung}: {Reflexivität}, {Perspektiven}, {Positionen}}, publisher = {Springer Fachmedien}, author = {von Unger, Hella}, editor = {von Unger, Hella and Narimani, Petra and M´Bayo, Rosaline}, year = {2014}, doi = {10.1007/978-3-658-04289-9_2}, pages = {15--39}, } @book{starck_verantwortung_2005, address = {Tübingen}, title = {Verantwortung der {Wissenschaft}}, isbn = {978-3-16-148812-2}, publisher = {Mohr Siebeck}, editor = {Starck, Christian}, year = {2005}, } @book{mitscherlich_medizin_1960, address = {Frankfurt/M.}, edition = {2}, series = {Fischer-{Bücherei}}, title = {Medizin ohne {Menschlichkeit}: {Dokumente} des {Nürnberger} Ärzteprozesses}, shorttitle = {Medizin ohne {Menschlichkeit}}, number = {332}, publisher = {Fischer}, author = {Mitscherlich, Alexander and Mielke, Fred}, year = {1960}, } @incollection{nida-rumelin_wissenschaftsethik_1996, address = {Stuttgart}, title = {Wissenschaftsethik}, isbn = {978-3-520-43701-3}, booktitle = {Angewandte {Ethik}: die {Bereichsethiken} und ihre theoretische {Fundierung} ; ein {Handbuch}}, publisher = {Kröner}, author = {Nida-Rümelin, Julian}, editor = {Nida-Rümelin, Julian}, year = {1996}, pages = {778--805}, } @book{viebrock_forschungsethik_2015, address = {Frankfurt am Main}, series = {Kolloquium {Fremdsprachenunterricht}}, title = {Forschungsethik in der {Fremdsprachenforschung}: eine systematische {Betrachtung}}, isbn = {978-3-631-66895-5 3-631-66895-3}, language = {de}, number = {Band 53}, publisher = {Peter Lang Edition}, author = {Viebrock, Britta}, year = {2015}, } @incollection{walger_20_2019, address = {Bad Honnef}, title = {20 {Jahre} {Regeln} zur {Sicherung} der guten wissenschaftlichen {Praxis}. 
{Die} {Rolle} von {Hochschulbibliotheken} beim {Streben} nach {Wahrheit} und {Objektivität}}, isbn = {978-3-88347-311-6}, url = {https://www.th-koeln.de/mam/downloads/deutsch/studium/studiengaenge/f03/bib_inf_ma/festschrift_osswald.pdf}, booktitle = {Bibliotheksentwicklung im {Netzwerk} von {Menschen}, {Informationstechnologie} und {Nachhaltigkeit}: {Festschrift} für {Achim} {Oßwald}}, publisher = {Bock Herchen Verlag}, author = {Walger, Nadine and Walger, Nicole}, editor = {Fühles-Ubach, Simone and Georgy, Ursula}, year = {2019}, pages = {87--102}, file = {Walger und Walger - 2019 - 20 Jahre Regeln zur Sicherung der guten wissenscha.pdf:C\:\\Users\\carst\\Zotero\\storage\\46SINK4A\\Walger und Walger - 2019 - 20 Jahre Regeln zur Sicherung der guten wissenscha.pdf:application/pdf}, } @techreport{barbour_discussion_2016, type = {Discussion}, title = {Discussion document on {Best} {Practice} for {Consent} for {Publishing} {Medical} {Case} {Reports}}, copyright = {Creative Commons Attribution-NonCommercial-NoDerivs}, url = {https://publicationethics.org/files/u7141/Discussion_document_on_Best_Practice_for_Consent_for_Publishing_Medical_Case_Reports%20%283%29.pdf}, language = {en}, institution = {COPE Council}, author = {Barbour, Virginia}, month = feb, year = {2016}, pages = {[2]}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\67DZRIXL\\_.pdf:application/pdf}, } @book{boker_gute_2020, title = {Gute wissenschaftliche {Praxis} und {FDM}. {Ein} Überblick über die {DFG} {Leitlinien}}, copyright = {CC BY 4.0}, url = {https://www.forschungsdaten.info/themen/ethik-und-gute-wissenschaftliche-praxis/gute-wissenschaftliche-praxis-und-fdm/}, abstract = {Die Basis einer vertrauenswürdigen Wissenschaft bildet die wissenschaftliche Integrität. Daher tragen Forscherinnen und Forscher die Verantwortung, ihre Tätigkeit nach den Regeln der guten wissenschaftlichen Praxis auszuführen. 
Den Umgang mit Forschungsdaten hat die DFG in ihre Leitlinien zur Sicherung guter wissenschaftlicher Praxis aufgenommen. Ein Überblick.}, language = {de}, author = {Böker, Elisabeth}, month = jun, year = {2020}, note = {Publication Title: https://www.forschungsdaten.info}, } @book{borgwardt_wissenschaft_2014, address = {Berlin}, edition = {1}, series = {Schriftenreihe des {Netzwerk} {Exzellenz} an {Deutschen} {Hochschulen}. - {Berlin} : {Friedrich}-{Ebert}-{Stiftung}, {Abt}. {Studienförderung}, 2007-}, title = {Wissenschaft auf {Abwegen}?: {Zum} drohenden {Qualitätsverlust} in der {Wissenschaft}}, isbn = {978-3-95861-006-4}, shorttitle = {Wissenschaft auf {Abwegen}?}, url = {https://library.fes.de/pdf-files/studienfoerderung/11071.pdf}, number = {9}, author = {Borgwardt, Angela}, year = {2014}, file = {Borgwardt - 2014 - Wissenschaft auf Abwegen Zum drohenden Qualitäts.pdf:C\:\\Users\\carst\\Zotero\\storage\\M8BJBJUF\\Borgwardt - 2014 - Wissenschaft auf Abwegen Zum drohenden Qualitäts.pdf:application/pdf}, } @article{noauthor_muster-berufsordnung_2019, title = {({Muster}-){Berufsordnung} für die in {Deutschland} tätigen Ärztinnen und Ärzte}, volume = {1}, doi = {10.3238/arztebl.2019.mbo_daet2018}, language = {de}, number = {Februar}, journal = {Deutsches Ärzteblatt}, year = {2019}, pages = {A1--A9}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\C2FEK6CH\\_.pdf:application/pdf}, } @techreport{allea_-_all_european_academie_europaischer_2018, address = {Berlin}, title = {Europäischer {Verhaltenskodex} für {Integrität} in der {Forschung}: Überarbeitete {Fassung}}, copyright = {©ALLEA - All European Academies, Berlin 2018}, url = {https://allea.org/allea-european-code-of-conduct-for-research-integrity-2017-digital_de_final/}, language = {de}, institution = {Berlin-Brandenburgische Akademie der Wissenschaften}, author = {{ALLEA - All European Academie}}, month = jun, year = {2018}, pages = {17}, file = {2018 - Europäischer Verhaltenskodex für Integrität in 
der.pdf:C\:\\Users\\carst\\Zotero\\storage\\9YGLHLVW\\2018 - Europäischer Verhaltenskodex für Integrität in der.pdf:application/pdf}, } @book{noauthor_arbeitskreis_2019, title = {Arbeitskreis medizinischer {Ethik}-{Kommissionen} - {Mitglieder}}, url = {https://www.ak-med-ethik-komm.de/index.php?option=com_content&view=category&layout=blog&id=13&Itemid=103&lang=de}, abstract = {Dem 1983 gegrün­dete „Arbeit­skreis Medi­zinis­cher Ethik-​Kommissionen in der Bun­desre­pub­lik Deutsch­land e.V. “ gehören 52 nach Lan­desrecht gebilde­ten Ethik-​Kommissionen als Mit­glieder an.}, year = {2019}, file = {Arbeitskreis medizinischer Ethik-Kommissionen - Mitglieder:C\:\\Users\\carst\\Zotero\\storage\\5267XWQK\\index.html:text/html}, } @article{buchner_aufgaben_2019, title = {Aufgaben, {Regularien} und {Arbeitsweise} von {Ethikkommissionen}}, volume = {62}, abstract = {Die Freiheit der Wissenschaft ist verfassungsrechtlich umfassend geschützt, jeder Eingriff bedarf der Rechtfertigung. Der Einbindung von Ethikkommissionen in den Forschungsprozess liegt eine umfassende Abwägung der Wissenschaftsfreiheit mit entgegenstehenden Rechtsgütern wie Leben, Gesundheit und Selbstbestimmung von Studienteilnehmern zugrunde. Dem wird hier in zwei Bereichen nachgegangen. Zunächst werden klinische Prüfungen im Arzneimittelmittel- und Medizinprodukterecht, d. h. in Regelungsbereichen erörtert, in denen Aufgaben und Befugnisse der Kommissionen in den Einzelheiten normativ festgelegt sind. Aufgrund der europäischen Gesetzgebung werden die Bestimmungen jetzt grundlegend verändert. Sodann werden Ethikkommissionen zur Befassung mit sogenannten freien Studien im universitären Forschungsbereich behandelt, die nicht unter die Regelungen des Arzneimittel- und Medizinproduktegesetzes fallen und deren Tätigkeit stattdessen im Wesentlichen auf der universitären Selbstverwaltung beruht. 
Am Beispiel der Statistik wird dann aufgezeigt, wie eine notwendige und legitime Beurteilung wissenschaftlicher Projekte von einer pauschalen „Ethisierung“ derselben abzugrenzen ist. Schließlich wird die Thematik aus einer philosophischen Perspektive beleuchtet.}, number = {6}, journal = {Bundesgesundheitsblatt}, author = {Buchner, Benedikt and Hase, Friedhelm and Borchers, Dagmar and Pigeot, Iris}, year = {2019}, pages = {690--696}, } @book{noauthor_core_nodate, title = {Core practices}, url = {https://publicationethics.org/core-practices}, abstract = {The Core Practices were developed in 2017, replacing the Code of Conduct. They are applicable to all involved in publishing scholarly literature: editors and their journals, publishers, and institutions. The Core Practices should be considered alongside specific national and international codes of conduct for research and are not intended to replace these.}, language = {en}, note = {Publication Title: https://publicationethics.org}, file = {Core practices | COPE\: Committee on Publication Ethics:C\:\\Users\\carst\\Zotero\\storage\\23BXV43T\\core-practices.html:text/html}, } @techreport{committee_cses_2018, address = {Wheat Ridge}, type = {White {Paper}}, title = {{CSE}’s {White} {Paper} on {Promoting} {Integrity} in {Scientific} {Journal} {Publications}}, copyright = {© 2018 Council of Science Editors}, url = {https://druwt19tzv6d76es3lg0qdo7-wpengine.netdna-ssl.com/wp-content/uploads/CSE-White-Paper_2018-update-050618.pdf}, language = {en}, author = {{Editorial Policy Committee}}, year = {2018}, pages = {81}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\2CE5G5CD\\_.pdf:application/pdf}, } @techreport{noauthor_wissenschaftsfreiheit_2014, title = {Wissenschaftsfreiheit und {Wissenschaftsverantwortung}: {Empfehlungen} zum {Umgang} mit sicherheitsrelevanter {Forschung}}, url = {https://www.dfg.de/download/pdf/dfg_im_profil/reden_stellungnahmen/2014/dfg-leopoldina_forschungsrisiken_de_en.pdf}, language = {de ; en}, 
institution = {Deutsche Forschungsgemeinschaft}, month = may, year = {2014}, pages = {18}, file = {2014 - Wissenschaftsfreiheit und Wissenschaftsverantwortu.pdf:C\:\\Users\\carst\\Zotero\\storage\\DI6A3JQY\\2014 - Wissenschaftsfreiheit und Wissenschaftsverantwortu.pdf:application/pdf}, } @book{noauthor_deutscher_nodate, title = {Deutscher {Ethikrat}}, url = {https://www.ethikrat.org/}, language = {de}, note = {Publication Title: https://www.ethikrat.org/}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\NS3GL9QX\\www.ethikrat.org.html:text/html}, } @book{noauthor_guidelines_nodate, title = {Guidelines \& {Toolkits}}, url = {https://ease.org.uk/guidelines-toolkits/}, abstract = {EASE has created several guidelines which we encourage members and non-members to adopt. We have also compiled toolkits which are links to online resources that may be used as reference and training guides. We are keen to develop further guidelines and toolkits, so please do email us to make suggestions for improving our current guidelines and toolkits, and for new topics which we can develop.}, language = {en}, } @book{noauthor_elearning_nodate, title = {{eLearning}}, url = {https://publicationethics.org/resources/e-learning}, abstract = {This course, in a series of ten modules, is designed for editors and publishers who want to improve their understanding on publication ethics and to provide the tools and knowledge needed to address the many issues they face when editing a journal.}, language = {en}, note = {Publication Title: https://publicationethics.org}, file = {eLearning | COPE\: Committee on Publication Ethics:C\:\\Users\\carst\\Zotero\\storage\\2RU9QXEI\\e-learning.html:text/html}, } @techreport{noauthor_guidelines_2019, title = {Guidelines: {Retraction} {Guidelines}}, copyright = {© 2019 Committee on Publication Ethics}, url = {https://publicationethics.org/files/retraction-guidelines.pdf}, language = {en}, number = {Version 2}, institution = {COPE Council}, year = {2019}, 
pages = {10}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\IIIBTVPA\\_.pdf:application/pdf}, } @book{noauthor_datenschutz-grundverordnung_nodate-1, title = {Datenschutz-{Grundverordnung}: {DSGVO}}, shorttitle = {Datenschutz-{Grundverordnung}}, url = {https://dsgvo-gesetz.de/}, abstract = {Willkommen auf dsgvo-gesetz.de. Hier finden Sie das offizielle PDF der Verordnung (EU) 2016/679 (Datenschutz-Grundverordnung) in der aktuellen Version des ABl. L 119, 04.05.2016; ber. ABl. L 127, 23.05.2018 übersichtlich aufbereitet. Alle Artikel sind mit den passenden Erwägungsgründen und dem BDSG (neu) 2018 verknüpft. Die EU-DSGVO und das BDSG (neu) sind seit dem 25. Mai 2018 anwendbar. Den Text der EU-Datenschutz-Grundverordnung gibt es auf Deutsch sowie auf Englisch.}, language = {de-DE}, note = {Publication Title: Datenschutz-Grundverordnung (DSGVO)}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\MTD5VHJI\\dsgvo-gesetz.de.html:text/html}, } @techreport{forschungsgemeinschaft_replizierbarkeit_2017, type = {Stellungnahme}, title = {Replizierbarkeit von {Forschungsergebnissen}: {Eine} {Stellungnahme} der {Deutschen} {Forschungsgemeinschaft}}, url = {https://www.dfg.de/download/pdf/dfg_im_profil/reden_stellungnahmen/2017/170425_stellungnahme_replizierbarkeit_forschungsergebnisse_de.pdf}, language = {de}, institution = {Deutsche Forschungsgemeinschaft}, author = {Forschungsgemeinschaft, Deutsche}, year = {2017}, pages = {5}, file = {2017 - Replizierbarkeit von Forschungsergebnissen Eine S.pdf:C\:\\Users\\carst\\Zotero\\storage\\DN3PNH2B\\2017 - Replizierbarkeit von Forschungsergebnissen Eine S.pdf:application/pdf}, } @incollection{elger_wissenschaftliche_2014, address = {Würzburg}, title = {Wissenschaftliche {Integrität}: {Umgang} mit {Daten} und {Publikationsethik}}, booktitle = {Forschungsethik}, publisher = {Königshausen \& Neumann.}, author = {Elger, Bernice and Engel-Glatter, Sabrina}, editor = {Demko, Daniela and Brudermüller, Gerd}, year = {2014}, } 
@techreport{noauthor_gesetz_2019, title = {Gesetz über {Medizinprodukte}. ({Medizinproduktegesetz} – {MPG})}, url = {https://www.gesetze-im-internet.de/mpg/MPG.pdf}, language = {de}, year = {2019}, pages = {32}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\MTPCAAVJ\\_.pdf:application/pdf}, } @book{noauthor_opendoar_nodate, title = {{OpenDOAR} {Policy} {Tool}}, url = {https://v2.sherpa.ac.uk/opendoar/policytool/}, language = {en}, file = {Policy Support - v2.sherpa:C\:\\Users\\carst\\Zotero\\storage\\WTSCK5XQ\\policytool.html:text/html}, } @techreport{ebel_hinweise_2015, address = {Frankfurt am Main}, title = {Hinweise zur {Anonymisierung} von quantitativen {Daten}}, copyright = {© FDZ Bildung am DIPF}, url = {https://www.forschungsdaten-bildung.de/files/fdb-informiert-nr-3.pdf}, language = {de}, number = {Nr. 3 (2015) ; Version 1.0}, institution = {Forschungsdatenzentrum (FDZ) Bildung am DIPFDeutsches Institut für Internationale Pädagogische Forschung}, author = {Ebel, Thomas and Meyermann, Alexia and Forschung, Forschungsdatenzentrum (FDZ) Bildung am DIPFDeutsches Institut für Internationale Pädagogische}, year = {2015}, pages = {11}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\EYU7ENCJ\\_.pdf:application/pdf}, } @techreport{porzelt_hinweise_2014, address = {Frankfurt am Main}, title = {Hinweise zur {Anonymisierung} von quantitativen {Daten}}, copyright = {© FDZ Bildung am DIPF}, url = {https://www.forschungsdaten-bildung.de/files/fdb-informiert-nr-1.pdf}, language = {de}, number = {Nr. 
1 (2014) ; Version 1.0}, institution = {Forschungsdatenzentrum (FDZ) Bildung am DIPFDeutsches Institut für Internationale Pädagogische Forschung}, author = {Porzelt, Maike and Meyermann, Alexia and Forschung, Forschungsdatenzentrum (FDZ) Bildung am DIPFDeutsches Institut für Internationale Pädagogische}, year = {2014}, pages = {17}, file = {Ebel und Meyermann - 2015 - Hinweise zur Anonymisierung von quantitativen Date.pdf:C\:\\Users\\carst\\Zotero\\storage\\KFCYRM9W\\Ebel und Meyermann - 2015 - Hinweise zur Anonymisierung von quantitativen Date.pdf:application/pdf}, } @techreport{noauthor_gesetz_2020, title = {Gesetz über den {Verkehr} mit {Arzneimitteln}. ({Arzneimittelgesetz} – {AMG})}, url = {https://www.gesetze-im-internet.de/amg_1976/AMG.pdf}, language = {de}, year = {2020}, pages = {155}, file = {Gesetz über den Verkehr mit Arzneimitteln. (Arznei.pdf:C\:\\Users\\carst\\Zotero\\storage\\FTTCZVYY\\Gesetz über den Verkehr mit Arzneimitteln. (Arznei.pdf:application/pdf}, } @book{hufen_braucht_2017, title = {Braucht {Forschung} {Aufpasser}?}, url = {https://www.forschung-und-lehre.de/recht/braucht-forschung-aufpasser-153}, abstract = {Der Ruf nach Kontrolle der Wissenschaft wird lauter. 
Das wirft gravierende verfassungsrechtliche Fragen auf.}, language = {de}, author = {Hufen, Friedhelm}, year = {2017}, note = {Publication Title: https://www.forschung-und-lehre.de}, } @incollection{hopf_forschungsethik_2016, address = {Wiesbaden}, title = {Forschungsethik und qualitative {Forschung}}, isbn = {978-3-658-11482-4}, url = {https://doi.org/10.1007/978-3-658-11482-4_9}, abstract = {Unter dem Stichwort „Forschungsethik“ werden in den Sozialwissenschaften im Allgemeinen all jene ethischen Prinzipien und Regeln zusammengefasst, in denen mehr oder minder verbindlich und mehr oder minder konsensuell bestimmt wird, in welcher Weise die Beziehungen zwischen den Forschenden auf der einen Seite und den in sozialwissenschaftliche Untersuchungen einbezogenen Personen auf der anderen Seite zu gestalten sind. Typische, auch in der qualitativen Sozialforschung immer wieder gestellte Fragen sind dabei unter anderem: die Frage nach der Freiwilligkeit der Teilnahme an Untersuchungen, die Frage nach der Absicherung von Anonymitäts- und Vertraulichkeitszusagen, die Frage nach der Vermeidung von Schädigungen derer, die in Untersuchungen einbezogen werden, oder auch die Frage nach der Zulässigkeit verdeckter Formen der Beobachtung.}, language = {de}, booktitle = {Schriften zu {Methodologie} und {Methoden} qualitativer {Sozialforschung}}, publisher = {Springer Fachmedien}, author = {Hopf, Christel}, editor = {Hopf, Wulf and Kuckartz, Udo}, year = {2016}, doi = {10.1007/978-3-658-11482-4_9}, pages = {195--206}, } @article{kaminsky_nicht_2012, title = {Nicht noch ein {Fach}⁉ {Forschungsethik} im {Studium} der {Sozialen} {Arbeit}}, volume = {17}, url = {http://dokumentix.ub.uni-siegen.de/opus/volltexte/2018/1231/pdf/Kaminsky_Mayerle_Nicht_noch_ein_Fach.pdf}, number = {2}, journal = {Siegen: Sozial. 
Analysen, Berichte, Kontroversen}, author = {Kaminsky, Carmen and Mayerle, Michael}, year = {2012}, pages = {20--25}, } @article{lucas_wissen_1999, title = {Wissen ist {Gemeinbesitz}: Über die ethischen {Grundlagen} von {Wissenschaft} und {Forschung}}, volume = {23}, url = {http://www.deutsches-museum.de/fileadmin/Content/data/Insel/Information/KT/heftarchiv/1999/23-4-46.pdf}, number = {4}, journal = {Kultur \& Technik}, author = {Lucas, Andrea and Nida-Rümelin, Julian}, year = {1999}, pages = {46--49}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\MTK3SUHZ\\_.pdf:application/pdf}, } @article{wirtschaftsdaten_ratswd_forschungsethische_2017, title = {Forschungsethische {Grundsätze} und {Prüfverfahren} in den {Sozial}- und {Wirtschaftswissenschaften}}, volume = {9}, url = {https://www.ratswd.de/publikation/output-series/2323}, doi = {10.17620/02671.1}, language = {de}, number = {5}, journal = {RatSWD Output Series}, author = {{Rat für Sozial- und Wirtschaftsdaten (RatSWD)}}, year = {2017}, pages = {53}, file = {Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2017 - Forschungsethische Grundsätze und Prüfverfahren in.pdf:C\:\\Users\\carst\\Zotero\\storage\\829589HV\\Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2017 - Forschungsethische Grundsätze und Prüfverfahren in.pdf:application/pdf}, } @book{noauthor_ubersicht_2019, title = {Übersicht sozial- und wirtschaftswissenschaftlicher {Ethikkommissionen}}, url = {https://www.ratswd.de/themen/forschungsethik/kommissionen}, abstract = {Eine Übersicht über lokale Ethikkommissionen mit Zuständigkeit für die Psychologie findet sich auf der Webseite der Deutschen Gesellschaft für Psychologie (DGPs). 
Eine Übersicht der Ansprechpartner und Kommissionen in Deutschland, die für Ethik sicherheits-relevanter Forschung zuständig sind, findet sich auf der Website des Gemeinsamen Ausschusses zum Umgang mit sicherheitsrelevanter Forschung von DFG und Leopoldina.}, language = {de}, year = {2019}, note = {Publication Title: https://www.ratswd.de}, file = {Übersicht sozial- und wirtschaftswissenschaftlicher Ethikkommissionen | RatSWD ‒ Rat für Sozial- und Wirtschaftsdaten:C\:\\Users\\carst\\Zotero\\storage\\SKGXMEX8\\kommissionen.html:text/html}, } @techreport{von_unger_ethikkommissionen_2016, address = {Berlin}, type = {Working {Paper}}, title = {Ethikkommissionen in den {Sozialwissenschaften} ‒ {Historische} {Entwicklungen} und internationale {Kontroversen}}, url = {https://www.ratswd.de/dl/RatSWD_WP_253.pdf}, language = {de}, number = {253}, institution = {RatSWD}, author = {von Unger, Hella and Simon, Dagmar and RatSWD, Vorsitzender des}, year = {2016}, doi = {10.17620/02671.21}, pages = {17}, file = {von Unger und Simon - 2016 - Ethikkommissionen in den Sozialwissenschaften ‒ Hi.pdf:C\:\\Users\\carst\\Zotero\\storage\\X8Q5DBZN\\von Unger und Simon - 2016 - Ethikkommissionen in den Sozialwissenschaften ‒ Hi.pdf:application/pdf}, } @techreport{noauthor_stellungnahme_2015, type = {Stellungnahme}, title = {Stellungnahme des {RatSWD} zur {Archivierung} und {Sekundärnutzung} von {Daten} der qualitativen {Sozialforschung}}, url = {https://www.ratswd.de/dl/RatSWD_Stellungnahme_QualiDaten.pdf}, abstract = {Der RatSWD will optimale Bedingungen für die Genese von und den Zugang der Wissenschaft zu Forschungsdaten herstellen. Er befürwortet grundsätzlich die Archivierung und die Bereitstellung qualitativer Daten für Sekundäranalysen. Der RatSWD erkennt an, dass die Frage der Möglichkeit, der Angemessenheit und des wissenschaftlichen Nutzens von Sekundäranalysen nach Materialart und For-schungsmethoden differenziert zu beantworten ist. 
Er spricht sich dafür aus, auch im Bereich der qualitativen Sozialforschung grundsätzlich eine Kultur der Daten-bereitstellung zu fördern. Hier sind Forschende typischerweise aktiv in die Pro-duktion von Datenmaterial eingebunden, so dass die Frage der Datenweitergabe die Frage des geistigen Eigentums berührt. Es gilt, Archivierungsverfahren bereit zu stellen, die versprechen, den Wert des Datenmaterials zu erhalten, ohne den Datenschutz zu gefährden. Regelungen zur Archivierung wie zur Sekundärnut-zung dürfen den Feldzugang für Primärforschende nicht einschränken. Der RatSWD befürwortet die Archivierung qualitativer Daten und ihre Bereitstellung fürSekundäranalysen soweit dies ohne nachvollziehbare Gefährdung der pri-mären Forschungsziele möglich ist. Bei Drittmittelprojekten soll die Vorlage eines Datenmanagementplans bei der Beantragung obligatorisch gemacht werden. Die Entscheidung über die Eignung von Daten für eine Sekundärnutzung darf keinen Einfluss auf die Genehmigung beantragter Projekte haben.}, institution = {RatSWD}, year = {2015}, pages = {8}, } @techreport{kamper_risiken_2016, address = {Berlin}, type = {Working {Paper}}, title = {Risiken sozialwissenschaftlicher {Forschung}? 
{Forschungsethik}, {Datenschutz} und {Schutz} von {Persönlichkeitsrechten} in den {Sozial}- und {Verhaltenswissenschaften}}, url = {https://www.ratswd.de/dl/RatSWD_WP_255.pdf}, language = {de}, number = {255}, institution = {RatSWD}, author = {Kämper, Eckard and RatSWD, Vorsitzender des}, year = {2016}, doi = {10.17620/02671.21}, pages = {8}, file = {Kämper - 2016 - Risiken sozialwissenschaftlicher Forschung Forsch.pdf:C\:\\Users\\carst\\Zotero\\storage\\6IVBZGB8\\Kämper - 2016 - Risiken sozialwissenschaftlicher Forschung Forsch.pdf:application/pdf}, } @techreport{wagner_anmerkungen_2017, address = {Berlin}, type = {Working {Paper}}, title = {Anmerkungen zu den vielfältigen {Dimensionen} einer {Forschungsethik} in den {Sozial}-, {Verhaltens}- und {Wirtschaftswissenschaften}}, url = {https://www.ratswd.de/dl/RatSWD_WP_265.pdf}, language = {de}, number = {265}, institution = {RatSWD}, author = {Wagner, Gert G. and RatSWD, Vorsitzender des}, year = {2017}, doi = {10.17620/02671.21}, pages = {7}, file = {Wagner - 2017 - Anmerkungen zu den vielfältigen Dimensionen einer .pdf:C\:\\Users\\carst\\Zotero\\storage\\3L4RAF4M\\Wagner - 2017 - Anmerkungen zu den vielfältigen Dimensionen einer .pdf:application/pdf}, } @book{wiley_best_2014, edition = {second Edition}, title = {Best {Practice} {Guidelines} on {Publishing} {Ethics}: {A} {Publisher}’s {Perspective}}, copyright = {© 2014 John Wiley \& Sons, Ltd. 
CC BY-NC 4.0}, url = {https://authorservices.wiley.com/asset/photos/Ethics_Guidelines_26.04.17.pdf}, language = {en}, author = {{Wiley}}, year = {2014}, file = {Wiley - 2014 - Best Practice Guidelines on Publishing Ethics A P.pdf:C\:\\Users\\carst\\Zotero\\storage\\L4KCNMWF\\Wiley - 2014 - Best Practice Guidelines on Publishing Ethics A P.pdf:application/pdf}, } @article{ostendorff_best-practices_2019, title = {Best-{Practices} im {Umgang} mit rechtlichen {Fragestellungen} zum {Forschungsdatenmanagement} ({FDM})}, volume = {53}, issn = {0006-1972, 2194-9646}, url = {https://www.degruyter.com/view/journals/bd/53/10-11/article-p717.xml}, doi = {10.1515/bd-2019-0098}, abstract = {Der Beitrag stellt die Ergebnisse mehrerer Interviews zur Organisation rechtlicher Aspekte des FDM dar. Die Interviews ergaben das Ergebnis, dass rechtliche Fragestellungen zum FDM wichtiger Bestandteil der Beratungsangebote sind. Als Organisationsstruktur bietet sich das Modell des First-Level-Supports an. Dadurch lässt sich die bibliothekarische mit der technischen und rechtlichen Expertise verbinden. Anders als in der bisherigen Praxis sollte die Hochschule jedoch mit klaren Arbeitsanweisungen oder Tätigkeitsbeschreibungen für die Beratungsleistungen ihrer Mitarbeiter einstehen.}, language = {de}, number = {10-11}, journal = {Bibliotheksdienst}, author = {Ostendorff, Philipp and Linke, David}, month = oct, year = {2019}, pages = {717--723}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\H6CNXNZL\\article-p717.html:text/html}, } @book{kreutzer_rechtsfragen_2019, address = {Hamburg}, title = {Rechtsfragen bei {Open} {Science} ein {Leitfaden}}, copyright = {CC BY}, isbn = {978-3-943423-66-2}, url = {https://hup.sub.uni-hamburg.de/volltexte/2019/195/pdf/HamburgUP_KreutzerLahmann_Rechtsfragen.pdf}, abstract = {Die Digitalisierung ermöglicht eine offene Wissenschaft (Open Science). 
Diese hat viele Aspekte, insbesondere den freien Zugang zu wissenschaftlichen Veröffentlichungen und Materialien (Open Access), transparente Begutachtungsverfahren (Open Peer Review) oder quelloffene Technologien (Open Source). Das Programm Hamburg Open Science will offene Wissenschaft am Forschungsstandort Hamburg unterstützen. Der in diesem Kontext entstandene Leitfaden soll das rechtliche Umfeld greifbar machen. Der Leitfaden erarbeitet die betroffenen Rechtsgebiete zunächst systematisch. Im zweiten Teil werden rechtliche Fragen zu Open Science beantwortet, die direkt aus den Universitäten und Bibliotheken kommen. So gelingt eine praxisnahe Aufbereitung und Vermittlung. Autoren des Leitfadens sind Dr. Till Kreutzer, Partner der Kanzlei iRights.Law, und deren wissenschaftlicher Mitarbeiter Henning Lahmann.}, language = {de}, publisher = {Hamburg University Press}, author = {Kreutzer, Till and Lahmann, Henning Christian}, year = {2019}, doi = {10.15460/HUP.195}, file = {Kreutzer und Lahmann - 2019 - Rechtsfragen bei Open Science ein Leitfaden.pdf:C\:\\Users\\carst\\Zotero\\storage\\3GEB9DKS\\Kreutzer und Lahmann - 2019 - Rechtsfragen bei Open Science ein Leitfaden.pdf:application/pdf}, } @article{winter_herausforderungen_2019, title = {Herausforderungen für die {Anonymisierung} von {Daten} – {Technische} {Defizite}, konzeptuelle {Lücken} und rechtliche {Fragen} bei der {Anonymisierung} von {Daten}}, volume = {9}, number = {11}, journal = {Zeitschrift für Datenschutz}, author = {Winter, Christian and Battis, Verena and Halvani, Oren}, year = {2019}, pages = {489--493}, } @incollection{watteler_datenschutz_2019, address = {Opladen, Berlin, Toronto}, title = {Datenschutz im {Forschungsdatenmanagement}}, booktitle = {Forschungsdatenmanagement sozialwissenschaftlicher {Umfragedaten}: {Grundlagen} und praktische {Lösungen} für den {Umgang} mit quantitativen {Forschungsdaten}}, publisher = {Barbara Budrich}, author = {Watteler, Oliver and Ebel, Thomas}, editor = {Jensen, 
Uwe and Weller, Katrin and Netscher, Sebastian}, year = {2019}, pages = {57--80}, } @article{noauthor_verwaltungsgericht_nodate, title = {Verwaltungsgericht {Freiburg}. 1983. {Urteil} vom 02.02.1983, {Az}. 1 {K} 153/81}, volume = {5}, number = {8}, journal = {Verwaltungsblätter für Baden-Württemberg}, pages = {286--289}, } @article{schwartmann_verantwortlichkeit_2020, title = {Die {Verantwortlichkeit} für die {Verarbeitung} von {Forschungsdaten} an {Hochschulen}}, issn = {2197-9197}, url = {http://ordnungderwissenschaft.de/wp-content/uploads/2020/03/02_Schwartmann.pdf}, number = {2}, journal = {Ordnung der Wissenschaft}, author = {Schwartmann, Rolf}, year = {2020}, pages = {77--84}, file = {Schwartmann - 2020 - Die Verantwortlichkeit für die Verarbeitung von Fo.pdf:C\:\\Users\\carst\\Zotero\\storage\\2A6NDRA3\\Schwartmann - 2020 - Die Verantwortlichkeit für die Verarbeitung von Fo.pdf:application/pdf}, } @article{rosnagel_datenschutz_2019, title = {Datenschutz in der {Forschung}: {Die} neuen {Datenschutzregelungen} in der {Forschungspraxis} von {Hochschulen}}, volume = {9}, number = {4}, journal = {Zeitschrift für Datenschutz}, author = {Roßnagel, Alexander}, year = {2019}, pages = {157--164}, } @book{wirtschaftsdaten_ratswd_handreichung_2017, address = {Berlin}, title = {Handreichung {Datenschutz}}, url = {https://www.ratswd.de/publikation/output-series/2118}, language = {de}, editor = {{Rat für Sozial- und Wirtschaftsdaten (RatSWD)}}, year = {2017}, doi = {10.17620/02671.6}, file = {Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2017 - Handreichung Datenschutz.pdf:C\:\\Users\\carst\\Zotero\\storage\\QALTY8FV\\Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2017 - Handreichung Datenschutz.pdf:application/pdf}, } @article{rosnagel_pseudonymisierung_2018, title = {Pseudonymisierung personenbezogener {Daten}: {Ein} zentrales {Instrument} im {Datenschutz} nach der {DS}-{GVO}}, volume = {8}, number = {6}, journal = {Zeitschrift für
Datenschutz}, author = {Roßnagel, Alexander}, year = {2018}, pages = {243--247}, } @incollection{paal_datenschutz-grundverordnung_nodate, address = {München}, edition = {2. Auflage}, title = {Datenschutz-{Grundverordnung}, {Bundesdatenschutzgesetz}}, booktitle = {Beck’sche {Kompakt}-{Kommentare}}, publisher = {C.H. Beck}, author = {Paal, Boris and Pauly, Daniel A.}, } @book{noauthor_olg_nodate, title = {{OLG} {Dresden}. 2018. {Teilurteil} vom 21.08.2018, {Az}. 14 {U} 1570/16 (nicht rechtskräftig und unveröffentlicht)}, } @techreport{lauberronsberg_gutachtenzudenrechtlichenrahmenbedingungendesforschungsdatenmanagements_2018, title = {Gutachten zu den rechtlichen {Rahmenbedingungen} des {Forschungsdatenmanagements}}, copyright = {CC BY-SA 4.0}, url = {https://tu-dresden.de/gsw/jura/igetem/jfbimd13/ressourcen/dateien/publikationen/DataJus_Zusammenfassung_Gutachten_12-07-18.pdf?lang=de}, language = {de}, author = {Lauber-Rönsberg, Anne and Krahn, Philipp and Baumann, Paul}, month = jul, year = {2018}, pages = {20}, file = {Lauber‐Rönsberg et al. - 2018 - GutachtenzudenrechtlichenRahmenbedingungendesForsc.pdf:C\:\\Users\\carst\\Zotero\\storage\\JDJ5ZSBS\\Lauber‐Rönsberg et al. - 2018 - GutachtenzudenrechtlichenRahmenbedingungendesForsc.pdf:application/pdf}, } @incollection{noauthor_lag_nodate, title = {{LAG} ({Landesarbeitsgericht}) {Mecklenburg}-{Vorpommern}. 2017. {Urteil} vom 04.04.2017, {Az}.
2 {Sa} 11/17}, booktitle = {{BeckRS} 2017}, } @article{hartmann_rechtsfragen_2019, title = {Rechtsfragen: {Institutioneller} {Rahmen} und {Handlungsoptionen} für universitäres {FDM}}, copyright = {Creative Commons Attribution 4.0 International, Open Access}, shorttitle = {Rechtsfragen}, url = {https://zenodo.org/record/2654306}, doi = {10.5281/ZENODO.2654306}, language = {de}, author = {Hartmann, Thomas}, month = apr, year = {2019}, file = {Hartmann - 2019 - Rechtsfragen Institutioneller Rahmen und Handlung.pdf:C\:\\Users\\carst\\Zotero\\storage\\9EQDR7M4\\Hartmann - 2019 - Rechtsfragen Institutioneller Rahmen und Handlung.pdf:application/pdf}, } @book{dreier_urheberrechtsgesetz_2018, address = {München}, edition = {6}, title = {Urheberrechtsgesetz: {Verwertungsgesellschaftengesetz}, {Kunsturhebergesetz} : {Kommentar}}, isbn = {978-3-406-71266-1}, shorttitle = {Urheberrechtsgesetz}, publisher = {C.H. Beck}, author = {Dreier, Thomas and Schulze, Gernot}, year = {2018}, } @book{forschungsgemeinschaft_vorschlage_1998, address = {Weinheim}, title = {Vorschläge zur {Sicherung} guter wissenschaftlicher {Praxis}: {Empfehlungen} der {Kommission} "{Selbstkontrolle} in der {Wissenschaft}": {Denkschrift} : recommendation of the {Commission} on {Professional} {Self} {Regulation} in {Science}}, isbn = {978-3-527-27212-9}, url = {https://www.dfg.de/download/pdf/dfg_im_profil/reden_stellungnahmen/download/empfehlung_wiss_praxis_1310.pdf}, language = {de en}, publisher = {Wiley-VCH}, editor = {Forschungsgemeinschaft, Deutsche}, year = {1998}, file = {Deutsche Forschungsgemeinschaft - 1998 - Vorschläge zur Sicherung guter wissenschaftlicher .pdf:C\:\\Users\\carst\\Zotero\\storage\\6EGL9GK3\\Deutsche Forschungsgemeinschaft - 1998 - Vorschläge zur Sicherung guter wissenschaftlicher .pdf:application/pdf}, } @incollection{klimpel_eigentum_2015, address = {Berlin, Boston}, title = {Eigentum an {Metadaten}? 
{Urheberrechtliche} {Aspekte} von {Bestandsinformationen} und ihre {Freigabe}}, url = {https://irights.info/wp-content/uploads/2016/01/Klimpel-2015-Eigentum-an-Metadaten.pdf}, language = {de}, booktitle = {Handbuch {Kulturportale}, {Online}-{Angebote} aus {Kultur} und {Wissenschaft}}, publisher = {De Gruyter}, author = {Klimpel, Paul}, editor = {Euler, Ellen and Hagedorn-Saupe, Monika and Meier, Gerald}, year = {2015}, pages = {57--64}, file = {Klimpel - 2015 - Eigentum an Metadaten Urheberrechtliche Aspekte v.pdf:C\:\\Users\\carst\\Zotero\\storage\\EUHP7RLT\\Klimpel - 2015 - Eigentum an Metadaten Urheberrechtliche Aspekte v.pdf:application/pdf}, } @incollection{gotting_urheberrecht_2017, address = {Heidelberg}, edition = {3. Auflage}, title = {Das {Urheberrecht} des wissenschaftlichen {Personals}}, isbn = {978-3-8114-4348-8}, booktitle = {Hochschulrecht – ein {Handbuch} für die {Praxis}}, publisher = {C.F. Müller}, author = {Götting, Horst-Peter and Leuze, Dieter}, editor = {Hartmer, Michael and Detmer, Hubert}, year = {2017}, pages = {778--830}, } @book{gierschmann_kommentar_2017, address = {Köln}, title = {Kommentar {Datenschutz}-{Grundverordnung}}, isbn = {978-3-8462-0639-3}, language = {de}, publisher = {Reguvis Fachmedien}, author = {Gierschmann, Sybille and Schlender, Katharina and Stentzel, Rainer and Veil, Winfried}, year = {2017}, } @book{kreutzer_open_2015, address = {Bonn}, title = {Open {Content}: ein {Praxisleitfaden} zur {Nutzung} von {Creative}-{Commons}-{Lizenzen}}, isbn = {978-3-940785-78-7}, shorttitle = {Open {Content}}, url = {https://irights.info/wp-content/uploads/2015/10/Open_Content_-_Ein_Praxisleitfaden_zur_Nutzung_von_Creative-Commons-Lizenzen.pdf}, language = {de}, publisher = {Deutsche UNESCO-Kommission e.V}, author = {Kreutzer, Till}, editor = {Unesco-Kommission, Deutsche and Nordrhein-Westfalen (hbz), Hochschulbibliothekszentrum des Landes and Deutschland, Wikimedia}, year = {2015}, file = {Kreutzer - 2015 - Open Content ein 
Praxisleitfaden zur Nutzung von .pdf:C\:\\Users\\carst\\Zotero\\storage\\ZQVFZVU8\\Kreutzer - 2015 - Open Content ein Praxisleitfaden zur Nutzung von .pdf:application/pdf}, } @article{hartmann_zur_nodate, title = {Zur urheberrechtlichen {Schutzfähigkeit} von {Forschungsdaten}}, volume = {1}, url = {http://hdl.handle.net/11858/00-001M-0000-0014-1208-E}, number = {4}, journal = {Zeitschrift zum Innovations- und Technikrecht}, author = {Hartmann, Thomas}, pages = {199--202}, } @article{golla_neue_nodate, title = {Das neue {Datenschutzrecht} und die {Hochschullehre}}, volume = {51}, doi = {10.1628/wissr-2018-0011}, number = {2}, journal = {Wissenschaftsrecht}, author = {Golla, Sebastian and Matthé, Luisa}, pages = {206--223}, } @techreport{datenschutzkonferenz_beschluss_2019, type = {Beschluss}, title = {Beschluss der 97. {Konferenz} der unabhängigen {Datenschutzaufsichtsbehörden} des {Bundes} und der {Länder} zu {Auslegung} des {Begriffs} „bestimmte {Bereiche} wissenschaftlicher {Forschung}“ im {Erwägungsgrund} 33 der {DS}-{GVO}}, url = {https://www.datenschutzkonferenz-online.de/media/dskb/20190405_auslegung_bestimmte_bereiche_wiss_forschung.pdf}, language = {de}, author = {{Datenschutzkonferenz (DSK)}}, month = apr, year = {2019}, pages = {2}, file = {2019 - Beschluss der 97. Konferenz der unabhängigen Daten.pdf:C\:\\Users\\carst\\Zotero\\storage\\6F4U59RD\\2019 - Beschluss der 97. Konferenz der unabhängigen Daten.pdf:application/pdf}, } @article{noauthor_gedichttitelliste_nodate, title = {„{Gedichttitelliste} {III}.“ {Urteil} vom 13.8.2009, {Az}. {I} {ZR} 130/04}, volume = {63}, number = {11}, journal = {Neue juristische Wochenschrift}, pages = {778--779}, } @article{noauthor_grabungsmaterialien_nodate, title = {„{Grabungsmaterialien}.“ {Urteil} vom 27.09.1990, {Az}.
{I} {ZR} 244/88}, volume = {93}, number = {7}, journal = {Gewerblicher Rechtsschutz und Urheberrecht}, pages = {523--529}, } @book{wolff_beckscher_2015, address = {München}, series = {Beck'scher {Online}-{Kommentar} {Beck}-online}, title = {Beck'scher {Online}-{Kommentar} {Datenschutzrecht}}, language = {de}, publisher = {Beck}, editor = {Wolff, Heinrich Amadeus}, year = {2015}, } @incollection{golla_datenschutz_2019, address = {München}, title = {Datenschutz in {Forschung} und {Hochschullehre}}, booktitle = {Handbuch {Europäisches} und deutsches {Datenschutzrech}}, publisher = {C.H. Beck}, author = {Golla, Sebastian}, editor = {Specht, Louisa and Mantz, Reto}, year = {2019}, pages = {646--671}, } @article{noauthor_verbraucherzentrale_2019, title = {„{Verbraucherzentrale} {Bundesverband} e.{V}./{Planet49} {GmbH}.“ {Urteil} vom 1.10.2019, {Az}. {C}-673/17}, volume = {72}, url = {http://curia.europa.eu/juris/document/document.jsf?text=&docid=218462&pageIndex=0&doclang=DE&mode=lst&dir=&occ=first&part=1&cid=5934059}, number = {47}, journal = {Neue juristische Wochenschrift}, year = {2019}, pages = {3433--3437}, } @article{noauthor_breyerdeutschland_2017, title = {„{Breyer}/{Deutschland}.“ {Urteil} vom 19.10.2016, {Az}. {C}-582/14}, volume = {7}, url = {http://curia.europa.eu/juris/document/document.jsf?text=&docid=184668&pageIndex=0&doclang=de&mode=lst&dir=&occ=first&part=1&cid=5934193}, number = {1}, journal = {Zeitschrift für Datenschutz}, year = {2017}, pages = {24--29}, } @article{noauthor_staatsexamensarbeit_nodate, title = {„{Staatsexamensarbeit}.“ {Urteil} vom 21.11.1980, {Az}. 
{I} {ZR} 106/78}, volume = {83}, number = {5}, journal = {Gewerblicher Rechtsschutz und Urheberrecht}, pages = {352--355}, } @techreport{hiemenz_empfehlungen_2018, address = {Berlin}, title = {Empfehlungen zur {Erstellung} institutioneller {Forschungsdaten}-{Policies}: {Das} {Forschungsdaten}-{Policy}-{Kit} als generischer {Baukasten} mit {Leitfragen} und {Textbausteinen} für {Hochschulen} in {Deutschland}}, copyright = {CC BY 4.0 International}, url = {http://dx.doi.org/10.14279/depositonce-7521}, abstract = {Eine Forschungsdaten-Policy (FD-Policy) gibt Forschenden einen Orientierungsrahmen im Forschungsdatenmanagement (FDM) und sorgt dafür, dass sie die Anforderungen der Forschungsförderer erfüllen können. Für die Einrichtung selbst bietet eine FD-Policy die Chance, Entwicklungspotenziale im Bereich FDM zu erkennen, indem sie im Zuge der FD-Policy-Erstellung den Ist-Zustand ihrer technischen Infrastruktur und Serviceleistungen eruieren und den Soll-Zustand herausarbeiten kann. Angesichts wachsender Datenmengen stellen Forschungsdaten eine Herausforderung dar und bedürfen der permanenten Hinterfragung und Selbst-Vergewisserung durch die Hochschulen, wie sie mit ihren Forschungsdaten umgehen wollen. Zur dauerhaften Verankerung der damit einhergehenden strategischen Fragestellungen in den Hochschulleitungen sind FD-Policies und ihre regelmäßige Evaluierung und Anpassung probate Mittel. Jede Hochschule hat im Bereich Forschungsdatenmanagement ihre individuellen Rahmenbedingungen, die bei der Erstellung ihrer Forschungsdaten-Policy zu berücksichtigen sind. Als Handlungsanleitung und zur Unterstützung der Hochschulen hat die Technische Universität Berlin mit dem hier vorliegenden Forschungsdaten-Policy-Kit Empfehlungen in Form eines generischen Baukastensystems erstellt, das sowohl den allgemeinen als auch den individuellen Bedingungen Rechnung trägt. 
Das Forschungsdaten-Policy-Kit gliedert sich in drei Teile: Im ersten Abschnitt sind in den Vorüberlegungen grundlegende Fragen formuliert, mit deren Klärung vor der eigentlichen Erstellung der Forschungsdaten-Policy, begonnen werden sollte. Das Forschungsdaten-Policy-Schema mit der Übersicht über die inhaltlichen Bestandteile im zweiten Abschnitt schafft Klarheit über die Kernbereiche einer Forschungsdaten-Policy und dient als Vorlage zur Strukturierung der Inhalte. Die Leitfragen und Muster-Textbausteine im dritten Abschnitt sind das Kernstück des Forschungsdaten-Policy Kit. Anhand der Leitfragen kann jede Einrichtung die für sie relevanten Inhalte herausfiltern und die Forschungsdaten-Policy ihren spezifischen Gegebenheiten und dem eigenen Forschungsverständnis entsprechend gestalten. Das Projekt Modalitäten und Entwicklung institutioneller Forschungsdaten-Policies der TU Berlin ist Teilprojekt des zweijährigen BMBF-Verbundvorhabens FDMentor. Als Ergänzung zum Forschungsdaten-Policy-Kit wird die TU Berlin Anfang 2019 einen strategischen Leitfaden veröffentlichen, der einen vertiefenden Einblick in die Abläufe, Prozesse und strategischen Überlegungen bei der Entwicklung einer institutionellen Forschungsdaten-Policy geben soll.}, language = {de}, institution = {Technische Universität Berlin}, author = {Hiemenz, Bea and Kuberek, Monika}, year = {2018}, doi = {10.14279/depositonce-7521}, pages = {30}, file = {Hiemenz und Kuberek - 2018 - Empfehlungen zur Erstellung institutioneller Forsc.pdf:C\:\\Users\\carst\\Zotero\\storage\\VF8GC5EC\\Hiemenz und Kuberek - 2018 - Empfehlungen zur Erstellung institutioneller Forsc.pdf:application/pdf}, } @incollection{pampel_data_2011, address = {Bad Honnef}, title = {"{Data} {Policies}" im {Spannungsfeld} zwischen {Empfehlung} und {Verpflichtung}}, isbn = {978-3-88347-283-6}, url = {urn:nbn:de:kobv:525-opus-2287}, abstract = {Unter Beachtung disziplinärer Anforderungen beginnen Akteure aus Wissenschaft, Wissenschaftsmanagement und 
Infrastruktureinrichtungen Aussagen zum Umgang mit Forschungsdaten zu tätigen. Je nach Akteur und Zielgruppe variieren diese Aussagen, die häufig unter dem Begriff Policy gefasst werden. Der Beitrag gibt einen Überblick über die Vielfalt der Policies und beschreibt die Herausforderungen bei der Umsetzung dieser empfehlenden oder verpflichtenden Aussagen.}, language = {de}, booktitle = {Handbuch {Forschungsdatenmanagement}}, publisher = {Bock u. Herchen}, author = {Pampel, Heinz and Bertelmann, Roland}, year = {2011}, pages = {49--62}, file = {Pampel und Bertelmann - 2011 - Data Policies im Spannungsfeld zwischen Empfehlu.pdf:C\:\\Users\\carst\\Zotero\\storage\\3UJ9UTBV\\Pampel und Bertelmann - 2011 - Data Policies im Spannungsfeld zwischen Empfehlu.pdf:application/pdf}, } @techreport{noauthor_h2020_2019, type = {Framework {Programme}}, title = {H2020 {Programme}: {AGA} –{Annotated} {Model} {Grant} {Agreement}}, url = {https://ec.europa.eu/research/participants/data/ref/h2020/grants_manual/amga/h2020-amga_en.pdf}, language = {en}, number = {Version 5.2}, institution = {European Commission}, month = jun, year = {2019}, pages = {846}, file = {2019 - H2020 Programme AGA –Annotated Model Grant Agreem.pdf:C\:\\Users\\carst\\Zotero\\storage\\3TC7VJC3\\2019 - H2020 Programme AGA –Annotated Model Grant Agreem.pdf:application/pdf}, } @inproceedings{albornoz_framing_2018, title = {Framing {Power}: {Tracing} {Key} {Discourses} in {Open} {Science} {Policies}}, shorttitle = {Framing {Power}}, url = {https://hal.archives-ouvertes.fr/hal-01816725v1}, doi = {10.4000/proceedings.elpub.2018.23}, abstract = {Given that “Open Science” is becoming a popular policy object around the world, this study sought to identify key narratives about Open Science in policy, and critically examine the extent to which they are sustaining or strengthening multi-layered domination and inequality schemes that pre-exist in scientific knowledge production. 
To do so, we conducted a content analysis of Open Science policies stemming from Europe, North America, Latin America, Asia and Africa to understand which narratives about Open Science policies are produced, reproduced and by whom; and in turn, whose interests may be neglected in this process. We found that Open Science policies, mostly stemming from Europe, frame “openness” as a vehicle to promote technological change as part of an inevitable and necessary cultural shift to modernity in scientific production. The global reach of these narratives, and the technologies, standards and models these narratives sustain, are dictating modes of working and collaborating among those who can access them, and creating new categories of exclusion that invalidate knowledge that cannot meet this criteria, putting historically marginalized researchers and publics at further disadvantage.}, booktitle = {22nd {International} {Conference} on {Electronic} {Publishing}}, publisher = {OpenEdition Press}, author = {Albornoz, Denisse and Huang, Maggie and Martin, Issra Marie and Mateus, Maria and Touré, Aicha Yasmine and Chan, Leslie}, month = jun, year = {2018}, file = {Albornoz et al. - 2018 - Framing Power Tracing Key Discourses in Open Scie.pdf:C\:\\Users\\carst\\Zotero\\storage\\F4YIFWTL\\Albornoz et al. 
- 2018 - Framing Power Tracing Key Discourses in Open Scie.pdf:application/pdf}, } @techreport{forschungsgemeinschaft_empfehlungen_2009, title = {Empfehlungen zur gesicherten {Aufbewahrung} und {Bereitstellung} digitaler {Forschungsprimärdaten}}, url = {https://www.dfg.de/download/pdf/foerderung/programme/lis/ua_inf_empfehlungen_200901.pdf}, language = {de-DE}, institution = {Deutsche Forschungsgemeinschaft: Ausschuss für Wissenschaftliche Bibliotheken und Informationssysteme: Unterausschuss für Informationsmanagement}, author = {{Deutsche Forschungsgemeinschaft}}, year = {2009}, pages = {3}, file = {Deutsche Forschungsgemeinschaft - 2009 - Empfehlungen zur gesicherten Aufbewahrung und Bere.pdf:C\:\\Users\\carst\\Zotero\\storage\\4LSZ39QE\\Deutsche Forschungsgemeinschaft - 2009 - Empfehlungen zur gesicherten Aufbewahrung und Bere.pdf:application/pdf}, } @book{noauthor_policy_nodate, title = {Policy tools and guidance}, copyright = {© Digital Curation Centre 2004-2020}, url = {https://www.dcc.ac.uk/guidance/policy/policy-tools-and-guidance}, abstract = {This page lists tools that you can use to create RDM policies. It also gives examples of policies from overseas institutions.}, note = {Publication Title: https://www.dcc.ac.uk}, file = {Policy tools and guidance | DCC:C\:\\Users\\carst\\Zotero\\storage\\4A58JVWZ\\policy-tools-and-guidance.html:text/html}, } @book{digital_state_2018, title = {The {State} of {Open} {Data} {Report} 2018}, isbn = {978-1-9993177-0-6}, url = {https://digitalscience.figshare.com/articles/report/The_State_of_Open_Data_Report_2018/7195058}, language = {en}, author = {{Digital Science} and Hahnel, Mark and Fane, Briony and Treadway, Jon and Baynes, Grace and Wilkinson, Ross and Mons, Barend and Schultes, Erik and da Silva Santos, Luiz Olavo Bonino and Arefiev, Pavel and Osipov, Igor}, month = nov, year = {2018}, doi = {10.6084/m9.figshare.7195058.v2}, file = {Science Digital et al.
- 2018 - The State of Open Data Report 2018.pdf:C\:\\Users\\carst\\Zotero\\storage\\HQV7JMP7\\Science Digital et al. - 2018 - The State of Open Data Report 2018.pdf:application/pdf}, } @article{lasthiotakis_open_2015, title = {Open science strategies in research policies: {A} comparative exploration of {Canada}, the {US} and the {UK}:}, volume = {13}, copyright = {© The Author(s) 2015}, shorttitle = {Open science strategies in research policies}, url = {https://journals.sagepub.com/doi/10.1177/1478210315579983}, doi = {10.1177/1478210315579983}, abstract = {Several movements have emerged related to the general idea of promoting ‘openness’ in science. Research councils are key institutions in bringing about changes ...}, language = {en}, number = {8}, journal = {Policy Futures in Education}, author = {Lasthiotakis, Helen and Kretz, Andrew and Sá, Creso}, month = apr, year = {2015}, pages = {968--989}, file = {Lasthiotakis et al. - 2015 - Open science strategies in research policies A co.pdf:C\:\\Users\\carst\\Zotero\\storage\\4DTAX6AJ\\Lasthiotakis et al. 
- 2015 - Open science strategies in research policies A co.pdf:application/pdf}, } @book{oecd_oecd_2007, address = {Paris}, title = {{OECD} {Principles} and {Guidelines} for {Access} to {Research} {Data} from {Public} {Funding}}, copyright = {© OECD 2007}, url = {https://www.oecd-ilibrary.org/content/publication/9789264034020-en-fr}, author = {{OECD}}, year = {2007}, doi = {10.1787/9789264034020-en-fr}, file = {OECD - 2007 - OECD Principles and Guidelines for Access to Resea.pdf:C\:\\Users\\carst\\Zotero\\storage\\Q4B3H8DN\\OECD - 2007 - OECD Principles and Guidelines for Access to Resea.pdf:application/pdf}, } @article{walport_sharing_2011, title = {Sharing research data to improve public health}, volume = {377}, issn = {0140-6736, 1474-547X}, url = {https://www.thelancet.com/journals/lancet/article/PIIS0140-6736(10)62234-9/abstract}, doi = {10.1016/S0140-6736(10)62234-9}, abstract = {The purpose of medical research is to analyse and understand health and disease. A key and expensive element is the study of populations to explore how interactions between behaviour and environment, in the context of genetic diversity, determine causation and variation in health and disease. As funders of public health research, we need to ensure that research outputs are used to maximise knowledge and potential health benefits. 
In turn, the populations who participate in research, and the taxpayers who foot the bill, have the right to expect that every last ounce of knowledge will be wrung from the research.}, language = {English}, number = {9765}, journal = {The Lancet}, author = {Walport, Mark and Brest, Paul}, month = feb, year = {2011}, pages = {537--539}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\CE88Y24Q\\fulltext.html:text/html}, } @article{zuiderwijk_open_2014, title = {Open data policies, their implementation and impact: {A} framework for comparison}, volume = {31}, issn = {0740-624X}, shorttitle = {Open data policies, their implementation and impact}, url = {http://www.sciencedirect.com/science/article/pii/S0740624X13001202}, doi = {10.1016/j.giq.2013.04.003}, abstract = {In developing open data policies, governments aim to stimulate and guide the publication of government data and to gain advantages from its use. Currently there is a multiplicity of open data policies at various levels of government, whereas very little systematic and structured research has been done on the issues that are covered by open data policies, their intent and actual impact. Furthermore, no suitable framework for comparing open data policies is available, as open data is a recent phenomenon and is thus in an early stage of development. In order to help bring about a better understanding of the common and differentiating elements in the policies and to identify the factors affecting the variation in policies, this paper develops a framework for comparing open data policies. The framework includes the factors of environment and context, policy content, performance indicators and public values. Using this framework, seven Dutch governmental policies at different government levels are compared. The comparison shows both similarities and differences among open data policies, providing opportunities to learn from each other's policies. 
The findings suggest that current policies are rather inward looking, open data policies can be improved by collaborating with other organizations, focusing on the impact of the policy, stimulating the use of open data and looking at the need to create a culture in which publicizing data is incorporated in daily working processes. The findings could contribute to the development of new open data policies and the improvement of existing open data policies.}, language = {en}, number = {1}, journal = {Government Information Quarterly}, author = {Zuiderwijk, Anneke and Janssen, Marijn}, month = jan, year = {2014}, pages = {17--29}, file = {Zuiderwijk und Janssen - 2014 - Open data policies, their implementation and impac.pdf:C\:\\Users\\carst\\Zotero\\storage\\DB3BGXHQ\\Zuiderwijk und Janssen - 2014 - Open data policies, their implementation and impac.pdf:application/pdf}, } @article{tsoukala_recode_2016, title = {{RECODE}: {Policy} recommendations for open access to research data}, shorttitle = {{RECODE}}, url = {https://zenodo.org/record/50863#.X2NQR4vgpoU}, doi = {10.5281/zenodo.50863}, abstract = {These policy recommendations are a deliverable from the EC FP7 funded RECODE project (2012-2014), which aimed at assisting stakeholders within the scholarly communications ecosystem, e.g., policy makers, research funders, data managers, and publishers, to implement open access to research data. The recommendations, which are a product of research, analysis and stakeholder consultations, provide both over-arching and stakeholder specific guidance. While a consensus is observed amongst many policy makers on the benefits of open access for science, industry and civil society, there are still important barriers that need to be overcome. 
RECODE identified in particular two overarching issues in the mobilization of open access to research data: a lack of coherent open data ecosystem and a lack of attention to the specificity of research practices, processes and form of data collections.}, language = {en}, author = {Tsoukala, Victoria and Angelaki, Marina and Kalaitzi, Vasso and Wessels, Bridgette and Price, Lada and Taylor, Mark J. and Smallwood, Rod and Linde, Peter and Sondervan, Jeroen and Reilly, Susan and Noorman, Merel and Wyatt, Sally and Bigagli, Lorenzo and Finn, Rachel and Sveinsdottir, Thordis and Wadhwa, Kush}, month = apr, year = {2016}, pages = {40}, file = {Tsoukala et al. - 2016 - RECODE Policy recommendations for open access to .pdf:C\:\\Users\\carst\\Zotero\\storage\\7GZ4XENM\\Tsoukala et al. - 2016 - RECODE Policy recommendations for open access to .pdf:application/pdf}, } @book{biernacka_wie_2019, title = {Wie {FAIR} sind {Deine} {Forschungsdaten}?}, url = {https://zenodo.org/record/2547339#.X2eBtYvgq-4}, abstract = {Informationsmaterial zu den "FAIR Data Principles".}, language = {deu}, author = {Biernacka, Katarzyna and Dolzycka, Dominika and Buchholz, Petra and Helbig, Kerstin}, month = jan, year = {2019}, doi = {10.5281/zenodo.2547339}, file = {Biernacka et al. - 2019 - Wie FAIR sind Deine Forschungsdaten.pdf:C\:\\Users\\carst\\Zotero\\storage\\RKSHTSA4\\Biernacka et al. - 2019 - Wie FAIR sind Deine Forschungsdaten.pdf:application/pdf}, } @book{kunkel_stakeholder-dialoge_2019, address = {Wiesbaden}, edition = {2., akt. 
Auflage 2019}, title = {Stakeholder-{Dialoge} erfolgreich gestalten: {Kernkompetenzen} für erfolgreiche {Konsultations}- und {Kooperationsprozesse}}, isbn = {978-3-658-26971-5}, language = {de}, publisher = {Springer Fachmedien Wiesbaden GmbH Springer Gabler}, author = {Künkel, Petra and Gerlach, Silvine and Frieg, Vera}, year = {2019}, } @book{krips_stakeholdermanagement_2017, address = {Berlin}, edition = {2., neu bearbeitete Auflage}, series = {Edition {DVP}}, title = {Stakeholdermanagement}, isbn = {978-3-662-55633-7}, language = {de}, number = {Heft 5}, publisher = {Springer Vieweg}, author = {Krips, David}, year = {2017}, } @book{corti_managing_2014, address = {Los Angeles London New Delhi}, title = {Managing and sharing research data: a guide to good practice}, isbn = {978-1-4462-6726-4, 978-1-4462-6725-7}, shorttitle = {Managing and sharing research data}, language = {en}, publisher = {SAGE}, author = {Corti, Louise and Van den Eynden, Veerle and Bishop, Libby and Woollard, Matthew}, year = {2014}, } @techreport{duden_konzept_2017, address = {Braunschweig}, type = {Konzept}, title = {Konzept {Forschungsdatenmanagement}}, url = {https://www.ptb.de/cms/fileadmin/internet/forschung_entwicklung/digitalisierung/Konzept_Forschungsdatenmanagement_mit_PTB-Cover.pdf}, language = {de}, institution = {Physikalisch-Technische Bundesanstalt}, author = {Duden, Tobias}, year = {2017}, pages = {145}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\I9CWKXDR\\_.pdf:application/pdf}, } @book{molloy_research_nodate, title = {Research data management ({RDM}) open training materials}, url = {https://zenodo.org/communities/dcc-rdm-training-materials/}, abstract = {Research data management (RDM) open training materials Openly accessible online training materials which can be shared and repurposed for RDM training. 
All contributions in any language are welcome.}, language = {en}, author = {Molloy, Laura}, note = {Publication Title: https://zenodo.org}, } @book{noauthor_uag_2020, title = {{UAG} {Schulungen}/{Fortbildungen}}, copyright = {CC BY}, url = {https://www.forschungsdaten.org/index.php/UAG_Schulungen/Fortbildungen}, language = {de}, month = jun, year = {2020}, } @book{locke_theory_1990, address = {Englewood Cliffs, N.J.}, title = {A theory of goal setting and task performance}, isbn = {978-0-13-913138-7}, language = {en}, publisher = {Prentice Hall}, author = {Locke, Edwin A. and Latham, Gary P. and Smith, Ken J.}, year = {1990}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\4AXDX7V3\\Locke et al. - 1990 - A theory of goal setting & task performance.pdf:application/pdf}, } @book{research_data_alliance_education_nodate, title = {Education and {Training} on handling of research data {IG}}, url = {https://www.rd-alliance.org/groups/education-and-training-handling-research-data.html}, abstract = {The context of increasing volumes of data being created by researchers and the strengthening of requirements for research data management and data sharing has created demand for a new and evolving set of competencies and skills for researchers who create and use the data, and the growing cadre of professionals who support them.}, language = {en}, author = {{Research Data Alliance}}, note = {Publication Title: RDA}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\HG8PEHRR\\education-and-training-handling-research-data.html:text/html}, } @book{noauthor_research_2019, title = {Research {Data} {Lifecycle}}, url = {https://www.youtube.com/watch?v=-wjFMMQD3UA&feature=youtu.be}, abstract = {Data often have a longer lifespan than the research project that creates them. Researchers may continue to work on data after funding has ceased, follow-up projects may analyse or add to the data, and data may be re-used by other researchers. 
Well organised, well documented, preserved and shared data are invaluable to advance scientific inquiry and to increase opportunities for learning and innovation.}, language = {en}, month = aug, year = {2019}, } @book{noauthor_umfragen_2019, title = {Umfragen zum {Umgang} mit {Forschungsdaten} an wissenschaftlichen {Institutionen}}, copyright = {CC BY}, url = {https://www.forschungsdaten.org/index.php/Umfragen_zum_Umgang_mit_Forschungsdaten_an_wissenschaftlichen_Institutionen}, language = {de}, month = mar, year = {2019}, note = {Publication Title: https://www.forschungsdaten.org}, } @techreport{forschungsgemeinschaft_guidance_nodate, address = {Bonn}, type = {Guidance {Notes}}, title = {Guidance {Notes} on {Funding} {Criteria} {National} {Research} {Data} {Infrastructure} ({NFDI})}, url = {https://www.dfg.de/formulare/nfdi120/nfdi120_en.pdf}, language = {en}, number = {DFG form nfdi120 -05/20}, institution = {Deutsche Forschungsgemeinschaft}, author = {{Deutsche Forschungsgemeinschaft}}, pages = {3}, file = {Guidance Notes on Funding Criteria National Resear.pdf:C\:\\Users\\carst\\Zotero\\storage\\5ZRCJYVI\\Guidance Notes on Funding Criteria National Resear.pdf:application/pdf}, } @article{bug_operative_2018, title = {Operative und strategische {Elemente} einer leistungsfähigen {Forschungsdateninfrastruktur} in den {Sozial}- und {Wirtschaftswissenschaften}}, volume = {238}, issn = {0021-4027, 2366-049X}, url = {https://www.degruyter.com/view/journals/jbnst/238/6/article-p571.xml}, doi = {10.1515/jbnst-2018-0029}, language = {en}, number = {6}, journal = {Jahrbücher für Nationalökonomie und Statistik}, author = {Bug, Mathias and Liebig, Stefan and Oellers, Claudia and Riphahn, Regina T.}, month = oct, year = {2018}, pages = {571--590}, file = {Bug et al. - 2018 - Operative und strategische Elemente einer leistung.pdf:C\:\\Users\\carst\\Zotero\\storage\\HMNRBNNM\\Bug et al. 
- 2018 - Operative und strategische Elemente einer leistung.pdf:application/pdf}, } @inproceedings{dros_kuratierung_2019, title = {Die {Kuratierung} sozialwissenschaftlicher {Forschungsdaten} - {Praxisfragen} und {Beispiellösungen}}, isbn = {978-3-95806-405-8}, url = {https://www.econstor.eu/handle/10419/201387}, abstract = {Der Beitrag bietet einen Blick in die Praxis der Veröffentlichung sozialwissenschaftlicher Forschungsdaten. Dabei beschreibt er den Arbeitsschritt der Datenkuratierung als wesentliche Komponente, um Forschungsdaten nachhaltig verfügbar zu machen. Anhand der Erfahrungen des institutionellen Forschungsdatenmanagements am Wissenschaftszentrum Berlin für Sozialforschung (WZB) werden einzelne Problemfelder und Praxislösungen dargestellt, die im Laufe des Kuratierungsprozesses auftreten. Dabei zeigt sich, dass die Datenkuratierung ein komplexer Vorgang ist, bei dem sich fachspezifische, aber auch (arbeits-)organisatorische Herausforderungen stellen. Diese werden anhand von drei exemplarischen Themenkomplexen skizziert: Datenschutzfragen müssen im Forschungsprozess frühzeitig mitgedacht werden. Dazu gehört, dass in informierten Einwilligungen die Rechte der Befragten geschützt, gleichzeitig aber spätere Nachnutzungsmöglichkeiten nicht durch zu restriktive Formulierungen ausgeschlossen werden. In Bezug auf die Verwertungs- und Nutzungsrechte muss gerade in Projektkonstellationen mit mehreren Beteiligten frühzeitig eine Abstimmung über die Datenveröffentlichung erfolgen. Schließlich verdeutlicht die Verknüpfung von Daten- und Textpublikation, wie sich Veröffentlichungsroutinen und Suchgewohnheiten der Wissenschaftler*innen auf die Auffindbarkeit der Forschungsdaten auswirken.}, language = {de}, booktitle = {Forschungsdaten - sammeln, sichern, strukturieren}, publisher = {Jülich: Forschungszentrum Jülich Zentralbibliothek}, author = {Droß, Patrick J. 
and Naujoks, Julian}, year = {2019}, pages = {23--38}, file = {Droß und Naujoks - 2019 - Die Kuratierung sozialwissenschaftlicher Forschung.pdf:C\:\\Users\\carst\\Zotero\\storage\\UY66D9FN\\Droß und Naujoks - 2019 - Die Kuratierung sozialwissenschaftlicher Forschung.pdf:application/pdf}, } @techreport{noauthor_european_2017, address = {Brussels}, type = {Declaration}, title = {European {Open} {Science} {Cloud}: {New} {Research} \& {Innovation} {Opportunities}}, url = {https://ec.europa.eu/research/openscience/pdf/eosc_declaration.pdf#view=fit&pagemode=none}, abstract = {The EOSC Declaration and its principles, guiding the implementation of the EOSC, are the tangible result of the EOSC Summit of 12 June 2017. They have been endorsed by the undersigning stakeholders, found in the List of Signatories, who also committed to specific actions to implement it (Action List). As such, the Declaration does not commit the European Commission and Union institutions.}, language = {en}, month = oct, year = {2017}, pages = {5}, file = {2017 - European Open Science Cloud New Research & Innova.pdf:C\:\\Users\\carst\\Zotero\\storage\\3ED3FR8U\\2017 - European Open Science Cloud New Research & Innova.pdf:application/pdf}, } @article{glaeser_measuring_2000, title = {Measuring {Trust}}, volume = {115}, issn = {0033-5533}, url = {https://academic.oup.com/qje/article/115/3/811/1828161}, doi = {10.1162/003355300554926}, abstract = {We combine two experiments and a survey to measure trust and trustworthiness—two key components of social capital. Standard attitudinal survey questions about trust predict trustworthy behavior in our experiments much better than they predict trusting behavior. Trusting behavior in the experiments is predicted by past trusting behavior outside of the experiments. When individuals are closer socially, both trust and trustworthiness rise. Trustworthiness declines when partners are of different races or nationalities. 
High status individuals are able to elicit more trustworthiness in others.}, language = {en}, number = {3}, journal = {The Quarterly Journal of Economics}, author = {Glaeser, Edward L. and Laibson, David I. and Scheinkman, José A. and Soutter, Christine L.}, year = {2000}, pages = {811--846}, file = {Glaeser et al. - 2000 - Measuring Trust.pdf:C\:\\Users\\carst\\Zotero\\storage\\8L6UDHBP\\Glaeser et al. - 2000 - Measuring Trust.pdf:application/pdf}, } @article{linne_strengthening_2017, title = {Strengthening institutional data management and promoting data sharing in the social and economic sciences}, volume = {27}, issn = {2213-056X}, url = {http://www.liberquarterly.eu/article/10.18352/lq.10195/}, doi = {10.18352/lq.10195}, abstract = {In the German social and economic sciences there is a growing awareness of flexible data distribution and research data reuse, especially as increasing numbers of research funders recommend publishing research data as the basis for scientific insight. However, a data-sharing mentality has not yet been established in Germany attributable to researchers’ strong reservations about publishing their data. This attitude is exacerbated by the fact that, at present, there is no trusted national data sharing repository that covers the particular requirements of institutions regarding research data. This article discusses how this objective can be achieved with the project initiative SowiDataNet. 
The development of a community-driven data repository is a logically consistent and important step towards an attitude shift concerning data sharing in the social and economic sciences}, language = {en}, number = {1}, journal = {LIBER Quarterly}, author = {Linne, Monika and Zenk-Möltgen, Wolfgang}, month = apr, year = {2017}, pages = {58--72}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\ZF4U6KSR\\lq.10195.html:text/html}, } @book{noauthor_open_nodate, title = {Open {Science} {Monitor}: {Facts} and {Figures} for open research data}, url = {https://ec.europa.eu/info/research-and-innovation/strategy/goals-research-and-innovation-policy/open-science/open-science-monitor/facts-and-figures-open-research-data_en}, abstract = {Figures and case studies related to accessing and reusing the data produced in the course of scientific production.}, language = {en}, note = {Publication Title: European Commission - European Commission}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\4AGJHYWQ\\facts-and-figures-open-research-data_en.html:text/html}, } @article{popkin_data_2019, title = {Data sharing and how it can benefit your scientific career}, volume = {569}, copyright = {2020 Nature}, url = {https://www.nature.com/articles/d41586-019-01506-x}, doi = {10.1038/d41586-019-01506-x}, abstract = {Open science can lead to greater collaboration, increased confidence in findings and goodwill between researchers.}, language = {en}, number = {7756}, journal = {Nature}, author = {Popkin, Gabriel}, month = may, year = {2019}, pages = {445--447}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\PY74U7CA\\Popkin - 2019 - Data sharing and how it can benefit your scientifi.pdf:application/pdf}, } @book{noauthor_fair_2019, title = {{FAIR} {Working} {Group}}, url = {https://www.eoscsecretariat.eu/working-groups/fair-working-group}, language = {en}, month = nov, year = {2019}, note = {Publication Title: EOSC Secretariat}, file = {FAIR Working Group | 
EOSCSecretariat:C\:\\Users\\carst\\Zotero\\storage\\344AQFPR\\fair-working-group.html:text/html}, } @inproceedings{linne_sustainable_2013, title = {Sustainable {Data} {Preservation} using datorium – facilitating the {Scientific} {Ideal} of {Data} {Sharing} in the {Social} {Sciences}}, copyright = {CC BY-SA 2.0 AT}, url = {https://phaidra.univie.ac.at/detail/o:378062#?sortdef=title%20asc&page=6&pagesize=10&collection=o:378098}, abstract = {This paper introduces datorium - a digital data preservation project at the Data Archive of GESIS-Leibniz Institute for the Social Sciences. datorium is a new data repository service for the research community. It functions as a web-based data sharing repository providing a user-friendly tool for researchers making data accessible for the purpose of re-use by other scholars. Sharing, managing, documenting and publishing data, structured metadata and publications will be carried out autonomously by researchers. Data and related information will be available free of charge. All uploaded research data and documentation will be peer-reviewed and digitally preserved by the GESIS Data Archive. GESIS promotes data sharing as a scholarly ideal and facilitates cooperation between researchers. By developing datorium the Data Archive aims to collect and provide research data with a wide thematic scope for academic re-use. A further intention is to ensure long-term preservation of archived data and metadata as well as providing wide-ranging dissemination possibilities for scholars in order to increase the visibility and availability of their research projects. 
By providing access to their research data scholars can support new research or secondary analysis and beyond that they profit from increased citations of their work, thereby improving their professional reputation.}, language = {en}, booktitle = {Proceedings of the 10th {International} {Conference} on {Preservation} of {Digital} {Objects}}, author = {Linne, Monika}, year = {2013}, pages = {150--155}, file = {Linne - 2013 - Sustainable Data Preservation using datorium – fac.pdf:C\:\\Users\\carst\\Zotero\\storage\\ZE88VUCX\\Linne - 2013 - Sustainable Data Preservation using datorium – fac.pdf:application/pdf}, } @techreport{rehwald_von_2019, address = {Duisburg-Essen}, title = {Von {USB}-{Sticks} und {Repositorien} -{Ergebnisse} der {UNEKE}-{Umfrage}}, url = {https://www.forschungsdaten.org/images/e/e5/01-uneke–usb-sticks-und-repositorien.pdf}, language = {de}, author = {Rehwald, Stephanie and Brenger, Bela}, month = jan, year = {2019}, pages = {[36]}, file = {Rehwald und Bela Brenger - 2019 - Von USB-Sticks und Repositorien -Ergebnisse der UN.pdf:C\:\\Users\\carst\\Zotero\\storage\\N4EGETWY\\Rehwald und Bela Brenger - 2019 - Von USB-Sticks und Repositorien -Ergebnisse der UN.pdf:application/pdf}, } @book{rat_fur_sozial-_und_wirtschaftsdaten_ratswd_remote_2019, address = {Berlin}, series = {{RatSWD} {Output} {Series}}, title = {Remote {Access} zu {Daten} der amtlichen {Statistik} und der {Sozialversicherungsträger}}, url = {https://www.ratswd.de/publikation/output-series/2842}, abstract = {Daten der amtlichen Statistik des Bundes und der Länder und der Sozialversicherungsträger stellen eine wichtige Quelle für die empirische Sozial- und Wirtschaftsforschung dar. 
Ein großer Teil des Mikrodatenbestandes kann bislang jedoch nur ortsgebunden an den Gastwissenschaftsarbeitsplätzen (GWAP) der Forschungsdatenzentren (FDZ) der Datenproduzenten oder über die kontrollierte Datenfernverarbeitung (KDFV) genutzt werden. Wenngleich die Tatsache, dass die Daten in einem transparenten und strukturierten Verfahren für die Forschung zugänglich sind, im Vergleich zu der Situation vor 16 Jahren einen erheblichen Fortschritt darstellt, sind diese Arten des Datenzugangs für die Forschung mit hohem Aufwand verbunden und schränken den Forschungsprozess zeitlich und organisatorisch deutlich ein. Vor diesem Hintergrund ist es wichtig, über eine Modernisierung des Datenzugangs in Deutschland nachzudenken, mit dem Ziel, den Datenzugang sowohl aus der Perspektive der Forschung als auch der Datenproduzenten effizienter zu gestalten. Vorbilder können die statistischen Ämter der nordischen Länder oder auch der Niederlande sein, die schon seit geraumer Zeit die rechtlichen und technischen Voraussetzungen geschaffen haben, um ihre Daten für die Forschenden im eigenen Land an deren Arbeitsplatz via Remote Access in Form eines Remote Desktop Verfahren zugänglich zu machen. Mit dem vorliegenden Papier werden verschiedene Vorgehensweisen dargestellt und diesbezügliche Empfehlungen formuliert: In Deutschland sollte ein Remote Desktop Verfahren zu Daten der amtlichen Statistik und der Sozialversicherungsträger über Pilotprojekte etabliert werden. Die statistischen Ämter des Bundes und der Länder, die Sozialversicherungsträger, Wissenschaft und Datenschutz sollten in diesen Pilotprojekten eng kooperieren. In Hinblick auf den Remote Access zu Daten der amtlichen Statistik empfiehlt der Rat eine Änderung des §16 Abs. 6 im Bundesstatistikgesetz dahingehend, dass ein Remote Desktop Zugang zu formal anonymisierten Daten der amtlichen Statistik möglich wird. 
Für die rechtssichere und hochverfügbare Implementierung der Pilotprojekte, die damit wachsende Attraktivität des Forschungsstandortes Deutschland wünscht sich der RatSWD Unterstützung seitens der forschungsfördernden Institutionen.}, language = {de}, editor = {{Rat Für Sozial- Und Wirtschaftsdaten (RatSWD)}}, year = {2019}, doi = {10.17620/02671.42}, file = {Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2019 - Remote Access zu Daten der amtlichen Statistik und.pdf:C\:\\Users\\carst\\Zotero\\storage\\7SCIGULS\\Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2019 - Remote Access zu Daten der amtlichen Statistik und.pdf:application/pdf}, } @book{rat_fur_sozial-_und_wirtschaftsdaten_ratswd_sozial-_2019, address = {Berlin}, series = {{RatSWD} {Output} {Series}}, title = {Sozial-, {Verhaltens}- und {Wirtschaftswissenschaften} in {Roadmap}-{Prozessen}}, url = {https://www.ratswd.de/publikation/output-series/2811}, language = {de}, author = {{Rat Für Sozial- Und Wirtschaftsdaten (RatSWD)}}, year = {2019}, doi = {10.17620/02671.38}, file = {Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2019 - Sozial-, Verhaltens- und Wirtschaftswissenschaften.pdf:C\:\\Users\\carst\\Zotero\\storage\\Z9C3YBEZ\\Rat Für Sozial- Und Wirtschaftsdaten (RatSWD) - 2019 - Sozial-, Verhaltens- und Wirtschaftswissenschaften.pdf:application/pdf}, } @book{vocile_open_nodate, title = {Open {Science} {Trends} {You} {Need} to {Know} {About}}, url = {https://www.wiley.com/network/researchers/licensing-and-open-access/open-science-trends-you-need-to-know-about}, abstract = {One topic of great interest across academia is the evolution of researcher perceptions of open access publishing and data sharing. In September, this was the focus of the latest in Wiley’s annual surveys of the research community. The 2016 Wiley Open Science Researcher Survey* builds upon our previous surveys on open access and open data to discover trends in research. 
Despite geographical and subject-level differences among authors, there are underlying commonalities in open science practices. The insights reported by our respondents show a willingness to move forward with open initiatives, but confusion around the best ways to do so.}, language = {en}, author = {Vocile, Bobby}, note = {Publication Title: https://www.wiley.com}, file = {Open Science Trends You Need to Know About:C\:\\Users\\carst\\Zotero\\storage\\Z7IQ6SPJ\\open-science-trends-you-need-to-know-about.html:text/html}, } @book{noauthor_go_nodate, title = {Die {GO} {FAIR} {Initiative}}, url = {https://www.zbw.eu/de/ueber-uns/arbeitsschwerpunkte/forschungsdatenmanagement/go-fair}, note = {Publication Title: ZBW}, file = {GO FAIR | ZBW:C\:\\Users\\carst\\Zotero\\storage\\4CNGD8ZX\\go-fair.html:text/html}, } @incollection{welle_donker_funding_2018, address = {The Hague}, series = {Information {Technology} and {Law} {Series}}, title = {Funding {Open} {Data}}, isbn = {978-94-6265-261-3}, url = {https://doi.org/10.1007/978-94-6265-261-3_4}, abstract = {Open government data are fast becoming entrenched in our society. However, even though open government data may be “free”, it is not “gratis”. It takes substantial human and financial resources not only to collect and maintain government data, but also to process the data to be suitable for distribution as open data. Those resources need to be funded. In this chapter, we identify potential funding models for open data. We also explore the costs of implementing open data policies, and the benefits of open data, both for the open data organisation and for society. We demonstrate that the once-off operational costs of open data supply are marginal compared to the total operational costs of the open data organisation. Open data leads to efficiency gains within the open data organisation and to societal benefits. 
However, to reap those benefits, it is essential that organisations switching to open data, receive compensation, at least in the short-term. The compensation may be found in a new paid role in the information value chain.}, language = {en}, booktitle = {Open {Data} {Exposed}}, publisher = {T.M.C. Asser Press}, author = {Welle Donker, Frederika}, editor = {van Loenen, Bastiaan and Vancauwenberghe, Glenn and Crompvoets, Joep}, year = {2018}, doi = {10.1007/978-94-6265-261-3_4}, pages = {55--78}, file = {Welle Donker - 2018 - Funding Open Data.pdf:C\:\\Users\\carst\\Zotero\\storage\\C8IVCDHX\\Welle Donker - 2018 - Funding Open Data.pdf:application/pdf}, } @book{noauthor_hauptseite_nodate, title = {Hauptseite}, url = {https://www.forschungsdaten.org/index.php/Hauptseite}, language = {de}, note = {Publication Title: https://www.forschungsdaten.org/}, file = {Forschungsdaten.org:C\:\\Users\\carst\\Zotero\\storage\\XGACPBQY\\Hauptseite.html:text/html}, } @book{noauthor_ausbildung_nodate, title = {Ausbildung und {Qualifikation}}, url = {https://www.forschungsdaten.org/index.php/Ausbildung_und_Qualifikation}, language = {de}, note = {Publication Title: Forschungsdaten.org}, file = {Ausbildung und Qualifikation – Forschungsdaten.org:C\:\\Users\\carst\\Zotero\\storage\\WBLC67KC\\Ausbildung_und_Qualifikation.html:text/html}, } @book{noauthor_entwicklung_nodate, title = {Entwicklung eines {Schulungspaketes}}, shorttitle = {{eHumanities} - interdisziplinär}, url = {https://www.fdm-bayern.org/ehumanities-interdisziplinaer/ziele-und-arbeitspakete/e-learning/}, abstract = {Die Website des Projektes eHumanities – interdisziplinär gibt einen Überblick über Initiativen zum Forschungsdatenmanagement in und außerhalb Bayerns.}, language = {de-DE}, urldate = {2020-09-18}, note = {Publication Title: Forschungsdatenmanagement Bayern}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\BV9DCCDT\\e-learning.html:text/html}, } @article{gantert_digitale_2018, title = {Die digitale 
{Transformation} meistern}, volume = {42}, issn = {1865-7648, 0341-4183}, url = {http://www.degruyter.com/view/j/bfup.2018.42.issue-3/bfp-2018-0053/bfp-2018-0053.xml}, doi = {10.1515/bfp-2018-0053}, abstract = {Zusammenfassung In diesem Beitrag werden Spezifika der Hochschulen und Ausbildungseinrichtungen, die in der KIBA organisiert sind, mit ihren Studiengängen, Weiterbildungsprogrammen, Forschungsschwerpunkten und didaktischen Konzepten vorgestellt. Es wird gezeigt, wie diese Einrichtungen mit ihrer Berufungs- und Einstellungspolitik, strategischen Allianzen und übergeordneten fachlichen und politischen Zusammenschlüssen sowie mit der Profilierung ihrer Studiengänge auf neue Anforderungen des Marktes und der Berufspraxis reagieren. Berücksichtigt werden dabei Positionen und Strategien zur Digitalisierung aus der Politik sowie ihren Beratungsgremien, in der sich die Inhalte bibliotheks- und informationswissenschaftlicher Ausbildung und Forschung verorten lassen. Insgesamt wird deutlich, wie schwierig es heute ist zu definieren, was die Bibliotheks- und die Informationswissenschaft im Kern ausmacht, um im Spannungsfeld der Herausforderungen an wissenschaftliche und öffentliche Bibliotheken, den Anforderungen der Wirtschaft im Bereich Informations- und Wissensmanagement, der Digitalisierung und Langzeitarchivierung von Kulturerbe, um nur einige Felder zu nennen, Ausbildungsprogramme bedarfsgerecht zu profilieren und die bibliotheks- und informationswissenschaftlichen Institute, Fachbereiche und Ausbildungseinrichtungen politisch abzusichern und ausreichend mit Ressourcen auszustatten.}, number = {3}, journal = {Bibliothek Forschung und Praxis}, author = {Gantert, Klaus and Neher, Günther and Schade, Frauke}, month = nov, year = {2018}, pages = {441--452}, file = {Gantert et al. - 2018 - Die digitale Transformation meistern.pdf:C\:\\Users\\carst\\Zotero\\storage\\I968JI8F\\Gantert et al. 
- 2018 - Die digitale Transformation meistern.pdf:application/pdf}, } @article{noauthor_bund-lander-vereinbarung_2018, title = {Bund-{Länder}-{Vereinbarung} zu {Aufbau} und {Förderung} einer {Nationalen} {Forschungsdateninfrastruktur} ({NFDI}) vom 26. {November} 2018}, url = {https://www.gwk-bonn.de/fileadmin/Redaktion/Dokumente/Papers/NFDI.pdf}, language = {de}, journal = {Bundesanzeiger (BAnz) AT 21.12.2018 B10}, month = dec, year = {2018}, pages = {5}, file = {2018 - Bund-Länder-Vereinbarung zu Aufbau und Förderung e.pdf:C\:\\Users\\carst\\Zotero\\storage\\EQF88AGG\\2018 - Bund-Länder-Vereinbarung zu Aufbau und Förderung e.pdf:application/pdf}, } @book{noauthor_cessda-training_nodate, title = {Cessda-{Training} 2020}, url = {https://www.gesis.org/angebot/archivieren-und-registrieren/cessda-training}, } @book{noauthor_praxis_nodate, title = {Praxis kompakt/{FDM} in den {Bundesländern}}, url = {https://www.forschungsdaten.info/fdm-im-deutschsprachigen-raum/}, abstract = {In diesem Bereich stellen die einzelnen Länderinitiativen aus Deutschland und Österreich ihre Projekte, Inititativen und Aktivitäten im Bereich Forschungsdatenmanagement vor. 
Außerdem finden Sie Informationen zu NFDI und eine Kontaktübersicht zu FDM-Expertinnen und Experten.}, language = {de}, note = {Publication Title: FDM im deutschsprachigen Raum}, file = {FDM im deutschsprachigen Raum | Forschungsdaten und Forschungsdatenmanagement:C\:\\Users\\carst\\Zotero\\storage\\C95CA3VZ\\fdm-im-deutschsprachigen-raum.html:text/html}, } @book{ritz_rda_nodate, title = {{RDA} {Deutschland} {Tagung} 2019}, url = {https://www.rda-deutschland.de/events/tagung-2019}, abstract = {Jahrestreffen der deutschen RDA Community}, language = {de}, author = {Ritz, Raphael}, note = {Publication Title: RDA Deutschland}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\VYR48PEL\\tagung-2019.html:text/html}, } @techreport{technische_hochschule_wildau_studiengang_2017, address = {Wildau}, type = {Modulkatalog}, title = {Studiengang „{Bibliotheksinformatik}“ {Master} of {Science}, {Modulkatalog}}, copyright = {© 2017 Technische Hochschule Wildau [FH]}, url = {https://docs.wixstatic.com/ugd/eb06ec_19f8be0b961e49269051fde0b8eec3d3.pdf}, language = {de}, author = {{Technische Hochschule Wildau}}, year = {2017}, pages = {37}, } @phdthesis{zellmann_bibliothekare_2018, address = {Hannover}, type = {Bachelor {Thesis}}, title = {Bibliothekare und {Informationswissenschaftler} mit {IT}-{Schwerpunkt} in {Deutschland} – {Bedarf}, {Aufgaben}, {Kompetenzanforderungen} und {Vergleich} der {IT}-{Kompetenzen} in {Praxis} und {Studium}}, copyright = {Creative Commons - Namensnennung-Nicht kommerziell 4.0}, url = {http://nbn-resolving.de/urn:nbn:de:bsz:960-opus4-12648}, abstract = {Die vorliegende Bachelorarbeit untersucht mittels Stellenanzeigenanalyse von Bibliothekaren mit IT-Schwerpunkt (System- und IT-Bibliothekare, Bibliotheksinformatiker, Data Librarians etc.) 
sowie anhand eines Vergleichs der Curricula informationswissenschaftlicher Studiengänge, inwieweit den informationstechnischen Anforderungen der modernen Arbeitswelt in der Hochschulausbildung hinreichend Rechnung getragen wird. Zu diesem Zweck liegen 179 IT-bibliothekarische Stellen der Jahre 2012-2017 von OpenBiblioJobs vor, die u. a. hinsichtlich Bedarf, Aufgaben, Kompetenzanforderungen sowie weiteren Inhalten zum Beschäftigungsverhältnis ausgewertet sind. Die analysierten IT-Kompetenzen sind den Modulinhalten von 14 Bachelor- und 9 Masterstudiengängen gegenübergestellt. Das durchschnittliche Jahreswachstum der IT-lastigen Stellenangebote im Untersuchungszeitraum liegt bei 38,25 \% und bestätigt damit den Bedarf an Absolventen bibliothekarischer IT-Profile. Systemorientierte Stellen benötigen u. a. vertiefte Kompetenzen mit bibliothekarischen Informationssystemen, den dort vorkommenden Daten(-banken) und Kenntnisse in der Programmierung, um die Konzeption, Administration und den Support dieser Systeme zu gewährleisten. Diese IT-Kompetenzen werden passend zum vorausgesetzten Abschluss (Bachelor) und der Vergütung (E9-12) in vielen Bachelorstudiengängen in grundlegender Form vermittelt, oft jedoch nur als Wahlpflichtmodule. Datenorientierte Stellen setzen überwiegend den Master voraus und sind dementsprechend ab E13 eingruppiert. Neben Kompetenzen im gesamten Bereich des Datenmanagements (samt Meta- und Forschungsdaten), Fähigkeiten in der Analyse von Daten und deren Visualisierung sowie in Big und Linked Data, sind insbesondere umfassende Kenntnisse im Wissenschaftsbetrieb gefordert. Im Gegensatz zu systemorientierten Stellen, für die passende Studiengänge mit vertieften IT-Inhalten existieren, fehlt bislang ein Masterstudiengang für das Datenmanagement, der die nötigen Kompetenzen konzertiert beinhaltet – nur einzelne Bereiche werden bislang abgedeckt. 
Die Arbeit richtet sich an Interessierte im Informationsbereich, Koordinatoren bibliothekarischer Studiengänge sowie Bibliotheksverbände.}, language = {de}, school = {Hochschule Hannover, Fakultät III - Medien, Information und Design}, author = {Zellmann, Cedrik}, year = {2018}, doi = {10.25968/opus-1264}, file = {Zellmann - 2018 - Bibliothekare und Informationswissenschaftler mit .pdf:C\:\\Users\\carst\\Zotero\\storage\\YEPVGCGX\\Zellmann - 2018 - Bibliothekare und Informationswissenschaftler mit .pdf:application/pdf}, } @book{heiss_sciebo_nodate, title = {Sciebo {RDS}: {Research} data services}, url = {https://www.research-data-services.org/de/}, abstract = {Das DFG-Projekt sciebo RDS (Research Data Services) hat es sich zum Ziel gesetzt, niederschwellige Dienste und Werkzeuge aus dem Forschungsdatenmanagement und der wissenschaftlichen Analyse auf Basis von Forschungsdaten dorthin zu bringen, wo die Forschenden bereits jetzt mit ihren Daten umgehen – in die Hochschulcloud „sciebo“.}, language = {de}, author = {Heiss, Peter}, note = {Publication Title: Sciebo RDS}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\CAFIHLXZ\\de.html:text/html}, } @techreport{hochschulrektorenkonferen_forschungsdatenmanagement_2016, address = {Berlin ; Bonn}, type = {Pressemitteilung}, title = {Forschungsdatenmanagement: {Deutschland} muss aufholen – {Impulse} von {Bund} und {Ländern} unverzichtbar}, url = {https://www.hrk.de/fileadmin/redaktion/hrk/02-Dokumente/02-02-PM/HRK_PM_Workshop_Forschungsdatenmanagement_16122016.pdf}, language = {de}, number = {53/2016}, author = {{Hochschulrektorenkonferenz}}, month = dec, year = {2016}, pages = {[1]}, file = {Hochschulrektorenkonferen - 2016 - Forschungsdatenmanagement Deutschland muss aufhol.pdf:C\:\\Users\\carst\\Zotero\\storage\\I2JED6EK\\Hochschulrektorenkonferen - 2016 - Forschungsdatenmanagement Deutschland muss aufhol.pdf:application/pdf}, } @book{krahwinkel_fokus_nodate, title = {{FOKUS} - {Forschungsdatenkurse} für 
{Studierende} und {Graduierte}}, url = {https://www.uni-marburg.de/de/forschung/kontakt/eresearch/projekte-und-netzwerke/fokus}, abstract = {FOKUS (Forschungsdatenkurse für Studierende und Graduierte) war ein BMBF-gefördertes Projekt zur Konzeption und Erstellung von disziplinspezifischen, nachnutzbaren Lehr- und Lernmodulen für Studierende und Graduierte.}, language = {de}, author = {Krähwinkel, Esther}, note = {Publication Title: Philipps-Universität Marburg}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\ENIIRER5\\fokus.html:text/html}, } @book{noauthor_projekt_nodate, title = {Projekt {EWiG}: {Erziehungswissenschaftliche} {Wissensgeschichte}}, url = {http://projektewig.uni-goettingen.de/}, abstract = {Das Projekt „Erziehungswissenschaftliche Wissensgeschichte 1750-2000“ (EWiG) ist ein durch die Deutsche Forschungsgemeinschaft (DFG) gefördertes Forschungsprojekt, das sich mit der Analyse von wissenschaftlich-pädagogischer bzw. erziehungswissenschaftlicher Einführungs- und Grundlagenliteratur (z.B. Vorlesungssammlungen, Lehrbücher, Klassikersammlungen etc.) befasst. 
Ein Ziel des Projektes ist die digitale Dokumentation aller Einführungen, Klassiker und Geschichten der wissenschaftlichen Pädagogik seit 1750.}, language = {de-DE}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\RL5VVX4S\\projektewig.uni-goettingen.de.html:text/html}, } @book{noauthor_servicezentrum_nodate, title = {Servicezentrum digital gestützte {Forschung}: {Projekte} \& {Netzwerke}}, url = {https://www.uni-marburg.de/de/forschung/kontakt/eresearch/projekte-und-netzwerke}, abstract = {Eigene und Verbundsprojekte, sowie Informationen zum Forum digital gestützte Forschung}, language = {de}, note = {Publication Title: Philipps-Universität Marburg}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\CQCD6ZU5\\projekte-und-netzwerke.html:text/html}, } @techreport{presse-_und_informationsamt_der_bundesregierung_digitalisierung_2019, address = {Berlin}, title = {Digitalisierung gestalten: {Umsetzungsstrategie} der {Bundesregierung}}, url = {https://www.bildung-forschung.digital/files/pdf-umsetzungsstrategie-digitalisierung-data.pdf}, language = {de}, number = {4. 
überarbeitete Auflage}, author = {{Presse- und Informationsamt der Bundesregierung}}, year = {2019}, pages = {168}, file = {Presse- und Informationsamt der Bundesregierung - 2019 - Digitalisierung gestalten Umsetzungsstrategie der.pdf:C\:\\Users\\carst\\Zotero\\storage\\4MLIER6L\\Presse- und Informationsamt der Bundesregierung - 2019 - Digitalisierung gestalten Umsetzungsstrategie der.pdf:application/pdf}, } @book{institution_pas_nodate, title = {{PAS} 182 {Smart} city concept model – {Guide} to establishing a model for data interoperability}, url = {https://www.bsigroup.com/en-GB/smart-cities/Smart-Cities-Standards-and-Publication/PAS-182-smart-cities-data-concept-model/}, language = {en}, author = {{The British Standards Institution}}, } @book{both_berliner_2011, address = {Berlin}, title = {Berliner {Open} {Data} {Strategie}}, publisher = {Fraunhofer-Verl}, author = {Both, Wolfgang and Schieferdecker, Ina Kathrin}, year = {2011}, } @book{noauthor_plattform_nodate, title = {Die {Plattform} für {Citizen} {Science}}, url = {https://www.buergerschaffenwissen.de/}, language = {de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\P67L9BAT\\www.buergerschaffenwissen.de.html:text/html}, } @incollection{catal_urbane_2018, address = {Berlin}, edition = {1. Auflage}, series = {Beuth {Innovation}}, title = {Urbane {Datenplattformen} in der {Cloud}}, isbn = {978-3-410-27681-4}, language = {de}, booktitle = {Mensch und {Technik} in der {Smart} {City}: {Die} menschliche {Smart} {City}}, publisher = {Beuth Verlag GmbH}, author = {Catal, Faruk and Lämmel, Philipp and Schieferdecker, Ina and Tcholtchev, Nikolay}, year = {2018}, pages = {143--158}, } @book{catal_mensch_2018, address = {Berlin}, edition = {1. 
Auflage}, series = {Beuth {Innovation}}, title = {Mensch und {Technik} in der {Smart} {City}: {Die} menschliche {Smart} {City}}, isbn = {978-3-410-27681-4}, language = {de}, publisher = {Beuth Verlag GmbH}, author = {Catal, Faruk and Drescher, Burkhard and Eickhoff, Antje and Fehling, Thomas and Haist, Karin and Hellweg, Uli and Jursch, Ulrich and Kahl, Holger and Kemmerzehl, Richard and Klaus, Agata and Kleewein, Klaus and Kreitsch, Thomas and Lämmel, Philipp and Mader, Michael and Mienkus, Rolf and Möhlendick, Barbara and Müller, Christian and Pahl-Weber, Elke and Schieferdecker, Ina and Schonowski, Joachim and Tank, Ralf and Tcholtchev, Nikolay and Weis, Matthias}, editor = {Hertzsch, Eckhart and Heuser, Lutz}, year = {2018}, } @book{bundesnetzagentur_fur_elektrizitat_daten_2018, address = {Bonn}, title = {Daten als {Wettbewerbs}-und {Wertschöpfungsfaktor} in den {Netzsektoren}: {Eine} {Analyse} vor dem {Hintergrund} der digitalen {Transformation}, {Bonn}: {Bundesnetzagentur} für {Elektrizität}, {Gas}, {Telekommunikation}, {Post} und {Eisenbahnen}}, url = {https://www.bundesnetzagentur.de/SharedDocs/Downloads/DE/Allgemeines/Bundesnetzagentur/Publikationen/Berichte/2018/Digitalisierung.pdf?__blob=publicationFile&v=4}, language = {de}, author = {{Bundesnetzagentur für Elektrizität, Gas, Telekommunikation, Post und Eisenbahnen}}, year = {2018}, file = {Bundesnetzagentur für Elektrizität, Gas, Telekommunikation, Post und Eisenbahne - 2018 - Daten als Wettbewerbs-und Wertschöpfungsfaktor in .pdf:C\:\\Users\\carst\\Zotero\\storage\\YM2KE59C\\Bundesnetzagentur für Elektrizität, Gas, Telekommunikation, Post und Eisenbahne - 2018 - Daten als Wettbewerbs-und Wertschöpfungsfaktor in .pdf:application/pdf}, } @book{noauthor_cc_nodate, title = {{CC} creative commons}, url = {https://creativecommons.org/}, language = {en-US}, note = {Publication Title: Creative Commons}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\637VXE8J\\creativecommons.org.html:text/html}, } 
@book{noauthor_european_2017-1, title = {European {Data} {Portal}}, url = {https://www.europeandataportal.eu/en}, year = {2017}, file = {Home | European Data Portal:C\:\\Users\\carst\\Zotero\\storage\\MN6QKTF7\\en.html:text/html}, } @article{noauthor_richtlinie_nodate, title = {Richtlinie ({EU}) 2019/1024 des {Europäischen} {Parlaments} und des {Rats} vom 20. {Juni} 2019 über offene {Daten} und die {Weiterverwendung} von {Informationen} des öffentlichen {Sektors}}, url = {https://eur-lex.europa.eu/legal-content/DE/TXT/HTML/?uri=CELEX:32019L1024&from=DE}, language = {de}, number = {L 172/56}, journal = {Amtsblatt der Europäischen Union}, } @techreport{commission_building_2017, address = {Brussels}, type = {Communication from the {Commission} to the {European} {Parliament}, the {Council}, the {European} {Economic} and {Social} {Committee} and the {Committee} of the {Regions}}, title = {Building a {European} {Data} {Economy}}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:52017DC0009}, language = {en}, institution = {European Commission}, author = {{European Commission}}, month = jan, year = {2017}, pages = {18}, file = {2017 - Building a European Data Economy.pdf:C\:\\Users\\carst\\Zotero\\storage\\AJX5JTS7\\2017 - Building a European Data Economy.pdf:application/pdf}, } @techreport{european_commission_towards_2018, address = {Brussels}, type = {Communication from the {Commission} to the {European} {Parliament}, the {Council}, the {European} {Economic} and {Social} {Committee} and the {Committee} of the {Regions}}, title = {Towards a common {European} data space}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:52018DC0232}, language = {en}, institution = {European Commission}, author = {{European Commission}}, month = apr, year = {2018}, pages = {14}, file = {2018 - Towards a common European data space.pdf:C\:\\Users\\carst\\Zotero\\storage\\3GDECLDL\\2018 - Towards a common European data space.pdf:application/pdf}, } 
@book{das_fraunhofer-institut_fur_offene_kommunikationssysteme_fokus_piveau_nodate, title = {piveau}, url = {https://www.piveau.de/}, abstract = {piveau ist ein Datenmanagement-Ökosystem für den öffentlichen Sektor. Es bietet Komponenten und Werkzeuge für die Unterstützung der kompletten Verarbeitungskette von der Erfassung, Aggregation, Bereitstellung bis zur Nutzung der Daten.}, language = {de ; en}, author = {{Das Fraunhofer-Institut für Offene Kommunikationssysteme FOKUS}}, file = {piveau:C\:\\Users\\carst\\Zotero\\storage\\SSWX5T8L\\www.piveau.de.html:text/html}, } @article{geiger_open_2012, title = {Open {Government} and ({Linked}) ({Open}) ({Government}) ({Data})}, volume = {4}, issn = {2075-9517}, url = {https://www.jedem.org/index.php/jedem/article/view/143}, doi = {10.29379/jedem.v4i2.143}, language = {en}, number = {2}, journal = {JeDEM - eJournal of eDemocracy and Open Government}, author = {Geiger, Christian Philipp and Lucke, Jörn von}, month = dec, year = {2012}, pages = {265--278}, file = {Geiger und Lucke - 2012 - Open Government and (Linked) (Open) (Government) (.pdf:C\:\\Users\\carst\\Zotero\\storage\\E4UD69FC\\Geiger und Lucke - 2012 - Open Government and (Linked) (Open) (Government) (.pdf:application/pdf}, } @incollection{helene_govdata_2014, title = {{GovData} - {Das} {Datenportal} für {Deutschland}}, isbn = {978-3-8487-1131-4}, booktitle = {Transparenz, {Partizipation}, {Kollaboration}}, publisher = {Nomos Verlagsgesellschaft mbH \& Co. 
KG}, author = {Helene, Maria}, editor = {Hill, Hermann and Martini, Mario and Wagner, Edgar}, year = {2014}, doi = {10.5771/9783845252636-109}, pages = {109--116}, } @book{noauthor_infrastructure_nodate, title = {Infrastructure for spatial information in {Europe}}, url = {https://inspire.ec.europa.eu/}, note = {Publication Title: INSPIRE Knowledge Base}, file = {INSPIRE | Welcome to INSPIRE:C\:\\Users\\carst\\Zotero\\storage\\SQG7MQKK\\inspire.ec.europa.eu.html:text/html}, } @article{kim_creative_2017, title = {The {Creative} {Commons} and {Copyright} {Protection} in the {Digital} {Era}: {Uses} of {Creative} {Commons} {Licenses}}, volume = {13}, shorttitle = {The {Creative} {Commons} and {Copyright} {Protection} in the {Digital} {Era}}, url = {https://academic.oup.com/jcmc/article/13/1/187/4583060}, doi = {10.1111/j.1083-6101.2007.00392.x}, abstract = {As digital technology thrusts complexity upon copyright law, conflict has escalated between copyright holders desperate to institute a vigorous enforcement mechanism against copying in order to protect their ownership and others who underscore the importance of public interests in accessing and using copyrighted works. This study explores whether Creative Commons (CC) licenses are a viable solution for copyright protection in the digital era. Through a mixed-methods approach involving a web-based survey of CC licensors, a content analysis of CC-licensed works, and interviews, the study characterizes CC licensors, the ways that CC licensors produce creative works, the private interests that CC licenses serve, and the public interests that CC licenses serve. 
The findings suggest that the Creative Commons can alleviate some of the problems caused by the copyright conflict.}, language = {en}, number = {1}, journal = {Journal of Computer-Mediated Communication}, author = {Kim, Minjeong}, month = oct, year = {2007}, pages = {187--209}, file = {Kim - 2017 - The Creative Commons and Copyright Protection in t.pdf:C\:\\Users\\carst\\Zotero\\storage\\JBIWC42U\\Kim - 2017 - The Creative Commons and Copyright Protection in t.pdf:application/pdf}, } @book{klessmann_open_2012, title = {Open {Government} {Data} {Deutschland}: eine {Studie} zu {Open} {Government} in {Deutschland} im {Auftrag} des {Bundesministerium} des {Innern}}, shorttitle = {Open {Government} {Data} {Deutschland}}, url = {https://www.bmi.bund.de/SharedDocs/Downloads/DE/Themen/OED_Verwaltung/ModerneVerwaltung/opengovernment.pdf?__blob=publicationFile}, language = {de}, publisher = {Deutschland / Bundesministerium}, author = {Klessmann, Jens and Denker, Philipp and Schieferdecker, Ina and Schulz, Sönke E and Hoepner, Petra and Lapi, Evanela and Marienfeld, Florian and Müller, Lena-Sophie and Tcholtchev, Nikolay and Rein-Fischböck, Katharina}, year = {2012}, doi = {10.13140/RG.2.1.4506.6321}, file = {Klessmann et al. - 2012 - Open Government Data Deutschland eine Studie zu O.pdf:C\:\\Users\\carst\\Zotero\\storage\\C4Z82FWA\\Klessmann et al. 
- 2012 - Open Government Data Deutschland eine Studie zu O.pdf:application/pdf}, } @book{krcmar_informationsmanagement_2015, address = {Berlin, Heidelberg}, title = {Informationsmanagement}, isbn = {978-3-662-45862-4 978-3-662-45863-1}, language = {de}, publisher = {Springer Berlin Heidelberg}, author = {Krcmar, Helmut}, year = {2015}, doi = {10.1007/978-3-662-45863-1}, } @book{kuzev_open_2016, address = {Berlin}, title = {Open {Data}: die wichtigsten {Fakten} zu offenen {Daten}: {Grundlagen}, {Rahmenbedingungen} und {Beispiele} zur {Nutzung} von {Open} {Data}}, url = {https://www.kas.de/de/einzeltitel/-/content/open-data1}, language = {de}, publisher = {Konrad-Adenauer-Stiftung}, author = {Kuzev, Pencho}, year = {2016}, file = {Kuzev - 2016 - Open Data die wichtigsten Fakten zu offenen Daten.pdf:C\:\\Users\\carst\\Zotero\\storage\\3ZGBE2KV\\Kuzev - 2016 - Open Data die wichtigsten Fakten zu offenen Daten.pdf:application/pdf}, } @book{noauthor_openstreetmap_nodate, title = {{OpenStreetMap}}, url = {https://www.openstreetmap.org/}, abstract = {OpenStreetMap ist eine Karte der Welt, erstellt von Menschen wie dir und frei verwendbar unter einer offenen Lizenz.}, language = {de}, note = {Publication Title: OpenStreetMap}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\SYDR77JH\\www.openstreetmap.org.html:text/html}, } @inproceedings{schieferdecker_urban_2016, address = {New York, NY, USA}, series = {{OpenSym} '16}, title = {Urban {Data} {Platforms}: {An} {Overview}}, isbn = {978-1-4503-4481-4}, shorttitle = {Urban {Data} {Platforms}}, url = {https://doi.org/10.1145/2962132.2984894}, doi = {10.1145/2962132.2984894}, abstract = {Along the increasing digitization and interconnection in almost every domain in society or business, data is growing exponentially. It is expected that the worldwide Internet traffic will triple until 2020 in comparison to 2015. 
In the same time, the transmitted data volume will move from 53,2 Exabytes per months to 161 Exabytes per months [Cisco, 2016]. Cities and communities can support the provisioning and usage of urban data and benefit from resulting new services for the monitoring, understanding, decision making, steering, and control. Providing urban data is also supported by the ongoing movement of opening governmental data, but goes beyond. Urban data can include data from public, industrial, scientific or private sources. Yet, the design of urban data is still ongoing and numerous initiatives and standardization efforts on smart cities and communities put the grounds for the uptake and interoperability of urban data.}, language = {en}, booktitle = {Proceedings of the 12th {International} {Symposium} on {Open} {Collaboration} {Companion}}, publisher = {Association for Computing Machinery}, author = {Schieferdecker, Ina and Tcholtchev, Nikolay and Lämmel, Philipp}, month = aug, year = {2016}, pages = {1--4}, } @techreport{schieferdecker_handreichung_2019, address = {Berlin}, title = {Handreichung zur {Studie}: {Urbane} {Datenräume} - {Möglichkeiten} von {Datenaustausch} und {Zusammenarbeit} im urbanen {Raum}}, copyright = {© Fraunhofer FOKUS, Berlin, 2019}, url = {https://cdn0.scrvt.com/fokus/702aa1480e55b335/bc8c65c81a42/190311_Handreichung_UDR_02.pdf}, language = {de}, institution = {Fraunhofer FOKUS}, author = {Schieferdecker, Ina and Bruns, Lina and Cuno, Silke and Flügge, Matthias and Isakovic, Karsten and Klessman, Jens and Lämmel, Philipp and Stadtkewit, Dustin and Tcholtchev, Nikolay and Lange, Christoph and Imbusch, Benedikt T. and Strauß, Leonie and Vastag, Alex and Flocke, Florian and Kraft, Volker}, year = {2019}, pages = {24}, file = {Schieferdecker et al. - 2019 - Handreichung zur Studie Urbane Datenräume - Mögli.pdf:C\:\\Users\\carst\\Zotero\\storage\\YMHQV3MF\\Schieferdecker et al. 
- 2019 - Handreichung zur Studie Urbane Datenräume - Mögli.pdf:application/pdf}, } @techreport{schieferdecker_urbane_2018, address = {Berlin}, title = {Urbane {Datenräume} - {Möglichkeiten} von {Datenaustausch} und {Zusammenarbeit} im urbanen {Raum}}, copyright = {© Fraunhofer FOKUS, Berlin, 2018}, url = {https://cdn0.scrvt.com/fokus/774af17bdc0a18cd/69f7a401c168/UDR_Studie_062018.pdf}, language = {de}, institution = {Fraunhofer FOKUS}, author = {Schieferdecker, Ina and Bruns, Lina and Cuno, Silke and Flügge, Matthias and Isakovic, Karsten and Klessman, Jens and Lämmel, Philipp and Stadtkewit, Dustin and Tcholtchev, Nikolay and Lange, Christoph and Imbusch, Benedikt T. and Strauß, Leonie and Vastag, Alex and Flocke, Florian and Kraft, Volker}, year = {2018}, pages = {250}, file = {Schieferdecker et al. - 2018 - Urbane Datenräume - Möglichkeiten von Datenaustaus.pdf:C\:\\Users\\carst\\Zotero\\storage\\4N9AHWGB\\Schieferdecker et al. - 2018 - Urbane Datenräume - Möglichkeiten von Datenaustaus.pdf:application/pdf}, } @incollection{niedbal_smart_2020, address = {Wiesbaden}, title = {„{Smart} {Cities}“ als Überbegriff für eine lebenswerte, komfortable und {Teilhabe} ermöglichende {Umgebung}}, isbn = {978-3-658-27232-6}, url = {https://doi.org/10.1007/978-3-658-27232-6_49}, abstract = {Der Verkehr in Städten steht vor einer fundamentalen Transformation. Das Internet der Dinge und die vielfach gewonnenen Daten bilden dabei das Rückgrat der Städte. Sharing- und On-Demand-Angebote, die in den öffentlichen Verkehr integriert sind, reduzieren die verkehrliche Belastung. Intelligente Schließfächer und Lastenfahrräder fungieren als wichtiger Bestandteil der innerstädtischen Logistik von morgen. Bahnhöfe entwickeln sich zur multimodalen Mobilitätsplattform weiter und sind ein zentraler Ort in Städten. 
Sie bieten Aufenthaltsqualität und dienen Menschen im Rahmen von Coworking-Angeboten als flexibler, dezentraler Arbeitsplatz.}, language = {de}, booktitle = {Smart {City} – {Made} in {Germany}: {Die} {Smart}-{City}-{Bewegung} als {Treiber} einer gesellschaftlichen {Transformation}}, publisher = {Springer Fachmedien}, author = {Niedbal, Meike}, editor = {Etezadzadeh, Chirine}, year = {2020}, doi = {10.1007/978-3-658-27232-6_49}, pages = {469--484}, } @book{noauthor_openseamap_nodate, title = {{OpenSeaMap} - die freie {Seekarte}}, url = {https://www.openseamap.org}, file = {OpenSeaMap\: Startseite:C\:\\Users\\carst\\Zotero\\storage\\7A2CDCPB\\index.html:text/html}, } @book{noauthor_data_nodate-2, title = {Data {Platform} – {Open} {Power} {System} {Data}}, url = {https://data.open-power-system-data.org/}, abstract = {This is the Open Power System Data platform. We provide European power system data in five packages.}, language = {en}, file = {Data Platform – Open Power System Data:C\:\\Users\\carst\\Zotero\\storage\\74UNDW8E\\data.open-power-system-data.org.html:text/html}, } @book{senatsverwaltung_fur_wirtschaft_energie_und_betriebe_berlin_nodate, title = {Berlin {Open} {Data} {Portal}}, url = {https://daten.berlin.de/}, language = {de}, author = {{Senatsverwaltung für Wirtschaft, Energie und Betriebe}}, file = {Offene Daten Berlin | Offene Daten lesbar für Mensch und Maschine. 
Das ist das Ziel.:C\:\\Users\\carst\\Zotero\\storage\\6XMK7V6X\\daten.berlin.de.html:text/html}, } @book{noauthor_stromnetz_2012, title = {Stromnetz {Berlin}}, url = {http://www.netzdaten-berlin.de}, year = {2012}, note = {Publication Title: Netzdaten Berlin – das Pilotportal}, } @book{preische_digitales_2014, address = {Berlin}, edition = {Redaktionsschluss: Oktober 2013}, series = {Daten und {Fakten}}, title = {Digitales {Gold}}, language = {de}, author = {Preische, Jens}, editor = {{TSB Technologiestiftung Berlin}}, year = {2014}, file = {Preische - 2014 - Digitales Gold.pdf:C\:\\Users\\carst\\Zotero\\storage\\AEMF5NSB\\Preische - 2014 - Digitales Gold.pdf:application/pdf}, } @book{wissenschaftlicher_beirat_der_bundesregierung_globale_umweltveranderungen_unsere_2019, title = {Unsere gemeinsame digitale {Zukunft}}, isbn = {978-3-946830-02-3}, url = {https://www.wbgu.de/de/publikationen/publikation/unsere-gemeinsame-digitale-zukunft}, language = {de}, author = {{Wissenschaftlicher Beirat der Bundesregierung Globale Umweltveränderungen}}, year = {2019}, } @book{sunlight_foundation_ten_2010, address = {Washington}, title = {Ten principles for opening up government information}, url = {https://sunlightfoundation.com/policy/documents/ten-open-data-principles/}, language = {en}, publisher = {Sunlight Foundation}, author = {{Sunlight Foundation}}, year = {2010}, } @book{noauthor_wheelmap_nodate, title = {Wheelmap}, url = {https://wheelmap.org}, abstract = {Wheelmap is an online map to search, find and mark wheelchair-accessible places. 
Get involved by marking public places like bars, restaurants, cinemas or supermarkets.}, language = {de}, note = {Publication Title: Wheelmap}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\LBMFR3CF\\wheelmap.org.html:text/html}, } @article{lerner_revolution_2018, title = {Revolution in {Health} {Care}: {How} {Will} {Data} {Science} {Impact} {Doctor}–{Patient} {Relationships}?}, volume = {6}, issn = {2296-2565}, shorttitle = {Revolution in {Health} {Care}}, url = {https://www.frontiersin.org/articles/10.3389/fpubh.2018.00099/full}, doi = {10.3389/fpubh.2018.00099}, abstract = {Revolution in Health Care: How Will Data Science Impact Doctor–Patient Relationships?}, language = {en}, journal = {Frontiers in Public Health}, author = {Lerner, Ivan and Veil, Raphaël and Nguyen, Dinh-Phong and Luu, Vinh Phuc and Jantzen, Rodolphe}, year = {2018}, file = {Lerner et al. - 2018 - Revolution in Health Care How Will Data Science I.pdf:C\:\\Users\\carst\\Zotero\\storage\\K2SS22PX\\Lerner et al. - 2018 - Revolution in Health Care How Will Data Science I.pdf:application/pdf}, } @inproceedings{schieferdecker_towards_2017, title = {Towards an {Open} {Data} {Based} {ICT} {Reference} {Architecture} for {Smart} {Cities}}, doi = {10.1109/CeDEM.2017.18}, abstract = {Given that ICT is at the heart of today's Smart City approach, it is of paramount importance to investigate concepts, which would enable the unification, the common understanding and the replication of ICT architectures/solutions/models across multiple cities. This unified and replicable approach can be best achieved by a very abstract model, aiming to capture the taxonomy and high-level structure of complex integrative ICT solutions for Smart Cities. The approach should be based on the idea of openness with respect to interfaces, software components and especially data, which is to be seen as the main ingredient of an ICT eco-system for Smart Cities. 
This paper presents an Open Data based ICT Reference Architecture for Smart Cities, which is developed within the EU project Triangulum [1].}, booktitle = {2017 {Conference} for {E}-{Democracy} and {Open} {Government} ({CeDEM})}, author = {Schieferdecker, Ina and Tcholtchev, Nikolay and Lämmel, Philipp and Scholz, Robert and Lapi, Evanela}, month = may, year = {2017}, pages = {184--193}, } @book{das_fraunhofer-institut_fur_offene_kommunikationssysteme_fokus_wikipedia_nodate, title = {Wikipedia}, url = {https://www.wikipedia.org/}, author = {{Das Fraunhofer-Institut für Offene Kommunikationssysteme FOKUS}}, file = {Wikipedia:C\:\\Users\\carst\\Zotero\\storage\\W6UZRNEE\\www.wikipedia.org.html:text/html}, } @article{kindling_landscape_2017, title = {The {Landscape} of {Research} {Data} {Repositories} in 2015: {A} re3data {Analysis}}, volume = {23}, issn = {1082-9873}, shorttitle = {The {Landscape} of {Research} {Data} {Repositories} in 2015}, url = {http://www.dlib.org/dlib/march17/kindling/03kindling.html}, doi = {10.1045/march2017-kindling}, abstract = {This article provides a comprehensive descriptive and statistical analysis of metadata information on 1,381 research data repositories worldwide and across all research disciplines. The analyzed metadata is derived from the re3data database, enabling search and browse functionalities for the global registry of research data repositories. The analysis focuses mainly on institutions that operate research data repositories, types and subjects of research data repositories (RDR), access conditions as well as services provided by the research data repositories. RDR differ in terms of the service levels they offer, languages they support or standards they comply with. These statements are commonly acknowledged by saying the RDR landscape is heterogeneous. 
As expected, we found a heterogeneous RDR landscape that is mostly influenced by the repositories' disciplinary background for which they offer services.}, language = {en}, number = {3/4}, journal = {D-Lib Magazine}, author = {Kindling, Maxi and Pampel, Heinz and van de Sandt, Stephanie and Rücknagel, Jessika and Vierkant, Paul and Kloska, Gabriele and Witt, Michael and Schirmbacher, Peter and Bertelmann, Roland and Scholze, Frank}, year = {2017}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\PSSMBQ2K\\Kindling et al. - 2017 - The Landscape of Research Data Repositories in 201.pdf:application/pdf}, } @article{kaden_warum_2018, title = {Warum {Forschungsdaten} nicht publiziert werden}, copyright = {Creative Commons BY 3.0}, issn = {1860-7950}, url = {https://edoc.hu-berlin.de/bitstream/handle/18452/20046/kaden-fd.pdf?sequence=1&isAllowed=y}, doi = {10.18452/19284}, language = {de}, journal = {LIBREAS. Library Ideas}, author = {Kaden, Ben}, year = {2018}, pages = {8}, file = {Kaden - 2018 - Warum Forschungsdaten nicht publiziert werden.pdf:C\:\\Users\\carst\\Zotero\\storage\\WU63T7RT\\Kaden - 2018 - Warum Forschungsdaten nicht publiziert werden.pdf:application/pdf}, } @incollection{hagendorff_open_2016, address = {Stuttgart}, title = {Open {Data}}, isbn = {978-3-476-05394-7}, booktitle = {Handbuch {Medien}- und {Informationsethik}}, publisher = {J.B. 
Metzler}, author = {Hagendorff, Thilo}, editor = {Heesen, Jessica}, year = {2016}, pages = {227--233}, } @book{herb_uberwachungskapitalismus_nodate, title = {Überwachungskapitalismus und {Wissenschaftssteuerung}}, url = {https://www.heise.de/tp/features/Ueberwachungskapitalismus-und-Wissenschaftssteuerung-4480357.html}, abstract = {Die Metamorphose des Wissenschaftsverlags Elsevier zum Research Intelligence Dienstleister ist paradigmatisch für die neuen Möglichkeiten der Protokollierung und Steuerung von Wissenschaft}, language = {de}, author = {Herb, Ulrich}, note = {Publication Title: Telepolis}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\VNMG3XVS\\Ueberwachungskapitalismus-und-Wissenschaftssteuerung-4480357.html:text/html}, } @incollection{immenhauser_habent_2018, title = {habent sua fata data: der {Beitrag} der {Schweizerischen} {Akademie} der {Geistes}- und {Sozialwissenschaften} zur nachhaltigen {Sicherung} von {Forschungsdaten}}, isbn = {978-3-11-055379-6}, abstract = {Der vorliegende Band versammelt eine repräsentative Auswahl an Initiativen der Wissenschaftlichen Bibliotheken der Schweiz, die als Resultat erfolgreicher Kooperationen ein hohes Innovationspotential aufweisen. Im Mittelpunkt stehen wissenschafts-, hochschul- und förderpolitische Themen in Verbindung mit Services zur Forschungsunterstützung wie nationale Open Access-Strategie, Forschungsdatenmanagement, Digitalisierung sowie Präsentations- und Serviceplattformen. Ebenfalls aufgegriffen werden Fragen der baulichen Entwicklung und strategischen Standortplanung, Aus- und Weiterbildung oder die Bedeutung überregional und landesweit agierender Verbände. 
Der Band wird als Festschrift für Susanna Bliggenstorfer anlässlich ihres Rücktrittes als Direktorin der Zentralbibliothek Zürich herausgegeben.}, language = {de}, booktitle = {Bibliotheken der {Schweiz}: {Innovation} durch {Kooperation}: {Festschrift} für {Susanna} {Bliggenstorfer} anlässlich ihres {Rücktrittes} als {Direktorin} der {Zentralbibliothek} {Zürich}}, publisher = {De Gruyter Saur}, author = {Immenhauser, Beat}, editor = {Zürich, Zentralbibliothek and Keller, Alice and Uhl, Susanne}, year = {2018}, file = {Immenhauser - 2018 - habent sua fata data der Beitrag der Schweizerisc.pdf:C\:\\Users\\carst\\Zotero\\storage\\ML5ESNWS\\Immenhauser - 2018 - habent sua fata data der Beitrag der Schweizerisc.pdf:application/pdf}, } @incollection{pampel_stand_2015, address = {Bonn}, title = {Stand und {Perspektive} des globalen {Verzeichnisses} von {Forschungsdaten}-{Repositorien} re3data.org}, isbn = {978-3-88579-637-4}, abstract = {Das Projekt re3data.org – Registry of Research Data Repositories macht Forschungsdaten-Repositorien in einem web-basierten Verzeichnis auffindbar. Das Ziel von re3data.org ist es, Forschenden eine Orientierung über bestehende Repositorien zur dauerhaften Zugänglichmachung von digitalen Forschungsdaten zu bieten, um „data sharing“ und „data re-use“ in der Wissenschaft zu fördern. Der Beitrag ordnet den Dienst in aktuelle Diskussionen um den offenen Zugang zu Forschungsdaten ein und beschreibt Stand und Perspektive von re3data.org.}, language = {Beitr. teilw. in dt., teilw. in engl. Sprache}, booktitle = {8. {DFN}-{Forum} {Kommunikationstechnologien} {Beiträge} der {Fachtagung} 08.06.-09.06.2015 in {Lübeck}}, publisher = {Gesellschaft für Informatik e.V.}, author = {Pampel, Heinz and Bertelmann, Roland and Scholze, Frank and Vierkant, Paul and Kindling, Maxi}, editor = {Müller, Paul}, year = {2015}, pages = {13--22}, file = {Pampel et al. 
- 2015 - Stand und Perspektive des globalen Verzeichnisses .pdf:C\:\\Users\\carst\\Zotero\\storage\\AUSM6CJY\\Pampel et al. - 2015 - Stand und Perspektive des globalen Verzeichnisses .pdf:application/pdf}, } @book{eynden_managing_2011, address = {Colchester}, edition = {Third edition, fully revised}, title = {Managing and sharing data: a best practice guide for researchers}, copyright = {CC BY NC SA}, isbn = {978-1-904059-78-3}, shorttitle = {Managing and sharing data}, url = {https://ukdataservice.ac.uk/media/622417/managingsharing.pdf}, language = {en}, publisher = {Print Essex at the University of Essex}, author = {Eynden, Veerle van den and Corti, Louise and Woollard, Matthew and Bishop, Libby and Horton, Laurence}, year = {2011}, file = {Eynden et al. - 2011 - Managing and sharing data a best practice guide f.pdf:C\:\\Users\\carst\\Zotero\\storage\\9IN5RJI4\\Eynden et al. - 2011 - Managing and sharing data a best practice guide f.pdf:application/pdf}, } @book{goldhammer_okonomischer_2017, address = {Berlin}, title = {Ökonomischer {Wert} von {Verbraucherdaten} für {Adress}- und {Datenhändler}: {Studie} im {Auftrag} des {Bundesministeriums} der {Justiz} und für {Verbraucherschutz}:}, url = {https://www.goldmedia.com/fileadmin/goldmedia/2015/Studien/2017/Verbraucherdaten_BMJV/Studie_Wert_Daten_Adresshaendler_Goldmedia_BMJV_2017.pdf}, language = {de}, author = {Goldhammer, Klaus and Wiegand, André}, editor = {Consulting, Goldmedia GmbH Strategy}, month = apr, year = {2017}, file = {Goldhammer und Wiegand - 2017 - Ökonomischer Wert von Verbraucherdaten für Adress-.pdf:C\:\\Users\\carst\\Zotero\\storage\\CPHDRC5F\\Goldhammer und Wiegand - 2017 - Ökonomischer Wert von Verbraucherdaten für Adress-.pdf:application/pdf}, } @article{cusumano_business_2019, title = {The {Business} of {Platforms}: {Strategy} in the {Age} of {Digital} {Competition}, {Innovation}, and {Power}}, shorttitle = {The {Business} of {Platforms}}, url = 
{https://www.hbs.edu/faculty/Pages/item.aspx?num=56021}, abstract = {The Business of Platforms explores the strategic, economic, and technology management challenges of digital platform businesses. We have five major themes in the book: 1) The world’s most valuable companies are all platforms, in part because platforms have network effects, with the potential for a winner-take-all or winner-take-most outcome. 2) Platforms come in 3 flavors: innovation platforms, transaction platforms, and hybrid platforms. We suggest that the world is moving towards more and more hybrids, and we identify the key steps in building a successful platform. 3) Failure is more likely than winner-take-all: mispricing, mistrust, mistiming, and hubris lead to hundreds of failures. 4) Old “dogs” can learn new tricks: conventional companies can adapt to a platform world with a buy, build, or belong strategy. And 5) Platforms are a double-edge sword: abuse of power, bullying poor labor practices, and bad actors can undermine even the most successful platforms. The book concludes with an exploration of platform battles of the future, including voice wars (Alexa vs. Hey Google vs. Siri), ridesharing and autonomous car platforms, quantum computing, and CRISPR.}, language = {en-us}, author = {Cusumano, Michael A. 
and Gawer, Annabelle and Yoffie, David B.}, month = may, year = {2019}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\T76B9STB\\item.html:text/html}, } @book{mayer-schonberger_digital_2017, address = {Berlin}, title = {Das {Digital}: {Markt}, {Wertschöpfung} und {Gerechtigkeit} im {Datenkapitalismus}}, isbn = {978-3-430-20233-6}, shorttitle = {Das {Digital}}, language = {de}, publisher = {Econ}, author = {Mayer-Schönberger, Viktor and Ramge, Thomas}, year = {2017}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\HCFVDTJI\\Mayer-Schönberger und Ramge - 2017 - Das Digital Markt, Wertschöpfung und Gerechtigkei.pdf:application/pdf}, } @article{schomm_marketplaces_2013, title = {Marketplaces for {Data}: {An} {Initial} {Survey}}, volume = {42}, issn = {0163-5808}, url = {https://doi.org/10.1145/2481528.2481532}, doi = {10.1145/2481528.2481532}, abstract = {Data is becoming more and more of a commodity, so that it is not surprising that data has reached the status of tradable goods. An increasing number of data providers is recognizing this and is consequently setting up platforms for selling, buying, or trading data. 
We identify several categories and dimensions of data marketplaces and data vendors and provide a snapshot of the situation as of Summer 2012.}, language = {en}, number = {1}, journal = {SIGMOD Rec.}, author = {Schomm, Fabian and Stahl, Florian and Vossen, Gottfried}, year = {2013}, pages = {15--26}, } @article{stahl_marketplaces_2017, title = {Marketplaces for {Digital} {Data}: {Quo} {Vadis}?}, volume = {10}, copyright = {Copyright (c) 2017 Florian Stahl,Fabian Schomm,Lara Vomfell,Gottfried Vossen}, issn = {1913-8989}, shorttitle = {Marketplaces for {Digital} {Data}}, url = {http://www.ccsenet.org/journal/index.php/cis/article/view/70439}, doi = {10.5539/cis.v10n4p22}, abstract = {The survey presented in this work investigates emerging markets for data and is the third of its kind, providing a deeper understanding of this emerging type of market. The findings indicate that data providers focus on limited business models and that data remains individualized and differentiated. Nevertheless, a trend towards commoditization for certain types of data can be foreseen, which allows an outlook to further developments in this area.}, language = {en}, number = {4}, journal = {Computer and Information Science}, author = {Stahl, Florian and Schomm, Fabian and Vomfell, Lara and Vossen, Gottfried}, month = oct, year = {2017}, pages = {22}, file = {Stahl et al. - 2017 - Marketplaces for Digital Data Quo Vadis.pdf:C\:\\Users\\carst\\Zotero\\storage\\8TJBRDIK\\Stahl et al. 
- 2017 - Marketplaces for Digital Data Quo Vadis.pdf:application/pdf}, } @article{stahl_classification_2016, title = {A classification framework for data marketplaces}, volume = {3}, issn = {2196-8896}, url = {https://doi.org/10.1007/s40595-016-0064-2}, doi = {10.1007/s40595-016-0064-2}, abstract = {Trading data as a commodity has become increasingly popular in recent years, and data marketplaces have emerged as a new business model where data from a variety of sources can be collected, aggregated, processed, enriched, bought, and sold. They are effectively changing the way data are distributed and managed on the Internet. To get a better understanding of the emergence of data marketplaces, we have conducted several surveys in recent years to systematically gather and evaluate their characteristics. This paper takes a broader perspective and relates data marketplaces as currently discussed in computer science to the neoclassical notions of market and marketplace from economics. Specifically, we provide a typology of electronic marketplaces and discuss their approaches to the distribution of data. Finally, we provide a distinct definition of data marketplaces, leading to a classification framework that can provide structure for the emerging field of data marketplace research.}, language = {en}, number = {3}, journal = {Vietnam Journal of Computer Science}, author = {Stahl, Florian and Schomm, Fabian and Vossen, Gottfried and Vomfell, Lara}, month = aug, year = {2016}, pages = {137--143}, file = {Stahl et al. - 2016 - A classification framework for data marketplaces.pdf:C\:\\Users\\carst\\Zotero\\storage\\HWUWYEBY\\Stahl et al. 
- 2016 - A classification framework for data marketplaces.pdf:application/pdf}, } @article{stahl_name_2017, title = {Name {Your} {Own} {Price} on {Data} {Marketplaces}}, volume = {28}, issn = {0868-4952}, url = {https://content.iospress.com/articles/informatica/inf1134}, abstract = {A novel approach to pricing on data marketplaces is proposed, which is based on the Name Your Own Price (NYOP) principle: customers suggest their own price for a (relational) data product and in return receive a custom-tailored one. The result is a f}, language = {en}, number = {1}, journal = {Informatica}, author = {Stahl, Florian and Vossen, Gottfried}, month = jan, year = {2017}, pages = {155--180}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\Q36M5DZH\\inf1134.html:text/html}, } @inproceedings{koutris_query-based_2012, address = {New York}, series = {{PODS} '12}, title = {Query-based data pricing}, isbn = {978-1-4503-1248-6}, url = {https://doi.org/10.1145/2213556.2213582}, doi = {10.1145/2213556.2213582}, abstract = {Data is increasingly being bought and sold online, and Web-based marketplace services have emerged to facilitate these activities. However, current mechanisms for pricing data are very simple: buyers can choose only from a set of explicit views, each with a specific price. In this paper, we propose a framework for pricing data on the Internet that, given the price of a few views, allows the price of any query to be derived automatically. We call this capability "query-based pricing." We first identify two important properties that the pricing function must satisfy, called arbitrage-free and discount-free. Then, we prove that there exists a unique function that satisfies these properties and extends the seller's explicit prices to all queries. When both the views and the query are Unions of Conjunctive Queries, the complexity of computing the price is high. 
To ensure tractability, we restrict the explicit prices to be defined only on selection views (which is the common practice today). We give an algorithm with polynomial time data complexity for computing the price of any chain query by reducing the problem to network flow. Furthermore, we completely characterize the class of Conjunctive Queries without self-joins that have PTIME data complexity (this class is slightly larger than chain queries), and prove that pricing all other queries is NP-complete, thus establishing a dichotomy on the complexity of the pricing problem when all views are selection queries.}, booktitle = {Proceedings of the 31st {ACM} {SIGMOD}-{SIGACT}-{SIGAI} symposium on {Principles} of {Database} {Systems}}, publisher = {Association for Computing Machinery}, author = {Koutris, Paraschos and Upadhyaya, Prasang and Balazinska, Magdalena and Howe, Bill and Suciu, Dan}, month = may, year = {2012}, pages = {167--178}, file = {Koutris et al. - 2012 - Query-based data pricing.pdf:C\:\\Users\\carst\\Zotero\\storage\\9YIE84UA\\Koutris et al. - 2012 - Query-based data pricing.pdf:application/pdf}, } @article{stahl_preismodelle_2015, title = {Preismodelle für {Datenmarktplätze}}, volume = {38}, issn = {1432-122X}, url = {https://doi.org/10.1007/s00287-013-0751-7}, doi = {10.1007/s00287-013-0751-7}, abstract = {Eine zunehmende Zahl von Anbietern nutzt das Cloud-Computing-Paradigma für einen Handel mit Daten und analytischen Dienstleistungen. In dieser qualitativen Studie präsentieren wir die Ergebnisse aus Interviews mit zwölf etablierten Anbietern. Unsere Ergebnisse zeigen insbesondere eine große Unsicherheit bezüglich der Preissetzung und Preismodellwahl. Ferner erlauben sie eine Abstraktion der betrachteten Marktplätze auf ein einheitliches Schema mit sieben Akteuren sowie sechs atomaren und zwei hybriden Preisstrategien abstrahieren. 
Darüber hinaus bietet diese Papier erstmals eine strukturierte Entscheidungshilfe für die Wahl eines geeigneten Preismodells für Datenmarktplätze und legt somit den Grundstein für eine algorithmische Unterstützung bei Preismodellwahl und Preisfindung.}, language = {de}, number = {2}, journal = {Informatik-Spektrum}, author = {Stahl, Florian and Löser, Alexander and Vossen, Gottfried}, month = apr, year = {2015}, pages = {133--141}, } @inproceedings{martins_supporting_2019, title = {Supporting {Customers} with {Limited} {Budget} in {Data} {Marketplaces}}, doi = {10.1109/LA-CCI47412.2019.9037038}, abstract = {As the competitiveness and dynamics of current markets intensify, companies and organizations see opportunities to optimize their strategies and increase their business advantage in data-driven decision-making. This has led to an emergence of data marketplaces, where providers can sell data, while consumers can purchase it. However, the process of acquiring data from a marketplace involves issuing queries with an associated monetary cost, and data consumers often struggle to purchase the targeted data set of appropriate volume and content within their budget. Two issues need to be considered: One is querying itself, which may require API calls, structured queries written in SQL, graph queries written in Neo4J, or any other language framework. Querying is often a stepwise process that starts from generic queries and gets refined as the user learns about the data that results. The other issue is the cost involved, which consists of the price a consumer has to pay for the data and that of processing the various queries. In this paper, the second issue is studied from a computational perspective; in particular, we propose a novel framework for data-purchase support that considers data purchase from a marketplace as a sequence of interactions between the data provider (or the marketplace) and the consumer. 
This allows us to deal with scenarios in which the consumer has a limited budget, insufficient to embrace the complete data set he or she targets. We formalize the problem setting and the characteristics of available queries offered by the data provider so that efficient (approximation) algorithms can be devised. Our empirical results demonstrate that intelligent algorithms can aid the data consumer with near-optimum solutions that consider her preferences about the queries to be issue to the data provider.}, booktitle = {2019 {IEEE} {Latin} {American} {Conference} on {Computational} {Intelligence} ({LA}-{CCI})}, author = {Martins, Denis Mayr Lima and Lechtenbörger, Jens and Vossen, Gottfried}, month = nov, year = {2019}, pages = {1--6}, } @inproceedings{travizano_wibson_2018, address = {San Francisco}, title = {Wibson: {A} {Decentralized} {Data} {Marketplace}}, shorttitle = {Wibson}, url = {http://arxiv.org/abs/1812.09966}, abstract = {Our aim is for Wibson to be a blockchain-based, decentralized data marketplace that provides individuals a way to securely and anonymously sell information in a trusted environment. The combination of the Wibson token and blockchain-enabled smart contracts hopes to allow Data Sellers and Data Buyers to transact with each other directly while providing individuals the ability to maintain anonymity as desired. Wibson intends that its data marketplace will provide infrastructure and financial incentives for individuals to securely sell personal information without sacrificing personal privacy. Data Buyers receive information from willing and actively participating individuals with the benefit of knowing that the personal information should be accurate and current.}, author = {Travizano, Matias and Sarraute, Carlos and Ajzenman, Gustavo and Minnoni, Martin}, year = {2018}, file = {Travizano et al. - 2018 - Wibson A Decentralized Data Marketplace.pdf:C\:\\Users\\carst\\Zotero\\storage\\9CECZ9FV\\Travizano et al. 
- 2018 - Wibson A Decentralized Data Marketplace.pdf:application/pdf}, } @article{bracher_fashion_2016, title = {Fashion {DNA}: {Merging} {Content} and {Sales} {Data} for {Recommendation} and {Article} {Mapping}}, shorttitle = {Fashion {DNA}}, url = {http://arxiv.org/abs/1609.02489}, abstract = {We present a method to determine Fashion DNA, coordinate vectors locating fashion items in an abstract space. Our approach is based on a deep neural network architecture that ingests curated article information such as tags and images, and is trained to predict sales for a large set of frequent customers. In the process, a dual space of customer style preferences naturally arises. Interpretation of the metric of these spaces is straightforward: The product of Fashion DNA and customer style vectors yields the forecast purchase likelihood for the customer-item pair, while the angle between Fashion DNA vectors is a measure of item similarity. Importantly, our models are able to generate unbiased purchase probabilities for fashion items based solely on article information, even in absence of sales data, thus circumventing the "cold-start problem" of collaborative recommendation approaches. Likewise, it generalizes easily and reliably to customers outside the training set. We experiment with Fashion DNA models based on visual and/or tag item data, evaluate their recommendation power, and discuss the resulting article similarities.}, journal = {arXiv:1609.02489 [cs]}, author = {Bracher, Christian and Heinz, Sebastian and Vollgraf, Roland}, month = sep, year = {2016}, pages = {[10]}, file = {Bracher et al. - 2016 - Fashion DNA Merging Content and Sales Data for Re.pdf:C\:\\Users\\carst\\Zotero\\storage\\B6AP7CJ8\\Bracher et al. 
- 2016 - Fashion DNA Merging Content and Sales Data for Re.pdf:application/pdf}, } @article{devlin_bert_2019, title = {{BERT}: {Pre}-training of {Deep} {Bidirectional} {Transformers} for {Language} {Understanding}}, shorttitle = {{BERT}}, url = {http://arxiv.org/abs/1810.04805}, abstract = {We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5\% (7.7\% point absolute improvement), MultiNLI accuracy to 86.7\% (4.6\% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).}, journal = {arXiv:1810.04805 [cs]}, author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina}, month = may, year = {2019}, file = {Devlin et al. - 2019 - BERT Pre-training of Deep Bidirectional Transform.pdf:C\:\\Users\\carst\\Zotero\\storage\\Y9WHXQ4G\\Devlin et al. 
- 2019 - BERT Pre-training of Deep Bidirectional Transform.pdf:application/pdf}, } @inproceedings{tang_get_2014, address = {Cham}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Get a {Sample} for a {Discount}}, isbn = {978-3-319-10073-9}, doi = {10.1007/978-3-319-10073-9_3}, abstract = {While price and data quality should define the major trade-off for consumers in data markets, prices are usually prescribed by vendors and data quality is not negotiable. In this paper we study a model where data quality can be traded for a discount. We focus on the case of XML documents and consider completeness as the quality dimension. In our setting, the data provider offers an XML document, and sets both the price of the document and a weight to each node of the document, depending on its potential worth. The data consumer proposes a price. If the proposed price is lower than that of the entire document, then the data consumer receives a sample, i.e., a random rooted subtree of the document whose selection depends on the discounted price and the weight of nodes. By requesting several samples, the data consumer can iteratively explore the data in the document. We show that the uniform random sampling of a rooted subtree with prescribed weight is unfortunately intractable. However, we are able to identify several practical cases that are tractable. The first case is uniform random sampling of a rooted subtree with prescribed size; the second case restricts to binary weights. 
For both these practical cases we present polynomial-time algorithms and explain how they can be integrated into an iterative exploratory sampling approach.}, language = {en}, booktitle = {Database and {Expert} {Systems} {Applications}}, publisher = {Springer International Publishing}, author = {Tang, Ruiming and Amarilli, Antoine and Senellart, Pierre and Bressan, Stéphane}, editor = {Decker, Hendrik and Lhotská, Lenka and Link, Sebastian and Spies, Marcus and Wagner, Roland R.}, year = {2014}, pages = {20--34}, } @inproceedings{agarwal_marketplace_2019, address = {New York, NY, USA}, series = {{EC} '19}, title = {A {Marketplace} for {Data}: {An} {Algorithmic} {Solution}}, isbn = {978-1-4503-6792-9}, url = {https://doi.org/10.1145/3328526.3329589}, doi = {10.1145/3328526.3329589}, abstract = {In this work, we aim to design a data marketplace; a robust real-time matching mechanism to efficiently buy and sell training data for Machine Learning tasks. While the monetization of data and pre-trained models is an essential focus of industry today, there does not exist a market mechanism to price training data and match buyers to sellers while still addressing the associated (computational and other) complexity. The challenge in creating such a market stems from the very nature of data as an asset: (i) it is freely replicable; (ii) its value is inherently combinatorial due to correlation with signal in other data; (iii) prediction tasks and the value of accuracy vary widely; (iv) usefulness of training data is difficult to verify a priori without first applying it to a prediction task. As our main contributions we: (i) propose a mathematical model for a two-sided data market and formally define the key associated challenges; (ii) construct algorithms for such a market to function and analyze how they meet the challenges defined. 
We highlight two technical contributions: (i) a new notion of "fairness" required for cooperative games with freely replicable goods; (ii) a truthful, zero regret mechanism to auction a class of combinatorial goods based on utilizing Myerson's payment function and the Multiplicative Weights algorithm. These might be of independent interest.}, booktitle = {Proceedings of the 2019 {ACM} {Conference} on {Economics} and {Computation}}, publisher = {Association for Computing Machinery}, author = {Agarwal, Anish and Dahleh, Munther and Sarkar, Tuhin}, year = {2019}, pages = {701--726}, file = {Agarwal et al. - 2019 - A Marketplace for Data An Algorithmic Solution.pdf:C\:\\Users\\carst\\Zotero\\storage\\MUHSR4PL\\Agarwal et al. - 2019 - A Marketplace for Data An Algorithmic Solution.pdf:application/pdf}, } @book{noauthor_open_nodate-1, title = {Open {Data}}, url = {http://www.bmi.bund.de/DE/themen/moderne-verwaltung/open-government/open-data/open-data-artikel.html;jsessionid=3A36750DBBB050B221E2E62A17E1842A.1_cid373?nn=9392318}, abstract = {Wir leben in einer Welt des rasanten Wandels. Wissenschaft und Technologien eröffnen neue, bislang unbekannte Möglichkeiten. Dies gilt in besonderem Maße für die Digitalisierung aller Lebensbereiche. 
Der Zugang zu Informationen ist einfach und zu geringsten Kosten möglich.}, language = {de}, note = {Publication Title: Bundesministerium des Innern, für Bau und Heimat}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\J74QC67Q\\open-data-node.html:text/html}, } @techreport{hodson_current_2014, title = {Current {Best} {Practice} {For} {Research} {Data} {Management} {Policies}}, copyright = {Creative Commons Attribution 4.0, Open Access}, url = {https://zenodo.org/record/27872}, abstract = {Report on Current Best Practice for Research Data Management Policies commissioned from CODATA by the Danish e-Infrastructure Cooperation and the Danish Digital Library and submitted in May 2014.}, language = {en}, author = {Hodson, Simon and Molloy, Laura}, year = {2014}, doi = {10.5281/ZENODO.27872}, file = {RDM_Policy_Briefing-CODATA-Executive_Summary-201405_Final.pdf:C\:\\Users\\carst\\Zotero\\storage\\G4YMB3ZR\\RDM_Policy_Briefing-CODATA-Executive_Summary-201405_Final.pdf:application/pdf;RDM_Policy_Briefing-CODATA-201405_Appendices.pdf:C\:\\Users\\carst\\Zotero\\storage\\778X63J9\\RDM_Policy_Briefing-CODATA-201405_Appendices.pdf:application/pdf;RDM_Policy_Briefing-CODATA-201405_Final.pdf:C\:\\Users\\carst\\Zotero\\storage\\25P5XJ5V\\RDM_Policy_Briefing-CODATA-201405_Final.pdf:application/pdf}, } @techreport{commission_h2020_2016, title = {H2020 {Programme}: {Guidelines} on {FAIR} {Data} {Management} in {Horizon} 2020}, url = {https://ec.europa.eu/research/participants/data/ref/h2020/grants_manual/hi/oa_pilot/h2020-hi-oa-data-mgt_en.pdf}, language = {en}, number = {Version 3.0}, institution = {European Commission}, author = {{European Commission}}, month = jul, year = {2016}, pages = {12}, file = {European Commission - 2016 - H2020 Programme Guidelines on FAIR Data Managemen.pdf:C\:\\Users\\carst\\Zotero\\storage\\X3IUZA2B\\European Commission - 2016 - H2020 Programme Guidelines on FAIR Data Managemen.pdf:application/pdf}, } 
@book{deutsche_forschungsgemeinschaft_dfg_sicherung_2013, address = {Weinheim, Germany}, title = {Sicherung guter wissenschaftlicher {Praxis}}, isbn = {978-3-527-67918-8, 978-3-527-33703-3}, url = {http://doi.wiley.com/10.1002/9783527679188.oth1}, language = {de-DE}, publisher = {Wiley-VCH Verlag GmbH \& Co. KGaA}, editor = {{Deutsche Forschungsgemeinschaft DFG}}, month = oct, year = {2013}, doi = {10.1002/9783527679188.oth1}, file = {Deutsche Forschungsgemeinschaft DFG - 2013 - Sicherung guter wissenschaftlicher Praxis.pdf:C\:\\Users\\carst\\Zotero\\storage\\9FND2362\\Deutsche Forschungsgemeinschaft DFG - 2013 - Sicherung guter wissenschaftlicher Praxis.pdf:application/pdf}, } @book{noauthor_liste_2019, title = {Liste der größten gemeinwohlorientierten {Stiftungen}}, url = {https://www.stiftungen.org/de/stiftungen/zahlen-und-daten/liste-der-groessten-stiftungen.html}, language = {de}, month = dec, year = {2019}, note = {Publication Title: Bundesverband Deutscher Stiftungen}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\5M89HJ23\\liste-der-groessten-stiftungen.html:text/html}, } @book{noauthor_tipps_nodate, title = {Tipps zur {Tool}-{Auswahl}}, copyright = {CC0 1.0 Universal}, url = {https://www.fdm.uni-wuppertal.de/de/datenmanagementplan/tipps-zur-tool-auswahl.html}, note = {Publication Title: https://www.uni-wuppertal.de/}, file = {Tipps zur Tool-Auswahl - Forschungsdaten-Management - BERGISCHE UNIVERSITÄT WUPPERTAL:C\:\\Users\\carst\\Zotero\\storage\\5FBCYD45\\tipps-zur-tool-auswahl.html:text/html}, } @techreport{innern_nationaler_2014, address = {Berlin}, title = {Nationaler {Aktionsplan} der {Bundesregierung} zur {Umsetzung} der {Open}-{Data}-{Charta} der {G8}}, url = {https://www.bmi.bund.de/SharedDocs/downloads/DE/publikationen/themen/moderne-verwaltung/aktionsplan-open-data.pdf?__blob=publicationFile&v=3}, language = {de}, institution = {Bundesministerium des Innern}, author = {{Bundesministerium des Innern}}, year = {2014}, pages = {20}, file = {2014 - Nationaler Aktionsplan der Bundesregierung 
zur Ums.pdf:C\:\\Users\\carst\\Zotero\\storage\\MG5BCKL5\\2014 - Nationaler Aktionsplan der Bundesregierung zur Ums.pdf:application/pdf}, } @book{bmbf-internetredaktion_open_2018, title = {Open {Data}}, url = {https://www.bmbf.de/de/open-data-6547.html}, abstract = {Bundesbehörden müssen digitale Daten als offene Daten bereitstellen. Das regelt das Open-Data-Gesetz. Das BMBF veröffentlicht daher regelmäßig Daten und Fakten aus den Bereichen Bildung, Wissenschaft, Forschung, Entwicklung und Innovation.}, language = {de}, author = {{BMBF-Internetredaktion}}, month = jul, year = {2018}, note = {Publication Title: Bundesministerium für Bildung und Forschung - BMBF}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\LITIM8MF\\open-data-6547.html:text/html}, } @book{noauthor_dmponline_nodate, title = {{DMPonline}}, url = {https://dmponline.dcc.ac.uk/}, file = {DMPonline:C\:\\Users\\carst\\Zotero\\storage\\JZIQC2VL\\dmponline.dcc.ac.uk.html:text/html}, } @article{carr_sharing_2015, title = {Sharing {Research} {Data} to {Improve} {Public} {Health}: {A} {Funder} {Perspective}}, volume = {10}, issn = {1556-2646}, url = {https://doi.org/10.1177/1556264615593485}, doi = {10.1177/1556264615593485}, abstract = {Through the Public Health Research Data Forum, global health research funders are working together to increase the availability of public health and epidemiology research data in ways that are equitable, ethical, and efficient. The Wellcome Trust funded the research reported in this special edition as a first step toward building an evidence base on the perspectives of research stakeholders in low- and middle-income countries on the benefits and challenges of sharing health research data. 
We hope this work will make a key contribution to discussions aimed at creating policy frameworks for data access at local, national, and regional levels that are sensitive to different contexts and ensure the benefits to research and health are realized in an equitable manner.}, number = {3}, journal = {Journal of Empirical Research on Human Research Ethics}, author = {Carr, David and Littler, Katherine}, month = jul, year = {2015}, pages = {314--316}, file = {Carr und Littler - 2015 - Sharing Research Data to Improve Public Health A .pdf:C\:\\Users\\carst\\Zotero\\storage\\M5X87ZZ8\\Carr und Littler - 2015 - Sharing Research Data to Improve Public Health A .pdf:application/pdf}, } @techreport{noauthor_wissenschaftliche_2018, type = {Beschluss}, title = {Wissenschaftliche {Bibliotheken} 2025: beschlossen von der {Sektion} 4 „{Wissenschaftliche} {Universalbibliotheken}“ im {Deutschen} {Bibliotheksverband} e.{V}. (dbv) im {Januar} 2018}, url = {https://www.bibliotheksverband.de/fileadmin/user_upload/Sektionen/sektion4/Publikationen/WB2025_Endfassung_endg.pdf}, language = {de}, institution = {Deutscher Bibliotheksverband e.V. (dbv)}, year = {2018}, pages = {24}, file = {2018 - Wissenschaftliche Bibliotheken 2025 beschlossen v.pdf:C\:\\Users\\carst\\Zotero\\storage\\GSBHSN9S\\2018 - Wissenschaftliche Bibliotheken 2025 beschlossen v.pdf:application/pdf}, } @book{noauthor_disciplinary_nodate, title = {Disciplinary {Metadata}}, copyright = {© Digital Curation Centre 2004-2020}, url = {https://www.dcc.ac.uk/guidance/standards/metadata}, abstract = {While data curators, and increasingly researchers, know that good metadata is key for research data access and re-use, figuring out precisely what metadata to capture and how to capture it is a complex task. Fortunately, many academic disciplines have supported initiatives to formalise the metadata specifications the community deems to be required for data re-use. 
This page provides links to information about these disciplinary metadata standards, including profiles, tools to implement the standards, and use cases of data repositories currently implementing them. For those disciplines that have not yet settled on a metadata standard, and for those repositories that work with data across disciplines, the General Research Data section links to information about broader metadata standards that have been adapted to suit the needs of research data. Please note that a community-maintained version of this directory has been set up under the auspices of the Research Data Alliance.}, language = {en}, note = {Publication Title: https://www.dcc.ac.uk}, file = {Disciplinary Metadata | DCC:C\:\\Users\\carst\\Zotero\\storage\\GX8F379D\\metadata.html:text/html}, } @techreport{noauthor_multi-beneficiary_2017, title = {Multi-{Beneficiary} {Model} {Grant} {Agreement}: {ERC} {Proof} of {Concept} ({H2020} {ERC} {MGA} {PoC} — {Multi})}, url = {https://ec.europa.eu/research/participants/data/ref/h2020/mga/erc/h2020-mga-erc-poc-multi_en.pdf}, language = {en}, number = {Version 5.0}, institution = {European Research Council (ERC)}, month = oct, year = {2017}, pages = {(149)}, file = {2017 - Multi-Beneficiary Model Grant Agreement ERC Proof.pdf:C\:\\Users\\carst\\Zotero\\storage\\2AQ7BE2J\\2017 - Multi-Beneficiary Model Grant Agreement ERC Proof.pdf:application/pdf}, } @book{noauthor_impact_2017, title = {Impact analysis and policy support}, url = {https://erc.europa.eu/about-erc/impact-analysis-and-policy-support}, abstract = {"The Scientific Council will continuously monitor the operation of the ERC and its evaluation procedures and consider how best to achieve its broader objectives.” (Council Decision 743/2013 establishing the specific programme implementing Horizon 2020). 
Since its creation a lot of effort has been employed for the analysis of the impact of ERC funding on the scientific community and on the research landscape at large.}, language = {en}, month = nov, year = {2017}, note = {Publication Title: ERC: European Research Council}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\DEKBXDQH\\impact-analysis-and-policy-support.html:text/html}, } @book{noauthor_national_nodate, title = {National {Contact} {Points} for {Horizon} 2020}, url = {https://ec.europa.eu/info/funding-tenders/opportunities/portal/screen/support/ncp}, note = {Publication Title: https://ec.europa.eu}, file = {Funding & tenders:C\:\\Users\\carst\\Zotero\\storage\\W8DC2D8G\\ncp.html:text/html}, } @techreport{noauthor_guidelines_2017, type = {Guideline}, title = {Guidelines on: {Implementation} of {Open} {Access} to {Scientific} {Publications} and {Research} {Data}: in projects supported by the {European} {Research} {Council} under {Horizon} 2020}, url = {https://ec.europa.eu/research/participants/data/ref/h2020/other/hi/oa-pilot/h2020-hi-erc-oa-guide_en.pdf}, language = {en}, number = {Version 1.1}, institution = {European Research Council (ERC)}, month = apr, year = {2017}, pages = {7}, file = {Guidelines on Implementation of Open Access to Sc.pdf:C\:\\Users\\carst\\Zotero\\storage\\WJGBJ2DC\\Guidelines on Implementation of Open Access to Sc.pdf:application/pdf}, } @techreport{noauthor_open_2019, title = {Open {Research} {Data} and {Data} {Management} {Plans}: {Information} for {ERC} grantees: by the {ERC} {Scientific} {Council}}, url = {https://erc.europa.eu/sites/default/files/document/file/ERC_info_document-Open_Research_Data_and_Data_Management_Plans.pdf}, language = {en}, number = {Version 3.1}, institution = {European Research Council (ERC)}, month = jul, year = {2019}, pages = {19}, file = {2019 - Open Research Data and Data Management Plans Info.pdf:C\:\\Users\\carst\\Zotero\\storage\\45MUZSXP\\2019 - Open Research Data and Data Management Plans 
Info.pdf:application/pdf}, } @book{noauthor_open_2017, title = {Open {Access}}, url = {https://erc.europa.eu/managing-project/open-access}, abstract = {Since its creation, the ERC has been supporting the principle of open access to the published output of research as a fundamental part of its mission. It also promotes the basic principle of open access to research data. This page provides an overview of the rules related to open access to publications and research data management that apply to ERC grants. It also includes links to important open access repositories for publications and research data, and to useful registries and directories.}, language = {en}, month = feb, year = {2017}, note = {Publication Title: ERC: European Research Council}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\VPLR6E4H\\open-access.html:text/html}, } @book{noauthor_fairsharing_nodate, title = {{FAIRsharing} policies: {A} catalogue of data preservation, management and sharing policies from international funding agencies, regulators and journals}, url = {https://fairsharing.org/policies/}, note = {Publication Title: https://fairsharing.org}, file = {FAIRsharing:C\:\\Users\\carst\\Zotero\\storage\\CSQFXXSU\\policies.html:text/html}, } @book{noauthor_policies_2020, title = {Policies von {Förderorganisationen} › {Forschungsdatenmanagement} {Bayern}}, shorttitle = {Forschungsdatenmanagement {Bayern}}, url = {https://www.fdm-bayern.org/policies/policies-von-foerderorganisationen/}, abstract = {Die Website des Projektes eHumanities – interdisziplinär gibt einen Überblick über Initiativen zum Forschungsdatenmanagement in und außerhalb Bayerns.}, language = {de-DE}, month = jan, year = {2020}, note = {Publication Title: Forschungsdatenmanagement Bayern}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\8SLA586K\\policies-von-foerderorganisationen.html:text/html}, } @book{noauthor_open_nodate-2, title = {Open {Access} für {Forschungsdaten}}, url = 
{https://www.fwf.ac.at/de/forschungsfoerderung/open-access-policy/open-access-fuer-forschungsdaten/}, language = {de}, note = {Publication Title: https://www.fwf.ac.at}, file = {Open Access für Forschungsdaten:C\:\\Users\\carst\\Zotero\\storage\\AXEJAZQB\\open-access-fuer-forschungsdaten.html:text/html}, } @book{noauthor_policy_2013, title = {Policy paper: {G8} {Open} {Data} {Charter} and {Technical} {Annex}}, url = {https://www.gov.uk/government/publications/open-data-charter/g8-open-data-charter-and-technical-annex}, language = {en}, month = jun, year = {2013}, note = {Publication Title: GOV.UK}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\RVCY4T32\\g8-open-data-charter-and-technical-annex.html:text/html}, } @article{lariviere_authors_2018, title = {Do authors comply when funders enforce open access to research?}, volume = {562}, copyright = {2020 Nature}, url = {https://www.nature.com/articles/d41586-018-07101-w}, doi = {10.1038/d41586-018-07101-w}, abstract = {The first large-scale analysis of compliance with open-access rules reveals that up to one-third of articles are not free to read.}, language = {en}, number = {7728}, journal = {Nature}, author = {Larivière, Vincent and Sugimoto, Cassidy R.}, month = oct, year = {2018}, pages = {483--486}, file = {Larivière und Sugimoto - 2018 - Do authors comply when funders enforce open access.pdf:C\:\\Users\\carst\\Zotero\\storage\\FR6MA6ZK\\Larivière und Sugimoto - 2018 - Do authors comply when funders enforce open access.pdf:application/pdf}, } @article{neylon_compliance_2017, title = {Compliance {Culture} or {Culture} {Change}? {The} role of funders in improving data management and sharing practice amongst researchers}, volume = {3}, url = {https://doi.org/10.3897/rio.3.e14673}, doi = {10.3897/rio.3.e14673}, abstract = {There is a wide and growing interest in promoting Research Data Management (RDM) and Research Data Sharing (RDS) from many stakeholders in the research enterprise. 
Funders are under pressure from activists, from government, and from the wider public agenda towards greater transparency and access to encourage, require, and deliver improved data practices from the researchers they fund. Funders are responding to this, and to their own interest in improved practice, by developing and implementing policies on RDM and RDS. In this review we examine the state of funder policies, the process of implementation and available guidance to identify the challenges and opportunities for funders in developing policy and delivering on the aspirations for improved community practice, greater transparency and engagement, and enhanced impact. The review is divided into three parts. The first two components are based on desk research: a survey of existing policy statements drawing in part on existing surveys and a brief review of available guidance on policy development for funders. The third part addresses the experience of policy implementation through interviews with funders, policy developers, and infrastructure providers. In our review we identify, in common with other surveys, that RDM and RDS policies are increasingly common. The most developed are found amongst funders in the United States, United Kingdom, Australia, and European Union. However many other funders and nations have aspirational statements or are developing policy. There is a broad pattern of policy development moving from aspiration, to recommendations, to requirements, and finally reporting and auditing of data management practice. There are strong similarities across policies: a requirement for data management planning, often in grant submissions, expectations that data supporting published articles will be made available, and in many cases requirements for data archiving and availability over extended periods beyond grants. However there are also important differences in implementation. 
There is essentially no information available on the uptake and success of different policies in terms of compliance rates, or degrees of data availability. Many policies require a Data Management Plan as part of grant submission. This requirement can be enforced but there is disagreement on the value of this. One view is that requirements such as DMPs are the only way to force researchers to pay attention to these issues. The other is that such requirements lead to a culture of compliance in which the minimal effort is made and planning is seen as a “tick-box” exercise that has no further value. In this view requirements such as DMPs may actually be damaging the effort to effect culture change towards improved community practice. One way to bring these two views together is to see DMPs as living documents that form the basis of collaboration between researchers, funders, and data managers throughout the life of a research project. This approach is reflected in guidance on policy development that emphasises the importance of clarifying responsibilities of various stakeholders and ensuring that researchers are both recognised for good practice and see tangible benefits. More broadly this points to the need for the program of improving RDM and RDS to be shared project with the incentives for funders and researchers aligned as far as is possible. In the interviews successful policy implementation was often seen to be dependent on funders providing the required support, both in the form of infrastructure and resourcing, and via the provision of internal expertise amongst program managers. Where resources are limited, leveraging other support, especially from institutional sources, was seen as important as was ensuring the scope of policy requirements were commensurate with the support available and readiness of research communities. 
Throughout the desk research and the interviews a consistent theme is the desire for cultural change, where data management and sharing practices are embedded within the norms of behaviour for research communities. There is general agreement that progress from aspirational policies to achieving compliance is challenging and that broad cultural change, with the exception of specific communities, is a long way off. It is interesting to note that discussion of cultural change is largely externalised. There is little engagement with the concept of culture as an issue to consider or work with and very little engagement with models of how cultural change could be enabled. The disagreement over the value of DMPs is one example of how a lack of active engagement with culture and how it changes is leading to problems. Key Findings Policies on RDM and RDS are being developed by a number of agencies, primarily in the Global North. These policies are broadly consistent in aspiration and outlines but differ significantly in details of implementation. Policies generally develop along a path starting with aspirational statements, followed by recommendations, then requirements, and finally auditing and compliance measures. Measurement of policy adoption and compliance in terms of the over goals of increased availability and re-use of data is not tracked and is likely unmeasurable currently. Data Management Plans are a central requirement for many policies, in part because they can be made compulsory and act as a general focus for raising awareness. There are significant differences in the views of stakeholders on the value of Data Management Planning in its current form. Some stakeholders regard them as successful in raising awareness albeit with some limitations. 
Some regard them as actively damaging progress towards real change in practice by making RDM appear as one administrative activity among the many required for grant submission Successful policy implementation is coupled with funder support for infrastructure and training. Seeing RDM as an area for collaboration between funders and researchers may be valuable Internal expertise and support within a funder is often a gap which becomes a problem with monitoring and implementation DMPs can be a helpful part of process but it will be important to make them useful documents throughout and beyond the project If the object of RDM and RDS policy is cultural change in research communities then direct engagement with understanding the various cultures of researcher and other stakeholder communities, alongside frameworks of how they change is an important area for future focus.}, journal = {Research Ideas and Outcomes}, author = {Neylon, Cameron}, year = {2017}, pages = {e14673}, file = {Neylon - 2017 - Compliance Culture or Culture Change The role of .pdf:C\:\\Users\\carst\\Zotero\\storage\\C677TXWU\\Neylon - 2017 - Compliance Culture or Culture Change The role of .pdf:application/pdf}, } @article{hrynaszkiewicz_standardising_2017, title = {Standardising and {Harmonising} {Research} {Data} {Policy} in {Scholarly} {Publishing}}, volume = {12}, copyright = {Copyright (c) 2017 International Journal of Digital Curation}, issn = {1746-8256}, url = {http://www.ijdc.net/article/view/12.1.65}, doi = {10.2218/ijdc.v12i1.531}, abstract = {To address the complexities researchers face during publication, and the potential community-wide benefits of wider adoption of clear data policies, the publisher Springer Nature has developed a standardised, common framework for the research data policies of all its journals. An expert working group was convened to audit and identify common features of research data policies of the journals published by Springer Nature, where policies were present. 
The group then consulted with approximately 30 editors, covering all research disciplines within the organisation. The group also consulted with academic editors, librarians and funders, which informed development of the framework and the creation of supporting resources. Four types of data policy were defined in recognition that some journals and research communities are more ready than others to adopt strong data policies. As of January 2017 more than 700 journals have adopted a standard policy and this number is growing weekly. To potentially enable standardisation and harmonisation of data policy across funders, institutions, repositories, societies and other publishers, the policy framework was made available under a Creative Commons license. However, the framework requires wider debate with these stakeholders and an Interest Group within the Research Data Alliance (RDA) has been formed to initiate this process.}, language = {en}, number = {1}, journal = {International Journal of Digital Curation}, author = {Hrynaszkiewicz, Iain and Birukou, Aliaksandr and Astell, Mathias and Swaminathan, Sowmya and Kenall, Amye and Khodiyar, Varsha}, month = sep, year = {2017}, pages = {65--71}, file = {Hrynaszkiewicz et al. - 2017 - Standardising and Harmonising Research Data Policy.pdf:C\:\\Users\\carst\\Zotero\\storage\\LAP3QQL2\\Hrynaszkiewicz et al. - 2017 - Standardising and Harmonising Research Data Policy.pdf:application/pdf}, } @article{kiley_data_2017, title = {Data {Sharing} from {Clinical} {Trials} — {A} {Research} {Funder}’s {Perspective}}, volume = {377}, url = {https://doi.org/10.1056/NEJMsb1708278}, doi = {10.1056/NEJMsb1708278}, number = {20}, journal = {New England Journal of Medicine}, author = {Kiley, Robert and Peatfield, Tony and Hansen, Jennifer and Reddington, Fiona}, year = {2017}, pages = {1990--1992}, file = {Kiley et al. - 2017 - Data Sharing from Clinical Trials — A Research Fun.pdf:C\:\\Users\\carst\\Zotero\\storage\\BM7PYNX3\\Kiley et al. 
- 2017 - Data Sharing from Clinical Trials — A Research Fun.pdf:application/pdf}, } @article{kipphut-smith_measuring_2018, title = {Measuring {Open} {Access} {Policy} {Compliance}: {Results} of a {Survey}}, volume = {6}, issn = {2162-3309}, shorttitle = {Measuring {Open} {Access} {Policy} {Compliance}}, url = {http://jlsc-pub.org/articles/abstract/10.7710/2162-3309.2247/}, doi = {10.7710/2162-3309.2247}, abstract = {INTRODUCTION In the last decade, a significant number of institutions have adopted open access (OA) policies. Many of those working with OA policies are tasked with measuring policy compliance. This article reports on a survey of Coalition of Open Access Policy Institutions (COAPI) members designed to better understand the methods currently used for measuring and communicating OA policy success. METHODS This electronic survey was distributed to the COAPI member listserv, inviting both institutions who have passed an implemented policies and those who are still developing policies to participate. RESULTS The results to a number of questions related to topics such as policy workflows, quantitative and qualitative measurement activities and related tools, and challenges showed a wide range of responses, which are shared here. DISCUSSION It is clear that a number of COAPI members struggle with identifying what should be measured and what tools and methods are appropriate. The survey illustrates how each institution measures compliance differently, making it difficult to benchmark against peer institutions. CONCLUSION As a result of this survey, we recommend that institutions working with OA policies be as transparent as possible about their data sources and methods when calculating deposit rates and other quantitative measures. 
It is hoped that this transparency will result in the development of a set of qualitative and quantitative best practices for assessing OA policies that standardizes assessment terminology and articulates why institutions may want to measure policies.}, language = {eng}, number = {1}, journal = {Journal of Librarianship and Scholarly Communication}, author = {Kipphut-Smith, Shannon and Boock, Michael and Chapman, Kimberly and Hooper, Michaela Willi}, month = oct, year = {2018}, pages = {eP2247}, file = {Kipphut-Smith et al. - 2018 - Measuring Open Access Policy Compliance Results o.pdf:C\:\\Users\\carst\\Zotero\\storage\\WZCTXGLG\\Kipphut-Smith et al. - 2018 - Measuring Open Access Policy Compliance Results o.pdf:application/pdf}, } @article{kriesberg_analysis_2017, title = {An {Analysis} of {Federal} {Policy} on {Public} {Access} to {Scientific} {Research} {Data}}, volume = {16}, issn = {1683-1470}, url = {http://datascience.codata.org/article/10.5334/dsj-2017-027/}, doi = {10.5334/dsj-2017-027}, abstract = {The 2013 Office of Science and Technology Policy (OSTP) Memo on federally-funded research directed agencies with research and development budgets above \$100 million to develop and release plans to increase and broaden access to research results, both published literature and data. The agency responses have generated discussion and interest but are yet to be analyzed and compared. In this paper, we examine how 19 federal agencies responded to the memo, written by John Holdren, on issues of scientific data and the extent of their compliance to the directives outlined in the memo. We present a varied picture of the readiness of federal science agencies to comply with the memo through a comparative analysis and close reading of the contents of these responses. 
While some agencies, particularly those with a long history of supporting and conducting science, scored well, other responses indicate that some agencies have only taken a few steps towards implementing policies that comply with the memo. These results are of interest to the data curation community as they reveal how different agencies across the federal government approach their responsibilities for research data management, and how new policies and requirements might continue to affect scientists and research communities.}, language = {en}, journal = {Data Science Journal}, author = {Kriesberg, Adam and Huller, Kerry and Punzalan, Ricardo and Parr, Cynthia}, month = jun, year = {2017}, pages = {27}, file = {Kriesberg et al. - 2017 - An Analysis of Federal Policy on Public Access to .pdf:C\:\\Users\\carst\\Zotero\\storage\\UT4GXLS7\\Kriesberg et al. - 2017 - An Analysis of Federal Policy on Public Access to .pdf:application/pdf}, } @techreport{nicol_open_2013, title = {Open {Data} {Access} {Policies} and {Strategies} in the {European} {Research} {Area} and {Beyond}}, url = {https://www.science-metrix.com/pdf/SM_EC_OA_Data.pdf}, abstract = {This report examines policies and strategies towards open access (OA) of scientific data in the European Research Area (ERA), Brazil, Canada, Japan and the US from 2000 onwards. The analysis examines strategies that aim to foster OA scientific data—such as the types of incentives given at the researcher and institutional levels and the level of compliance by researchers and funded organisations—and also examines how, and whether, these policies are monitored and enforced. The infrastructures developed to store and share OA scientific data are also examined. 
The analysis is supported by findings from the literature on the global progression of OA scientific data since 2000—including its growth as a segment of scholarly publishing—as well as some of the broader trends, themes and debates that have emerged from the movement.}, language = {en}, institution = {European Commission DG Research \& Innovation}, author = {Nicol, Aurore and Caruso, Julie and Archambault, Éric}, year = {2013}, pages = {16}, file = {Nicol et al. - 2013 - Open Data Access Policies and Strategies in the Eu.pdf:C\:\\Users\\carst\\Zotero\\storage\\WR4K4UQW\\Nicol et al. - 2013 - Open Data Access Policies and Strategies in the Eu.pdf:application/pdf}, } @book{noauthor_declaration_2004, title = {Declaration on {Access} to {Research} {Data} from {Public} {Funding}}, url = {https://legalinstruments.oecd.org/en/instruments/157}, year = {2004}, file = {OECD Legal Instruments:C\:\\Users\\carst\\Zotero\\storage\\FUGI7QHW\\157.html:text/html}, } @book{noauthor_international_2015, title = {International {Open} {Data} {Charter}}, url = {https://opendatacharter.net/wp-content/uploads/2015/10/opendatacharter-charter_F.pdf}, language = {en}, year = {2015}, file = {2015 - International Open Data Charter.pdf:C\:\\Users\\carst\\Zotero\\storage\\JF2PG7KE\\2015 - International Open Data Charter.pdf:application/pdf}, } @article{putnings_im_2017, title = {Im {Netz} der {Policies}: {Beachtung} von und {Bewusstsein} für verschiedenste {Policies} bei {Forschungsprojekten}}, volume = {23}, url = {https://www.wissenschaftsmanagement.de/schwerpunkt/im-netz-der-policies}, abstract = {Wissenschaftler sind heutzutage in ein dichtes Netz von Richtlinien, Empfehlungen und Vorgaben seitens Politik, Hochschule und Drittmittelgebern eingebunden, neudeutsch oftmals mit Policies bezeichnet. 
Ein wirkliches Bewusstsein für diese und deren Folgen, etwa die Sanktionen bei Nichtbeachtung, fehlt jedoch vielfach}, language = {de}, number = {1}, journal = {Wissenschaftsmanagement}, author = {Putnings, Markus}, year = {2017}, pages = {34--37}, file = {Putnings - 2017 - Im Netz der Policies Beachtung von und Bewusstsei.pdf:C\:\\Users\\carst\\Zotero\\storage\\JBUU8FQZ\\Putnings - 2017 - Im Netz der Policies Beachtung von und Bewusstsei.pdf:application/pdf}, } @book{noauthor_metadata_nodate, title = {Metadata {Standards} {Directory} {Working} {Group}}, url = {http://rd-alliance.github.io/metadata-directory/}, abstract = {The RDA Metadata Standards Directory Working Group is supported by individuals and organizations involved in the development, implementation, and use of metadata for scientific data. The overriding goal is to develop a collaborative, open directory of metadata standards applicable to scientific data can help address infrastructure challenges.}, file = {Attachment:C\:\\Users\\carst\\Zotero\\storage\\68ASB3AZ\\metadata-directory.html:text/html}, } @article{sa_accountability_2013, title = {Accountability, performance assessment, and evaluation: {Policy} pressures and responses from research councils}, volume = {22}, issn = {0958-2029}, shorttitle = {Accountability, performance assessment, and evaluation}, url = {https://academic.oup.com/rev/article/22/2/105/1606677}, doi = {10.1093/reseval/rvs041}, abstract = {This study identifies contemporary government accountability requirements impacting research councils in North America and Europe and investigates how councils deal with such demands. This investigation is set against the background of rising policy frameworks stressing public sector accountability that have led many national governments to enact legislation requiring public agencies to collect more performance information and tie it to decision-making. 
Through documentary analysis and interviews with informants at several research councils we clarify how broader policy trends are reflected in the operation of public institutions that provide critical support for academic science. In addition to legislation cast broadly to regulate the activities of all government agencies, numerous regulations and guidelines have been targeted specifically at science and technology (S\&T) activities. Regulations on S\&T expenditures in general and on research councils more specifically include efforts to develop new metrics specific to science-based or innovation-based outcomes, to enhance the use of indicators in decision-making, to focus on tracing the broad impacts of programs, to increase the frequency of reporting, and to make agencies more responsive to business and public interests.}, language = {en}, number = {2}, journal = {Research Evaluation}, author = {Sá, Creso M. and Kretz, Andrew and Sigurdson, Kristjan}, month = jun, year = {2013}, pages = {105--117}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\54HWAUJR\\1606677.html:text/html}, } @book{noauthor_213_2015, title = {2.13 {Sachkosten}: {Kosten} für die {Zugänglichmachung} von {Forschungsdaten} ({Open} {Research} {Data})}, url = {http://www.snf.ch/de/foerderung/dokumente-downloads/Seiten/reglement-ausfuehrungsreglement.aspx#ar_a_2_13}, language = {de}, month = dec, year = {2015}, note = {Publication Title: Allgemeines Ausführungsreglement zum Beitragsreglement}, file = {2015 - 2.13 Sachkosten Kosten für die Zugänglichmachung .pdf:C\:\\Users\\carst\\Zotero\\storage\\C9TQJBMF\\2015 - 2.13 Sachkosten Kosten für die Zugänglichmachung .pdf:application/pdf}, } @techreport{shearer_comprehensive_2015, title = {Comprehensive {Brief} on {Research} {Data} {Management} {Policies}}, url = {https://portagenetwork.ca/wp-content/uploads/2016/03/Comprehensive-Brief-on-Research-Data-Management-Policies-2015.pdf}, language = {en}, author = {Shearer, Kathleen}, month = apr, year 
= {2015}, pages = {43}, file = {Shearer - 2015 - Comprehensive Brief on Research Data Management Po.pdf:C\:\\Users\\carst\\Zotero\\storage\\JS3D3P8E\\Shearer - 2015 - Comprehensive Brief on Research Data Management Po.pdf:application/pdf}, } @book{noauthor_research_nodate, title = {Research {Data} {Support} for {Wellcome} researchers}, url = {https://www.springernature.com/gp/campaign/Wellcome-RDS-Pilot}, abstract = {If you are a researcher who receives Wellcome funding, you can access support from one of our experienced data curation experts at no cost to you. Our research data curators will assess your scientific manuscript or publication and ensure the associated data is findable, accessible and reusable. We will also provide expert curation on data files you submit, saving you time and money organising your research data. You can now submit and store files up to 500GB in size.}, language = {en}, } @book{noauthor_artikel_2015, title = {Artikel 47: {Veröffentlichung} und {Zugänglichmachung} der {Forschungsresultate}}, url = {http://www.snf.ch/de/foerderung/dokumente-downloads/Seiten/reglement-beitragsreglement.aspx#br_a_47}, language = {de}, month = feb, year = {2015}, note = {Publication Title: Beitragsreglement: Reglement des Schweizerischen Nationalfonds über die Gewährung von Beiträgen}, file = {2015 - Artikel 47 Veröffentlichung und Zugänglichmachung.pdf:C\:\\Users\\carst\\Zotero\\storage\\SGX4LC6L\\2015 - Artikel 47 Veröffentlichung und Zugänglichmachung.pdf:application/pdf}, } @book{noauthor_open_nodate-3, title = {Open {Research} {Data}}, url = {http://www.snf.ch/de/derSnf/forschungspolitische_positionen/open_research_data/Seiten/default.aspx}, abstract = {Der SNF unterstützt diesen Grundsatz. Seit Oktober 2017 ist die Einreichung eines Data Management Plans (DMP) in den meisten Förderinstrumenten obligatorisch. 
Ebenso erwartet der SNF, dass Daten, die während der Forschungsarbeiten produziert werden, nachfolgend auf digitalen Datenbanken öffentlich zugänglich sind, sofern dem keine rechtlichen, ethischen, urheberrechtlichen oder andere Klauseln entgegenstehen.}, language = {de}, } @techreport{tananbaum_implementing_nodate, title = {Implementing an {Open} {Data} {Policy}: {A} {Primer} for {Research} {Funders}}, url = {https://sparcopen.org/wp-content/uploads/2016/01/sparc-open-data-primer-final.pdf}, language = {en}, institution = {Scholarly Publishing and Academic Resources Coalition (SPARC)}, author = {Tananbaum, Greg}, pages = {6}, file = {Tananbaum - Implementing an Open Data Policy A Primer for Res.pdf:C\:\\Users\\carst\\Zotero\\storage\\4JHDUVUK\\Tananbaum - Implementing an Open Data Policy A Primer for Res.pdf:application/pdf}, } @techreport{noauthor_information_2018, title = {Information {Open} {Access} – {Open} {Data} – {Open} {Source}}, url = {https://www.volkswagenstiftung.de/sites/default/files/downloads/OpenAccessOpenDataOpenSource-Hinweise_10_2018.pdf}, abstract = {Der gesamtgesellschaftliche Umbruch durch die Informationstechnologie ist ohne Zweifel als Paradigmenwechsel zu bezeichnen, der alle Bereiche der Gesellschaft umfasst – auch die Wissenschaft, die ihrerseits wieder über die zunehmend digitale Gesellschaft und Kultur forscht. Innerhalb der Wissenschaft ist von dem Paradigmenwechsel neben dem Forschungsgegenstand auch der Forschungsprozess betroffen, in dessen Verlauf immer mehr Forschungsdaten generiert bzw. ausgewertet werden mit immer komplexeren Analysetools. Parallel dazu verändert sich auch die Wissenschaftskommunikation, die neue digitale Medien und Infrastrukturen zur Distribution von Forschungsergebnissen bietet. 
Die VolkswagenStiftung möchte in ihrer Förderung die mit diesem Paradigmenwechsel verbundenen Chancen unterstützen, wohl wissend, dass sich die Wissenschaft wie die Gesellschaft derzeit im Übergang in ein neues digitales Medienzeitalter befindet. Bei einer Vielzahl von technischen, rechtlichen und institutionellen Rahmenbedingungen steht noch eine Klärung aus.}, language = {de}, institution = {Volkswagen Stiftung}, year = {2018}, pages = {[2]}, file = {2018 - Information Open Access – Open Data – Open Source.pdf:C\:\\Users\\carst\\Zotero\\storage\\UIWTGP4N\\2018 - Information Open Access – Open Data – Open Source.pdf:application/pdf}, } @article{zuiderwijk_special_2014, title = {Special {Issue} on {Transparency} and {Open} {Data} {Policies}: {Guest} {Editors}' {Introduction}}, volume = {9}, issn = {0718-1876}, url = {http://www.scielo.cl/scielo.php?script=sci_arttext&pid=S0718-18762014000300001&nrm=iso}, language = {en}, journal = {Journal of theoretical and applied electronic commerce research}, author = {Zuiderwijk, Anneke and Gascó, Mila and Parycek, Peter and Janssen, Marijn}, year = {2014}, pages = {I--IX}, file = {Zuiderwijk et al. - 2014 - Special Issue on Transparency and Open Data Polici.pdf:C\:\\Users\\carst\\Zotero\\storage\\C7DCANWH\\Zuiderwijk et al. - 2014 - Special Issue on Transparency and Open Data Polici.pdf:application/pdf}, } @incollection{jones_research_2012, address = {London}, title = {Research data policies: {Principles}, requirements and trends}, isbn = {978-1-85604-756-2}, abstract = {This title defines what is required to achieve a culture of effective data management offering advice on the skills required, legal and contractual obligations, strategies and management plans and the data management infrastructure of specialists and services. 
Data management has become an essential requirement for information professionals over the last decade, particularly for those supporting the higher education research community, as more and more digital information is created and stored. As budgets shrink and funders of research demand evidence of value for money and demonstrable benefits for society, there is increasing pressure to provide plans for the sustainable management of data. Ensuring that important data remains discoverable, accessible and intelligible and is shared as part of a larger web of knowledge will mean that research has a life beyond its initial purpose and can offer real utility to the wider community. This edited collection, bringing together leading figures in the field from the UK and around the world, provides an introduction to all the key data issues facing the HE and information management communities. Each chapter covers a critical element of data management: • Why manage research data? • The lifecycle of data management • Research data policies: principles, requirements and trends • Sustainable research data • Data management plans and planning • Roles and responsibilities – libraries, librarians and data • Research data management: opportunities and challenges for HEIs • The national data centres • Contrasting national research data strategies: Australia and the USA • Emerging infrastructure and services for research data management and curation in the UK and Europe Readership: This is essential reading for librarians and information professionals working in the higher education sector, the research community, policy makers and university managers. 
It will also be a useful introduction for students taking courses in information management, archivists and national library services.}, language = {en}, booktitle = {Managing {Research} {Data}}, publisher = {Facet Publishing}, author = {Jones, Sarah}, year = {2012}, pages = {47--66}, } @article{wykstra_funder_2017, title = {Funder {Data}-{Sharing} {Policies}: {Overview} and {Recommendations}}, copyright = {Creative Commons Attribution 4.0 International}, shorttitle = {Funder {Data}-{Sharing} {Policies}}, url = {https://figshare.com/articles/journal_contribution/Funder_Data-Sharing_Policies_Overview_and_Recommendations/5395456/2}, doi = {10.6084/M9.FIGSHARE.5395456.V2}, abstract = {This report covers funder data-sharing policies/practices, and provides recommendations to funders and others as they consider their own policies. It was commissioned by Robert Wood Johnson Foundation in 2017. If any comments or questions, please contact Stephanie Wykstra (stephanie.wykstra@gmail.com).}, author = {Wykstra, Stephanie}, year = {2017}, pages = {1486388 Bytes}, file = {Wykstra - 2017 - Funder Data-Sharing Policies Overview and Recomme.pdf:C\:\\Users\\carst\\Zotero\\storage\\PW3JLQW9\\Wykstra - 2017 - Funder Data-Sharing Policies Overview and Recomme.pdf:application/pdf}, } @book{hey_fourth_2009, title = {The {Fourth} {Paradigm}: {Data}-{Intensive} {Scientific} {Discovery}}, isbn = {978-0-9825442-0-4}, url = {https://www.microsoft.com/en-us/research/publication/fourth-paradigm-data-intensive-scientific-discovery/}, abstract = {Increasingly, scientific breakthroughs will be powered by advanced computing capabilities that help researchers manipulate and explore massive datasets. The speed at which any given scientific discipline advances will depend on how well its researchers collaborate with one another, and with technologists, in areas of eScience such as databases, workflow management, visualization, and cloud computing technologies. 
In The Fourth Paradigm: Data-Intensive Scientific Discovery, the collection of essays expands on the vision of pioneering computer scientist Jim Gray for a new, fourth paradigm of discovery based on data-intensive science and offers insights into how it can be fully realized. Critical praise for The Fourth Paradigm “The individual essays—and The Fourth Paradigm as a whole—give readers a glimpse of the horizon for 21st-century research and, at their best, a peek at what lies beyond. It’s a journey well worth taking.” — James P. Collins School of Life Sciences, Arizona State University Purchase from Amazon Paperback Kindle version From the back cover “The impact of Jim Gray’s thinking is continuing to get people to think in a new way about how data and software are redefining what it means to do science." — Bill Gates, Chairman, Microsoft Corporation “I often tell people working in eScience that they aren’t in this field because they are visionaries or super-intelligent—it’s because they care about science and they are alive now. It is about technology changing the world, and science taking advantage of it, to do more and do better.” — Rhys Francis, Australian eResearch Infrastructure Council “One of the greatest challenges for 21st-century science is how we respond to this new era of data-intensive science. 
This is recognized as a new paradigm beyond experimental and theoretical research and computer simulations of natural phenomena—one that requires new tools, techniques, and ways of working.” — Douglas Kell, University of Manchester “The contributing authors in this volume have done an extraordinary job of helping to refine an understanding of this new paradigm from a variety of disciplinary perspectives.” — Gordon Bell, Microsoft Research Microsoft Research is honored to provide initial website hosting for this book launch.}, publisher = {Microsoft Research}, author = {Hey, Tony and Tansley, Stewart and Tolle, Kristin}, month = oct, year = {2009}, file = {Hey et al. - 2009 - The fourth paradigm data-intensive scientific dis.pdf:C\:\\Users\\carst\\Zotero\\storage\\MCZ5A99X\\Hey et al. - 2009 - The fourth paradigm data-intensive scientific dis.pdf:application/pdf}, } @techreport{noauthor_bund-lander-vereinbarung_2018-1, title = {Bund-{Länder}-{Vereinbarung} zu {Aufbau} und {Förderung} einer {Nationalen} {Forschungsdateninfrastruktur} ({NFDI}) vom 26. 
{November} 2018}, url = {https://www.gwk-bonn.de/fileadmin/Redaktion/Dokumente/Papers/NFDI.pdf}, language = {de-DE}, month = nov, year = {2018}, pages = {5}, file = {2018 - Bund-Länder-Vereinbarung zu Aufbau und Förderung e.pdf:C\:\\Users\\carst\\Zotero\\storage\\GN82HFJ3\\2018 - Bund-Länder-Vereinbarung zu Aufbau und Förderung e.pdf:application/pdf}, } @techreport{noauthor_council_2016, address = {Brussels}, type = {outcome of proceedings}, title = {Council conclusions on the transition towards an {Open} {Science} system}, url = {https://data.consilium.europa.eu/doc/document/ST-9526-2016-INIT/en/pdf}, language = {en}, institution = {Council of the European Union}, month = may, year = {2016}, pages = {10}, file = {2016 - Council conclusions on the transition towards an O.pdf:C\:\\Users\\carst\\Zotero\\storage\\8PCUZXGQ\\2016 - Council conclusions on the transition towards an O.pdf:application/pdf}, } @book{commission_realising_2016, address = {Luxembourg}, title = {Realising the {European} open science cloud: first report and recommendations of the {Commission} high level expert group on the {European} open science cloud.}, isbn = {10.2777/940154}, shorttitle = {Realising the {European} open science cloud}, url = {https://data.europa.eu/doi/10.2777/940154}, language = {en}, editor = {Commission), Directorate-General for Research {and} Innovation (European}, year = {2016}, } @techreport{commission_setting_2018, address = {Brussels}, type = {Commission {Decision}}, title = {Setting up the {Expert} {Group} -{Executive} {Board} of the {European} {Open} {Science} {Cloud} (‘{EOSC}’) and laying down rules for its financing}, url = {https://www.eosc-portal.eu/sites/default/files/C20185552-EC-DECISION-EOSC-Excecutive-Board.pdf}, language = {en}, institution = {European Commission}, author = {Commission, European}, month = aug, year = {2018}, pages = {10}, file = {2018 - Setting up the Expert Group -Executive Board of th.pdf:C\:\\Users\\carst\\Zotero\\storage\\TC8WXWZ2\\2018 
- Setting up the Expert Group -Executive Board of th.pdf:application/pdf}, } @book{noauthor_rfii-stellungnahme_2018, title = {{RfII}-{Stellungnahme} zur {European} {Open} {Science} {Cloud} – {April} 2018}, copyright = {(CC BY-ND)}, url = {http://www.rfii.de/?p=2788}, language = {de}, publisher = {Rat für Informationsinfrastrukturen (RfII)}, month = apr, year = {2018}, file = {2018 - RfII-Stellungnahme zur European Open Science Cloud.pdf:C\:\\Users\\carst\\Zotero\\storage\\DTXC8B4C\\2018 - RfII-Stellungnahme zur European Open Science Cloud.pdf:application/pdf}, } @techreport{commission_digital_2015, address = {Brussels}, type = {Communication from the {Commission} to the {European} {Parliament}, the {Council}, the {European} {Economic} and {Social} {Committee} and the {Committee} of the {Regions}}, title = {A {Digital} {Single} {Market} {Strategy} for {Europe}}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=COM:2015:192:FIN}, institution = {European Commission}, author = {Commission, European}, month = may, year = {2015}, pages = {20}, file = {2015 - A Digital Single Market Strategy for Europe.pdf:C\:\\Users\\carst\\Zotero\\storage\\6G3R4HSD\\2015 - A Digital Single Market Strategy for Europe.pdf:application/pdf}, } @techreport{noauthor_riding_2010, type = {Final report of the {High} {Level} {Expert} {Group} on {Scientific} {Data}: {A} submission to the {European} {Commission}}, title = {Riding the wave: {How} {Europe} can gain from the rising tide of scientific data}, url = {file:///C:/Users/carst/Downloads/Ridingthewave-HowEuropecangainfromtherisingtideofscientificdata-FinalreportoftheHighLevelExpertGrouponScientificData-October2010.pdf}, language = {en}, year = {2010}, pages = {36}, file = {2010 - Riding the wave How Europe can gain from the risi.pdf:C\:\\Users\\carst\\Zotero\\storage\\YPRPCTVH\\2010 - Riding the wave How Europe can gain from the risi.pdf:application/pdf}, } @book{jung_helmholtz_2017, title = {Helmholtz {Portfolio} {Theme} 
{Large}-{Scale} {Data} {Management} and {Analysis} ({LSDMA})}, isbn = {978-3-7315-0695-9}, abstract = {The Helmholtz Association funded the "Large-Scale Data Management and Analysis" portfolio theme from 2012-2016. Four Helmholtz centres, six universities and another research institution in Germany joined to enable data-intensive science by optimising data life cycles in selected scientific communities. In our Data Life cycle Labs, data experts performed joint R\&D together with scientific communities. The Data Services Integration Team focused on generic solutions applied by several communities.}, language = {en}, publisher = {KIT Scientific Publishing, Karlsruhe}, editor = {Jung, Christopher and Meyer, Jörg and Streit, Achim}, year = {2017}, doi = {10.5445/KSP/1000071931}, file = {Jung et al. - 2017 - Helmholtz Portfolio Theme Large-Scale Data Managem.pdf:C\:\\Users\\carst\\Zotero\\storage\\L5QFTN8I\\Jung et al. - 2017 - Helmholtz Portfolio Theme Large-Scale Data Managem.pdf:application/pdf}, } @techreport{koski_partnership_2009, type = {White {Paper}}, title = {Partnership for {Accessing} {Data} in {Europe}: {Strategy} for a {European} {Data} {Infrastructure}}, url = {https://ec.europa.eu/eurostat/cros/system/files/parade-white-paper.pdf}, language = {en}, author = {Koski, Kimmo and Gheller, Claudio and Heinzel, Stefan and Kennedy, Alison and Streit, Achim and Wittenburg, Peter}, month = sep, year = {2009}, pages = {29}, file = {Koski et al. - 2009 - Partnership for Accessing Data in EuropeStrategy f.pdf:C\:\\Users\\carst\\Zotero\\storage\\N237WH6J\\Koski et al. 
- 2009 - Partnership for Accessing Data in EuropeStrategy f.pdf:application/pdf}, } @techreport{commission_implementation_2018, address = {Brussels}, type = {Commission {Staff} {Working} {Document}}, title = {Implementation {Roadmap} for the {European} {Open} {Science} {Cloud}}, url = {https://ec.europa.eu/transparency/regdoc/rep/10102/2018/EN/SWD-2018-83-F1-EN-MAIN-PART-1.PDF}, language = {en}, institution = {European Commission}, author = {{European Commission}}, month = mar, year = {2018}, pages = {34}, file = {European Commission - 2018 - Implementation Roadmap for the European Open Scien.PDF:C\:\\Users\\carst\\Zotero\\storage\\ZVCF3UFJ\\European Commission - 2018 - Implementation Roadmap for the European Open Scien.PDF:application/pdf}, } @book{commission_prompting_2018, address = {Luxembourg}, title = {Prompting an {EOSC} in practice: final report and recommendations of the {Commission} 2nd {High} {Level} {Expert} {Group} on the {European} {Open} {Science} {Cloud} ({EOSC}), 2018.}, isbn = {978-92-79-94836-7}, shorttitle = {Prompting an {EOSC} in practice}, url = {https://data.europa.eu/doi/10.2777/112658}, language = {en}, publisher = {Publications Office of the European Union}, editor = {{Directorate-General for Research and Innovation (European Commission)}}, year = {2018}, doi = {10.2777/112658}, file = {Directorate-General for Research and Innovation (European Commission) - 2018 - Prompting an EOSC in practice final report and re.pdf:C\:\\Users\\carst\\Zotero\\storage\\62R8GF3R\\Directorate-General for Research and Innovation (European Commission) - 2018 - Prompting an EOSC in practice final report and re.pdf:application/pdf}, } @article{budroni_architectures_2019, title = {Architectures of {Knowledge}: {The} {European} {Open} {Science} {Cloud}}, volume = {39}, issn = {2191-4664, 0720-6763}, shorttitle = {Architectures of {Knowledge}}, url = {https://www.degruyter.com/view/journals/abitech/39/2/article-p130.xml}, doi = {10.1515/abitech-2019-2006}, 
language = {de}, number = {2}, journal = {ABI Technik}, author = {Budroni, Paolo and Burgelman, Jean-Claude and Schouppe, Michel}, month = jul, year = {2019}, pages = {130--141}, file = {Budroni et al. - 2019 - Architectures of Knowledge The European Open Scie.pdf:C\:\\Users\\carst\\Zotero\\storage\\FNJXTBY5\\Budroni et al. - 2019 - Architectures of Knowledge The European Open Scie.pdf:application/pdf}, } @book{wittenburg_fair_2019, address = {Beijing}, title = {{FAIR} {Digital} {Objects} – {Implementing} {FAIR} {Principles}}, url = {http://codata2019.csp.escience.cn/dct/page/70006}, author = {Wittenburg, Peter and Liu, Jia}, month = sep, year = {2019}, note = {Type: Lightning Talks}, } @article{heidorn_shedding_2008, title = {Shedding {Light} on the {Dark} {Data} in the {Long} {Tail} of {Science}}, volume = {57}, copyright = {Copyright 2008 Board of Trustees of the University of Illinois}, issn = {0024-2594}, url = {https://www.ideals.illinois.edu/handle/2142/10672}, abstract = {One of the primary outputs of the scientific enterprise is data, but many institutions such as libraries that are charged with preserving and disseminating scholarly output have largely ignored this form of documentation of scholarly activity. This paper focuses on a particularly troublesome class of data, termed dark data. “Dark data” is not carefully indexed and stored so it becomes nearly invisible to scientists and other potential users and therefore is more likely to remain underutilized and eventually lost. The article discusses how the concepts from long-tail economics can be used to understand potential solutions for better curation of this data. The paper describes why this data is critical to scientific progress, some of the properties of this data, as well as some social and technical barriers to proper management of this class of data. 
Many potentially useful institutional, social, and technical solutions are under development and are introduced in the last sections of the paper, but these solutions are largely unproven and require additional research and development.}, language = {en}, number = {2}, journal = {Library Trends}, author = {Heidorn, P. Bryan}, year = {2008}, pages = {280--299}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\VC5EAMRU\\10672.html:text/html;Heidorn - 2008 - Shedding Light on the Dark Data in the Long Tail o.pdf:C\:\\Users\\carst\\Zotero\\storage\\KTGBJGE6\\Heidorn - 2008 - Shedding Light on the Dark Data in the Long Tail o.pdf:application/pdf}, } @book{harari_homo_2017, address = {München}, edition = {1}, title = {Homo deus: eine {Geschichte} von {Morgen}}, isbn = {978-3-406-70401-7 978-3-406-70402-4}, shorttitle = {Homo deus}, language = {de}, publisher = {C.H. Beck}, author = {Harari, Yuval Noaḥ}, translator = {Wirthensohn, Andreas}, year = {2017}, } @book{ghosh_googles_2015, title = {Google's {Vint} {Cerf} warns of 'digital {Dark} {Age}'}, url = {https://www.bbc.com/news/science-environment-31450389}, abstract = {Vint Cerf, a "father of the internet", says a way must be found to stop all our images and documents being lost through technological obsolescence.}, language = {en-GB}, author = {Ghosh, Pallab}, month = feb, year = {2015}, note = {Publication Title: BBC News}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\H8KFZ3CU\\science-environment-31450389.html:text/html}, } @book{borgman_data_2014, address = {Amsterdam}, title = {Data, {Data}, {Everywhere}, {Nor} {Any} {Drop} to {Drink}.}, copyright = {License: CC Attribution-NonCommercial License}, url = {https://www.slideshare.net/ResearchDataAlliance/christine-borgman-keynote}, language = {en-US}, author = {Borgman, Christine L.}, month = sep, year = {2014}, note = {Type: Keynote presentation}, } @book{noauthor_take_nodate, title = {Take {Action} for the {Sustainable} {Development} {Goals}}, url = 
{https://www.un.org/sustainabledevelopment/sustainable-development-goals/}, abstract = {The Sustainable Development Goals are the blueprint to achieve a better and more sustainable future for all. They address the global challenges we face, including poverty, inequality, climate change, environmental degradation, peace and justice. Learn more and take action. Watch the global broadcast ‘Nations United” on 19 September at 9 a.m. EDT}, language = {en-US}, note = {Publication Title: United Nations Sustainable Development}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\EKEW5FHJ\\sustainable-development-goals.html:text/html}, } @inproceedings{noauthor_moving_2019, address = {Paris}, title = {Moving {Forward} on {Data} {Infrastructure} {Technology} {Convergence}}, url = {https://github.com/GEDE-RDA-Europe/GEDE/tree/master/FAIR%20Digital%20Objects/Paris-FDO-workshop}, year = {2019}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\6VW6J3VC\\Paris-FDO-workshop.html:text/html}, } @book{noauthor_e-science_nodate, title = {E-{Science}}, url = {https://mwk.baden-wuerttemberg.de/de/forschung/forschungslandschaft/e-science/}, language = {de}, note = {Publication Title: Baden-Württemberg.de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\4ISWVP3M\\e-science.html:text/html;E-Science.pdf:C\:\\Users\\carst\\Zotero\\storage\\X6Y4UY9I\\E-Science.pdf:application/pdf}, } @book{noauthor_digitalisierung_nodate, title = {Digitalisierung}, url = {https://mwfk.brandenburg.de/mwfk/de/wissenschaft/digitalisierung/}, note = {Publication Title: https://mwfk.brandenburg.de}, file = {Digitalisierung | Ministerium für Wissenschaft, Forschung und Kultur:C\:\\Users\\carst\\Zotero\\storage\\H4KUHTHA\\digitalisierung.html:text/html}, } @techreport{noauthor_global_nodate, title = {A {Global} {Roadmap} for {Health} {Informatics} {StandardizationProposal} prepared by the {World} {Economic} {Forum}, in collaboration with {Boston} {Consulting} {Group}}, file = {A Global Roadmap for Health 
Informatics Standardiz.pdf:C\:\\Users\\carst\\Zotero\\storage\\58NBKPB6\\A Global Roadmap for Health Informatics Standardiz.pdf:application/pdf}, } @book{noauthor_vier_2019, title = {Vier {Science} {Data} {Centers} in {Baden}-{Württemberg}}, url = {https://mwk.baden-wuerttemberg.de/de/service/presse/pressemitteilung/pid/vier-science-data-centers-in-baden-wuerttemberg/}, abstract = {Der systematische Zugang zu digitalen Datenbeständen wird für neue wissenschaftliche Erkenntnisse und damit für Innovationen und Technologietransfer immer wichtiger. Zukunftsfelder wie Maschinelles Lernen oder Künstliche Intelligenz sind auf entsprechende Datengrundlagen angewiesen. Die reine...}, language = {de}, month = feb, year = {2019}, note = {Publication Title: Baden-Württemberg.de}, file = {2019 - Vier Science Data Centers in Baden-Württemberg.pdf:C\:\\Users\\carst\\Zotero\\storage\\RJIFDFMS\\2019 - Vier Science Data Centers in Baden-Württemberg.pdf:application/pdf;Snapshot:C\:\\Users\\carst\\Zotero\\storage\\6GZF98RY\\vier-science-data-centers-in-baden-wuerttemberg.html:text/html}, } @book{koureas_digital_2018, address = {Brussels}, title = {Digital {Objects}: {The} {Science} {Case}}, url = {https://github.com/GEDE-RDA-Europe/GEDE/blob/master/Digital-Objects/DO-Workshops/workshop-September-18/6-koureas-intro-talk.pdf}, author = {Koureas, Dimitris}, year = {2018}, file = {Koureas - 2018 - Digital Objects The Science Case.pdf:C\:\\Users\\carst\\Zotero\\storage\\4KSGF5ZJ\\Koureas - 2018 - Digital Objects The Science Case.pdf:application/pdf}, } @article{kahn_framework_2006, title = {A framework for distributed digital object services}, volume = {6}, issn = {1432-1300}, url = {https://doi.org/10.1007/s00799-005-0128-x}, doi = {10.1007/s00799-005-0128-x}, language = {en}, number = {2}, journal = {International Journal on Digital Libraries}, author = {Kahn, Robert and Wilensky, Robert}, month = mar, year = {2006}, pages = {115--123}, file = {Kahn und Wilensky - 2006 - A framework for 
distributed digital object service.pdf:C\:\\Users\\carst\\Zotero\\storage\\C7SGZ277\\Kahn und Wilensky - 2006 - A framework for distributed digital object service.pdf:application/pdf}, } @article{mons_nano-publication_2009, title = {Nano-{Publication} in the e-science era}, volume = {523}, abstract = {The rate of data production in the Life Sciences has now reached such proportions that to consider it irresponsible to fund data generation without proper concomitant funding and infrastructure for storing, analyzing and exchanging the information and knowledge contained in, and extracted from, those data, is not an exaggerated position any longer. The chasm between data production and data handling has become so wide, that many data go unnoticed or at least run the risk of relative obscurity, fail to reveal the information contained in the data set or remains inaccessible due to ambiguity, or financial or legal toll-barriers. As a result, inconsistency, ambiguity and redundancy of data and information on the Web are becoming impediments to the performance of comprehensive information extraction and analysis. This paper attempts a stepwise explanation of the use of richly annotated RDF-statements as carriers of unambiguous, meta-analyzed information in the form of traceable nano-publications.}, journal = {Workshop on Semantic Web Applications in Scientific Discourse (SWASD 2009)}, author = {Mons, Barend and Velterop, Jan}, year = {2009}, file = {Mons und Velterop - 2009 - Nano-Publication in the e-science era.pdf:C\:\\Users\\carst\\Zotero\\storage\\U9JD26RQ\\Mons und Velterop - 2009 - Nano-Publication in the e-science era.pdf:application/pdf}, } @book{kraft_fair_2017, title = {Die {FAIR} {Data} {Prinzipien} für {Forschungsdaten}}, url = {https://blogs.tib.eu/wp/tib/2017/09/12/die-fair-data-prinzipien-fuer-forschungsdaten/}, abstract = {Wissenschaftliche Daten sollen nach den FAIR Data Prinzipien auffindbar, zugänglich, interoperabel und wiederverwendbar sein. 
Unsere ausführliche Checkliste gibt Handlungsempfehlungen für Wissenschaftlerinnen und Wissenschaftler und Betreiber von Informationsinfrastrukturen.}, language = {de-DE}, author = {Kraft, Angelina}, month = sep, year = {2017}, note = {Publication Title: TIB-Blog}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\6FILD9R7\\die-fair-data-prinzipien-fuer-forschungsdaten.html:text/html}, } @book{kahn_framework_1995, title = {A framework for distributed digital object services}, url = {https://www.cnri.reston.va.us/home/cstr/arch/k-w.html}, language = {en}, author = {Kahn, Robert and Wilensky, Robert}, month = may, year = {1995}, } @incollection{van_loenen_open_2018, address = {The Hague}, series = {Information {Technology} and {Law} {Series}}, title = {Open {Data} {Exposed}}, isbn = {978-94-6265-261-3}, url = {https://doi.org/10.1007/978-94-6265-261-3_1}, abstract = {This book is about open data, i.e. data that does not have any barriers in the (re)use. Open data aims to optimize access, sharing and using data from a technical, legal, financial, and intellectual perspective. Data increasingly determines the way people live their lives today. Nowadays, we cannot imagine a life without real-time traffic information about our route to work, information of the daily news or information about the local weather. At the same time, citizens themselves now are constantly generating and sharing data and information via many different devices and social media systems. Especially for governments, collection, management, exchange, and use of data and information have always been key tasks, since data is both the primary input to and output of government activities. Also for businesses, non-profit organizations, researchers and various other actors, data and information are essential.}, language = {en}, booktitle = {Open {Data} {Exposed}}, publisher = {T.M.C. 
Asser Press}, author = {van Loenen, Bastiaan and Vancauwenberghe, Glenn and Crompvoets, Joep and Dalla Corte, Lorenzo}, editor = {van Loenen, Bastiaan and Vancauwenberghe, Glenn and Crompvoets, Joep}, year = {2018}, doi = {10.1007/978-94-6265-261-3_1}, pages = {1--10}, } @incollection{verma_open_2018, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Open {Data} {Infrastructure} for {Research} and {Development}}, isbn = {978-981-10-7515-5}, url = {https://doi.org/10.1007/978-981-10-7515-5_2}, abstract = {Open data are the idea originated from philosophy that certain data should be freely available for everyone to use, reuse, and republish as they wish, without restrictions from copyright, patents or other mechanisms of control. The intent of the open data movement is on the same lines as that of other “open” movements such as open source, open content, and open access. Open data have caught attention of research community, government, industry across the world, due to its huge potential to bring constructive changes in the socioeconomic and scientific domain by developing and disseminating information within a vibrant mixed ecosystem comprising of research and developer community, government bodies, business houses, and hybrid solutions of various forms fueled by the sharp elevation of information and communications technologies (ICT) and digital governance. Open data along with data analytics service give a huge opportunity for development and innovation in citizen service delivery. Open data have also formed a part of core strategy of businesses across the world, big or small, digital or non-digital. Research community has also highlighted the potential of data for research and development. 
Reuse of research data can enable its analysis from a different perspective; processing the data in combination with other related datasets may provide a different perspective of the research findings.}, language = {en}, booktitle = {Data {Science} {Landscape}: {Towards} {Research} {Standards} and {Protocols}}, publisher = {Springer}, author = {Verma, Neeta and Gupta, M. P. and Biswas, Shubhadip}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5_2}, pages = {33--43}, } @phdthesis{stvilia_measuring_2006, type = {Dissertation}, title = {Measuring information quality}, url = {https://www.researchgate.net/publication/34172596_Measuring_information_quality}, school = {University of Illinois}, author = {Stvilia, Besiki}, year = {2006}, } @article{stvilia_framework_2007, title = {A framework for information quality assessment}, volume = {58}, copyright = {Copyright © 2007 Wiley Periodicals, Inc., A Wiley Company}, issn = {1532-2890}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/asi.20652}, doi = {10.1002/asi.20652}, abstract = {One cannot manage information quality (IQ) without first being able to measure it meaningfully and establishing a causal connection between the source of IQ change, the IQ problem types, the types of activities affected, and their implications. In this article we propose a general IQ assessment framework. In contrast to context-specific IQ assessment models, which usually focus on a few variables determined by local needs, our framework consists of comprehensive typologies of IQ problems, related activities, and a taxonomy of IQ dimensions organized in a systematic way based on sound theories and practices. The framework can be used as a knowledge resource and as a guide for developing IQ measurement models for many different settings. 
The framework was validated and refined by developing specific IQ measurement models for two large-scale collections of two large classes of information objects: Simple Dublin Core records and online encyclopedia articles.}, language = {en}, number = {12}, journal = {Journal of the American Society for Information Science and Technology}, author = {Stvilia, Besiki and Gasser, Les and Twidale, Michael B. and Smith, Linda C.}, year = {2007}, pages = {1720--1733}, } @article{apel_offene_2018, title = {Offene {Forschungsdaten} an der {Universität} {Heidelberg}: von generischen institutionellen {Repositorien} zu fach- und projektspezifischen {Diensten}}, volume = {5}, issn = {2363-9814}, shorttitle = {Offene {Forschungsdaten} an der {Universität} {Heidelberg}}, url = {https://www.o-bib.de/article/view/2018H2S61-71}, doi = {10.5282/o-bib/2018H2S61-71}, abstract = {Die Universität Heidelberg hat 2014 das Kompetenzzentrum Forschungsdaten als gemeinsame Serviceeinrichtung der Universitätsbibliothek und des Universitätsrechenzentrums eingerichtet. Der vorliegende Beitrag stellt die Angebote des Kompetenzzentrums zur Publikation von Forschungsdaten vor, fasst bisherige Erfahrungen zusammen und diskutiert auf dieser Grundlage exemplarisch die Rolle von institutionellen Veröffentlichungsplattformen für Open Research Data. Im Einzelnen werden dabei das institutionelle Datenrepositorium heiDATA, die Bild- und Multimediadatenbank heidICON sowie die derzeitige Weiterentwicklung des Dienstleistungsportfolios des Kompetenzzentrums im Rahmen des Projekts „Community-spezifische Forschungsdatenpublikation (CS-FDP)“ vorgestellt. In 2014 Heidelberg University established the Competence Centre for Research Data as a joint facility of the University Library and the university’s Computing Centre. This article describes the Competence Centre’s services for publishing research data and examines on that basis the role of institutional publication platforms for open research data. 
In particular the paper discusses the institutional research data repository heiDATA, the image and multimedia database heidICON and the current refinement of the Competence Centre’s service portfolio within the project “Community Specific Research Data Publication (CS-FDP)”.}, language = {de}, number = {2}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Apel, Jochen and Gebhart, Fabian and Maylein, Leonhard and Wlotzka, Martin}, month = jul, year = {2018}, pages = {61--71}, file = {Apel et al. - 2018 - Offene Forschungsdaten an der Universität Heidelbe.pdf:C\:\\Users\\carst\\Zotero\\storage\\6TRSD76V\\Apel et al. - 2018 - Offene Forschungsdaten an der Universität Heidelbe.pdf:application/pdf}, } @book{depuydt_rosetta_1999, address = {London New York}, title = {Rosetta {Stone}}, isbn = {978-0-415-18589-9}, language = {en}, publisher = {Routledge}, author = {Depuydt, Leo}, editor = {Bard, Kathryn A. and Shubert, Steven Blake}, year = {1999}, note = {Publication Title: Encyclopedia of the archaeology of ancient Egypt}, } @book{noauthor_publikationen_nodate, title = {Publikationen}, url = {https://www.langzeitarchivierung.de/Webs/nestor/DE/Publikationen/publikationen_node.html}, note = {Publication Title: nestor – Kompetenznetzwerk digitale Langzeitarchivierung}, file = {nestor - Publikationen:C\:\\Users\\carst\\Zotero\\storage\\XRQADACD\\publikationen_node.html:text/html}, } @inproceedings{factor_object_2005, title = {Object storage: the future building block for storage systems}, shorttitle = {Object storage}, doi = {10.1109/LGDI.2005.1612479}, abstract = {The concept of object storage was introduced in the early 1990's by the research community. Since then it has greatly matured and is now in its early stages of adoption by the industry. Yet, object storage is still not widely accepted. 
Viewing object store technology as the future building block particularly for large storage systems, our team in IBM Haifa Research Lab has invested substantial efforts in this area. In this position paper we survey the latest developments in the area of object store technology, focusing on standardization, research prototypes, and technology adoption and deployment. A major step has been the approval of the TIO OSD protocol (version I) as an OSD standard in late 2004. We also report on prototyping efforts that are carried out in IBM Haifa Research Lab in building an object store. Our latest prototype is compliant with a large subset of the TIO standard. To facilitate deployment of the new technology and protocol in the community at large, our team also implemented a TIO-compliant OSD (iSCSI) initiator for Linux. The initiator is interoperable with object disks of other vendors. The initiator is available as an open source driver for Linux.}, booktitle = {2005 {IEEE} {International} {Symposium} on {Mass} {Storage} {Systems} and {Technology}}, author = {Factor, M. and Meth, K. and Naor, D. and Rodeh, O. and Satran, J.}, year = {2005}, pages = {119--123}, } @article{rapp_sara-dienst_2018, title = {{SARA}-{Dienst}: {Software} langfristig verfügbar machen}, volume = {5}, issn = {2363-9814}, shorttitle = {{SARA}-{Dienst}}, url = {https://www.o-bib.de/article/view/2018H2S92-105}, doi = {10.5282/o-bib/2018H2S92-105}, abstract = {Software spielt in vielen Disziplinen eine wichtige Rolle im Forschungsprozess. Sie ist entweder selbst Gegenstand der Forschung oder wird als Hilfsmittel zur Erfassung, Verarbeitung und Analyse von Forschungsdaten eingesetzt. Zur Nachvollziehbarkeit der durchgeführten Forschung sollte Software langfristig verfügbar gemacht werden. Im SARA-Projekt zwischen der Universität Konstanz und der Universität Ulm wird ein Dienst entwickelt, der versucht die Einschränkungen bereits bestehender Angebote aufzuheben. Dies beinhaltet u.a. 
die Möglichkeit, die gesamte Entwicklungshistorie auf einfache Weise mitzuveröffentlichen und für Dritte zur Online-Exploration anzubieten. Zudem bestimmen die Forschenden den Zeitpunkt und Umfang der zu archivierenden/veröffentlichenden Software-Artefakte selbst. Der SARA-Dienst sieht auch die Möglichkeit vor, eine Archivierung ohne Veröffentlichung vorzunehmen. Der geplante Dienst verbindet bereits bestehende Publikations- und Forschungsinfrastrukturen miteinander. Er ermöglicht aus der Arbeitsumgebung der Forschenden heraus eine Archivierung und Veröffentlichung von Software und unterstützt Forschende dabei, bereits prozessbegleitend Zwischenstände ihrer Forschung festzuhalten. Aufgrund seines modularen Aufbaus kann der SARA-Dienst in unterschiedlichen Szenarien zum Einsatz kommen, beispielsweise als kooperativer Dienst für mehrere Einrichtungen. Er stellt eine sinnvolle Ergänzung zu bestehenden Angeboten im Forschungsdatenmanagement dar.}, language = {de}, number = {2}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Rapp, Franziska and Kombrink, Stefan and Kushnarenko, Volodymyr and Fratz, Matthias and Scharon, Daniel}, month = jul, year = {2018}, pages = {92--105}, file = {Rapp et al. - 2018 - SARA-Dienst Software langfristig verfügbar machen.pdf:C\:\\Users\\carst\\Zotero\\storage\\CPIKB7ZF\\Rapp et al. - 2018 - SARA-Dienst Software langfristig verfügbar machen.pdf:application/pdf}, } @article{ovshinsky_reversible_1968, title = {Reversible {Electrical} {Switching} {Phenomena} in {Disordered} {Structures}}, volume = {21}, url = {https://link.aps.org/doi/10.1103/PhysRevLett.21.1450}, doi = {10.1103/PhysRevLett.21.1450}, abstract = {A rapid and reversible transition between a highly resistive and conductive state effected by an electric field, which we have observed in various types of disordered semiconducting material, is described in detail. 
The switching parameters and chemical composition of a typical material are presented, and microscopic mechanisms for the conduction phenomena are suggested.}, language = {en}, number = {20}, journal = {Physical Review Letters}, author = {Ovshinsky, Stanford R.}, month = nov, year = {1968}, pages = {1450--1453}, } @book{neuroth_nestor_2016, address = {Glückstadt, Göttingen}, edition = {2. aktualisierte Druckauflage}, title = {nestor {Handbuch}: {Eine} kleine {Enzyklopädie} der digitalen {Langzeitarchivierung} ; {Version} 2.3}, language = {de}, publisher = {Werner Hülsbusch Fachverlag für Medientechnik und -wirtschaft}, editor = {Neuroth, Heike and Oßwald, Achim and Scheffel, Regine and Strathmann, Stefan and Huth, Karsten}, year = {2016}, file = {Neuroth et al. - 2016 - nestor Handbuch Eine kleine Enzyklopädie der digi.pdf:C\:\\Users\\carst\\Zotero\\storage\\R77S23VE\\Neuroth et al. - 2016 - nestor Handbuch Eine kleine Enzyklopädie der digi.pdf:application/pdf}, } @article{bundesministerium_des_innern_grundsatze_nodate, title = {Grundsätze zur {Durchführung} der {Sicherheitsverfilmung} von {Archivalien}: {Bek}. d. {BMI} v. 
13.05.1987 – {ZV} 1 {M} 325 100–213}, volume = {38}, number = {16}, journal = {Gemeinsames Ministerialblatt}, author = {{Bundesministerium des Innern}}, pages = {284--292}, } @techreport{noauthor_entwicklung_2017, address = {Göttingen}, title = {Entwicklung von {Forschungsdateninfrastrukturen} im internationalen {Vergleich}: {Bericht} und {Anregungen}}, copyright = {Creative Commons Namensnennung – Weitergabe unter gleichen Bedingungen 4.0 International}, url = {http://www.rfii.de/?p=2346}, language = {de}, institution = {Rat für Informationsinfrastrukturen}, year = {2017}, pages = {93}, file = {2016 - Entwicklung von Forschungsdateninfrastrukturen im .pdf:C\:\\Users\\carst\\Zotero\\storage\\HQDBXF3R\\2016 - Entwicklung von Forschungsdateninfrastrukturen im .pdf:application/pdf}, } @book{leggett_digitization_2014, address = {Lanham Boulder New York Toronto Plymouth, UK}, series = {Practical guides for librarians}, title = {Digitization and digital archiving: a practical guide for librarians}, isbn = {978-0-8108-9207-1 978-0-8108-9208-8}, shorttitle = {Digitization and digital archiving}, language = {en}, number = {7}, publisher = {Rowman \& Littlefield}, author = {Leggett, Elizabeth R.}, year = {2014}, } @book{majonica_geheimnis_2007, address = {München}, edition = {Überarb. und erw. Neuausg}, series = {dtv}, title = {Das {Geheimnis} der {Hieroglyphen}: die abenteuerliche {Entschlüsselung} der ägyptischen {Schrift} durch {Jean} {François} {Champollion} ; mit zahlreichen dokumentarischen {Abbildungen} und zeitgenössischen {Grafiken}}, isbn = {978-3-423-71275-0}, language = {de}, number = {71275 : Junior}, publisher = {Dt. 
Taschenbuch-Verl}, author = {Majonica, Rudolf}, year = {2007}, } @book{verheul_networking_2006, address = {München}, series = {{IFLA} {Publications}}, title = {Networking for {Digital} {Preservation}: {Current} {Practice} in 15 {National} {Libraries}}, isbn = {978-3-598-44021-2}, shorttitle = {Networking for {Digital} {Preservation}}, abstract = {Bibliotheken auf der ganzen Welt müssen sich mit der schnell wachsenden Menge an zu sicherndem digitalem Material auseinandersetzen. Digitale Veröffentlichungen, online oder auf CD, digitalisierte Bilder und digital erstellte Objekte müssen aufbewahrt und verfügbar gemacht werden. Das Speichern und Sichern des digitalen Erbes ist ein wichtiges Thema, insbesondere für Nationalbibliotheken, aufgrund ihrer gesetzlichen Aufgabe, das nationale Erbe eines Landes zu erhalten. Dieser Band beschreibt den aktuellen Stand der digitalen Speichermedien, Strategien der Datensicherung und aktuelle Projekte in den Nationalbibliotheken Australiens, Chinas, Dänemarks, Deutschlands, Frankreichs, Großbritanniens, Japans, Kanadas, Neuseelands, der Niederlande, Österreichs, Portugals, Schwedens, der Schweiz und der USA.}, language = {en}, number = {119}, publisher = {De Gruyter Saur}, author = {Verheul, Ingeborg}, year = {2006}, doi = {10.1515/9783598440212}, file = {Verheul - 2006 - Networking for Digital Preservation Current Pract.pdf:C\:\\Users\\carst\\Zotero\\storage\\TP6RDDGR\\Verheul - 2006 - Networking for Digital Preservation Current Pract.pdf:application/pdf}, } @book{corti_managing_2014-1, address = {Los Angeles, Calif. 
[u.a.]}, title = {Managing and sharing research data : a guide to good practice}, isbn = {978-1-4462-6725-7 978-1-4462-6726-4}, publisher = {SAGE}, editor = {Corti, Louise and Van den Eynden, Veerle and Bishop, Libby and Woollard, Matthew}, year = {2014}, } @article{parkin_magnetic_2008, title = {Magnetic {Domain}-{Wall} {Racetrack} {Memory}}, volume = {320}, copyright = {American Association for the Advancement of Science}, issn = {0036-8075, 1095-9203}, url = {https://science.sciencemag.org/content/320/5873/190}, doi = {10.1126/science.1145799}, abstract = {Recent developments in the controlled movement of domain walls in magnetic nanowires by short pulses of spin-polarized current give promise of a nonvolatile memory device with the high performance and reliability of conventional solid-state memory but at the low cost of conventional magnetic disk drive storage. The racetrack memory described in this review comprises an array of magnetic nanowires arranged horizontally or vertically on a silicon chip. Individual spintronic reading and writing nanodevices are used to modify or read a train of ∼10 to 100 domain walls, which store a series of data bits in each nanowire. This racetrack memory is an example of the move toward innately three-dimensional microelectronic devices.}, language = {en}, number = {5873}, journal = {Science}, author = {Parkin, Stuart S. P. and Hayashi, Masamitsu and Thomas, Luc}, month = apr, year = {2008}, pages = {190--194}, } @article{clelland_hiding_1999, title = {Hiding messages in {DNA} microdots}, volume = {399}, copyright = {1999 Macmillan Magazines Ltd.}, issn = {1476-4687}, url = {https://www.nature.com/articles/21092}, doi = {10.1038/21092}, abstract = {The microdot is a means of concealing messages (steganography)1 that was developed by Professor Zapp and used by German spies in the Second World War to transmit secret information2. 
A microdot (“the enemy's masterpiece of espionage”2) was a greatly reduced photograph of a typewritten page that was pasted over a full stop in an innocuous letter2. We have taken the microdot a step further and developed a DNA-based, doubly steganographic technique for sending secret messages. A DNA-encoded message is first camouflaged within the enormous complexity of human genomic DNA and then further concealed by confining this sample to a microdot.}, language = {en}, number = {6736}, journal = {Nature}, author = {Clelland, Catherine Taylor and Risca, Viviana and Bancroft, Carter}, year = {1999}, pages = {533--534}, file = {Clelland et al. - 1999 - Hiding messages in DNA microdots.pdf:C\:\\Users\\carst\\Zotero\\storage\\X9GBC7FK\\Clelland et al. - 1999 - Hiding messages in DNA microdots.pdf:application/pdf}, } @article{mohammed_controlled_2020, title = {Controlled spin-torque driven domain wall motion using staggered magnetic wires}, volume = {116}, issn = {0003-6951}, url = {https://aip.scitation.org/doi/10.1063/1.5135613}, doi = {10.1063/1.5135613}, abstract = {Domain wall (DW) memory devices such as racetrack memory offer an alternative to the hard disk drive in achieving high capacity storage. In DW memory, the control of domain wall positions and their motion using spin-transfer torque is an important challenge. In this Letter, we demonstrate controlled domain wall motion using spin-transfer torque in staggered wires. The devices, fabricated using electron-beam and laser lithography, were tested using magneto-optical Kerr microscopy and electrical transport measurements. The depinning current is found to depend on the device dimensions of the staggering wires. Thus, the proposed staggering configuration can be utilized to fine-tune the properties of DW devices for memory applications. The authors would like to thank S. Al Harthi and M. T. Zar Myint from Sultan Qaboos University for their support and assistance in the magnetometry measurements. S.N.P. 
acknowledges the partial support from the grant of National Research Foundation, Singapore (NRF2015-IIP003-001).}, language = {en}, number = {3}, journal = {Applied Physics Letters}, author = {Mohammed, H. and Al Risi, S. and Jin, T. L. and Kosel, J. and Piramanayagam, S. N. and Sbiaa, R.}, month = jan, year = {2020}, pages = {032402}, file = {Mohammed et al. - 2020 - Controlled spin-torque driven domain wall motion u.pdf:C\:\\Users\\carst\\Zotero\\storage\\U9V3XWWG\\Mohammed et al. - 2020 - Controlled spin-torque driven domain wall motion u.pdf:application/pdf}, } @book{tanenbaum_computerarchitektur_2001, address = {München}, edition = {2. Auflage}, series = {Informatik {Grundlagen}}, title = {Computerarchitektur: {Strukturen}, {Konzepte}, {Grundlagen}}, isbn = {978-3-8273-7016-7 978-3-8273-7148-5}, shorttitle = {Computerarchitektur}, language = {de}, publisher = {Pearson-Studium}, author = {Tanenbaum, Andrew S. and Goodman, James}, year = {2001}, } @book{hennessy_computer_2007, address = {Amsterdam, Heidelberg}, edition = {4. Auflage}, title = {Computer architecture: a quantitative approach}, isbn = {0-12-370490-1 978-0-12-370490-0}, publisher = {Elsevier}, author = {Hennessy, John L. and Patterson, David A.}, editor = {Arpaci-Dusseau, Andrea C.}, year = {2007}, } @book{borges_cloud_2016, address = {München}, title = {Cloud {Computing}: {Rechtshandbuch}}, isbn = {978-3-406-64590-7 3-406-64590-9}, language = {de}, publisher = {C.H. 
Beck}, author = {Borges, Georg}, editor = {Meents, Jan Geert}, year = {2016}, } @techreport{lucraft_five_2019, type = {White {Paper}}, title = {Five {Essential} {Factors} for {Data} {Sharing}}, copyright = {Creative Commons Attribution 4.0 International}, url = {https://figshare.com/articles/Five_Essential_Factors_for_Data_Sharing/7807949/1}, abstract = {The paper proposes and discusses key ways to improve on research data sharing practices.}, language = {en}, author = {Lucraft, Mithu and Baynes, Grace and Allin, Katie and Hrynaszkiewicz, Iain and Khodiyar, Varsha}, month = apr, year = {2019}, doi = {10.6084/m9.figshare.7807949.v1}, pages = {21}, file = {Mithu Lucraft et al. - 2019 - Five Essential Factors for Data Sharing.pdf:C\:\\Users\\carst\\Zotero\\storage\\GVZ87YPX\\Mithu Lucraft et al. - 2019 - Five Essential Factors for Data Sharing.pdf:application/pdf}, } @article{helbig_losungen_2019, title = {Lösungen und {Leitfäden} für das institutionelle {Forschungsdatenmanagement}}, volume = {6}, copyright = {Copyright (c) 2019 Kerstin Helbig, Katarzyna Biernacka, Petra Buchholz, Dominika Dolzycka, Niklas Hartmann, Thomas Hartmann, Beate Maria Hiemenz, Boris Jacob, Monika Kuberek, Nadin Weiß, Malte Dreyer}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/5505}, doi = {10.5282/o-bib/2019H3S21-39}, abstract = {Hochschulen und deren Zentraleinrichtungen beschäftigen sich zunehmend mit dem Thema Forschungsdatenmanagement (FDM), um ihre Forschenden adäquat zu unterstützen. Nicht zuletzt aufgrund neuer Verlags- und Förderanforderungen wünschen sich Forschende Beratung und Orientierung, wie sie mit ihren Forschungsdaten umgehen sollen. Damit Hochschulen schnell und nachhaltig Lösungen zum institutionellen FDM etablieren können, haben fünf Berliner und Brandenburger Universitäten im gemeinsamen Verbundvorhaben FDMentor mit Förderung des Bundesministeriums für Bildung und Forschung (BMBF) entsprechende Leitfäden und Werkzeuge erarbeitet. 
Die innerhalb von zwei Jahren (2017–2019) entstandenen Ergebnisse in den Bereichen Strategieentwicklung, Forschungsdaten-Policy, rechtliche Aspekte und Kompetenzausbau finden über das Verbundprojekt hinaus ihre Anwendung.}, language = {de}, number = {3}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Helbig, Kerstin and Biernacka, Katarzyna and Buchholz, Petra and Dolzycka, Dominika and Hartmann, Niklas and Hartmann, Thomas and Hiemenz, Beate Maria and Jacob, Boris and Kuberek, Monika and Weiß, Nadin and Dreyer, Malte}, month = oct, year = {2019}, pages = {21--39}, file = {Helbig et al. - 2019 - Lösungen und Leitfäden für das institutionelle For.pdf:C\:\\Users\\carst\\Zotero\\storage\\5DK384BT\\Helbig et al. - 2019 - Lösungen und Leitfäden für das institutionelle For.pdf:application/pdf}, } @article{linek_data_2017, title = {Data sharing as social dilemma: {Influence} of the researcher’s personality}, volume = {12}, issn = {1932-6203}, shorttitle = {Data sharing as social dilemma}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0183216}, doi = {10.1371/journal.pone.0183216}, abstract = {It is widely acknowledged that data sharing has great potential for scientific progress. However, so far making data available has little impact on a researcher’s reputation. Thus, data sharing can be conceptualized as a social dilemma. In the presented study we investigated the influence of the researcher's personality within the social dilemma of data sharing. The theoretical background was the appropriateness framework. We conducted a survey among 1564 researchers about data sharing, which also included standardized questions on selected personality factors, namely the so-called Big Five, Machiavellianism and social desirability. 
Using regression analysis, we investigated how these personality domains relate to four groups of dependent variables: attitudes towards data sharing, the importance of factors that might foster or hinder data sharing, the willingness to share data, and actual data sharing. Our analyses showed the predictive value of personality for all four groups of dependent variables. However, there was not a global consistent pattern of influence, but rather different compositions of effects. Our results indicate that the implications of data sharing are dependent on age, gender, and personality. In order to foster data sharing, it seems advantageous to provide more personal incentives and to address the researchers’ individual responsibility.}, language = {en}, number = {8}, journal = {PLOS ONE}, author = {Linek, Stephanie B. and Fecher, Benedikt and Friesike, Sascha and Hebing, Marcel}, month = aug, year = {2017}, pages = {e0183216}, file = {Linek et al. - 2017 - Data sharing as social dilemma Influence of the r.pdf:C\:\\Users\\carst\\Zotero\\storage\\EWMRW2LJ\\Linek et al. - 2017 - Data sharing as social dilemma Influence of the r.pdf:application/pdf}, } @article{putnings_erfolgreich_2017, title = {Erfolgreich {Forschen} durch {Kooperation}. {Verknüpfung} hochschuleigener {Informationsstrukturen} zu einem zentralen {Service} für {Forschende}}, volume = {4}, copyright = {Copyright (c) 2017 Markus Putnings, Sebastian Teichert}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/2017H4S137-144}, doi = {10.5282/o-bib/2017H4S137-144}, abstract = {An der Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) entstand durch die Kooperation mehrerer hochschulinterner Einrichtungen ein außergewöhnlich umfassender Service für Forschende bei der Drittmittelberatung. 
Es wird als Best Practice-Beispiel aufgezeigt, wie diese Dienste ineinandergreifen und wie die Einrichtungen ihre Fähigkeiten in den Bereichen Informationsbeschaffung und -vermittlung einbringen können, um den gesamten Forschungsprozess kooperativ zu stützen. Hintergrund sind die Forderungen des Wissenschaftsrates und des Rats für Informationsinfrastrukturen nach einer besseren Koordinierung der Arbeit der Informationsinfrastruktureinrichtungen angesichts aktueller Herausforderungen bspw. im Bereich Open Science und Forschungsdatenmanagement.}, language = {de}, number = {4}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Putnings, Markus and Teichert, Sebastian}, month = dec, year = {2017}, pages = {137--144}, file = {Putnings und Teichert - 2017 - Erfolgreich Forschen durch Kooperation. Verknüpfun.pdf:C\:\\Users\\carst\\Zotero\\storage\\JT64KX7Q\\Putnings und Teichert - 2017 - Erfolgreich Forschen durch Kooperation. Verknüpfun.pdf:application/pdf}, } @techreport{curdt_zur_2018, address = {Essen}, type = {Positionspapier}, title = {Zur {Rolle} der {Hochschulen} - {Positionspapier} der {Landesinitiative} {NFDI} und {Expertengruppe} {FDM} der {Digitalen} {Hochschule} {NRW} zum {Aufbau} einer {Nationalen} {Forschungsdateninfrastruktur}}, copyright = {CC BY-ND}, url = {https://zenodo.org/record/1217527#.X2ez0Yvgq-5}, abstract = {Das Positionspapier der Landesinitaitve NFDI und der Expertengruppe FDM der Digitalen Hochschule NRW beschreibt mögliche Szenarien und Herausforderungen einer notwendigen Beteiligung von Hochschulen beim Aufbau einer Nationalen Forschungsdateninfrastruktur (NFDI) und möchte damit zu einer weitergehenden Auseinandersetzung mit der Rolle der Hochschulen in diesem Prozess anregen. 
In Vorbereitung auf die geplante NFDI vernetzt die Landesinitiative NFDI Akteure und Organisationen im Bereich Forschungsdatenmanagement in NRW, trägt zur Etablierung und Anerkennung von FDM in den wissenschaftlichen Communities bei und begleitet als zentrale Anlaufstelle für Hochschulen den Wandel hin zu einer neuen Datenkultur.}, language = {de}, institution = {Landesinitiative NFDI der Digitalen Hochschule NRW}, author = {Curdt, Constanze and Grasse, Marleen and Hess, Volker and Kasties, Nils and López, Ania and Magrean, Benedikt and Perry, Anja and Quast, Andres and Rudolph, Dominik and Stork, Simone and Vompras, Johanna and Winter, Nina}, month = apr, year = {2018}, doi = {10.5281/zenodo.1217527}, pages = {3}, file = {Curdt et al. - 2018 - Zur Rolle der Hochschulen - Positionspapier der La.pdf:C\:\\Users\\carst\\Zotero\\storage\\3QLYT4HG\\Curdt et al. - 2018 - Zur Rolle der Hochschulen - Positionspapier der La.pdf:application/pdf}, } @article{wicherts_willingness_2011, title = {Willingness to {Share} {Research} {Data} {Is} {Related} to the {Strength} of the {Evidence} and the {Quality} of {Reporting} of {Statistical} {Results}}, volume = {6}, issn = {1932-6203}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0026828}, doi = {10.1371/journal.pone.0026828}, abstract = {Background The widespread reluctance to share published research data is often hypothesized to be due to the authors' fear that reanalysis may expose errors in their work or may produce conclusions that contradict their own. However, these hypotheses have not previously been studied systematically. Methods and Findings We related the reluctance to share research data for reanalysis to 1148 statistically significant results reported in 49 papers published in two major psychology journals. 
We found the reluctance to share data to be associated with weaker evidence (against the null hypothesis of no effect) and a higher prevalence of apparent errors in the reporting of statistical results. The unwillingness to share data was particularly clear when reporting errors had a bearing on statistical significance. Conclusions Our findings on the basis of psychological papers suggest that statistical results are particularly hard to verify when reanalysis is more likely to lead to contrasting conclusions. This highlights the importance of establishing mandatory data archiving policies.}, language = {en}, number = {11}, journal = {PLOS ONE}, author = {Wicherts, Jelte M. and Bakker, Marjan and Molenaar, Dylan}, month = nov, year = {2011}, pages = {e26828}, file = {Wicherts et al. - 2011 - Willingness to Share Research Data Is Related to t.PDF:C\:\\Users\\carst\\Zotero\\storage\\EKX4M74P\\Wicherts et al. - 2011 - Willingness to Share Research Data Is Related to t.PDF:application/pdf}, } @article{hausen_entwicklung_2018, title = {Entwicklung eines {Blended} {Learning} {Kurses} zum {Forschungsdatenmanagement} an der {RWTH} {Aachen} {University}}, volume = {5}, copyright = {Copyright (c) 2018 Daniela Adele Hausen, Jürgen Windeck}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/5337}, doi = {10.5282/o-bib/2018H3S17-31}, abstract = {Im Rahmen eines MALIS-Projektes wird ein Kurs zum Forschungsdatenmanagement für die RWTH Aachen University vorgestellt. Basierend auf einer Analyse des bisherigen Kursprogramms, orientiert sich das Kurskonzept konsequent an den Zielen und Erwartungen der Forschenden. Das Konzept für den Blended-Learning-Kurs basiert auf der Lehrstrategie MOMBI. Angelehnt an das „Flipped Classroom“-Modell, wird der Kurs aus einer vorgeschalteten Online-Selbstlernphase und einem anschließenden Präsenzworkshop aufgebaut. 
In dem vorliegenden Aufsatz werden die Lernziele, der Kursaufbau und die Inhalte des Kurses vorgestellt.}, language = {de}, number = {3}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Hausen, Daniela Adele and Windeck, Jürgen}, month = sep, year = {2018}, pages = {17--31}, file = {Hausen und Windeck - 2018 - Entwicklung eines Blended Learning Kurses zum Fors.pdf:C\:\\Users\\carst\\Zotero\\storage\\RCRFT48E\\Hausen und Windeck - 2018 - Entwicklung eines Blended Learning Kurses zum Fors.pdf:application/pdf}, } @article{zuiderwijk_sharing_2019, title = {Sharing and re-using open data: {A} case study of motivations in astrophysics}, volume = {49}, issn = {0268-4012}, shorttitle = {Sharing and re-using open data}, url = {http://www.sciencedirect.com/science/article/pii/S0268401218311836}, doi = {10.1016/j.ijinfomgt.2019.05.024}, abstract = {Open data sharing and re-use is currently more common in some academic disciplines than others. Although each discipline has unique challenges and characteristics which can influence data sharing and re-use behavior, it may be possible to gain transferable insight from disciplines where these practices are more common. Several studies of the motivations underlying data sharing and re-use have been conducted, however these studies often remain at a high level of abstraction rather than providing in-depth insight about discipline-specific challenges and opportunities. This study sought to provide in-depth insight about the complex interaction of factors influencing motivations for sharing and re-using open research data within a single discipline, namely astrophysics. We focused on this discipline due to its well-developed tradition of free and open access to research data. 
Eight factors were found to influence researchers’ motivations for sharing data openly, including the researcher’s background, personal drivers, experience, legislation, regulation and policy, data characteristics, performance expectancy, usability, and collaboration. We identified six factors that influence researchers’ motivations to re-use open research data, including the researcher’s background, facilitating conditions, expected performance, social and affiliation factors, effort and experience. Finally, we discuss how data sharing and re-use can be encouraged within the context of astrophysics research, and we discuss how these insights may be transferred to disciplines with low rates of data sharing and re-use.}, language = {en}, journal = {International Journal of Information Management}, author = {Zuiderwijk, Anneke and Spiers, Helen}, month = dec, year = {2019}, pages = {228--241}, file = {Zuiderwijk und Spiers - 2019 - Sharing and re-using open data A case study of mo.pdf:C\:\\Users\\carst\\Zotero\\storage\\FIKLQD2C\\Zuiderwijk und Spiers - 2019 - Sharing and re-using open data A case study of mo.pdf:application/pdf}, } @book{borgman_big_2015, title = {Big data, little data, no data: scholarship in the networked world}, isbn = {978-0-262-02856-1}, shorttitle = {Big data, little data, no data}, abstract = {"Big Data" is on the covers of Science, Nature, the Economist, and Wired magazines, on the front pages of the Wall Street Journal and the New York Times. But despite the media hyperbole, as Christine Borgman points out in this examination of data and scholarly research, having the right data is usually better than having more data; little data can be just as valuable as big data. In many cases, there are no data – because relevant data don't exist, cannot be found, or are not available. Moreover, data sharing is difficult, incentives to do so are minimal, and data practices vary widely across disciplines. 
Borgman, an often-cited authority on scholarly communication, argues that data have no value or meaning in isolation; they exist within a knowledge infrastructure – an ecology of people, practices, technologies, institutions, material objects, and relationships. After laying out the premises of her investigation – six "provocations" meant to inspire discussion about the uses of data in scholarship – Borgman offers case studies of data practices in the sciences, the social sciences, and the humanities, and then considers the implications of her findings for scholarly practice and research policy. To manage and exploit data over the long term, Borgman argues, requires massive investment in knowledge infrastructures; at stake is the future of scholarship.}, language = {en}, publisher = {The MIT Press}, author = {Borgman, Christine L.}, year = {2015}, } @article{borgman_lives_2019, title = {The {Lives} and {After} {Lives} of {Data}}, volume = {1}, url = {https://hdsr.mitpress.mit.edu/pub/4giycvvj/release/5}, doi = {10.1162/99608f92.9a36bdb6}, abstract = {The most elusive term in data science is ‘data.’ While often treated as objects to be computed upon, data is a theory-laden concept with a long history. Data exist within knowledge infrastructures that govern how they are created, managed, and interpreted. By comparing models of data life cycles, implicit assumptions about data become apparent. In linear models, data pass through stages from beginning to end of life, which suggest that data can be recreated as needed. Cyclical models, in which data flow in a virtuous circle of uses and reuses, are better suited for irreplaceable observational data that may retain value indefinitely. In astronomy, for example, observations from one generation of telescopes may become calibration and modeling data for the next generation, whether digital sky surveys or glass plates. 
The value and reusability of data can be enhanced through investments in knowledge infrastructures, especially digital curation and preservation. Determining what data to keep, why, how, and for how long, is the challenge of our day.}, language = {en}, number = {1}, journal = {Harvard Data Science Review}, author = {Borgman, Christine L.}, month = jul, year = {2019}, pages = {10}, file = {Borgman - 2019 - The Lives and After Lives of Data.pdf:C\:\\Users\\carst\\Zotero\\storage\\XFYNGP47\\Borgman - 2019 - The Lives and After Lives of Data.pdf:application/pdf}, } @book{caplan_metadata_2003, address = {Chicago}, title = {Metadata fundamentals for all librarians}, isbn = {978-0-8389-0847-1}, language = {en}, publisher = {American Library Association}, author = {Caplan, Priscilla}, year = {2003}, } @book{gartner_metadata_2016, address = {Cham}, edition = {1st ed. 2017}, title = {Metadata: {Shaping} {Knowledge} from {Antiquity} to the {Semantic} {Web}}, isbn = {978-3-319-40891-0 3-319-40891-7}, language = {en}, publisher = {Springer International Publishing Springer}, author = {Gartner, Richard}, year = {2016}, } @book{corrado_digital_2017, address = {Lanham, Maryland Boulder New York London}, edition = {Second edition}, title = {Digital preservation for libraries, archives, and museums}, isbn = {978-1-4422-7871-4 978-1-4422-7872-1 978-1-4422-7873-8}, abstract = {What is digital preservation? 
– Getting started with the digital preservation triad – Management for digital preservation – The OAIS reference model – Organizing digital content – Consortia and membership organizations – Human resources and education – Sustainable digital preservation – Digital repository software and digital preservation systems – The digital preservation repository and trust – Metadata for digital preservation – File formats and software for digital preservation – Emulation – Selecting content – Preserving research data – Preserving humanities content – Digital preservation of selected specialized formats – Appendix A. Select resources in support of digital preservation}, language = {en}, publisher = {Rowman \& Littlefield}, author = {Corrado, Edward M. and Moulaison, Heather Léa}, year = {2017}, } @article{griffin_best_2018, title = {Best practice data life cycle approaches for the life sciences}, volume = {6}, issn = {2046-1402}, url = {https://f1000research.com/articles/6-1618/v2}, doi = {10.12688/f1000research.12344.2}, abstract = {Throughout history, the life sciences have been revolutionised by technological advances; in our era this is manifested by advances in instrumentation for data generation, and consequently researchers now routinely handle large amounts of heterogeneous data in digital formats. The simultaneous transitions towards biology as a data science and towards a ‘life cycle’ view of research data pose new challenges. Researchers face a bewildering landscape of data management requirements, recommendations and regulations, without necessarily being able to access data management training or possessing a clear understanding of practical approaches that can assist in data management in their particular research domain. Here we provide an overview of best practice data life cycle approaches for researchers in the life sciences/bioinformatics space with a particular focus on ‘omics’ datasets and computer-based data processing and analysis. 
We discuss the different stages of the data life cycle and provide practical suggestions for useful tools and resources to improve data management practices.}, language = {en}, journal = {F1000Research}, author = {Griffin, Philippa C. and Khadake, Jyoti and LeMay, Kate S. and Lewis, Suzanna E. and Orchard, Sandra and Pask, Andrew and Pope, Bernard and Roessner, Ute and Russell, Keith and Seemann, Torsten and Treloar, Andrew and Tyagi, Sonika and Christiansen, Jeffrey H. and Dayalan, Saravanan and Gladman, Simon and Hangartner, Sandra B. and Hayden, Helen L. and Ho, William W.H. and Keeble-Gagnère, Gabriel and Korhonen, Pasi K. and Neish, Peter and Prestes, Priscilla R. and Richardson, Mark F. and Watson-Haigh, Nathan S. and Wyres, Kelly L. and Young, Neil D. and Schneider, Maria Victoria}, month = jun, year = {2018}, pages = {1618}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\S2NRKC7I\\Griffin et al. - 2018 - Best practice data life cycle approaches for the l.pdf:application/pdf}, } @book{jahnke_problem_2012, address = {Washington}, series = {Council on {Library} and {Information} {Resources} ({CLIR}) {Report}}, title = {The {Problem} of {Data}}, copyright = {CC BY-NC-SA}, isbn = {978-1-932326-42-0}, url = {https://digitalcommons.bucknell.edu/fac_pubs/52}, abstract = {Jahnke and Asher explore workflows and methodologies at a variety of academic data curation sites, and Keralis delves into the academic milieu of library and information schools that offer instruction in data curation. Their conclusions point to the urgent need for a reliable and increasingly sophisticated professional cohort to support data-intensive research in our colleges, universities, and research centers.}, language = {en}, number = {154}, author = {Jahnke, Lori and Asher, Andrew and Keralis, Spencer}, month = aug, year = {2012}, file = {Jahnke et al. - 2012 - The Problem of Data.pdf:C\:\\Users\\carst\\Zotero\\storage\\4LNFBUSX\\Jahnke et al. 
- 2012 - The Problem of Data.pdf:application/pdf}, } @article{borgman_data_2016, title = {Data {Management} in the {Long} {Tail}: {Science}, {Software}, and {Service}}, volume = {11}, issn = {1746-8256}, shorttitle = {Data {Management} in the {Long} {Tail}}, doi = {10.2218/ijdc.v11i1.428}, abstract = {Scientists in all fields face challenges in managing and sustaining access to their research data. The larger and longer term the research project, the more likely that scientists are to have resources and dedicated staff to manage their technology and data, leaving those scientists whose work is based on smaller and shorter term projects at a disadvantage. The volume and variety of data to be managed varies by many factors, only two of which are the number of collaborators and length of the project. As part of an NSF project to conceptualize the Institute for Empowering Long Tail Research, we explored opportunities offered by Software as a Service (SaaS). These cloud-based services are popular in business because they reduce costs and labor for technology management, and are gaining ground in scientific environments for similar reasons. We studied three settings where scientists conduct research in small and medium-sized laboratories. Two were NSF Science and Technology Centers (CENS and C-DEBI) and the third was a workshop of natural reserve scientists and managers. These laboratories have highly diverse data and practices, make minimal use of standards for data or metadata, and lack resources for data management or sustaining access to their data, despite recognizing the need. We found that SaaS could address technical needs for basic document creation, analysis, and storage, but did not support the diverse and rapidly changing needs for sophisticated domain-specific tools and services. 
These are much more challenging knowledge infrastructure requirements that require long-term investments by multiple stakeholders.}, language = {en}, number = {1}, journal = {International Journal of Digital Curation}, author = {Borgman, Christine L. and Golshan, Milena S. and Sands, Ashley E. and Wallis, Jillian C. and Cummings, Rebekah L. and Darch, Peter T. and Randles, Bernadette M.}, month = oct, year = {2016}, pages = {128--149}, file = {Borgman et al. - 2016 - Data Management in the Long Tail Science, Softwar.pdf:C\:\\Users\\carst\\Zotero\\storage\\T28MM5E5\\Borgman et al. - 2016 - Data Management in the Long Tail Science, Softwar.pdf:application/pdf}, } @article{goodman_ten_2014, title = {Ten {Simple} {Rules} for the {Care} and {Feeding} of {Scientific} {Data}}, volume = {10}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003542}, doi = {10.1371/journal.pcbi.1003542}, language = {en}, number = {4}, journal = {PLOS Computational Biology}, author = {Goodman, Alyssa and Pepe, Alberto and Blocker, Alexander W. and Borgman, Christine L. and Cranmer, Kyle and Crosas, Merce and Di Stefano, Rosanne and Gil, Yolanda and Groth, Paul and Hedstrom, Margaret and Hogg, David W. and Kashyap, Vinay and Mahabal, Ashish and Siemiginowska, Aneta and Slavkovic, Aleksandra}, month = apr, year = {2014}, pages = {e1003542}, file = {Goodman et al. - 2014 - Ten Simple Rules for the Care and Feeding of Scien.PDF:C\:\\Users\\carst\\Zotero\\storage\\HW9739TU\\Goodman et al. - 2014 - Ten Simple Rules for the Care and Feeding of Scien.PDF:application/pdf}, } @article{fournier_zum_2017, title = {Zum qualifizierten {Umgang} mit {Forschungsdaten}. {Ein} {Bericht} über den {Workshop} „{Wissenschaft} im digitalen {Wandel}“ am 6.
{Juni} 2017 in der {Universität} {Mannheim}}, volume = {4}, copyright = {Copyright (c) 2017 Johannes Fournier}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/2017H3S88-93}, doi = {10.5282/o-bib/2017H3S88-93}, language = {de}, number = {3}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Fournier, Johannes}, month = oct, year = {2017}, pages = {88--93}, file = {Fournier - 2017 - Zum qualifizierten Umgang mit Forschungsdaten. Ein.pdf:C\:\\Users\\carst\\Zotero\\storage\\RQH25MN7\\Fournier - 2017 - Zum qualifizierten Umgang mit Forschungsdaten. Ein.pdf:application/pdf}, } @book{cox_exploring_2018, address = {London}, title = {Exploring research data management}, isbn = {978-1-78330-278-9 978-1-78330-279-6}, language = {en}, publisher = {Facet Publishing}, author = {Cox, Andrew M. and Verbaan, Eddy}, year = {2018}, } @article{klar_report_2013, title = {Report "{Organisation} und {Struktur}"}, url = {https://gfzpublic.gfz-potsdam.de/pubman/faces/ViewItemOverviewPage.jsp?itemId=item_117051}, doi = {10.2312/RADIESCHEN_005}, abstract = {Die Reports Technologie, Organisation, Kosten stellen die Ergebnisse einzelner Arbeitspakete des DFG-Projekts „Rahmenbedingungen einer disziplinübergreifenden Forschungsdateninfrastruktur“ (Radieschen) dar. 
Der Report „Synthese“ gibt einen Überblick über die Gesamt-Ergebnisse des Projekts, zeigt Handlungsempfehlungen auf und gibt einen Ausblick auf eine mögliche Zukunft der Forschungsdaten-Infrastrukturen in Deutschland.}, language = {de}, journal = {DFG-Projekt RADIESCHEN – Rahmenbedingungen einer disziplinübergreifenden Forschungsdateninfrastruktur}, author = {Klar, Jochen and Enke, Harry}, year = {2013}, pages = {61}, file = {ProjektRadieschen_Organisation_und_Struktur.pdf:C\:\\Users\\carst\\Zotero\\storage\\Z9BCVD92\\ProjektRadieschen_Organisation_und_Struktur.pdf:application/pdf}, } @book{ludwig_leitfaden_2013, address = {Glückstadt}, title = {Leitfaden zum {Forschungsdaten}-{Management}: {Handreichungen} aus dem {WissGrid}-{Projekt}}, isbn = {978-3-86488-032-2}, shorttitle = {Leitfaden zum {Forschungsdaten}-{Management}}, url = {http://resolver.sub.uni-goettingen.de/purl?isbn-978-3-86488-032-2}, abstract = {Digitale Forschungsdaten sind eine unverzichtbare Grundlage moderner Wissenschaft. Mit ihnen sind aber eine Reihe von notwendigen Datenmanagement-Aufgaben verbunden, damit sie in der aktiven Forschung bestmöglich eingesetzt werden können und die gute wissenschaftliche Praxis eingehalten wird. Wie lange sollen die Daten aufbewahrt werden? Welche Kontextinformationen müssen erfasst werden, um die Daten zu einem späteren Zeitpunkt noch sinnvoll benutzen zu können? Wie viel kostet die Aufbewahrung? Dieses Buch kann diese Fragen nur aufwerfen, nicht beantworten. Denn ohne auf Details der jeweiligen Disziplin, Forschungsdaten und Projekte einzugehen, lassen sich diese Fragen nicht allgemeinverbindlich klären.
Was dieses Buch jedoch mit einem Leitfaden und einer Checkliste bereitstellt, ist ein einfaches Instrument für fachwissenschaftliche oder Infrastruktur-Einrichtungen, um die wichtigsten Aufgaben und Fragen im Forschungsdaten-Management strukturiert beantworten und effizient planen zu können}, language = {de}, publisher = {Hülsbusch}, editor = {Ludwig, Jens and Enke, Harry}, year = {2013}, file = {Ludwig und Enke - 2013 - Leitfaden zum Forschungsdaten-Management Handreic.pdf:C\:\\Users\\carst\\Zotero\\storage\\SR7HXD6V\\Ludwig und Enke - 2013 - Leitfaden zum Forschungsdaten-Management Handreic.pdf:application/pdf}, } @book{lyon_dealing_2007, address = {Bath}, title = {Dealing with {Data}: {Roles}, {Rights}, {Responsibilities} and {Relationships}. {Consultancy} {Report}}, abstract = {This Report explores the roles, rights, responsibilities and relationships of institutions, data centres and other key stakeholders who work with data. It concentrates primarily on the UK scene with some reference to other relevant experience and opinion, and is framed as 'a snapshot' of a relatively fast-moving field. It is strategically positioned to provide a bridge between the high-level RIN Framework of Principles and Guidelines for the stewardship of research data, and practitioner-focused technical development work.}, language = {en}, publisher = {University of Bath}, author = {Lyon, Liz}, year = {2007}, } @book{meyer_knowledge_2015, address = {Cambridge, MA}, series = {Infrastructures series}, title = {Knowledge machines: digital transformations of the sciences and humanities}, isbn = {978-0-262-02874-5}, shorttitle = {Knowledge machines}, language = {eng}, publisher = {The MIT Press}, author = {Meyer, Eric T.
and Schroeder, Ralph}, year = {2015}, } @incollection{neuroth_haben_2019, address = {Bad Honnef}, title = {Haben {Metadaten} ihre {Unschuld} verloren?}, copyright = {CC BY}, isbn = {978-3-88347-311-6}, url = {https://www.th-koeln.de/mam/downloads/deutsch/studium/studiengaenge/f03/bib_inf_ma/festschrift_osswald.pdf}, language = {de}, booktitle = {Bibliotheksentwicklung im {Netzwerk} von {Menschen}, {Informationstechnologie} und {Nachhaltigkeit}: {Festschrift} für {Achim} {Oßwald}}, publisher = {Bock + Herchen Verlag}, author = {Neuroth, Heike}, year = {2019}, pages = {167--178}, file = {Neuroth - .pdf:C\:\\Users\\carst\\Zotero\\storage\\XI7JSMKA\\Neuroth - .pdf:application/pdf}, } @techreport{pampel_data_2010, address = {Berlin}, type = {Working {Paper}}, title = {„{Data} {Librarianship}“ – {Rollen}, {Aufgaben}, {Kompetenzen}}, url = {https://pdfs.semanticscholar.org/87b2/d2b6807610f8ba292d64e55ba79209b3a6d6.pdf}, language = {de}, number = {Nr. 144}, institution = {RatSWD}, author = {Pampel, Heinz and Bertelmann, Roland and Hobohm, Hans-Christoph}, year = {2010}, pages = {20}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\XX9NYSAX\\_.pdf:application/pdf}, } @article{ribeiro_research_2018, title = {Research {Data} {Management} {Tools} and {Workflows}: {Experimental} {Work} at the {University} of {Porto}}, volume = {42}, issn = {2331-4141}, shorttitle = {Research {Data} {Management} {Tools} and {Workflows}}, url = {https://www.iassistquarterly.com/index.php/iassist/article/view/925}, doi = {10.29173/iq925}, language = {en}, number = {2}, journal = {IASSIST Quarterly}, author = {Ribeiro, Cristina and Silva, João Rocha da and Castro, João Aguiar and Amorim, Ricardo Carvalho and Lopes, João Correia and David, Gabriel}, month = jul, year = {2018}, pages = {1--16}, file = {Ribeiro et al. - 2018 - Research Data Management Tools and Workflows Expe.pdf:C\:\\Users\\carst\\Zotero\\storage\\UN7UJK9B\\Ribeiro et al. 
- 2018 - Research Data Management Tools and Workflows Expe.pdf:application/pdf}, } @article{treloar_updating_2019, title = {Updating the {Data} {Curation} {Continuum}}, volume = {14}, copyright = {CC BY}, issn = {1746-8256}, url = {http://www.ijdc.net/article/view/643}, doi = {10.2218/ijdc.v14i1.643}, abstract = {The Data Curation Continuum was developed as a way of thinking about data repository infrastructure. Since its original development over a decade ago, a number of things have changed in the data infrastructure domain. This paper revisits the thinking behind the original data curation continuum and updates it to respond to changes in research objects, storage models, and the repository landscape in general.}, language = {en}, number = {1}, journal = {International Journal of Digital Curation}, author = {Treloar, Andrew and Klump, Jens}, month = dec, year = {2019}, pages = {87--101}, file = {Treloar und Klump - 2019 - Updating the Data Curation Continuum.pdf:C\:\\Users\\carst\\Zotero\\storage\\9WHZNZVD\\Treloar und Klump - 2019 - Updating the Data Curation Continuum.pdf:application/pdf}, } @article{mons_cloudy_2017, title = {Cloudy, increasingly {FAIR}; revisiting the {FAIR} {Data} guiding principles for the {European} {Open} {Science} {Cloud}}, volume = {37}, issn = {0167-5265}, url = {https://content.iospress.com/articles/information-services-and-use/isu824}, doi = {10.3233/ISU-170824}, abstract = {The FAIR Data Principles propose that all scholarly output should be Findable, Accessible, Interoperable, and Reusable. As a set of guiding principles, expressing only the kinds of behaviours that researchers should expect from contemporary data resources, how the FAIR principles should manifest in reality was largely open to interpretation. As support for the Principles has spread, so has the breadth of these interpretations.
In observing this creeping spread of interpretation, several of the original authors felt it was now appropriate to revisit the Principles, to clarify both what FAIRness is, and is not.}, language = {en}, number = {1}, journal = {Information Services \& Use}, author = {Mons, Barend and Neylon, Cameron and Velterop, Jan and Dumontier, Michel and da Silva Santos, Luiz Olavo Bonino and Wilkinson, Mark D.}, month = jan, year = {2017}, pages = {49--56}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\Y7EQCQX4\\Mons et al. - 2017 - Cloudy, increasingly FAIR\; revisiting the FAIR Dat.pdf:application/pdf;isu_2017_37-1_isu-37-1-isu824_isu-37-isu824.pdf:C\:\\Users\\carst\\Zotero\\storage\\2L7Z566L\\isu_2017_37-1_isu-37-1-isu824_isu-37-isu824.pdf:application/pdf}, } @article{neuroth_aktives_2018, title = {Aktives {Forschungsdatenmanagement}}, volume = {38}, issn = {2191-4664, 0720-6763}, doi = {10.1515/abitech-2018-0008}, abstract = {Forschungsdatenmanagement und damit einhergehend Forschungsdatenmanagementpläne nehmen national und international an Bedeutung zu. Nicht nur, dass verschiedene Förderorganisationen wie die National Science Foundation (USA), der Schweizerische Nationalfonds (SNF), die Deutsche Forschungsgemeinschaft (DFG) sowie die Europäische Kommission mit Horizon 2020 (H2020) bereits bei Projektanträgen Auskunft über den Umgang mit den nachgenutzten oder erstellten Forschungsdaten verlangen, es beschäftigen sich auch mehr und mehr Initiativen wie zum Beispiel international die Research Data Alliance oder in Deutschland die DINI/nestor-Arbeitsgruppe Forschungsdaten mit dem Thema. International setzt sich dabei mehr und mehr die Erkenntnis durch, dass es im Umgang mit Forschungsdaten nicht mit einem einmaligen Erstellen eines Forschungsdatenmanagementplans getan ist, sondern dass sich die Pläne aktiv dem Verlauf des Forschungsprozesses anpassen und für verschiedene Bedarfe zur Verfügung gestellt werden müssen.
So kann es sinnvoll und notwendig sein, weitere Beteiligte wie zum Beispiel IT-Support oder übergeordnete Datenmanager beim Erstellen und Aktualisieren eines Forschungsdatenmanagementplans zu berücksichtigen. Daher muss ein Werkzeug zur Unterstützung von diesen Plänen über das bloße Ausfüllen von Vorlagen der Förderorganisation hinaus weitere Aufgaben erfüllen und so den gesamten Prozess des Forschungsdatenmanagements unterstützen. Der Research Data Management Organiser (RDMO) ist ein solches Werkzeug, das im Rahmen eines DFG-Projektes entwickelt und mit Hilfe unterschiedlicher Gruppen von Nutzenden getestet wurde. Das RDMO-Tool ist multilingual, flexibel an Community- und Organisationsanforderungen anpassbar und unterstützt verschiedene Aufgaben wie zum Beispiel unterschiedliche Export-Funktionen oder die Erledigung zeitlich gebundener Tasks. In einem DFG-Nachfolgeprojekt, das Ende 2017 gestartet ist, werden in RDMO wichtige Erweiterungen sowohl technischer Art als auch bezogen auf verschiedene Aspekte der Nachhaltigkeit vorgenommen.}, language = {de}, number = {1}, journal = {ABI Technik}, author = {Neuroth, Heike and Engelhardt, Claudia and Klar, Jochen and Ludwig, Jens and Enke, Harry}, month = apr, year = {2018}, pages = {55--64}, } @incollection{thibodeau_overview_2002, address = {Washington, D.C}, title = {Overview of {Technological} {Approaches} to {Digital} {Preservation} and {Challenges} in {Coming} {Years}}, isbn = {978-1-887334-92-1}, language = {en}, booktitle = {The state of digital preservation: an international perspective ; conference proceedings, {Documentation} {Abstracts}, {Inc}. 
{Institutes} for {Information} {Science}, {Washington}, {D}.{C}., {April} 24-25, 2002}, publisher = {CLIR}, author = {Thibodeau, Kenneth}, editor = {{Council on Library and Information Resources}}, year = {2002}, pages = {4--31}, file = {Thibodeau - 2002 - Overview of Technological Approaches to Digital Pr.pdf:C\:\\Users\\carst\\Zotero\\storage\\PS2M2NU3\\Thibodeau - 2002 - Overview of Technological Approaches to Digital Pr.pdf:application/pdf}, } @article{thompson_making_2019, title = {Making {FAIR} {Easy} with {FAIR} {Tools}: {From} {Creolization} to {Convergence}}, volume = {2}, shorttitle = {Making {FAIR} {Easy} with {FAIR} {Tools}}, url = {https://doi.org/10.1162/dint_a_00031}, doi = {10.1162/dint_a_00031}, abstract = {Since their publication in 2016 we have seen a rapid adoption of the FAIR principles in many scientific disciplines where the inherent value of research data and, therefore, the importance of good data management and data stewardship, is recognized. This has led to many communities asking “What is FAIR?” and “How FAIR are we currently?”, questions which were addressed respectively by a publication revisiting the principles and the emergence of FAIR metrics. However, early adopters of the FAIR principles have already run into the next question: “How can we become (more) FAIR?” This question is more difficult to answer, as the principles do not prescribe any specific standard or implementation. Moreover, there does not yet exist a mature ecosystem of tools, platforms and standards to support human and machine agents to manage, produce, publish and consume FAIR data in a user-friendly and efficient (i.e., “easy”) way. In this paper we will show, however, that there are already many emerging examples of FAIR tools under development. 
This paper puts forward the position that we are likely already in a creolization phase where FAIR tools and technologies are merging and combining, before converging in a subsequent phase to solutions that make FAIR feasible in daily practice.}, language = {en}, number = {1-2}, journal = {Data Intelligence}, author = {Thompson, Mark and Burger, Kees and Kaliyaperumal, Rajaram and Roos, Marco and da Silva Santos, Luiz Olavo Bonino}, month = nov, year = {2019}, pages = {87--95}, file = {Thompson et al. - 2019 - Making FAIR Easy with FAIR Tools From Creolizatio.pdf:C\:\\Users\\carst\\Zotero\\storage\\AY5J9AWK\\Thompson et al. - 2019 - Making FAIR Easy with FAIR Tools From Creolizatio.pdf:application/pdf}, } @article{treloar_data_2007, title = {The {Data} {Curation} {Continuum}: {Managing} {Data} {Objects} in {Institutional} {Repositories}}, volume = {13}, issn = {1082-9873}, shorttitle = {The {Data} {Curation} {Continuum}}, url = {http://www.dlib.org/dlib/september07/treloar/09treloar.html}, doi = {10.1045/september2007-treloar}, abstract = {This article describes the work currently underway at Monash University to rethink the role of repositories in supporting data management. It first describes the context within which the work has taken place and some of the local factors that have contributed to the inception and continuation of this work. It then introduces the idea of a Data Curation Continuum and describes the various continua that might be applicable in a repository data management context. 
The article then discusses some of the implications of this approach, before reviewing related work.}, language = {en}, number = {9/10}, journal = {D-Lib Magazine}, author = {Treloar, Andrew and Groenewegen, David and Harboe-Ree, Cathrine}, year = {2007}, } @article{michener_ten_2015, title = {Ten {Simple} {Rules} for {Creating} a {Good} {Data} {Management} {Plan}}, volume = {11}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004525}, doi = {10.1371/journal.pcbi.1004525}, language = {en}, number = {10}, journal = {PLOS Computational Biology}, author = {Michener, William K.}, month = oct, year = {2015}, pages = {e1004525}, file = {Michener - 2015 - Ten Simple Rules for Creating a Good Data Manageme.PDF:C\:\\Users\\carst\\Zotero\\storage\\PMALVZFG\\Michener - 2015 - Ten Simple Rules for Creating a Good Data Manageme.PDF:application/pdf}, } @article{leendertse_datenmanagementplane_2019, title = {Datenmanagementpläne zur {Strukturierung} von {Forschungsvorhaben}}, copyright = {Copyright (c) 2019 Jan Leendertse, Susanne Mocken, Dirk von Suchodoletz}, url = {https://bausteine-fdm.de/article/view/8003}, doi = {10.17192/bfdm.2019.2.8003}, abstract = {Digitale Forschungsprozesse produzieren eine zunehmende Menge an Daten. Sie sind oft sehr disziplinspezifisch und liegen in verschiedenen Formen vor. Dabei können sie sowohl Basis als auch Ergebnis von Forschung sein. Bewahren, Verwalten und Kuratieren von Forschungsdaten wird so zu einer zentralen Aufgabe jeder Wissenschaftlerin und jedes Wissenschaftlers sowie der Forschungseinrichtungen. Dieser Prozess muss strukturiert und organisiert werden. Eine sich zunehmend etablierende Lösung besteht in der Verwendung von Datenmanagementplänen (DMP). Sie können primär als abstraktes Konzept aufgefasst werden, das dabei hilft, das Datenmanagement über den geplanten Projektablauf des Forschungsvorhabens und die spätere Langzeitverfügbarkeit zu definieren. 
Ein DMP strukturiert hierzu den Umgang mit Forschungsdaten über deren Lebenszyklus hinweg. Dabei laufen Erkenntnisse zu benötigten oder generierten Datensätzen ebenso zusammen wie deren Lizenzierung, Anreicherung mit Metadaten, notwendige Verarbeitungsschritte und -software oder die Eigentümerschaft im Zeitablauf. Um die vielfältigen Fragestellungen rund um DMPs abzudecken, sollten Forschungseinrichtungen geeignete Beratungsangebote etablieren.}, language = {de}, number = {2}, journal = {Bausteine Forschungsdatenmanagement}, author = {Leendertse, Jan and Mocken, Susanne and von Suchodoletz, Dirk}, month = may, year = {2019}, pages = {4--9}, file = {Leendertse et al. - 2019 - Datenmanagementpläne zur Strukturierung von Forsch.pdf:C\:\\Users\\carst\\Zotero\\storage\\U5TPZX72\\Leendertse et al. - 2019 - Datenmanagementpläne zur Strukturierung von Forsch.pdf:application/pdf}, } @article{schirrwagen_expanding_2019, title = {Expanding the {Research} {Data} {Management} {Service} {Portfolio} at {Bielefeld} {University} {According} to the {Three}-pillar {Principle} {Towards} {Data} {FAIRness}}, volume = {18}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/10.5334/dsj-2019-006/}, doi = {10.5334/dsj-2019-006}, abstract = {Research Data Management at Bielefeld University is considered as a cross-cutting task among central facilities and research groups at the faculties. While initially started as project “Bielefeld Data Informium” lasting over seven years (2010–2015), it is now being expanded by setting up a Competence Center for Research Data. The evolution of the institutional RDM is based on the three-pillar principle: 1. Policies, 2. Technical infrastructure and 3. Support structures. The problem of data quality and the issues with reproducibility of research data is addressed in the project Conquaire.
It is creating an infrastructure for the processing and versioning of research data which will finally allow publishing of research data in the institutional repository. Conquaire extends the existing RDM infrastructure in three ways: with a Collaborative Platform, Data Quality Checking, and Reproducible Research.}, language = {en}, number = {1}, journal = {Data Science Journal}, author = {Schirrwagen, Jochen and Cimiano, Philipp and Ayer, Vidya and Pietsch, Christian and Wiljes, Cord and Vompras, Johanna and Pieper, Dirk}, month = jan, year = {2019}, pages = {6}, file = {Schirrwagen et al. - 2019 - Expanding the Research Data Management Service Por.pdf:C\:\\Users\\carst\\Zotero\\storage\\D5TZCKHC\\Schirrwagen et al. - 2019 - Expanding the Research Data Management Service Por.pdf:application/pdf}, } @article{miksa_ten_2019, title = {Ten principles for machine-actionable data management plans}, volume = {15}, issn = {1553-7358}, url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006750}, doi = {10.1371/journal.pcbi.1006750}, abstract = {Data management plans (DMPs) are documents accompanying research proposals and project outputs. DMPs are created as free-form text and describe the data and tools employed in scientific investigations. They are often seen as an administrative exercise and not as an integral part of research practice. There is now widespread recognition that the DMP can have more thematic, machine-actionable richness with added value for all stakeholders: researchers, funders, repository managers, research administrators, data librarians, and others. The research community is moving toward a shared goal of making DMPs machine-actionable to improve the experience for all involved by exchanging information across research tools and systems and embedding DMPs in existing workflows. 
This will enable parts of the DMP to be automatically generated and shared, thus reducing administrative burdens and improving the quality of information within a DMP. This paper presents 10 principles to put machine-actionable DMPs (maDMPs) into practice and realize their benefits. The principles contain specific actions that various stakeholders are already undertaking or should undertake in order to work together across research communities to achieve the larger aims of the principles themselves. We describe existing initiatives to highlight how much progress has already been made toward achieving the goals of maDMPs as well as a call to action for those who wish to get involved.}, language = {en}, number = {3}, journal = {PLOS Computational Biology}, author = {Miksa, Tomasz and Simms, Stephanie and Mietchen, Daniel and Jones, Sarah}, month = mar, year = {2019}, pages = {e1006750}, file = {Miksa et al. - 2019 - Ten principles for machine-actionable data managem.pdf:C\:\\Users\\carst\\Zotero\\storage\\J729UMHS\\Miksa et al. - 2019 - Ten principles for machine-actionable data managem.pdf:application/pdf}, } @inproceedings{treloar_rethinking_2008, address = {Indianapolis}, title = {Rethinking {Metadata} {Creation} and {Management} in a {Data}-{Driven} {Research} {World}}, doi = {10.1109/eScience.2008.41}, abstract = {Research data collections are tremendously important and thus need good curation. However data collections are significantly different to publication repositories and so we need to ensure that these differences are taken into account when managing research data. We believe that a good way of approaching this problem is to articulate the needs of research data stakeholders - particularly users and creators. Consequently we have described an analysis of these needs and then examined costs in the light of these varying needs - it is important to note that costs are often incurred by different people to the beneficiaries. 
We finish the paper by showing practically how incurring software costs can provide valuable savings for both data creators and data managers.}, booktitle = {2008 {IEEE} {Fourth} {International} {Conference} on {eScience}}, author = {Treloar, Andrew and Wilkinson, Ross}, month = dec, year = {2008}, pages = {782--789}, } @article{cremer_embedded_2015, title = {Embedded {Data} {Manager} – {Integriertes} {Forschungsdatenmanagement}: {Praxis}, {Perspektiven} und {Potentiale}}, volume = {39}, issn = {0341-4183, 1865-7648}, shorttitle = {Embedded {Data} {Manager} – {Integriertes} {Forschungsdatenmanagement}}, doi = {10.1515/bfp-2015-0006}, abstract = {Die digitale Transformation verändert die Wissenschaft und deren Forschungsprozesse: Interdisziplinäre Verbünde, IT-basierte und datengestützte Forschung eröffnen neue Innovationsräume und stellen zugleich komplexe Anforderungen an ein adäquates Forschungsdatenmanagement. Eine Antwort darauf liefert das Konzept des ‚Embedded Data Manager‘, das neben aller Innovation und Agilität vor allem auf ein Prinzip setzt: Partnerschaft. Hier liegt auch die Chance, wissenschaftliche Bibliotheken wieder mehr in die Forschungsprozesse einzubinden und langfristig in der Wissenschaft zu verankern.}, language = {de}, number = {1}, journal = {Bibliothek Forschung und Praxis}, author = {Cremer, Fabian and Engelhardt, Claudia and Neuroth, Heike}, month = apr, year = {2015}, pages = {13--31}, } @article{wilkinson_fair_2016, title = {The {FAIR} {Guiding} {Principles} for scientific data management and stewardship}, volume = {3}, issn = {2052-4463}, url = {https://doi.org/10.1038/sdata.2016.18}, doi = {10.1038/sdata.2016.18}, abstract = {There is an urgent need to improve the infrastructure supporting the reuse of scholarly data. 
A diverse set of stakeholders—representing academia, industry, funding agencies, and scholarly publishers—have come together to design and jointly endorse a concise and measureable set of principles that we refer to as the FAIR Data Principles. The intent is that these may act as a guideline for those wishing to enhance the reusability of their data holdings. Distinct from peer initiatives that focus on the human scholar, the FAIR Principles put specific emphasis on enhancing the ability of machines to automatically find and use the data, in addition to supporting its reuse by individuals. This Comment is the first formal publication of the FAIR Principles, and includes the rationale behind them, and some exemplar implementations in the community.}, number = {1}, journal = {Scientific Data}, author = {Wilkinson, Mark D. and Dumontier, Michel and Aalbersberg, IJsbrand Jan and Appleton, Gabrielle and Axton, Myles and Baak, Arie and Blomberg, Niklas and Boiten, Jan-Willem and da Silva Santos, Luiz Bonino and Bourne, Philip E. and Bouwman, Jildau and Brookes, Anthony J. and Clark, Tim and Crosas, Mercè and Dillo, Ingrid and Dumon, Olivier and Edmunds, Scott and Evelo, Chris T. and Finkers, Richard and Gonzalez-Beltran, Alejandra and Gray, Alasdair J.G. and Groth, Paul and Goble, Carole and Grethe, Jeffrey S. and Heringa, Jaap and ’t Hoen, Peter A.C and Hooft, Rob and Kuhn, Tobias and Kok, Ruben and Kok, Joost and Lusher, Scott J. and Martone, Maryann E. and Mons, Albert and Packer, Abel L. and Persson, Bengt and Rocca-Serra, Philippe and Roos, Marco and van Schaik, Rene and Sansone, Susanna-Assunta and Schultes, Erik and Sengstag, Thierry and Slater, Ted and Strawn, George and Swertz, Morris A. 
and Thompson, Mark and van der Lei, Johan and van Mulligen, Erik and Velterop, Jan and Waagmeester, Andra and Wittenburg, Peter and Wolstencroft, Katherine and Zhao, Jun and Mons, Barend}, month = mar, year = {2016}, } @book{bodendorf_daten-_2006, address = {Berlin Heidelberg New York}, edition = {2., aktualisierte und erweiterte Auflage}, series = {Springer-{Lehrbuch}}, title = {Daten- und {Wissensmanagement}}, isbn = {978-3-540-28743-8}, url = {http://dx.doi.org/10.1007/3-540-28682-9}, language = {de}, publisher = {Springer}, author = {Bodendorf, Freimut}, year = {2006}, doi = {10.1007/3-540-28682-9}, } @incollection{jensen_forschungsdatenmanagement_2019, address = {Opladen Berlin}, edition = {1. Auflage}, title = {Forschungsdatenmanagement systematisch planen und umsetzen}, isbn = {978-3-8474-2233-4 3-8474-2233-2}, language = {de}, booktitle = {Forschungsdatenmanagement sozialwissenschaftlicher {Umfragedaten}: {Grundlagen} und praktische {Lösungen} für den {Umgang} mit quantitativen {Forschungsdaten}}, publisher = {Verlag Barbara Budrich}, author = {Jensen, Uwe and Netscher, Sebastian}, editor = {Weller, Katrin and Jensen, Uwe and Netscher, Sebastian}, year = {2019}, doi = {10.3224/84742233.04}, pages = {37--55}, file = {Jensen und Netscher - 2019 - Forschungsdatenmanagement systematisch planen und .pdf:C\:\\Users\\carst\\Zotero\\storage\\MZKMGG3J\\Jensen und Netscher - 2019 - Forschungsdatenmanagement systematisch planen und .pdf:application/pdf}, } @article{schmitz_forschungsdaten_2018, title = {Forschungsdaten managen – {Bausteine} für eine dezentrale, forschungsnahe {Unterstützung}}, volume = {5}, copyright = {Copyright (c) 2018 Dominik Schmitz, Marius Politze}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/5339}, doi = {10.5282/o-bib/2018H3S76-91}, abstract = {(Inter-)Nationale Infrastrukturen für Forschungsdaten sind aktuell im Entstehen begriffen. 
Zentrale Infrastruktureinrichtungen müssen aber bereits jetzt den Bedarf der Wissenschaftlerinnen und Wissenschaftler der eigenen Universität nach forschungsnaher Unterstützung erfüllen. Gemeinsam ist beiden, dass erst die fachspezifische Anpassung und Ausprägung des Forschungsdatenmanagements (FDM) zu einer wirklichen Unterstützung für die Forschenden führt. Aktuell sind gerade die Plattformen erfolgreich, die ganz auf die Bedürfnisse einer wissenschaftlichen Community ausgerichtet sind. Analog ist die Situation an der RWTH Aachen University ebenfalls durch Vielfalt und Dezentralität geprägt. Die einzelnen Institute und Lehrstühle sind unterschiedlich weit bei der Unterstützung ihrer Forschenden. Wie können zentrale Infrastruktureinrichtungen wie die Bibliothek und das Rechenzentrum in diesem Kontext Unterstützung leisten? Dieser Beitrag stellt das bausteinbasierte Lösungskonzept der RWTH vor, benennt konkrete Dienste, die zentral angeboten werden, und argumentiert, warum technologieunabhängige, prozessorientierte Schnittstellen geeignet sind, um forschungsnahe Unterstützung sicherzustellen und sich zugleich in die gerade in der Entstehung befindlichen (inter-) nationalen Forschungsdaten-Infrastrukturen einzubringen.}, language = {de}, number = {3}, journal = {o-bib. 
Das offene Bibliotheksjournal}, author = {Schmitz, Dominik and Politze, Marius}, month = sep, year = {2018}, pages = {76--91}, file = {Schmitz und Politze - 2018 - Forschungsdaten managen – Bausteine für eine dezen.pdf:C\:\\Users\\carst\\Zotero\\storage\\E67U9438\\Schmitz und Politze - 2018 - Forschungsdaten managen – Bausteine für eine dezen.pdf:application/pdf}, } @article{cremer_redaktionssache_2019, title = {Redaktionssache {Forschungsdaten}: {Ein} {Servicekonzept} zur {Forschungsdatenpublikation} in den {Geisteswissenschaften}}, volume = {43}, issn = {0341-4183, 1865-7648}, shorttitle = {Redaktionssache {Forschungsdaten}}, doi = {10.1515/bfp-2019-2018}, abstract = {Der Beitrag fokussiert die Publikation von Forschungsdaten in den Geisteswissenschaften und vertritt die These, dass die Forschungsdatenpublikation im Wissenschaftsbetrieb als Dienstleistung realisierbar ist. Ausgehend von hinderlichen Barrieren im System und förderlichen Modellen in Verbundprojekten wird das Konzept der Forschungsdatenredaktion diskutiert. Dieses Konzept basiert auf einer redaktionellen Bearbeitung der Forschungsdaten nach dem Forschungsprozess und der Veröffentlichung als eigenständiger Publikationsform.}, language = {de}, number = {1}, journal = {Bibliothek Forschung und Praxis}, author = {Cremer, Fabian and Klaffki, Lisa and Steyer, Timo}, month = apr, year = {2019}, pages = {118--125}, file = {Cremer et al. - 2019 - Redaktionssache Forschungsdaten Ein Servicekonzep.pdf:C\:\\Users\\carst\\Zotero\\storage\\NJ9TQD4X\\Cremer et al. - 2019 - Redaktionssache Forschungsdaten Ein Servicekonzep.pdf:application/pdf}, } @techreport{hochschulrektorenkonferenz_management_2014, address = {Bonn}, title = {Management von {Forschungsdaten} – eine zentrale strategische {Herausforderung} für {Hochschulleitungen}: {Empfehlung} der 16. {Mitgliederversammlung} der {HRK} am 13. 
{Mai} 2014 in {Frankfurt} am {Main}}, url = {https://www.hrk.de/fileadmin/_migrated/content_uploads/HRK_Empfehlung_Forschungsdaten_13052014_01.pdf}, abstract = {Das exponentielle Wachstum, die steigende Komplexität und der zunehmende Gebrauch von digitalen Forschungsdaten haben in den letzten Jahren einen erheblichen Einfluss auf den Forschungsprozess genommen. Als Beispiele seien die Verwendung von Methoden zur Handhabung sehr großer Datenmengen aufgrund des exponentiellen Wachstums von digitalen Forschungsdaten und die Entwicklung von Werkzeugen zur besseren Integration heterogener Daten genannt. Das Management von Forschungsdaten, die Möglichkeiten ihrer Vernetzung, ihre dauerhafte Verfügbarhaltung und ihr offener Zugang erfordern adäquate neue Infrastrukturen.Für die strategische Steuerung dieser Prozesse sind die Hochschulleitungen gefragt. Sie sind gefordert, an ihren Hochschulen Leitlinien zum Umgang mit digitalen Forschungsdaten abzustimmen und Vereinbarungen mit anderen Hochschulen, außeruniversitären Forschungseinrichtungen sowie fachspezifischen Daten-Infrastrukturen zu unterstützen. Es ist an den Hochschulleitungen, die Informationskompetenz der Hochschulmitglieder zu stärken und die strukturellen Voraussetzungen für ein effizientes, institutionelles Forschungsdatenmanagement zu schaffen. Bund und Länder werden aufgefordert, übergreifende Abstimmungen und Maßnahmen zu koordinieren, die für den Aufbau tragfähiger Informationsinfrastrukturen über die Grenzen der Bundesländer hinweg unerlässlich sind. Außerdem wird an die Politik appelliert, zusätzliche finanzielle Mittel zur Verfügung zu stellen. 
Der von der GWK beschlossene Rat für Informationsinfrastrukturen sollte hier – in enger Zusammenarbeit mit der HRK – eine koordinierende Rolle übernehmen.}, language = {de}, institution = {Hochschulrektorenkonferenz}, author = {{Hochschulrektorenkonferenz}}, year = {2014}, pages = {6}, file = {2014 - Management von Forschungsdaten – eine zentrale str.pdf:C\:\\Users\\carst\\Zotero\\storage\\MN54WGF5\\2014 - Management von Forschungsdaten – eine zentrale str.pdf:application/pdf}, } @techreport{hochschulrektorenkonferenz_wie_2015, address = {Bonn}, title = {Wie {Hochschulleitungen} die {Entwicklung} des {Forschungsdatenmanagements} steuern können. {Orientierungspfade}, {Handlungsoptionen}, {Szenarien}: {Empfehlung} der 19. {Mitgliederversammlung} der {HRK} am 10. {November} 2015 in {Kiel}}, url = {https://www.hrk.de/fileadmin/_migrated/content_uploads/Empfehlung_Forschungsdatenmanagement__final_Stand_11.11.2015.pdf}, abstract = {Die Entwicklungen im Zuge der sehr dynamisch fortschreitenden Digitalisierung in allen Bereichen haben auch einen signifikanten Einfluss auf Forschungsprozesse. Besonders deutlich wird dies beim Thema ‚Forschungsdatenmanagement‘ (FDM), das eine wesentliche Rolle in vielen wissenschaftlichen Aktivitäten der Zukunft spielen wird. In diesem Bereich sind bereits vielfältige Aktivitäten zu beobachten: Während Wissenschaftlerinnen und Wissenschaftler sich weltweit im Rahmen der ‚Research Data Alliance‘ vernetzen, mahnt die Allianz der Wissenschaftsorganisationen im Rahmen der Schwerpunktinitiative Digitale Information für das FDM den Aufbau nachhaltiger Strukturen an. Auch der Rat für Informationsinfrastrukturen setzt den Gegenstand weit oben auf seine Agenda. Die Bundesregierung adressiert das Thema ebenfalls, namentlich im Rahmen der ‚Digitalen Agenda', der ‚IT-Gipfel' und des geplanten Deutschen Internetinstituts. Die DFG weist mit ihren ‚Leitlinien zum Umgang mit Forschungsdaten‘ auf die Dringlichkeit der anstehenden Aufgaben hin. 
Unter den Ländern bekennt sich zum Beispiel Baden-Württemberg im Rahmen seines E-Science-Förderprogramms zum Auf- und Ausbau des FDM. Und auf der internationalen Ebene machte die Europäische Kommission das Thema zu einem wichtigen Förderschwerpunkt innerhalb des Forschungsrahmenprogramms ‚Horizont 2020‘. Die HRK sieht aufgrund der Vielzahl der Akteure einen dringenden Bedarf an Koordination und Abstimmung zwischen den Hochschulen beim Aufbau eines länderübergreifenden und international kompatiblen FDM. Dabei muss das Ziel sein, möglichst alle Hochschulen einzubinden.}, language = {de}, institution = {Hochschulrektorenkonferenz}, author = {{Hochschulrektorenkonferenz}}, year = {2015}, pages = {25}, file = {2015 - Wie Hochschulleitungen die Entwicklung des Forschu.pdf:C\:\\Users\\carst\\Zotero\\storage\\HFFW6LYR\\2015 - Wie Hochschulleitungen die Entwicklung des Forschu.pdf:application/pdf}, } @article{houtkoop_data_2018, title = {Data {Sharing} in {Psychology}: {A} {Survey} on {Barriers} and {Preconditions}:}, volume = {1}, shorttitle = {Data {Sharing} in {Psychology}}, url = {https://journals.sagepub.com/doi/10.1177/2515245917751886}, doi = {10.1177/2515245917751886}, abstract = {Despite its potential to accelerate academic progress in psychological science, public data sharing remains relatively uncommon. In order to discover the perceived barriers to public data sharing and possible means for lowering them, we conducted a survey, which elicited responses from 600 authors of articles in psychology. The results confirmed that data are shared only infrequently. Perceived barriers included respondents’ belief that sharing is not a common practice in their fields, their preference to share data only upon request, their perception that sharing requires extra work, and their lack of training in sharing data. 
Our survey suggests that strong encouragement from institutions, journals, and funders will be particularly effective in overcoming these barriers, in combination with educational materials that demonstrate where and how data can be shared effectively.}, language = {en}, number = {1}, journal = {Advances in Methods and Practices in Psychological Science}, author = {Houtkoop, Bobby Lee and Chambers, Chris and Macleod, Malcolm and Bishop, Dorothy V. M. and Nichols, Thomas E. and Wagenmakers, Eric-Jan}, month = feb, year = {2018}, pages = {70--85}, file = {Houtkoop et al. - 2018 - Data Sharing in Psychology A Survey on Barriers a.pdf:C\:\\Users\\carst\\Zotero\\storage\\UU26YAZM\\Houtkoop et al. - 2018 - Data Sharing in Psychology A Survey on Barriers a.pdf:application/pdf}, } @book{jensen_leitlinien_2012, address = {Mannheim}, series = {{GESIS} technical reports}, title = {Leitlinien zum {Management} von {Forschungsdaten}: sozialwissenschaftliche {Umfragedaten}}, url = {https://www.gesis.org/fileadmin/upload/forschung/publikationen/gesis_reihen/gesis_methodenberichte/2012/TechnicalReport_2012-07.pdf}, language = {de}, number = {2012, 07}, publisher = {GESIS}, author = {Jensen, Uwe}, year = {2012}, file = {Jensen - 2012 - Leitlinien zum Management von Forschungsdaten soz.pdf:C\:\\Users\\carst\\Zotero\\storage\\XAIF28NN\\Jensen - 2012 - Leitlinien zum Management von Forschungsdaten soz.pdf:application/pdf}, } @techreport{noauthor_breite_2018, address = {Göttingen}, title = {In der {Breite} und forschungsnah: {Handlungsfähige} {Konsortien}. 
{Dritter} {Diskussionsimpuls} zur {Ausgestaltung} einer {Nationalen} {Forschungsdateninfrastruktur} ({NFDI}) für die {Wissenschaft} in {Deutschland}}, copyright = {Creative Commons Namensnennung – Weitergabe unter gleichen Bedingungen 4.0 International}, url = {https://nbn-resolving.org/urn:nbn:de:101:1-2018120515263879760228}, language = {de}, institution = {Rat für Informationsinfrastrukturen}, month = dec, year = {2018}, pages = {6}, file = {2016 - In der Breite und forschungsnah Handlungsfähige K.pdf:C\:\\Users\\carst\\Zotero\\storage\\4ND6N6PL\\2016 - In der Breite und forschungsnah Handlungsfähige K.pdf:application/pdf}, } @article{mancilla_quest_2019, title = {On a {Quest} for {Cultural} {Change} - {Surveying} {Research} {Data} {Management} {Practices} at {Delft} {University} of {Technology}}, volume = {29}, issn = {2213-056X}, url = {http://www.liberquarterly.eu/articles/10.18352/lq.10287/}, doi = {10.18352/lq.10287}, abstract = {The Data Stewardship project is a new initiative from the Delft University of Technology (TU Delft) in the Netherlands. Its aim is to create mature working practices and policies regarding research data management across all TU Delft faculties. The novelty of this project relies on having a dedicated person, the so-called ‘Data Steward’, embedded in each faculty to approach research data management from a more discipline-specific perspective. It is within this framework that a research data management survey was carried out at the faculties that had a Data Steward in place by July 2018. The goal was to get an overview of the general data management practices, and use its results as a benchmark for the project. The total response rate was 11 to 37\% depending on the faculty. 
Overall, the results show similar trends in all faculties, and indicate lack of awareness regarding different data management topics such as automatic data backups, data ownership, relevance of data management plans, awareness of FAIR data principles and usage of research data repositories. The results also show great interest towards data management, as more than {\textasciitilde}80\% of the respondents in each faculty claimed to be interested in data management training and wished to see the summary of survey results. Thus, the survey helped identify the topics the Data Stewardship project is currently focusing on, by carrying out awareness campaigns and providing training at both university and faculty levels.}, language = {en}, number = {1}, journal = {LIBER Quarterly}, author = {Mancilla, Heather Andrews and Teperek, Marta and van Dijck, Jasper and den Heijer, Kees and Eggermont, Robbert and Plomp, Esther and {Turkyilmaz-van der Velden}, Yasemin and Kurapati, Shalini}, month = aug, year = {2019}, pages = {1--27}, file = {Mancilla et al. - 2019 - On a Quest for Cultural Change - Surveying Researc.pdf:C\:\\Users\\carst\\Zotero\\storage\\G3XWHS3A\\Mancilla et al. 
- 2019 - On a Quest for Cultural Change - Surveying Researc.pdf:application/pdf}, } @book{mellor_landscape_2018, title = {The {Landscape} of {Open} {Data} {Policies}}, url = {https://www.cos.io/blog/the-landscape-of-open-data-policies}, language = {en}, author = {Mellor, David}, month = aug, year = {2018}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\XRTAGDLT\\the-landscape-of-open-data-policies.html:text/html}, } @book{sane_overcoming_2015, address = {London}, title = {Overcoming {Barriers} to {Data} {Sharing} in {Public} {Health}: {A} {Global} {Perspective}}, isbn = {978 1 78413 050 3}, url = {https://www.chathamhouse.org/publication/overcoming-barriers-data-sharing-public-health-global-perspective/20150417OvercomingBarriersDataSharingPublicHealthSaneEdelstein.pdf}, abstract = {• The interaction between barriers to data sharing in public health is complex, and single solutions to single barriers are unlikely to be successful. Political, economic and legal obstacles will be the most challenging to overcome. • Public health data sharing occurs extensively as a collection of subregional and regional surveillance networks. These existing networks have often arisen as a consequence of a specific local public health crisis, and should be integrated into any global framework. • Data sharing in public health is successful when a perceived need is addressed, and the social, political and cultural context is taken into account. • A global data sharing legal framework is unlikely to be successful. A global data governance or ethical framework, supplemented by local memoranda of understanding that take into account the local context, is more likely to succeed. • The International Health Regulations (IHR) should be considered as an infrastructure for data sharing. However, their lack of enforcement mechanism, lack of minimum data sets, lack of capacity assessment mechanism, and potential impact on trade and travel following data sharing need to be addressed. 
• Optimal data sharing does not equate with open access for public health data.}, language = {en}, publisher = {Chatham House}, author = {Sane, Jussi and Edelstein, Michael}, editor = {{Centre on Global Health Security at The Royal Institute of International Affairs}}, year = {2015}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\9KQFP3BV\\_.pdf:application/pdf}, } @book{wiarda_medizin_2019, title = {Medizin und {Machtmissbrauch}}, shorttitle = {Kommentar}, url = {https://www.spektrum.de/kolumne/medizin-und-machtmissbrauch/1688058}, abstract = {Steile Hierarchien und falsche Anreize begünstigen Forschungsskandale im Gesundheitssystem}, language = {de}, author = {Wiarda, Jan-Martin}, month = nov, year = {2019}, note = {Publication Title: Spektrum der Wissenschaft. Medizin}, } @techreport{noauthor_schritt_2017, address = {Göttingen}, title = {Schritt für {Schritt} – oder: {Was} bringt wer mit?: {Ein} {Diskussionsimpuls} zu {Zielstellung} und {Voraussetzungen} für den {Einstieg} in die {Nationale} {Forschungsdateninfrastruktur} ({NFDI})}, copyright = {Creative Commons Namensnennung –Weitergabe unter gleichen Bedingungen 4.0 International}, url = {urn:nbn:de:101:1-201705023233}, language = {de}, institution = {Rat für Informationsinfrastrukturen}, month = apr, year = {2017}, pages = {4}, file = {2016 - Schritt für Schritt – oder Was bringt wer mit Ei.pdf:C\:\\Users\\carst\\Zotero\\storage\\6MKWKXM5\\2016 - Schritt für Schritt – oder Was bringt wer mit Ei.pdf:application/pdf}, } @article{helbig_kein_2017, title = {Kein {Königsweg} - die {Vermittlung} von {Forschungsdatenkompetenz} auf allen universitären {Ebenen}}, volume = {4}, copyright = {Copyright (c) 2017 Pamela Aust, Kerstin Helbig}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/2017H1S108-116}, doi = {10.5282/o-bib/2017H1S108-116}, abstract = {Die Strategien und Erfahrungen der Humboldt-Universität zu Berlin bei der Einführung von Schulungs- und Informationsangeboten für Universitätsangehörige 
im Bereich Forschungsdatenmanagement werden in diesem Artikel dargestellt. Dabei werden insbesondere Unterschiede zwischen den Zielgruppen, aber auch geeignete Schulungsformate sowie Probleme und zukünftige Herausforderungen thematisiert.}, language = {de}, number = {1}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Helbig, Kerstin and Aust, Pamela}, month = apr, year = {2017}, pages = {108--116}, file = {Helbig und Aust - 2017 - Kein Königsweg - die Vermittlung von Forschungsdat.pdf:C\:\\Users\\carst\\Zotero\\storage\\FVM9BPH7\\Helbig und Aust - 2017 - Kein Königsweg - die Vermittlung von Forschungsdat.pdf:application/pdf}, } @article{dierkes_von_2018, title = {Von der {Idee} zum {Konzept} – {Forschungsdatenmanagement} an der {Universität} zu {Köln}}, volume = {5}, copyright = {Copyright (c) 2018 Jens Dierkes, Constanze Curdt}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/2018H2S28-46}, doi = {10.5282/o-bib/2018H2S28-46}, language = {de}, number = {2}, journal = {o-bib. 
Das offene Bibliotheksjournal}, author = {Dierkes, Jens and Curdt, Constanze}, month = jul, year = {2018}, pages = {28--46}, file = {Dierkes und Curdt - 2018 - Von der Idee zum Konzept – Forschungsdatenmanageme.pdf:C\:\\Users\\carst\\Zotero\\storage\\UM3K6BW6\\Dierkes und Curdt - 2018 - Von der Idee zum Konzept – Forschungsdatenmanageme.pdf:application/pdf}, } @book{wuttke_schulungsmaterialien_2019, title = {Schulungsmaterialien zu {RDMO} \& fachspezifische {Lern}- und {Informationsangebote} für das geisteswissenschaftliche {Forschungsdatenmanagement}}, url = {https://zenodo.org/record/3520839}, abstract = {Zusammenstellung einer Übersicht von Schulungsmaterialien zu RDMO (Research Data Management Organiser) und zum Forschungsdatenmanagement (mit Schwerpunkt Geisteswissenschaften) für einen Workshop an der Universität Hildesheim.}, language = {de}, author = {Wuttke, Ulrike}, month = oct, year = {2019}, doi = {10.5281/zenodo.3520839}, file = {Wuttke - 2019 - Schulungsmaterialien zu RDMO & fachspezifische Le.pdf:C\:\\Users\\carst\\Zotero\\storage\\HAKDCFI5\\Wuttke - 2019 - Schulungsmaterialien zu RDMO & fachspezifische Le.pdf:application/pdf}, } @article{shanahan_curriculum_2019, title = {A curriculum for foundational {Research} {Data} {Science} skills for {Early} {Career} {Researchers}}, url = {https://www.rd-alliance.org/group/rdacodata-summer-schools-data-science-and-cloud-computing-developing-world-wg/outcomes-0}, doi = {10.15497/rda00038}, abstract = {This recommendation describes the curriculum and example materials to give Early Career Researchers (ECR’s) the foundational skills in Data Science to work with their data. This curriculum combines technical skills, such as Software Carpentry with responsible research practices such as Open and Responsible Research. In 2016 we ran one school in Trieste, Italy. In 2017 we ran two, Trieste and São Paulo, Brazil. In 2018 we ran three, Trieste, São Paulo and Kigali, Rwanda. 
In 2019 we will run four schools (Addis Ababa, Ethiopia, Trieste, Abuja, Nigeria and San José in Costa Rica). At the end of this year approximately 400 students will have been taught on four continents using the curriculum developed here.}, author = {Shanahan, Hugh and Quick, Rob and Alfaro Córdoba, Marcela and Clement, Gail and Bezuidenhout, Louise and Shanmugasundaram, Venkat and Jones, Sarah and Ashley, Kevin and Diggs, Stephen and Gillespie, Colin and El Jadid, Sara and Sorokina, Maria and Barlow, Roger and Okorafor, Ekpe and Sipos, Gergely and Costantini, Alessandro and Short, Hannah}, month = oct, year = {2019}, } @techreport{noauthor_digitale_2019, address = {Göttingen}, title = {Digitale {Kompetenzen} - dringend gesucht! {Empfehlungen} zu {Berufs}- und {Ausbildungsperspektiven} für den {Arbeitsmarkt} {Wissenschaft}}, copyright = {Creative Commons Namensnennung –Weitergabe unter gleichen Bedingungen 4.0 International}, url = {http://www.rfii.de/?p=3883}, language = {de}, institution = {Rat für Informationsinfrastrukturen}, year = {2019}, pages = {56}, file = {2016 - Leistung aus Vielfalt Empfehlungen zu Strukturen,.pdf:C\:\\Users\\carst\\Zotero\\storage\\N57TV9N4\\2016 - Leistung aus Vielfalt Empfehlungen zu Strukturen,.pdf:application/pdf}, } @article{wiljes_teaching_2019, title = {Teaching {Research} {Data} {Management} for {Students}}, volume = {18}, issn = {1683-1470}, doi = {10.5334/dsj-2019-038}, abstract = {Sound skills in managing research data are a fundamental requirement in any discipline of research. Therefore, research data management should be included in academic education of students as early as possible. We have been teaching an interdisciplinary full semester’s course on research data management for six years. We report how we established the course. We describe our competency-based approach to teaching research data management and the curriculum of topics that we consider essential. 
We evaluate our approach by a survey done among the participants of the course and summarize the lessons we learned in teaching the course.}, language = {en}, number = {1}, journal = {Data Science Journal}, author = {Wiljes, Cord and Cimiano, Philipp}, month = aug, year = {2019}, pages = {38}, file = {Wiljes und Cimiano - 2019 - Teaching Research Data Management for Students.pdf:C\:\\Users\\carst\\Zotero\\storage\\2MXBQS6N\\Wiljes und Cimiano - 2019 - Teaching Research Data Management for Students.pdf:application/pdf}, } @techreport{schuller_future_2019, address = {Berlin}, type = {Arbeitspapier}, title = {Future {Skills}: {Ein} {Framework} für {Data} {Literacy}}, copyright = {CC BY SA}, shorttitle = {Future {Skills}}, url = {https://hochschulforumdigitalisierung.de/sites/default/files/dateien/HFD_AP_Nr_47_DALI_Kompetenzrahmen_WEB.pdf}, abstract = {1.1 Management Summary Welches Wissen, welche Fähigkeiten, welche Haltung benötigt es in Gesellschaft, Arbeitswelt und Wissenschaft, in denen Daten als wertvolle, mitunter die wertvollste Ressource gelten und Entschei-dungen zunehmend auf der Grundlage von Daten getroffen werden? Zweifellos werden Digitalisierung und Datafizierung das Leben und Arbeiten im 21. Jahrhundert nachhaltig verändern. Künstliche Intelli-genz, vernetzte Produktion, kommunizierende Maschinen und selbstfahrende Autos werden von Daten gesteuert und produzieren selbst Daten am laufenden Band. Daten sind die Ausgangsbasis für Wissens- bzw. Wertschöpfung als Grundlage für bessere Entscheidungen. Der Prozess der Wissensschöpfung umfasst mehrere Schritte: (A) Datenkultur etablieren – (B) Daten bereitstellen – (C) Daten auswerten – (D) Ergebnisse interpretieren – (E) Daten interpretieren – (F) Handeln ableiten. Um systematisch Wissen bzw. 
Wert aus Daten zu schöpfen, ist deshalb zukünftig in allen Sektoren und Disziplinen die Fähigkeit, planvoll mit Daten umzugehen und sie im jeweiligen Kontext bewusst einsetzen und hinterfragen zu können, von entscheidender Bedeutung. Dies wird als Data Literacy bezeichnet und umfasst die Fähigkeiten, Daten auf kritische Art und Weise zu sammeln, zu managen, zu bewerten und anzuwenden. Data Literacy ist weit mehr ein breites und tiefes Detailwissen über sich laufend verändernde Methoden und Technologien. Vielmehr spielt die Dimension der Datenethik, der Motivation und Werthaltung eine zentrale Rolle, um zukünftig mit Daten erfolgreich und souverän umgehen zu können. Data Literacy ist eine Schlüsselkompetenz des 21. Jahrhunderts, die in der modernen Gesellschaft und Arbeitswelt unerlässlich sein wird. Data Literacy muss deshalb von Beginn an und fächerübergreifend an den Hochschulen vermittelt werden. Hierfür bedarf es eines Kompetenzrahmens, d.h. eines Modells zur strukturierten Beschreibung von effektivem Verhalten in einem gegebenen Aufgabenkontext. Er umfasst Kompetenzen, deren Definitionen und daraus abgeleitete Verhaltensindikatoren. Ein derartiger Kompetenzrahmen soll alle Stufen des Wissens- bzw. Wertschöpfungsprozesses aus Daten abbilden; er soll alle Kompetenzdimensionen erfassen: (a) Wissen, (b) Fertigkeiten, (c) Fähigkeiten, (d) Motivation und (Wert-)Haltung; er soll es erlauben, die erfassten Kompetenzen in konkrete und testbare Lern- oder Kompetenzziele zu überführen; und er soll der die Interdisziplinarität der Aufgabe reflektieren, also widerspiegeln, dass neben Datenexpert*innen auch Fachleute für Datenschutz und Datenethik benötigt werden. Außerdem ist zu erforschen, wie Data Literacy gemessen und getestet werden kann. 
Geeignete Mess- und Testinstrumente erfassen kognitive und affektive Lernbereiche; sie umfassen möglichst viele Lernstufen: (a) Reaktion, (b) Lernerfolg, (c) Verhalten, (d) Ergebnis; sie sind transparent bezüglich der Möglichkeiten und Grenzen einer Schlussfolgerung von beobachtbarem Verhalten auf dahinter liegender Kompetenz; sie genügen den Testgütekriterien der Validität, Reliabilität und Objektivität; und schließlich sind sie mit vertretbarem Aufwand (Geld, Zeit, benötigte Fähigkeiten der Prüfer*innen) durchzuführen. Die vorliegende Studie verfolgt somit zwei Ziele. Erstens soll ein Kompetenzrahmen für Digitalkompe-tenzen am Beispiel von Data Literacy entwickelt und umsetzbares Wissen für Hochschulen verfügbar gemacht werden. Zweitens soll die Messung von Wirkung und Qualität von Lehre und Studium im digitalen Zeitalter wie auch die Testentwicklung für Digitalkompetenzen am Beispiel von Data Literacy vorbereitet werden. Die Studie richtet sich an Verantwortliche für die Curriculumentwicklung in der Hochschulpolitik und den Hochschulen selbst, aber auch an Lehrende, die nach konkreten Ansätzen zur Vermittlung und Evaluation von Data Literacy suchen. Hierfür stellt das Arbeitspapier einen ausdifferenzierten Kompetenzrahmen zur Verfügung, der zur Ableitung von Lernzielen in zahlreichen Fachgebieten und Studiengängen dienen kann. Mögliche Mess- und Testverfahren für Data Literacy werden näher beleuchtet. Beispielhaft ist anhand von Fallstudien aufgezeigt, wie der Kompetenzrahmen in realen Problemsituationen dazu beitragen kann, Daten nutzbar zu machen. Nun gilt es, die erarbeiteten Ergebnisse in Vorschläge für Curricula umzusetzen. Dafür müssen Pilothochschulen und Pilotstudiengänge ausgewählt werden, in denen spezifische Lernziele für die jeweiligen Disziplinen aus dem Kompetenzrahmen abgeleitet werden. Zu diskutieren ist weiter der Aspekt des lebenslangen Lernens von Schlüsselkompetenzen: Wie kann bzw. 
sollte Data Literacy bereits in der Schule sowie später in der Arbeitswelt und der Erwachsenenbildung vermittelt werden? In jedem Fall braucht es didaktische Ansätze, die der Interdisziplinarität der Aufgabe gerecht werden, denn komplexe Datenprojekte werden bereits heute im Team bearbeitet, sie erfordern ein multiprofessionelles Arbeiten, das die Fähigkeit zum Projektmanagement und die Kenntnis organisatorischer, rechtlicher und ethischer Rahmenbedingungen miteinschließt. Nicht zuletzt bedarf die Frage, wie die Lehrenden für diese Herausforderung qualifiziert werden können, einer Antwort. 1.2 Aufbau des Arbeitspapiers Das vorliegende Arbeitspapier fasst das Ergebnis der Studie in einem Bericht zusammen. Ein Überblick über Ausgangslage und Zielsetzung beleuchtet im Kapitel Hintergrund und Zielsetzungen zunächst die Herausforderungen durch Digitalisierung und Datafizierung und beschreibt dann die Ziele der Studie. Im Anschluss wird der Begriff „Data Literacy“ präzisiert, wobei zunächst abgeleitet wird, welche Implikationen die Betrachtungsweise von Data Literacy als 21st Century Skill mit sich bringt. Es folgen eine Diskussion möglicher Zwecke von Data Literacy und eine Abgrenzung des Begriffs. Daraus leiten sich Anforderungen an einen Kompetenzrahmen und an mögliche Mess- und Testinstrumente ab. Es folgt im nächsten Kapitel die Herleitung des Kompetenzrahmens. Nach einer Darstellung der historischen Entwicklung der Begriffsdefinition wird die kompetenzdefinierende Aufgabe der Wert-schöpfung aus Daten näher erläutert. Vor diesem Hintergrund werden Data Literacy und andere Kom-petenzen eingeordnet und voneinander abgegrenzt. Haltung als Kompetenzdimension wird im folgenden Unterkapitel betrachtet; es folgen im Ausblick Impulse zur Ableitung überprüfbarer Lernziele und zur didaktischen Umsetzung in anderen Disziplinen. Schließlich wird der Kompetenzrahmen in der Literatur verortet. 
Das nächste Kapitel beinhaltet die detaillierte Darstellung des Kompetenzrahmens für Data Literacy als Kernergebnis der Studie. Intention und Einsatzmöglichkeiten sowie die Gliederungsebenen werden vorgestellt; danach wird aufgezeigt, wie die Kompetenzdefinition in einen Kompetenzrahmen mündet. Insbesondere werden Kompetenzfelder und Kompetenzen sowie die (Wert-)Haltung als Kompetenzdimension vorgestellt. Zwei korrespondierende Kompetenzen werden im Detail vorgestellt. Anschließend beleuchtet der Forschungsbericht im Kapitel zur Messung und Testung von Data Literacy mögliche Instrumentarien, die zur praktischen Entwicklung von Mess- und Testinstrumenten für Data Literacy genutzt werden können, und schlägt einen Rahmen zu deren Einordnung vor. Aus der Literaturanalyse heraus ergeben sich verschiedene Varianten von objektiven und interpretativen Testverfahren als Ankerpunkte. Schließlich werden Beispiele multipler Methoden diskutiert. Das Abschlusskapitel Reflexion und Ausblick reflektiert die Arbeit und gibt einen kurzen Ausblick auf weitere Forschungsaufgaben. Zunächst wird die Diskussion mit Fachexpert*innen, Hochschulprofes-sor*innen und Führungskräften zusammengefasst. Sie thematisiert die veränderte Bedeutung von Data Literacy, die Wahrnehmung von Haltung als Kompetenzdimension und die Frage, inwiefern Data Literacy heute schon (standardisiert) gemessen wird. Der Ausblick auf Lernziele und Qualifikationsniveaus sowie die Testentwicklung beschließt den Forschungsbericht. 1.3 Abgrenzung und Bezug zum zweiten Arbeitspapier der Studie Die Studie „Entwicklung eines Kompetenzrahmens und vorbereitende Studie zur Kompetenzmessung von 21st Century Skills am Beispiel von Data Literacy“ umfasst insgesamt zwei Arbeitspapiere. Neben dem vorliegenden Forschungsbericht ist dies ein Systematic Review (Schüller \& Busch, 2019a). 
Das vorliegende Dokument fasst die Ergebnisse der Studie insgesamt zusammen und sammelt In-strumente, welche die Grundlage für Testentwicklung in einem Folgeprojekt bilden können. Es liefert zudem eine ausführliche Dokumentation des entwickelten Kompetenzrahmens einschließlich der Über-legungen zu seiner Herleitung. Das Systematic Review liefert einen ausführlichen Vorgehens- und Ergebnisbericht über die Re-cherche und Einordnung der Vorarbeiten. Er dient der weiteren Forschung sowie der Auswahl poten-ziell geeigneter, existierender Test- und Messinstrumente für Data Literacy für praktische Projekte.}, language = {de}, number = {Arbeitspapier 47}, institution = {Hochschulforum Digitalisierung}, author = {Schüller, Katharina and Busch, Paulina and Hindinger, Carina and {Geschäftsstelle Hochschulforum Digitalisierung beim Stifterverband für die Deutsche Wissenschaft e.}}, month = sep, year = {2019}, doi = {10.5281/zenodo.3349865}, pages = {114}, file = {Katharina Schüller et al. - 2019 - Future Skills Ein Framework für Data Literacy.pdf:C\:\\Users\\carst\\Zotero\\storage\\3QBR7WZ6\\Katharina Schüller et al. - 2019 - Future Skills Ein Framework für Data Literacy.pdf:application/pdf}, } @article{petras_digitales_2019, title = {Digitales {Datenmanagement} als {Berufsfeld} im {Kontext} der {Data} {Literacy}}, volume = {39}, issn = {2191-4664, 0720-6763}, doi = {10.1515/abitech-2019-1005}, abstract = {Das Institut für Bibliotheks- und Informationswissenschaft der Humboldt-Universität zu Berlin und der Fachbereich Informationswissenschaften der Fachhochschule Potsdam entwickeln den weiterbildenden Masterstudiengang Digitales Datenmanagement. Er vermittelt Kompetenzen der Forschungs- und Handlungsfelder im Datenmanagement unter Berücksichtigung der nationalen und internationalen wissenschaftspolitischen, organisatorischen, technischen und rechtlichen Rahmenbedingungen. 
Im Mittelpunkt steht die Vermittlung von Fähigkeiten im analytischen wie praktischen Umgang mit Forschungs- und Informationsinfrastrukturen besonders in der Domäne Wissenschaft und Forschung sowie in der Ermittlung von Anforderungen und Lösungen für Informationssysteme, die das Datenmanagement unterstützen.}, language = {de}, number = {1}, journal = {ABI Technik}, author = {Petras, Vivien and Kindling, Maxi and Neuroth, Heike and Rothfritz, Laura}, month = apr, year = {2019}, pages = {26--33}, file = {Petras et al. - 2019 - Digitales Datenmanagement als Berufsfeld im Kontex.pdf:C\:\\Users\\carst\\Zotero\\storage\\I5WKIUA3\\Petras et al. - 2019 - Digitales Datenmanagement als Berufsfeld im Kontex.pdf:application/pdf}, } @techreport{noauthor_towards_2011, address = {Brussels}, title = {Towards a {European} framework for research careers}, url = {https://cdn5.euraxess.org/sites/default/files/policy_library/towards_a_european_framework_for_research_careers_final.pdf}, language = {en}, institution = {European Commission}, month = jul, year = {2011}, pages = {11}, file = {2011 - Towards a European framework for research careers.pdf:C\:\\Users\\carst\\Zotero\\storage\\MM84EVZ4\\2011 - Towards a European framework for research careers.pdf:application/pdf}, } @techreport{whyte_skills_2018, title = {Skills and {Capability} {Framework}}, url = {https://eoscpilot.eu/sites/default/files/eoscpilot-d7.3.pdf}, abstract = {The report offers a Framework to help organisations to plan the professional development of their staff, as EOSC service operators or users, and for any individual to identify competences and learning materials that match the capabilities they need. It provides a set of core competences for data stewardship, relating topics to recommended expertise levels for researchers and the professional groups that support them. 
The Framework also offers examples of capability and competence statements, focusing these on skills areas that we have identified as gaps for stakeholders. The Framework provides an approach to describing similar competence and capability statements that will be of continued use as EOSC services evolve. It is of use to service operators and others with an interest in skills development. Conclusions are provided about the need to consult further on Research Infrastructure and other stakeholder expectations about the role of the Skills Framework in scoping and supporting the generic skills needed to enable the EOSC. This consultation is planned with EOSCpilot partners and Research Infrastructure training coordinators to inform the final recommendations of WP7.}, language = {en}, institution = {The European Open Science Cloud for Research Projekt}, author = {Whyte, Angus and de Vries, Jerry and Thorat, Rahul and Kuehn, Eileen and Sipos, Gergely and Cavalli, Valentino and Kalaitzi, Vasso and Ashley, Kevin}, month = jul, year = {2018}, pages = {63}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\WUYT56IS\\_.pdf:application/pdf}, } @article{ridsdale_strategies_2015, title = {Strategies and {Best} {Practices} for {Data} {Literacy} {Education}: {Knowledge} {Synthesis} {Report}}, url = {http://rgdoi.net/10.13140/RG.2.1.1922.5044}, doi = {10.13140/RG.2.1.1922.5044}, abstract = {We are a data-rich society; perhaps even data-driven (Pentland, 2013). In 2012, analysts estimated 90\% of the world’s data had come into existence within the previous 2 years (Vesset et al., 2014). Organizations in all sectors are struggling with this volume of data, confident that despite the velocity at which it is growing, and the variety of its formats, there is value. The goal is to transition from being data-rich to being information-rich and knowledge-rich, for which we need both data scientists and people capable of working effectively with data. 
The McKinsey Global Institute suggested that at current training rates, in the US alone there will be 140,000-­‐‑190,000 more jobs than trained data scientists by 2018 (Manyika et al., 2011). On the literacy, fluency, mastery scale, a data scientist would have achieved mastery. However, the same report also estimated a 1,500,000 employee shortfall of “data-­‐‑savvy” analysts and managers capable of working with the data to make effective decisions (Manyika et al., 2011); IDC suggests a similar number (Vesset et al., 2014). This latter set of skills is what we refer to as data literacy. Across academic disciplines and throughout the private sector, we are recognizing a growing need for data-­‐‑literate graduates from all backgrounds. The recent Tri-­‐‑Council consultation document on digital scholarship (Government of Canada, 2013) recognizes this challenge, and the issue of training in particular: “Digital data are the raw materials of the knowledge economy, and are becoming increasingly important for all areas of society, including industry… The same may be said of the capacity to capture, manage and preserve it, or the requisite training of personnel who can operate effectively in this milieu” (Government of Canada, 2013). This recognition prompts the core question addressed in this report: How can post-­‐‑secondary institutions in Canada best equip graduates with the knowledge, understanding, and skills required for the data-­‐‑rich knowledge economy? We addressed this question by examining existing strategies and best practices for teaching data literacy, synthesizing documented explicit knowledge (from both formal and informal literature) using a narrative-­‐‑synthesis methodology. When necessary, we used our team'ʹs expertise to aid in synthesizing and summarizing; this expertise spans multiple disciplines, including Science, Computer Science, Business, Information Management, Arts and Social Sciences, and Education. 
We begin by establishing the skills that comprise data literacy. Data literacy is the ability to collect, manage, evaluate, and apply data, in a critical manner. We define the core skills and competencies that comprise data literacy, using a thematic analysis of the elements of data literacy described in peer-­‐‑reviewed literature. These competencies (23 in total) and their skills, knowledge, and expected tasks (64 in total) are organized under the top-­‐‑level elements of the definition (data, collect, manage, evaluate, apply) and are categorized as conceptual competencies, core competencies, and advanced competencies. This view of data literacy is central to our synthesis, which includes two primary sections: the context and strategic value of data literacy education, and best practices for teaching data literacy across disciplines. There also remains much we do not know, and further steps that need to be taken, to understand data literacy instructions.}, language = {en}, author = {Ridsdale, Chantel and Rothwell, James and Smit, Mike and Bliemel, Michael and Irvine, Dean and Kelley, Daniel and Matwin, Stan and Wuetherick, Brad and Ali-Hassan, Hossam}, year = {2015}, file = {Ridsdale et al. - 2015 - Strategies and Best Practices for Data Literacy Ed.pdf:C\:\\Users\\carst\\Zotero\\storage\\CA4AMAMW\\Ridsdale et al. - 2015 - Strategies and Best Practices for Data Literacy Ed.pdf:application/pdf}, } @article{plomp_cultural_2019, title = {Cultural obstacles to research data management and sharing at {TU} {Delft}}, volume = {32}, issn = {2048-7754}, doi = {10.1629/uksg.484}, abstract = {Research data management (RDM) is increasingly important in scholarship. Many researchers are, however, unaware of the benefits of good RDM and unsure about the practical steps they can take to improve their RDM practices. Delft University of Technology (TU Delft) addresses this cultural barrier by appointing Data Stewards at every faculty. 
By providing expert advice and increasing awareness, the Data Stewardship project focuses on incremental improvements in current data and software management and sharing practices. This cultural change is accelerated by the Data Champions who share best practices in data management with their peers. The Data Stewards and Data Champions build a community that allows a discipline-specific approach to RDM. Nevertheless, cultural change also requires appropriate rewards and incentives. While local initiatives are important, and we discuss several examples in this paper, systemic changes to the academic rewards system are needed. This will require collaborative efforts of a broad coalition of stakeholders and we will mention several such initiatives. This article demonstrates that community building is essential in changing the code and data management culture at TU Delft.}, language = {en}, number = {1}, journal = {Insights}, author = {Plomp, Esther and Dintzner, Nicolas and Teperek, Marta and Dunning, Alastair}, month = oct, year = {2019}, pages = {29}, file = {Plomp et al. - 2019 - Cultural obstacles to research data management and.pdf:C\:\\Users\\carst\\Zotero\\storage\\56C3X2TW\\Plomp et al. 
- 2019 - Cultural obstacles to research data management and.pdf:application/pdf}, } @book{mehlberg_call_2019, title = {Call for {Instructors}: {Als} „library carpenter“ {Skills} zum praktischen {Umgang} mit {Daten} im {Bibliotheksalltag} vermitteln}, url = {https://www.vdb-online.org/2019/03/14/call-for-instructors-als-library-carpenter-skills-zum-praktischen-umgang-mit-daten-im-bibliotheksalltag-vermitteln/}, language = {de}, author = {Mehlberg, Martin}, month = mar, year = {2019}, file = {Verein Deutscher Bibliothekarinnen und Bibliothekare:C\:\\Users\\carst\\Zotero\\storage\\D6T84DDV\\call-for-instructors-als-library-carpenter-skills-zum-praktischen-umgang-mit-daten-im-bibliothe.html:text/html}, } @book{helbig_train--trainer_2019, title = {A train-the-trainer program for sustainable research data management training}, url = {https://zenodo.org/record/3466378#.X2eb64vgq-5}, abstract = {A large number of researchers need to be trained in FAIR and open research data management at universities in the next years. However, human resources are scarce at infrastructure facilities and knowledge is just being built up for open science and data literacy. To enhance competences in a smooth, sustainable and fast way, Humboldt-Universität zu Berlin and Freie Universität Berlin created a train-the-trainer program for research data management. With funding of the German Federal Ministry of Education and Research, the two universities designed a comprehensive concept that encompasses 22 learning units. 
The concept and the extensive accompanying training materials were published for re-use and adoption.}, language = {en}, author = {Helbig, Kerstin and Cortez, Katrin}, month = oct, year = {2019}, doi = {10.5281/zenodo.3466378}, file = {Helbig und Cortez - 2019 - A train-the-trainer program for sustainable resear.pdf:C\:\\Users\\carst\\Zotero\\storage\\THRGLLWR\\Helbig und Cortez - 2019 - A train-the-trainer program for sustainable resear.pdf:application/pdf}, } @book{georgy_big_2019, address = {Leipzig}, title = {Big {Data} in der {Bibliothek} bewältigen: {Der} {ZBIW}-{Zertifikatskurs} {Data} {Librarian}}, copyright = {https://creativecommons.org/licenses/by/4.0/deed.de}, shorttitle = {Big {Data} in der {Bibliothek} bewältigen}, url = {https://opus4.kobv.de/opus4-bib-info/frontdoor/index/index/docId/16475}, language = {de}, author = {Georgy, Ursula and Lanczek, Marvin}, month = mar, year = {2019}, file = {Georgy und Lanczek - Big Data in der Bibliothek bewältigen Der ZBIW-Ze.pdf:C\:\\Users\\carst\\Zotero\\storage\\NXJVF62T\\Georgy und Lanczek - Big Data in der Bibliothek bewältigen Der ZBIW-Ze.pdf:application/pdf}, } @article{federer_defining_2018, title = {Defining data librarianship: a survey of competencies, skills, and training}, volume = {106}, issn = {1558-9439, 1536-5050}, shorttitle = {Defining data librarianship}, url = {http://jmla.pitt.edu/ojs/jmla/article/view/306}, doi = {10.5195/JMLA.2018.306}, abstract = {Objectives: Many librarians are taking on new roles in research data services. However, the emerging field of data librarianship, including specific roles and competencies, has not been clearly established. 
This study aims to better define data librarianship by exploring the skills and knowledge that data librarians utilize and the training that they need to succeed.Methods: Librarians who do data-related work were surveyed about their work and educational backgrounds and asked to rate the relevance of a set of data-related skills and knowledge to their work.Results: Respondents considered a broad range of skills and knowledge important to their work, especially “soft skills” and personal characteristics, like communication skills and the ability to develop relationships with researchers. Traditional library skills like cataloging and collection development were considered less important. A cluster analysis of the responses revealed two types of data librarians: data generalists, who tend to provide data services across a variety of fields, and subject specialists, who tend to provide more specialized services to a distinct discipline.Discussion: The findings of this study suggest that data librarians provide a broad range of services to their users and, therefore, need a variety of skills and expertise. Libraries hiring a data librarian may wish to consider whether their communities will be best served by a data generalist or a subject specialist and write their job postings accordingly. These findings also have implications for library schools, which could consider adjusting their curricula to better prepare their students for data librarian roles. 
This article has been approved for the Medical Library Association’s Independent Reading Program.}, number = {3}, journal = {Journal of the Medical Library Association}, author = {Federer, Lisa}, month = jul, year = {2018}, pages = {294--303}, file = {Federer - 2018 - Defining data librarianship a survey of competenc.pdf:C\:\\Users\\carst\\Zotero\\storage\\3INGRUVU\\Federer - 2018 - Defining data librarianship a survey of competenc.pdf:application/pdf}, } @book{carlson_data_2015, address = {West Lafayette, Indiana}, series = {Purdue information literacy handbooks}, title = {Data information literacy: librarians, data, and the education of a new generation of researchers}, isbn = {978-1-55753-696-9}, shorttitle = {Data information literacy}, language = {en}, publisher = {Purdue University Press}, editor = {Carlson, Jake and Johnston, Lisa R.}, year = {2015}, } @inproceedings{kindling_data_2019, address = {Jülich}, series = {Schriften des {Forschungszentrums} {Jülich} {Reihe} {Bibliothek} / {Library}}, title = {Data {Literacy} {Education} – {Kooperative} {Vermittlung} von {Kompetenzen} für {Digitales} {Datenmanagement} am {Beispiel} des neuen {Masterstudiengangs} {Digitales} {Datenmanagement} der {HU} {Berlin} und {FH} {Potsdam}}, volume = {23}, isbn = {978-3-95806-405-8}, url = {http://hdl.handle.net/2128/22277}, abstract = {Am Institut für Bibliotheks- und Informationswissenschaft der Humboldt-Universität zu Berlin und dem Fachbereich Informationswissenschaften der Fachhochschule Potsdam wird ein neuer weiterbildender Masterstudiengang Digitales Datenmanagement (DDM) entwickelt, der zum Sommersemester 2020 starten soll. Er greift ein Desiderat an Aus- und Weiterbildung für die Data Literacy Education auf, das nicht nur seitens der Informationsinfrastruktureinrichtungen im Bereich Wissenschaft und Forschung formuliert wird. Der kompetente Umgang mit digitalen Daten in verschiedenen Szenarien wird in allen Domänen des digitalen Lebens und Arbeitens gebraucht. 
Der Studiengang DDM legt den Fokus auf Wissenschaft und Forschung und basiert auf Vorarbeiten an beiden Einrichtungen, die sich seit vielen Jahren mit diesen Themen befassen. Außerhalb der bibliotheks- und informationswissenschaftlichen Lehre und Forschung (im Folgenden abgekürzt: LIS, Library and Information Science) konnten diese Angebote bislang aber kaum Sichtbarkeit erreichen. Der Studiengang fokussiert auf Querschnittsthemen wie sie auch in Referenzrahmen formuliert werden, die außerhalb der Informationsinfrastruktur in Wissenschaft und Forschung international Beachtung finden. Die Studieninhalte von DDM bilden einen umfassenden Ansatz für Data Literacy Education ab. Sie vermitteln für Interessierte mit fachlichem Hintergrund anderer Domänen oder beruflichen Zielen außerhalb von Wissenschaft und Forschung unter anderem wichtige Rahmenbedingungen im Umgang mit digitalen Daten. Ergänzend wird die kritische Auseinandersetzung des Umgangs mit Daten unter Bezugnahme auf aktuelle Diskurse integriert.}, language = {de}, booktitle = {Schriften des {Forschungszentrums} {Jülich} {Reihe} {Bibliothek} / {Library} 23}, publisher = {Forschungszentrum Jülich GmbH, Zentralbibliothek, Verlag}, author = {Kindling, Maxi and Rothfritz, Laura}, year = {2019}, pages = {229--245}, file = {Mittermaier - 2019 - .pdf:C\:\\Users\\carst\\Zotero\\storage\\FZDVKU9B\\Mittermaier - 2019 - .pdf:application/pdf}, } @techreport{heidrich_future_2018, address = {Berlin}, type = {Arbeitspapier}, title = {Future {Skills}: {Ansätze} zur {Vermittlung} von {Data} {Literacy} in der {Hochschulbildung}.}, shorttitle = {Future {Skills}}, url = {https://hochschulforumdigitalisierung.de/sites/default/files/dateien/HFD_AP_Nr37_DALI_Studie.pdf}, abstract = {Die Fähigkeit, planvoll mit Daten umzugehen und sie im jeweiligen Kontext bewusst einsetzen und hinterfragen zu können wird im Zuge der digitalen Transformationen von zunehmender Wichtigkeit und stellt eine zentrale Kompetenz in allen Sektoren und 
Disziplinen dar. Auf der einen Seite werden Data Scientists benötigt, die in der Lage sind, speziell mit großen heterogenen Daten umzugehen und die Technologie rund um den Big-Data-Lifecycle beherrschen, um schnell Entscheidungen basierend auf Daten und daraus abgeleiteten Informationen ermöglichen zu können. Auf der anderen Seite werden in der Breite in allen Sektoren und Disziplinen Personen benötigt, welche die Fähigkeit besitzen, Daten auf kritische Art und Weise zu sammeln, zu managen, zu bewerten und anzuwenden. Diese Fähigkeiten werden unter dem Begriff Data Literacy zusammengefasst. Im Auftrag der Arbeitsgruppe Curriculum 4.0 des Hochschulforums Digitalisierung führten das Fraunhofer-Institut für Experimentelles Software-Engineering IESE und die Gesellschaft für Informatik eine Studie durch, um umsetzbares Wissen für Hochschulen und Fächer für die Curriculum-Entwicklung im Hinblick auf Data Literacy zusammenzustellen. Der Fokus lag dabei auf europäischen und internationalen Best-Practice-Beispielen, welche Angebote zur bedarfsgerechten, disziplinübergreifenden Vermittlung von Wissen zur datengestützten Arbeit und Entscheidungsfindung aufgebaut haben. Im Vordergrund stand die Wissensvermittlung von Data Literacy in den Anwendungsdomänen und nicht die Ausbildung von Data Scientists. Im Rahmen der Studie wurden dazu Best-Practice-Beispiele recherchiert, essentielle Literaturquellen und existierende Studien analysiert, Interviews mit Fachexperten und eine Online-Umfrage durchgeführt sowie mögliche Handlungsempfehlungen in einem internationalen Experten-Workshop erarbeitet. Basierend auf den Studienergebnissen lassen sich eine Reihe von Herausforderungen und Maßnahmen zusammenfassen, die im vorliegenden Bericht im Detail beschrieben werden. Hierbei sei angemerkt, dass die Maßnahmen einen teils heterogenen Lösungsraum mit verschiedenen Optionen aufspannen, die sich in einem bestimmten Anwendungskontext bewährt haben. 
Auf dieser Basis lässt sich zwar noch nicht direkt ein homogenes Maßnahmenpaket ableiten, aber die Studie liefert in den essentiellen Bereichen Anregungen, konkrete Lösungsbausteine und Praxisbeispiele, die von Hochschulakteuren und der Politik, als Grundlage genutzt werden können, um Curricula im Hinblick auf Data Literacy erfolgreich zu gestalten bzw. die richtigen Anreize und Strukturen dafür zu schaffen. Damit liefern die Studienergebnisse einen wichtigen Beitrag, um die für die digitale Transformation essentiellen Kompetenzen in Data Literacy nachhaltig aufbauen zu können.}, language = {de}, number = {Arbeitspapier 37}, institution = {Hochschulforum Digitalisierung}, author = {Heidrich, Jens and Bauer, Pascal and Krupka, Daniel and {Geschäftsstelle Hochschulforum Digitalisierung beim Stifterverband für die Deutsche Wissenschaft e.}}, year = {2018}, doi = {10.5281/zenodo.1413119}, pages = {113}, file = {Heidrich et al. - 2018 - Future Skills Ansätze zur Vermittlung von Data Li.pdf:C\:\\Users\\carst\\Zotero\\storage\\U5WV6R76\\Heidrich et al. 
- 2018 - Future Skills Ansätze zur Vermittlung von Data Li.pdf:application/pdf}, } @article{zenk-moltgen_factors_2018, title = {Factors influencing the data sharing behavior of researchers in sociology and political science}, volume = {74}, issn = {0022-0418}, url = {https://doi.org/10.1108/JD-09-2017-0126}, doi = {10.1108/JD-09-2017-0126}, number = {5}, journal = {Journal of Documentation}, author = {Zenk-Möltgen, Wolfgang and Akdeniz, Esra and Katsanidou, Alexia and Naßhoven, Verena and Balaban, Ebru}, month = jan, year = {2018}, pages = {1053--1073}, } @article{hartmann_rise-_2019, title = {{RISE}-{DE} – {Referenzmodell} für {Strategieprozesse} im institutionellen {Forschungsdatenmanagement}}, url = {https://zenodo.org/record/3585556#.X2eHIovgq-4}, doi = {10.5281/zenodo.3585556}, abstract = {Mit RISE-DE liegt als FDMentor-Projektergebnis ein Referenzmodell für Strategieprozesse im institutionellen Forschungsdatenmanagement (FDM) vor. RISE-DE bietet einen Bewertungsrahmen zur Selbstevaluation und Zielbestimmung und eignet sich als Werkzeug zur Gestaltung einer strukturierten, Stakeholder-orientierten Strategieentwicklung für das FDM an Hochschulen und Forschungseinrichtungen. RISE-DE basiert auf dem lose an Reifegradenmodellen orientierten Research Infrastructure Self-Evaluation Framework (RISE v1.1) des Digital Curation Centre (DCC), wurde aber für den Einsatz in partizipativen Prozessen deutlich überarbeitet sowie inhaltlich an den deutschen Wissenschaftskontext und Entwicklungen in der guten Praxis im FDM angepasst. Eine mit Hilfe von RISE-DE erarbeitete Strategie erfüllt zugleich die von der Hochschulrektorenkonferenz (HRK) und der League of European Research Universities (LERU) formulierten Empfehlungen. Die hier vorliegende RISE-DE Version 1.0 nimmt Erfahrungen aus dem Piloteinsatz an der Universität Potsdam sowie Feedback aus der Community auf. 
Es beinhaltet gegenüber der Vorversion zum einen Veränderungen an den Themen des Referenzmodells, zum anderen wurden Empfehlungen für FDM-Beginner deutlich erweitert und Erläuterungen für die Durchführung partizipativer Strategieprozesse hinzugefügt. In Zusammenarbeit mit der Hochschule für Angewandte Wissenschaften Hamburg entstand außerdem ein digitales Evaluations-Tool.}, language = {de}, author = {Hartmann, Niklas K. and Jacob, Boris and Weiß, Nadin}, month = oct, year = {2019}, file = {RISE-DE V1.0 Abb_1.jpg:C\:\\Users\\carst\\Zotero\\storage\\VLVN3SD6\\RISE-DE V1.0 Abb_1.jpg:image/jpeg;RISE-DE V1.0 Abb_2.jpg:C\:\\Users\\carst\\Zotero\\storage\\NGTPKL6I\\RISE-DE V1.0 Abb_2.jpg:image/jpeg;RISE-DE V1.0 Evaluations-Tool.xlsx:C\:\\Users\\carst\\Zotero\\storage\\9SG4QDA3\\RISE-DE V1.0 Evaluations-Tool.xlsx:application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;RISE-DE Referenzmodell V1.0.pdf:C\:\\Users\\carst\\Zotero\\storage\\DQMF88SA\\RISE-DE Referenzmodell V1.0.pdf:application/pdf}, } @techreport{kuberek_organisatorisch-technisches_2012, type = {Konzept}, title = {Organisatorisch-technisches {Konzept} für eine {Forschungsdaten}-{Infrastruktur} in der {TU} {Berlin} : {Langfassung}}, copyright = {Creative Commons BY-NC-SA 3.0}, url = {http://dx.doi.org/10.14279/depositonce-6604}, abstract = {Das vorliegende organisatorisch-technische Konzept hat zum Ziel, an der TU Berlin eine Forschungsdaten-Infrastruktur aufzubauen, die die langfristige Sicherung und Verfügbarkeit der hier entstandenen Forschungsdaten und der darauf basierenden Forschungspublikationen gewährleistet. Um die Forschungsdaten-Infrastruktur organisatorisch in der TU Berlin zu verankern und die Nachhaltigkeit der getroffenen Maßnahmen sicherzustellen, sieht das Konzept vor, dass Universitätsbibliothek (UB), IT-Service-Center (tubIT) und Forschungsabteilung in einem „Servicezentrum Forschungsdaten und -publikationen“ kooperieren. 
Zur Unterstützung der Wissenschaftler/innen stellt das Servicezentrum ein umfassendes Dienstleistungs- und Beratungsangebot zu Forschungsdaten und -publikationen bereit. Es wird eine technische Infrastruktur aufgebaut, die dem großen Spektrum unterschiedlicher Fachdisziplinen an der TU Berlin gerecht wird und in der Lage ist, Forschungsdaten vieler Disziplinen zu verwalten und zu speichern. Dabei werden Forschungsdaten und -publikationen als Einheit angesehen, die gegenseitig aufeinander referenzieren; beide werden im institutionellen Forschungsdaten-Repositorium der TU Berlin gespeichert. In dem Konzept wird berücksichtigt, dass eine Reihe von Fach-Communities in den vergangenen Jahren weltweit disziplinspezifische Forschungsdaten-Infrastrukturen aufgebaut hat, an denen sich auch Wissenschaftler/innen der TU Berlin beteiligen. Zur Dokumentation der von Wissenschaftler/innen der TU Berlin erbrachten Forschungsleistungen ist perspektivisch ein Gesamtnachweis – inkl. Link zu den Forschungsdatensets und Volltexten – über die verteilten Forschungsdaten-Repositorien (institutionell und weltweit) hinweg möglich.}, language = {de}, institution = {Technische Universität Berlin}, author = {Kuberek, Monika}, month = mar, year = {2012}, doi = {10.14279/depositonce-6604}, pages = {33}, file = {Kuberek - 2012 - Organisatorisch-technisches Konzept für eine Forsc.pdf:C\:\\Users\\carst\\Zotero\\storage\\C2HNER4Y\\Kuberek - 2012 - Organisatorisch-technisches Konzept für eine Forsc.pdf:application/pdf}, } @article{nrw_nachnutzbare_2019, title = {Nachnutzbare {Awarenessmaterialien} für {Forschungsdatenmanagement} ({FDM})}, url = {https://zenodo.org/record/2599867#.X2eK0ovgq-4}, doi = {10.5281/zenodo.2599867}, abstract = {In Rechenzentren, Bibliotheken und Einrichtungen für Forschungsförderung ist man sich der Bedeutung von FDM längst bewusst. 
Doch auch wenn Daten als Grundlage zukunftsfähiger Forschung gelten und Basis von Wissensgenerierung sind, bedarf es in der Breite der Forschenden noch viel Sensibilisierung und Aufklärung. Zur Unterstützung der Mitarbeiterinnen und Mitarbeiter der Infrastruktureinrichtungen bei dieser Aufgabe, hat die Landesinitiative fdm.nrw gemeinsam mit einer hochschulübergreifenden Arbeitsgruppe Awareness-Materialien für FDM entwickelt, die zur freien Nachnutzung und Anpassung unter der Lizenz CC0 (Ausnahmen wurden gekennzeichnet) zur Verfügung stehen. Kontakt: info@fdm.nrw}, language = {de}, author = {{Landesinitiative NFDI der Digitalen Hochschule NRW} and {AG FDM Awareness}}, month = mar, year = {2019}, } @article{helbig_aufbau_2018, title = {Aufbau und {Bekanntmachung} von {Informationsangeboten} über {Forschungsdatenmanagement} für {Forschende}}, copyright = {Copyright (c) 2018 Kerstin Helbig, Jens Dierkes, Janna Neumann}, url = {https://bausteine-fdm.de/article/view/7821}, doi = {10.17192/bfdm.2018.1.7821}, abstract = {Forschende stehen zunehmend vor der Herausforderung, sich mit dem Thema Forschungsdatenmanagement auseinander setzen zu müssen. Um sie dabei adäquat zu unterstützen, etablieren Zentraleinrichtungen wie Bibliotheken, Forschungsdezernate oder Rechenzentren Dienstleistungen und bieten Informationen zum Umgang mit Forschungsdaten an. Der Umfang dieser Informationsangebote hängt jedoch von den Kapazitäten und Kompetenzen der jeweiligen Serviceanbieter ab. Die nachfolgende Empfehlung informiert über potenzielle Dienstleistungen, deren Ressourcenaufwand sowie beteiligte Personen oder Einrichtungen. Die zugehörigen Erfahrungsberichte geben darüber hinaus einen praktischen Einblick in den Aufbau von Informationsangeboten zum Forschungsdatenmanagement.}, language = {de}, number = {1}, journal = {Bausteine Forschungsdatenmanagement}, author = {Helbig, Kerstin and Dierkes, Jens and Neumann, Janna}, month = oct, year = {2018}, pages = {1--6}, file = {Helbig et al. 
- 2018 - Aufbau und Bekanntmachung von Informationsangebote.pdf:C\:\\Users\\carst\\Zotero\\storage\\CXB4IR62\\Helbig et al. - 2018 - Aufbau und Bekanntmachung von Informationsangebote.pdf:application/pdf}, } @book{biernacka_wie_2018, title = {Wie publiziere ich {Forschungsdaten}?}, url = {https://zenodo.org/record/1440956#.X2eBJIvgq-4}, abstract = {Informationsmaterial zur Publikation von Forschungsdaten.}, language = {de}, author = {Biernacka, Katarzyna and Dolzycka, Dominika and Buchholz, Petra}, month = oct, year = {2018}, doi = {10.5281/zenodo.1440956}, file = {Biernacka et al. - 2018 - Wie publiziere ich Forschungsdaten.pdf:C\:\\Users\\carst\\Zotero\\storage\\IZXLJYF8\\Biernacka et al. - 2018 - Wie publiziere ich Forschungsdaten.pdf:application/pdf}, } @article{neumann_konzept_2018, title = {Konzept zum {Forschungsdatenmanagement} an der {Leibniz} {Universität} {Hannover}}, copyright = {Copyright (c) 2018 Janna Neumann}, url = {https://bausteine-fdm.de/article/view/7822}, doi = {10.17192/bfdm.2018.1.7822}, language = {de}, number = {1}, journal = {Bausteine Forschungsdatenmanagement}, author = {Neumann, Janna}, month = oct, year = {2018}, pages = {14--16}, file = {Neumann - 2018 - Konzept zum Forschungsdatenmanagement an der Leibn.pdf:C\:\\Users\\carst\\Zotero\\storage\\TM4CCUIJ\\Neumann - 2018 - Konzept zum Forschungsdatenmanagement an der Leibn.pdf:application/pdf}, } @techreport{wuttke_umfeldanalyse_2021, address = {Potsdam}, title = {Umfeldanalyse zum {Aufbau} einer neuen {Datenkultur} in {Brandenburg} ({FDM}-{BB})}, url = {https://doi.org/10.25932/publishup-48090}, language = {de}, author = {Wuttke, Ulrike and Neuroth, Heike and Rothfritz, Laura and Straka, Janine and Zeunert, Miriam and Schneemann, Carsten and Hartmann, Niklas and Radtke, Ina}, year = {2021}, } @article{schneemann_rahmendaten_2020, title = {Rahmendaten zu {FDM}-{Bundeslandinitiativen}}, url = {https://zenodo.org/record/4068170}, doi = {10.5281/zenodo.4068170}, abstract = {Diese 
Datensammlung erfolgte im Rahmen des vom Ministerium für Wissenschaft, Forschung und Kultur (MWFK) geförderten Projekts „Forschungsdatenmanagement Brandenburg“ (FDM-BB) und ist im Report 1 „Umfeldanalyse zum Aufbau einer neuen Datenkultur in Brandenburg (FDM-BB)“ ausgewertet worden. Die vorliegende Matrix dient dem Erfassen des Status Quo der FDM-Initiativen in den deutschen Bundesländern bis zum 31. März 2020.}, language = {de}, author = {Schneemann, Carsten and Zeunert, Miriam and Rothfritz, Laura and Neuroth, Heike and Hartmann, Niklas and Radtke, Ina}, month = nov, year = {2020}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\M2EXXTHF\\4068170.html:text/html}, } @techreport{radtke_anforderungserhebung_2020, address = {Potsdam}, title = {Anforderungserhebung bei den brandenburgischen {Hochschulen}}, url = {https://doi.org/10.25932/publishup-48091}, language = {de}, author = {Radtke, Ina and Hartmann, Niklas and Neuroth, Heike and Rothfritz, Laura and Wuttke, Ulrike and Straka, Janine and Zeunert, Miriam and Schneemann, Carsten}, year = {2020}, } @book{below_partizipation_2018, address = {Kassel}, series = {{FORUM} {Wirtschaftsrecht}}, title = {Partizipation und {Transparenz} der europäischen {Chemikalienregulierung} - {Juristische} {Analyse} der {Inklusionsleistung} der europäischen {Chemikalienregulierungsbehörden}}, isbn = {978-3-7376-0336-2}, url = {https://www.uni-kassel.de/ub/index.php?id=39129&s=978-3-7376-0336-2}, abstract = {Das neue europäische Chemikalienrecht (REACH), das darauf abzielt ein hohes Schutzniveau für die menschliche Gesundheit und die Umwelt sowie einen freien Warenverkehr und Innovation sicherzustellen, setzt im Zuge des Paradigmenwechsels von der „toxic ignorance“ hin zu einem kontinuierlich lernenden System Vorgaben für einen regen Austausch von Informationen innerhalb der nationalen und europäischen Administrativen sowie außerhalb mit zivilgesellschaftlichen und wirtschaftlichen Akteuren. 
Da der Erfolg des kontinuierlich lernenden Systems an ihrer Partizipations- und Transparenzqualität zu messen ist, fokussiert die Arbeit die Inklusionsleistungen der europäischen Chemikalienregulierungsbehörden und bewertet sie anhand höherrangiger europarechtlicher und völkerrechtlicher Vorgaben. Die Arbeit identifiziert Defizite und formuliert Gestaltungsoptionen, um die Behördenpraxis zu verbessern. Dabei folgt die Analyse dem Motiv der Gesetzesfolgenabschätzung im Sinne einer Rechtsverträglichkeitsprüfung. REACH, Aarhus Übereinkommen, Verwaltungsrecht, Partizipation, Transparenz, ECHA, KORA-Methode, responsive Regulierung, Betriebs- und Geschäftsgeheimnisse, Datenschutz, Informationsrecht, Nichtregierungsorganisationen, Lobbyismus, Dritter Sektor}, language = {de}, number = {24}, publisher = {kassel university press}, author = {Below, Nicola}, year = {2018}, doi = {10.19211/KUP9783737603379}, } @incollection{oswald_langzeitarchivierung_2012, address = {Boizenburg Göttingen}, title = {Langzeitarchivierung von {Forschungsdaten}. {Einführende} Überlegungen}, copyright = {CC BY-NC-SA}, isbn = {978-3-86488-008-7}, url = {urn:nbn:de:0008-2010071949}, language = {de}, booktitle = {Langzeitarchivierung von {Forschungsdaten}: eine {Bestandsaufnahme}}, publisher = {Hülsbusch Univ.-Verl. 
Göttingen}, author = {Oßwald, Achim and Scheffel, Regine and Neuroth, Heike}, editor = {Neuroth, Heike and Strathmann, Stefan and Oßwald, Achim and Scheffel, Regine and Klump, Jens and Ludwig, Jens and {nestor-Kompetenznetzwerk Langzeitarchivierung und Langzeitverfügbarkeit Digitaler Ressourcen für Deutschland}}, year = {2012}, } @article{cabrera_valdes_accelerator_1989, title = {Accelerator {14C} dates for early upper paleolithic (basal {Aurignacian}) at {El} {Castillo} {Cave} ({Spain})}, volume = {16}, issn = {0305-4403}, url = {http://www.sciencedirect.com/science/article/pii/030544038990023X}, doi = {10.1016/0305-4403(89)90023-X}, abstract = {Three fragments of charcoal taken from different parts of the lowermost bed containing Aurignacian artifacts at El Castillo Cave yielded AMS dates of 37·7 (± 1·8) ka bp, 38·5 (± 1·8) ka bp, and 40·0 (± 2·1) ka bp (average 38·7 ± 1·9 ka bp). These dates are almost identical to new AMS dates from l'Arbreda cave in Catalunya on the same cultural horizon (average 38·5 ± 1·0 ka bp) and are significantly older than the earliest dates for Aurignacian industries in the Aquitaine and in other parts of Central and Western Europe.}, language = {en}, number = {6}, journal = {Journal of Archaeological Science}, author = {Cabrera Valdes, Victoria and Bischoff, James L.}, month = nov, year = {1989}, pages = {577--584}, } @incollection{klump_langzeiterhaltung_2011, address = {Bad Honnef}, title = {Langzeiterhaltung digitaler {Forschungsdaten}}, isbn = {978-3-88347-283-6}, abstract = {Das Handbuch Forschungsdatenmanagement ist konzipiert als Leitfaden für das Selbststudium sowie zur Unterstützung der Aus- und Weiterbildung auf dem aktuellen Stand der Diskussion. 
Sie richtet sich insbesondere an Einsteiger im Forschungsdatenmanagement, aber gleichermaßen auch an wissenschaftliche Datenkuratoren, IT-Administratoren und Informationswissenschaftler, die ihre Aufgaben im Forschungsdatenmanagement nicht mehr nur einzelfall- oder disziplinorientiert, sondern in Hinblick auf die Arbeit in und an Forschungsdateninfrastrukturen wahrnehmen wollen. Und so war die Aufgabe für die Autorinnen und Autoren in ihrem Kapitel nicht nur den State-of-the-Art darzustellen, sondern das Thema so aufzubereiten, dass z. B. über die Referenzen das weitere Einarbeiten in die Themenfelder erleichtert wird. Zentrale Aspekte des Forschungsdatenmanagements werden in der Publikation aus informationswissenschaftlicher und anwendungsbezogener Perspektive disziplinübergreifend eingeführt.}, language = {de}, booktitle = {Handbuch {Forschungsdatenmanagement}}, publisher = {Bock + Herchen}, author = {Klump, Jens}, editor = {Büttner, Stephan and Hobohm, Hans-Christoph and Müller, Lars}, year = {2011}, pages = {115--119}, } @article{brand_failures_2020, title = {Failures and major issues}, url = {https://bausteine-fdm.de/article/view/8102}, doi = {10.17192/bfdm.2020.2.8102}, abstract = {Das "Scheitern" und die zentralen Herausforderungen bei der Entwicklung und Etablierung einer Informationsinfrastruktur fürs Forschungsdatenmanagement (FDM) sind Themenbereiche, die üblicherweise ungern diskutiert werden. Diese können jedoch zu Unzufriedenheit in dem ganzen Sonderforschungsbereich (SFB) führen, insbesondere aber auch zu Frustration bei den Mitarbeiterinnen und Mitarbeitern der Informationsinfrastruktur (INF)-Teilprojekte. Die Probleme und Herausforderungen manifestieren sich auf verschiedenen Ebenen, die wir in diesem Beitrag etwas näher beleuchten wollen. Die Diskussion am World-Café-Tisch lässt sich in folgende Bereiche bündeln: (i) Fehlender Zugang zu Bedarfen, (ii) unklare Rollen und Governance, (iii) Nachnutzung statt Neuentwicklung. 
Aufgrund des Projektcharakters von SFBs kann das "Scheitern" wesentliche Auswirkungen haben. Die in diesem Beitrag ausformulierten Stolpersteine sollten jedoch nicht notwendigerweise als vollständiges Scheitern von INF-Vorhaben betrachtet werden. Vielmehr hilft ein Wechsel auf die Perspektive von Forschenden: "Man hat unter bestimmten Annahmen und Rahmenbedingungen eine Entwicklung betrieben. Geht das Experiment schief, dann müssen die Annahmen und Rahmenbedingungen angepasst werden." Das Gelingen eines Infrastrukturprojektes hängt in komplexer Weise von vielen Faktoren ab. Eine gepflegte Kommunikation zwischen den Beteiligten, ein offenes Mindset auf allen Seiten und eine klare Verteilung der Rollen scheinen hier besonders wichtig.}, language = {de}, number = {2}, journal = {Bausteine Forschungsdatenmanagement}, author = {Brand, Ortrun and Dierkes, Jens}, month = nov, year = {2020}, pages = {89--96}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\SJN49NQK\\8102.html:text/html;Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\6R9QKZYV\\Brand und Dierkes - 2020 - Failures and major issues.pdf:application/pdf}, } @book{royal_society_science_2012, address = {London}, title = {Science as an open enterprise}, copyright = {CC BY-NC-SA}, isbn = {978-0-85403-962-3}, url = {https://royalsociety.org/-/media/policy/projects/sape/2012-06-20-saoe.pdf}, language = {en}, editor = {{Royal Society}}, year = {2012}, file = {Royal Society (Great Britain) - 2012 - Science as an open enterprise.pdf:C\:\\Users\\carst\\Zotero\\storage\\7IDZ7QS2\\Royal Society (Great Britain) - 2012 - Science as an open enterprise.pdf:application/pdf}, } @article{sturges_zugang_2014, title = {Zugang zu {Forschungsdaten}. {Die} {Notwendigkeit} von {Richtlinien} für {Data} {Sharing} bei {Fachzeitschriften}. 
{Das} {JoRD}-{Projekt} der {Universität} {Nottingham}}, volume = {17}, url = {https://www.b-i-t-online.de/heft/2014-05-fachbeitrag-sturges.pdf}, number = {5}, journal = {b.i.t.online}, author = {Sturges, Paul and Bamkin, Marianne and Anders, Jane and Hussain, Azhar}, year = {2014}, pages = {421--430}, } @book{noauthor_umgang_nodate, title = {Umgang mit {Forschungsdaten}: {DFG}-{Leitlinien} zum {Umgang} mit {Forschungsdaten}}, url = {https://www.dfg.de/foerderung/antrag_gutachter_gremien/antragstellende/nachnutzung_forschungsdaten/}, language = {de}, note = {Publication Title: Deutsche Forschungsgemeinschaft}, file = {DFG - Deutsche Forschungsgemeinschaft - Umgang mit Forschungsdaten:C\:\\Users\\carst\\Zotero\\storage\\G454TFJL\\nachnutzung_forschungsdaten.html:text/html}, } @book{strecker_fachgesellschaften_2019, title = {Fachgesellschaften und {Open} {Access} in {Deutschland} – eine {Analyse} zur {Herausgabe} von {Zeitschriften}}, url = {https://zenodo.org/record/3406288#.X2fQ4Ivgq-4}, abstract = {In der Debatte um die Open-Access-Transformation wird auch die Rolle wissenschaftlicher Fachgesellschaften diskutiert. Bisher gab es jedoch keine systematische Erhebung zum Einfluss von deutschen Fachgesellschaften auf das Publikationssystem. Dieser unbefriedigende Forschungsstand führte dazu, dass das Potenzial dieses wichtigen Akteurs bei der Open-Access-Transformation bisher weitgehende unbeachtet blieb und möglichen Barrieren auf Seiten der Fachgesellschaften bisher nicht adressiert wurden. Im Rahmen des BMBF-Projekts Options4OA werden die auch Publikations- und Open-Access-Aktivitäten deutscher Fachgesellschaften untersucht. Die Ergebnisse dieser Untersuchung wurden in einer Posterpräsentation auf den Open-Access-Tagen 2019 in Hannover erstmals vorgestellt. Eine Analyse des Publikationsverhaltens von 300 Fachgesellschaften zeigte, dass 39,33 \% (n = 118) herausgeberisch tätig sind. Mehr als die Hälfte dieser Fachgesellschaften gibt eine Zeitschrift heraus. 
Von den 118 untersuchten Zeitschriften sind nur 6,59 \% (n = 12) reine Open-Access-Zeitschriften, 56,04 \% (n = 102) bieten eine hybrid-Option an. Die Mehrzahl der reinen Open-Access-Zeitschriften verlangt keine Publikationsgebühren, Zeitschriften mit hyprid-Option verlangen durchschnittlich 2424 Euro. In der Datensammlung OpenAPC-de sind 1348 Publikationen aus 9 Zeitschriften nachgewiesen. Ein Großteil der Publikationen wurde über reine Open-Access-Zeitschriften veröffentlicht, für die durchschnittlich 1244,50 Euro Publikationsgebühren gezahlt wurden. Das Vorhaben wurde vom Bundesministerium für Bildung und Forschung (BMBF) im Rahmen des Projektes „Options4OA” gefördert (Förderkennzeichen: 16OA034). Weitere Informationen unter: https://os.helmholtz.de/projekte/options4oa/}, language = {de}, author = {Strecker, Dorothea and Pampel, Heinz}, month = sep, year = {2019}, doi = {10.5281/zenodo.3406288}, file = {Dorothea Strecker und Heinz Pampel - 2019 - Fachgesellschaften und Open Access in Deutschland .pdf:C\:\\Users\\carst\\Zotero\\storage\\IRC2VAAS\\Dorothea Strecker und Heinz Pampel - 2019 - Fachgesellschaften und Open Access in Deutschland .pdf:application/pdf}, } @book{senatskanzlei_geschafts-_und_koordinierungsstelle_govdata_datenportal_nodate, title = {Das {Datenportal} für {Deutschland}: {Open} {Government}: {Verwaltungsdaten} transparent, offen und frei nutzbar}, url = {https://www.govdata.de/}, author = {{Senatskanzlei, Geschäfts- und Koordinierungsstelle GovData}}, file = {GovData | Datenportal für Deutschland - GovData:C\:\\Users\\carst\\Zotero\\storage\\5IKIEYRC\\www.govdata.de.html:text/html}, } @techreport{noauthor_din_nodate, title = {{DIN} {SPEC} 91357:2017-12, {Referenzarchitekturmodell} {Offene} {Urbane} {Plattform} ({OUP})}, shorttitle = {{DIN} {SPEC} 91357}, url = {https://www.beuth.de/de/-/-/281077528}, institution = {Beuth Verlag GmbH}, doi = {10.31030/2780217}, } @article{davies_open_2014, title = {Open {Data}: {Growing} {Up} and {Getting} 
{Specific}}, volume = {6}, issn = {2075-9517}, shorttitle = {Open {Data}}, url = {https://jedem.org/index.php/jedem/article/view/344}, doi = {10.29379/jedem.v6i1.344}, language = {en}, number = {1}, journal = {JeDEM - eJournal of eDemocracy and Open Government}, author = {Davies, Tim and Janssen, Marijn and Schieferdecker, Ina and Höchtl, Jan}, month = nov, year = {2014}, pages = {i--iii}, file = {Daviees et al. - 2014 - Open Data Growing Up and Getting Specific.pdf:C\:\\Users\\carst\\Zotero\\storage\\MW5QSWRE\\Daviees et al. - 2014 - Open Data Growing Up and Getting Specific.pdf:application/pdf}, } @techreport{bauer_zukunftsfahige_2020, title = {Zukunftsfähige {Städte} und {Regionen}. {Eine} neue {Strategie} für die breite {Umsetzung} nachhaltiger {Stadentwicklung} in {Deutschland}}, author = {Bauer, Wilhelm and Radecki, Alanus von and Ottendörfer, Eva}, year = {2020}, } @book{noauthor_hightech_nodate, title = {Hightech {Forum}: {Offene} {Wissenschaft} und {Innovation}}, url = {https://www.hightech-forum.de/beratungsthemen/offene-wissenschaft-und-innovation/}, language = {de}, note = {Publication Title: Hightech Forum}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\WYWFPLGG\\offene-wissenschaft-und-innovation.html:text/html}, } @techreport{science_europe_science_2018, address = {Brussels}, title = {Science {Europe} {Guidance} {Document}: {Presenting} a {Framework} for {Disciplinespecific} {Research} {Data} {Management}}, copyright = {CC BY}, url = {http://www.snf.ch/SiteCollectionDocuments/science_europe_guidance-document.pdf}, language = {en}, author = {{Science Europe}}, year = {2018}, pages = {46}, } @techreport{science_europe_practical_2018, address = {Brussels}, title = {Practical {Guide} to the {International} {Alignment} of {Research} {Data} {Management}}, copyright = {CC BY}, url = {https://www.scienceeurope.org/media/jezkhnoo/se_rdm_practical_guide_final.pdf}, language = {en}, author = {{Science Europe}}, year = {2018}, pages = {36}, } 
@book{forschung_data_nodate, title = {Data {Management} {Plan} ({DMP}) - {Leitlinien} für {Forschende}}, url = {http://www.snf.ch/de/derSnf/forschungspolitische_positionen/open_research_data/Seiten/data-management-plan-dmp-leitlinien-fuer-forschende.aspx}, language = {de}, author = {Forschung, Schweizerischer Nationalfonds zur Förderung der wissenschaftlichen}, } @book{schweizerischer_nationalfonds_zur_forderung_der_wissenschaftlichen_forschung_data_2017, title = {Data {Management} {Plan} – {mySNF} {Formular}}, url = {http://www.snf.ch/SiteCollectionDocuments/DMP_content_mySNF-form_de.pdf}, language = {de}, author = {{Schweizerischer Nationalfonds zur Förderung der wissenschaftlichen Forschung}}, month = jun, year = {2017}, } @techreport{noauthor_multi-beneficiary_2017-1, title = {Multi-{Beneficiary} {Model} {Grant} {Agreement}: {ERC} {Starting} {Grants}, {Consolidator} {Grants}, {Advanced} {Grants} and {Synergy} {Grants} ({H2020} {ERC} {MGA} – {Multi})}, url = {http://ec.europa.eu/research/participants/data/ref/h2020/mga/erc/ h2020-mga-erc-multi_en.pdf}, language = {en}, number = {Version 5.0}, institution = {European Research Council (ERC)}, month = oct, year = {2017}, pages = {[151]}, } @techreport{rylance_concordat_2016, title = {Concordat on {Open} {Research} {Data}}, url = {https://www.ukri.org/files/legacy/documents/concordatonopenresearchdata-pdf/}, abstract = {The Concordat on Open Research Data has been developed by a UK multi-stakeholder group. 
This concordat will help to ensure that the research data gathered and generated by members of the UK research community is made openly available for use by others wherever possible in a manner consistent with relevant legal, ethical, disciplinary and regulatory frameworks and norms, and with due regard to the costs involved.}, language = {en}, author = {Rylance, Rick and Wingham, Duncan and Wright, Nick and Bruce, Rachel and Hammonds, William and Arrowsmith, Jamie and Johnson, Ben and Thorley, Mark and Jones, Tim and Jubb, Michael and Hrynaszkiewicz, Iain and Maricevic, Maja and Carr, David and Woollard, Matthew and Bradshaw, Tim}, month = jul, year = {2016}, pages = {[24]}, file = {2016 - Concordat on Open Research Data.pdf:C\:\\Users\\carst\\Zotero\\storage\\RH2VTSWA\\2016 - Concordat on Open Research Data.pdf:application/pdf}, } @techreport{noorman_institutional_2014, title = {Institutional barriers and good practice solutions}, url = {https://doi.org/10.5281/zenodo.1297493}, abstract = {In this fourth RECODE deliverable we focus on the challenges faced by institutions, such as archives, libraries, universities, data centres and funding bodies, in making open access to research data possible. Policy makers and the scientific community expect these institutions to play an important role in creating and funding data sharing infrastructures and stimulating and assisting researchers to make their research material public. They look towards these institutions to curate and preserve information, and provide guidance to researchers in managing their data.}, language = {en}, institution = {Zenodo}, author = {Noorman, M. and Kalaitzi, V. and Angelaki, M. and Tsoukala, V. and Sveinsdottir, T. and Price, L. and Wessels, B.}, month = sep, year = {2014}, doi = {10.5281/zenodo.1297493}, pages = {73}, file = {Noorman et al. - 2013 - Institutional barriers and good practice solutions.pdf:C\:\\Users\\carst\\Zotero\\storage\\WKZ5SLRH\\Noorman et al. 
- 2013 - Institutional barriers and good practice solutions.pdf:application/pdf}, } @article{michael_franke_positionspapier_2015, title = {Positionspapier „{Research} data at your fingertips“ der {Arbeitsgruppe} {Forschungsdaten}}, copyright = {CC-BY 4.0}, url = {https://gfzpublic.gfz-potsdam.de/pubman/faces/ViewItemFullPage.jsp?itemId=item_986897}, doi = {10.2312/allianzfd.001}, language = {de}, journal = {Schwerpunktinitiative „Digitale Information“ der Allianz der deutschen Wissenschaftsorganisationen}, author = {{Michael Franke} and Heinzel, Stefan and Mauer, Reiner and Neumann, Janna and Neuroth, Heike and Pfeiffenberger, Hans and Senst, Henriette and Siegberg, Andrea and Sitek, Dagmar and Spiecker, Claus and Wambsganß, Joachim and Weber, Danny and Winkler-Nees, Stefan}, year = {2015}, pages = {4}, file = {Arbeitsgruppe Forschungsdaten et al. - 2015 - Positionspapier „Research data at your fingertips“.pdf:C\:\\Users\\carst\\Zotero\\storage\\W8DMBXXF\\Arbeitsgruppe Forschungsdaten et al. 
- 2015 - Positionspapier „Research data at your fingertips“.pdf:application/pdf}, } @book{noauthor_open_nodate-4, title = {Open {Access}}, url = {https://erc.europa.eu/funding-and-grants/managing-project/open-access}, language = {en}, note = {Publication Title: European Research Council}, } @techreport{noauthor_h2020_2017, title = {H2020 {Programme}: {Guidelines} to the {Rules} {onOpen} {Accessto} {Scientific} {Publications} {andOpen} {Access} to {Research} {Datain} {Horizon} 2020}, url = {https://ec.europa.eu/research/participants/data/ref/h2020/grants_manual/hi/oa_pilot/h2020-hi-oa-pilot-guide_en.pdf}, language = {en}, number = {Version 3.2}, institution = {European Commission}, month = mar, year = {2017}, pages = {11}, } @book{noauthor_gute_nodate, title = {Gute wissenschaftliche {Praxis}: ‚{Leitlinien} zur {Sicherung} guter wissenschaftlicher {Praxis}‘}, url = {https://www.dfg.de/foerderung/grundlagen_rahmenbedingungen/gwp}, language = {de}, note = {Publication Title: Deutsche Forschungsgemeinschaft}, } @book{deutsche_forschungsgemeinschaf_leitlinien_2019, address = {Bonn}, title = {Leitlinien zur {Sicherung} guter wissenschaftlicher {Praxis}: {Kodex}}, isbn = {978-3-527-34740-7}, shorttitle = {Leitlinien zur {Sicherung} guter wissenschaftlicher {Praxis}}, url = {https://www.dfg.de/download/pdf/foerderung/rechtliche_rahmenbedingungen/gute_wissenschaftliche_praxis/kodex_gwp.pdf}, language = {de}, editor = {{Deutsche Forschungsgemeinschaf}}, year = {2019}, file = {kodex_leitlinien_gwp_dfg.pdf:C\:\\Users\\carst\\Zotero\\storage\\3W7CYSE4\\kodex_leitlinien_gwp_dfg.pdf:application/pdf;code_of_conduct_dfg.pdf:C\:\\Users\\carst\\Zotero\\storage\\8LRHFWFD\\code_of_conduct_dfg.pdf:application/pdf;Deutsche Forschungsgemeinschaf - 2019 - Leitlinien zur Sicherung guter wissenschaftlicher .pdf:C\:\\Users\\carst\\Zotero\\storage\\NEYI258S\\Deutsche Forschungsgemeinschaf - 2019 - Leitlinien zur Sicherung guter wissenschaftlicher .pdf:application/pdf}, } 
@book{noauthor_cmip_2017, title = {{CMIP} {Phase} 6 ({CMIP6})}, url = {https://www.wcrp-climate.org/wgcm-cmip/}, year = {2017}, note = {Publication Title: World Climate Research Programme}, } @article{skala_europaweiter_2018, title = {Europaweiter {Vergleich}: {Darum} liegt {Deutschland} bei der {Digitalisierung} hinten}, issn = {0174-4909}, shorttitle = {Europaweiter {Vergleich}}, url = {https://www.faz.net/1.5480625}, abstract = {Während die designierte CSU-Staatsministerin Dorothee Bär schon von Flugtaxis träumt, sind viele Regionen in Deutschland weiterhin vom schnellen Internet abgeschnitten. Europäische Nachbarn sind deutlich weiter. Was kann Deutschland von diesen Ländern lernen?}, language = {de}, journal = {FAZ.NET}, author = {Skala, Fridolin}, year = {2018}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\DCUA3P27\\digitalisierung-darum-liegt-deutschland-im-eu-vergleich-hinten-15480625.html:text/html}, } @book{milch_professor_1998, title = {Professor {Friedrich} {Hertweck} emeritiert: {Wegbereiter} des {Supercomputing} in {Deutschland}}, url = {https://www.ipp.mpg.de/ippcms/de/presse/archiv/10_98_pi}, abstract = {Einer der bedeutendsten Wegbereiter der Supercomputer in Deutschland, Professor Dr. Friedrich Hertweck, ging Ende November in den Ruhestand. 
Bis zu seiner Emeritierung leitete er den Bereich Informatik des Max-Planck-Instituts für Plasmaphysik in Garching und war Mitglied der Wissenschaftlichen Leitung.}, language = {de}, author = {Milch, Isabella}, year = {1998}, note = {Publication Title: Max-Planck-Institut für Plasmaphysik}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\TJZYB3D5\\10_98_pi.html:text/html}, } @book{noauthor_guide_nodate, title = {A {Guide} to the {Internet} of {Things} {Infographic}}, url = {https://www.intel.com/content/dam/www/public/us/en/images/iot/guide-to-iot-infographic.png}, language = {en}, note = {Publication Title: intel}, file = {guide-to-iot-infographic.png:C\:\\Users\\carst\\Zotero\\storage\\YGJUE8C9\\guide-to-iot-infographic.png:image/png}, } @book{noauthor_fair_2016, title = {The {FAIR} {Data} {Principles}}, url = {https://www.force11.org/group/fairgroup/fairprinciples}, abstract = {One of the grand challenges of data-intensive science is to facilitate knowledge discovery by assisting humans and machines in their discovery of, access to, integration and analysis of, task-appropriate scientific data and their associated algorithms and workflows. Here, we describe FAIR - a set of guiding principles to make data Findable, Accessible, Interoperable, and Reusable. 
The term FAIR was launched at a Lorentz workshop in 2014, the resulting FAIR principles were published in 2016.}, language = {en}, year = {2016}, note = {Publication Title: FORCE11}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\K3MPKVRR\\fairprinciples.html:text/html}, } @book{noauthor_fair_2019-1, title = {{FAIR} {DO} {Session}}, url = {http://codata2019.csp.escience.cn/dct/page/ 70006}, year = {2019}, } @techreport{crowdflower_2017_nodate, title = {2017 {Data} {Scientist} {Report}}, url = {https://www.kdnuggets.com/the-2017-data-scientist-report-is-now-available.html/}, abstract = {For the third year in a row, CrowdFlower surveyed data scientists (nearly 200 this year) from all manner of organizations, which they have compiled into one free report which you can be downloaded now. This year, lots of insights into the word of AI are included.}, language = {en-US}, author = {{CrowdFlower}}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\8AJQXRXU\\crowdflower-data-science-report-available.html:text/html}, } @article{oliveira_investigations_2019, title = {Investigations into {Data} {Ecosystems}: a systematic mapping study}, volume = {61}, issn = {0219-3116}, shorttitle = {Investigations into {Data} {Ecosystems}}, url = {https://doi.org/10.1007/s10115-018-1323-6}, doi = {10.1007/s10115-018-1323-6}, abstract = {Data Ecosystems are socio-technical complex networks in which actors interact and collaborate with each other to find, archive, publish, consume, or reuse data as well as to foster innovation, create value, and support new businesses. While the Data Ecosystem field is thus arguably gaining in importance, research on this subject is still in its early stages of development. Up until now, not many academic papers related to Data Ecosystems have been published. Furthermore, to the best of our knowledge, there has been no systematic review of the literature on Data Ecosystems. 
In this study, we provide an overview of the current literature on Data Ecosystems by conducting a systematic mapping study. This study is intended to function as a snapshot of the research in the field and by doing so identifies the different definitions of Data Ecosystem and analyzes the evolution of Data Ecosystem research. The studies selected have been classified into categories related to the study method, contribution, research topic, and ecosystem domains. Finally, we analyze how Data Ecosystems are structured and organized, and what benefits can be expected from Data Ecosystems and what their limitations are.}, language = {en}, number = {2}, journal = {Knowledge and Information Systems}, author = {Oliveira, Marcelo Iury S. and Barros Lima, Glória de Fátima and Farias Lóscio, Bernadette}, month = jan, year = {2019}, pages = {589--630}, } @phdthesis{nwatchock_a_koul_framework_2019, type = {{PhD} {Thesis}}, title = {A framework for fair and responsible data market ecosystems}, url = {https://archive-ouverte.unige.ch/unige:121388}, abstract = {As access to information has become critically important in our society, we are witnessing an “information race” where many initiatives for data access are proliferating. Recently, a new economy around data has emerged with a growing number of data markets. The term data market covers a whole range of activities where value is derived from data, thus providing benefits to many stakeholders. The data market ecosystem is for its most part uncontrolled, and the actions for creating a secure space are highly fragmented. These main issues undermine the emergence and the development of this critically important ecosystem for the future. Hence, this dissertation addresses the question of the design of fair and responsible data market ecosystems. 
We study the constituents of data markets and propose a global approach towards the design of a framework for fair and responsible data market ecosystems enabling transparency, trust, fairness and accountability.}, language = {en}, school = {University of Geneva}, author = {Nwatchock A Koul, Aman Sabrina}, year = {2019}, doi = {10.13097/archive-ouverte/unige:121388}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\S779TMHJ\\unige121388.html:text/html;unige_121388_attachment01.pdf:C\:\\Users\\carst\\Zotero\\storage\\FQZW389H\\unige_121388_attachment01.pdf:application/pdf}, } @techreport{kommission_aufbau_2017, title = {Aufbau einer europäischen {Datenwirtschaft}}, url = {https://eur-lex.europa.eu/legal-content/DE/TXT/PDF/?uri=CELEX:52017DC0009&from=EN.com (2017) 9 final}, language = {de}, author = {Kommission, Europaische}, month = jan, year = {2017}, pages = {20}, file = {2017 - Aufbau einer europäischen Datenwirtschaft.pdf:C\:\\Users\\carst\\Zotero\\storage\\7244TU3P\\2017 - Aufbau einer europäischen Datenwirtschaft.pdf:application/pdf}, } @techreport{kommission_fur_2014, title = {Für eine florierende datengesteuerte {Wirtschaft}}, url = {https://eur-lex.europa.eu/legal-content/DE/TXT/PDF/?uri=CELEX:52014DC0442&from=EN.com%20(2014)%20442%20final}, language = {de}, author = {Kommission, Europäische}, month = jul, year = {2014}, pages = {13}, file = {2014 - Für eine florierende datengesteuerte Wirtschaft.pdf:C\:\\Users\\carst\\Zotero\\storage\\WP7N9X9P\\2014 - Für eine florierende datengesteuerte Wirtschaft.pdf:application/pdf}, } @article{sandt_definition_2019, title = {The {Definition} of {Reuse}}, volume = {18}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/10.5334/dsj-2019-022/}, doi = {10.5334/dsj-2019-022}, abstract = {The ability to reuse research data is now considered a key benefit for the wider research community. Researchers of all disciplines are confronted with the pressure to share their research data so that it can be reused. 
The demand for data use and reuse has implications on how we document, publish and share research in the first place, and, perhaps most importantly, it affects how we measure the impact of research, which is commonly a measurement of its use and reuse. It is surprising that research communities, policy makers, etc. have not clearly defined what use and reuse is yet. We postulate that a clear definition of use and reuse is needed to establish better metrics for a comprehensive scholarly record of individuals, institutions, organizations, etc. Hence, this article presents a first definition of reuse of research data. Characteristics of reuse are identified by examining the etymology of the term and the analysis of the current discourse, leading to a range of reuse scenarios that show the complexity of today’s research landscape, which has been moving towards a data-driven approach. The analysis underlines that there is no reason to distinguish use and reuse. We discuss what that means for possible new metrics that attempt to cover Open Science practices more comprehensively. We hope that the resulting definition will enable a better and more refined strategy for Open Science.}, language = {en}, number = {1}, journal = {Data Science Journal}, author = {Sandt, Stephanie van de and Dallmeier-Tiessen, Sünje and Lavasa, Artemis and Petras, Vivien}, month = jun, year = {2019}, pages = {22}, file = {Sandt et al. - 2019 - The Definition of Reuse.pdf:C\:\\Users\\carst\\Zotero\\storage\\WQLKAEDM\\Sandt et al. 
- 2019 - The Definition of Reuse.pdf:application/pdf}, } @book{harvey_analytic_nodate, title = {Analytic {Quality} {Glossary}}, copyright = {copyright Lee Harvey}, url = {http://www.qualityresearchinternational.com/glossary/}, author = {Harvey, Lee}, note = {Publication Title: Quality Research International}, file = {Glossary index:C\:\\Users\\carst\\Zotero\\storage\\Z9DJLPE4\\glossary.html:text/html}, } @techreport{digital_repository_of_ireland_metadata_2014, type = {Digital {Repository} of {Ireland} {Series}}, title = {Metadata {Quality} {Control}}, copyright = {CC BY}, url = {http://dri.ie/sites/default/files/files/metadata-quality-control.pdf}, language = {en}, institution = {Royal Irish Academy}, author = {{Digital Repository of Ireland} and McCarthy, Kate}, year = {2014}, doi = {10.3318/DRI.2015.1}, pages = {[4]}, file = {Digital Repository of Ireland und McCarthy - 2015 - Metadata Quality Control.pdf:C\:\\Users\\carst\\Zotero\\storage\\DFWESUBT\\Digital Repository of Ireland und McCarthy - 2015 - Metadata Quality Control.pdf:application/pdf}, } @article{meyer-doerpinghaus_forschungsdatenmanagement_2015, title = {Forschungsdatenmanagement als {Herausforderung} für {Hochschulen} und {Hochschulbibliotheken}}, volume = {2}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/2015H4S65-72}, doi = {10.5282/o-bib/2015H4S65-72}, abstract = {Eines der wichtigsten neuen Handlungsfelder der Forschung, das im Zuge der Digitalisierung von Information entstanden ist, ist das Management von Forschungsdaten. Die Hochschulen müssen sich darauf einstellen, ihren Wissenschaftlerinnen und Wissenschaftlern die notwendigen Strukturen und Services zur Verfügung zu stellen. Die in der Hochschulrektorenkonferenz (HRK) organisierten Leitungen der deutschen Hochschulen sehen darin eine zentrale Aufgabe. 
Die Universität Münster geht mit gutem Beispiel voran: In enger Zusammenarbeit mit der Hochschulleitung hat die Universitäts- und Landesbibliothek damit begonnen, Strukturen und Services zur Unterstützung des Forschungsdatenmanagements aufzubauen.}, language = {de}, number = {4}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Meyer-Doerpinghaus, Ulrich and Tröger, Beate}, month = dec, year = {2015}, pages = {65--72}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\FMECBMID\\2015H4S65-72.html:text/html;Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\M72L58UF\\Meyer-Doerpinghaus und Tröger - 2015 - Forschungsdatenmanagement als Herausforderung für .pdf:application/pdf}, } @article{wilkinson_interoperability_2017, title = {Interoperability and {FAIRness} through a novel combination of {Web} technologies}, volume = {3}, issn = {2376-5992}, url = {https://peerj.com/articles/cs-110}, doi = {10.7717/peerj-cs.110}, abstract = {Data in the life sciences are extremely diverse and are stored in a broad spectrum of repositories ranging from those designed for particular data types (such as KEGG for pathway data or UniProt for protein data) to those that are general-purpose (such as FigShare, Zenodo, Dataverse or EUDAT). These data have widely different levels of sensitivity and security considerations. For example, clinical observations about genetic mutations in patients are highly sensitive, while observations of species diversity are generally not. The lack of uniformity in data models from one repository to another, and in the richness and availability of metadata descriptions, makes integration and analysis of these data a manual, time-consuming task with no scalability. Here we explore a set of resource-oriented Web design patterns for data discovery, accessibility, transformation, and integration that can be implemented by any general- or special-purpose repository as a means to assist users in finding and reusing their data holdings. 
We show that by using off-the-shelf technologies, interoperability can be achieved atthe level of an individual spreadsheet cell. We note that the behaviours of this architecture compare favourably to the desiderata defined by the FAIR Data Principles, and can therefore represent an exemplar implementation of those principles. The proposed interoperability design patterns may be used to improve discovery and integration of both new and legacy data, maximizing the utility of all scholarly outputs.}, language = {en}, journal = {PeerJ Computer Science}, author = {Wilkinson, Mark D. and Verborgh, Ruben and Santos, Luiz Olavo Bonino da Silva and Clark, Tim and Swertz, Morris A. and Kelpin, Fleur D. L. and Gray, Alasdair J. G. and Schultes, Erik A. and Mulligen, Erik M. van and Ciccarese, Paolo and Kuzniar, Arnold and Gavai, Anand and Thompson, Mark and Kaliyaperumal, Rajaram and Bolleman, Jerven T. and Dumontier, Michel}, month = apr, year = {2017}, pages = {e110}, } @article{tenopir_research_2017, title = {Research {Data} {Services} in {European} {Academic} {Research} {Libraries}}, volume = {27}, issn = {2213-056X}, url = {http://www.liberquarterly.eu/article/10.18352/lq.10180/}, doi = {10.18352/lq.10180}, abstract = {Research data is an essential part of the scholarly record, and management of research data is increasingly seen as an important role for academic libraries. This article presents the results of a survey of directors of the Association of European Research Libraries (LIBER) academic member libraries to discover what types of research data services (RDS) are being offered by European academic research libraries and what services are planned for the future. Overall, the survey found that library directors strongly agree on the importance of RDS. As was found in earlier studies of academic libraries in North America, more European libraries are currently offering or are planning to offer consultative or reference RDS than technical or hands-on RDS. 
The majority of libraries provide support for training in skills related to RDS for their staff members. Almost all libraries collaborate with other organizations inside their institutions or with outside institutions in order to offer or develop policy related to RDS. We discuss the implications of the current state of RDS in European academic research libraries, and offer directions for future research.}, language = {en}, number = {1}, journal = {LIBER Quarterly}, author = {Tenopir, Carol and Talja, Sanna and Horstmann, Wolfram and Late, Elina and Hughes, Dane and Pollock, Danielle and Schmidt, Birgit and Baird, Lynn and Sandusky, Robert and Allard, Suzie}, month = feb, year = {2017}, pages = {23--44}, file = {Tenopir et al. - 2017 - Research Data Services in European Academic Resear.pdf:C\:\\Users\\carst\\Zotero\\storage\\G5H6EC2K\\Tenopir et al. - 2017 - Research Data Services in European Academic Resear.pdf:application/pdf}, } @inproceedings{atkinson_shaping_2010, title = {Shaping {Ramps} for {Data}-{Intensive} {Research}}, url = {https://eprints.soton.ac.uk/271235/}, abstract = {An 'Intellectual Ramp' enables researchers to move incrementally from their current practice into the adoption of new methods. An investigation of Ramps is an important step towards "crossing the chasm" so that researchers can benefit from new tools, technologies and approaches. This paper will define and explain the concept of Ramps, discuss requirements and the anatomy of Ramps, and propose a measurement framework illustrated by examples to improve the understanding of why ramps work.}, language = {en}, author = {Atkinson, Malcolm and De Roure, David and van Hemert, Jano and Michaelides, Danius}, year = {2010}, pages = {3}, file = {Atkinson et al. - 2010 - Shaping Ramps for Data-Intensive Research.pdf:C\:\\Users\\carst\\Zotero\\storage\\WFAQUPRJ\\Atkinson et al. 
- 2010 - Shaping Ramps for Data-Intensive Research.pdf:application/pdf}, } @article{brand_hefdi_2018, title = {{HeFDI} – {Die} landesweite {Initiative} zum {Aufbau} von {Forschungsdateninfrastrukturen} in {Hessen}}, volume = {5}, copyright = {Copyright (c) 2018 Wolfgang Stille, Ortrun Brand, Joachim Schachtner}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/2018H2S14-27}, doi = {10.5282/o-bib/2018H2S14-27}, language = {de}, number = {2}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Brand, Ortrun and Stille, Wolfgang and Schachtner, Joachim}, month = jul, year = {2018}, pages = {14--27}, file = {Brand et al. - 2018 - HeFDI – Die landesweite Initiative zum Aufbau von .pdf:C\:\\Users\\carst\\Zotero\\storage\\SNB8BEU8\\Brand et al. - 2018 - HeFDI – Die landesweite Initiative zum Aufbau von .pdf:application/pdf}, } @article{brenger_wo_2017, title = {"{Wo} {Sind} {Deine} {Forschungsdaten} {In} 10-{Jahren}⁈" {Awareness} {Für} {Forschungsdatenspeicherung}}, copyright = {Creative Commons Attribution 4.0, Open Access}, shorttitle = {"{Wo} {Sind} {Deine} {Forschungsdaten} {In} 10-{Jahren}?}, url = {https://zenodo.org/record/1000538}, doi = {10.5281/ZENODO.1000538}, abstract = {Die sieben Motive wurden für Awarenessmaßnahmen für Forschungsdatenmanagement und nachhaltige Forschungsdatenspeicherung vom BMBF-Projekt UNEKE (www.uneke.de) der Universität Duisburg-Essen und RWTH Aachen entwickelt und sind zur Nachnutzung gedacht.}, language = {de}, author = {Brenger, Bela and Baurmann, Beate and López, Ania and Rehwald, Stephanie and Wilms, Konstantin}, month = oct, year = {2017}, file = 
{Postkarte_UNEKE_7.pdf:C\:\\Users\\carst\\Zotero\\storage\\M8EHPRUY\\Postkarte_UNEKE_7.pdf:application/pdf;Postkarte_UNEKE_1.pdf:C\:\\Users\\carst\\Zotero\\storage\\MD48I6QQ\\Postkarte_UNEKE_1.pdf:application/pdf;Postkarte_UNEKE_3.pdf:C\:\\Users\\carst\\Zotero\\storage\\TFLDTP2P\\Postkarte_UNEKE_3.pdf:application/pdf;Postkarte_UNEKE_6.pdf:C\:\\Users\\carst\\Zotero\\storage\\7K559Q3H\\Postkarte_UNEKE_6.pdf:application/pdf;Postkarte_UNEKE_5.pdf:C\:\\Users\\carst\\Zotero\\storage\\PMUNHZ3W\\Postkarte_UNEKE_5.pdf:application/pdf;Postkarte_UNEKE_4.pdf:C\:\\Users\\carst\\Zotero\\storage\\MQCBKZIT\\Postkarte_UNEKE_4.pdf:application/pdf;Postkarte_UNEKE_2.pdf:C\:\\Users\\carst\\Zotero\\storage\\KFPLM25I\\Postkarte_UNEKE_2.pdf:application/pdf}, } @book{dolzycka_forschungsdatenmanagement_2018, title = {Forschungsdatenmanagement}, url = {https://zenodo.org/record/1441115#.X2eEmYvgq-4}, abstract = {Informationsmaterial zum Thema Forschungsdatenmanagement.}, author = {Dolzycka, Dominika and Biernacka, Katarzyna and Buchholz, Petra and Cortez, Katrin}, month = oct, year = {2018}, doi = {10.5281/zenodo.1441115}, file = {Dolzycka et al. - 2018 - Forschungsdatenmanagement.pdf:C\:\\Users\\carst\\Zotero\\storage\\LJ2E5LMY\\Dolzycka et al. - 2018 - Forschungsdatenmanagement.pdf:application/pdf}, } @book{dolzycka_train--trainer_2019, title = {Train-the-{Trainer} {Konzept} zum {Thema} {Forschungsdatenmanagement}}, isbn = {10.5281/zenodo.2581292}, url = {https://zenodo.org/record/2581292#.X2eE54vgq-4}, abstract = {Im Rahmen des BMBF-Projekts FDMentor wurde ein deutschsprachiges Train-the-Trainer Programm zum Thema Forschungsdatenmanagament (FDM) erstellt und in einer Reihe von Workshops pilotiert. Die Anmerkungen und Hinweise der Teilnehmenden der beiden Pilotphasen sowie das Feedback der FDM-Community wurden im letzten Jahr sukzessive eingearbeitet. 
Die nun vorliegende zweite Version des Train-the-Trainer-Konzepts bietet ein überarbeitetes Skript mit den Inhalten der Lehreinheiten, detaillierte Lehrdrehbücher, Arbeitsmaterialien, Vortragsfolien sowie zahlreiche Arbeitsblätter und Vorlagen, die das Lehren unterstützen sollen. Die behandelten Themen umfassen sowohl die Aspekte des Forschungsdatenmanagements, wie beispielsweise Datenmanagementpläne und die Publikation von Forschungsdaten, als auch didaktische Einheiten zu Lernkonzepten, Workshopgestaltung und eine Reihe von didaktischen Methoden.}, publisher = {Zenodo}, author = {Dolzycka, Dominika and Biernacka, Katarzyna and Helbig, Kerstin and Buchholz, Petra}, month = mar, year = {2019}, doi = {10.5281/zenodo.2581292}, file = {FDMentor_TtT-Konzept_Forschungsdatenmanagement_V2.pdf:C\:\\Users\\carst\\Zotero\\storage\\9VQBMZQ3\\FDMentor_TtT-Konzept_Forschungsdatenmanagement_V2.pdf:application/pdf;_Arbeitsmaterialien_FDMentor.zip:C\:\\Users\\carst\\Zotero\\storage\\57EM5HPR\\_Arbeitsmaterialien_FDMentor.zip:application/x-zip-compressed}, } @article{higman_creating_2017, title = {Creating a {Community} of {Data} {Champions}}, volume = {12}, copyright = {Creative Commons Attribution 4.0 International License}, issn = {1746-8256}, url = {http://www.ijdc.net/article/view/562}, doi = {10.2218/ijdc.v12i2.562}, abstract = {Research Data Management (RDM) presents an unusual challenge for service providers in Higher Education. There is increased awareness of the need for training in this area but the nature of the discipline-specific practices involved make it difficult to provide training across a multi-disciplinary organisation. Whilst most UK universities now have a research data team of some description, they are often small and rarely have the resources necessary to provide targeted training to the different disciplines and research career stages that they are increasingly expected to support. 
This practice paper describes the approach taken at the University of Cambridge to address this problem by creating a community of Data Champions. This collaborative initiative, working with researchers to provide training and advocacy for good RDM practice, allows for more discipline-specific training to be given, researchers to be credited for their expertise and creates an opportunity for those interested in RDM to exchange knowledge with others. The ‘community of practice’ model has been used in many sectors, including Higher Education, to facilitate collaboration across organisational units and this initiative will adopt some of the same principles to improve communication across a decentralised institution. The Data Champions initiative at Cambridge was launched in September 2016 and this paper reports on the early months, plans for building the community in the future and the possible risks associated with this approach to providing RDM services.}, language = {en}, number = {2}, journal = {International Journal of Digital Curation}, author = {Higman, Rosie and Teperek, Marta and Kingsley, Danny}, month = dec, year = {2017}, pages = {96--106}, file = {Higman et al. - 2017 - Creating a Community of Data Champions.pdf:C\:\\Users\\carst\\Zotero\\storage\\MIK5EVWC\\Higman et al. - 2017 - Creating a Community of Data Champions.pdf:application/pdf}, } @article{dierkes_gottingen_2018, title = {Die {Göttingen} {eResearch} {Alliance}. {Outreach} und {Schulungen} am {Göttingen} {Campus}}, copyright = {Copyright (c) 2018 Jens Dierkes}, url = {https://bausteine-fdm.de/article/view/7824}, doi = {10.17192/bfdm.2018.1.7824}, language = {de}, number = {1}, journal = {Bausteine Forschungsdatenmanagement}, author = {Dierkes, Jens}, month = oct, year = {2018}, pages = {7--10}, file = {Dierkes - 2018 - Die Göttingen eResearch Alliance. Outreach und Sch.pdf:C\:\\Users\\carst\\Zotero\\storage\\EYCL8INI\\Dierkes - 2018 - Die Göttingen eResearch Alliance. 
Outreach und Sch.pdf:application/pdf}, } @article{strauch_forschungsdatenmanagement_2019, title = {Forschungsdatenmanagement an der {Stiftung} {Universität} {Hildesheim}: {Praktische} {Unterstützung} für {Forschende} und {Studierende} durch die {Universitätsbibliothek}}, volume = {70}, issn = {1619-4292, 1434-4653}, shorttitle = {Forschungsdatenmanagement an der {Stiftung} {Universität} {Hildesheim}}, doi = {10.1515/iwp-2019-2052}, abstract = {Der Artikel Forschungsdatenmanagement an der Stiftung Universität Hildesheim wurde am 04.11.2019 in der Zeitschrift Information - Wissenschaft \& Praxis (Band 70, Heft 5-6) veröffentlicht.}, language = {de}, number = {5-6}, journal = {Information - Wissenschaft \& Praxis}, author = {Strauch, Annette}, month = nov, year = {2019}, pages = {259--263}, file = {Strauch - 2019 - Forschungsdatenmanagement an der Stiftung Universi.pdf:C\:\\Users\\carst\\Zotero\\storage\\QGUV88NW\\Strauch - 2019 - Forschungsdatenmanagement an der Stiftung Universi.pdf:application/pdf}, } @book{earth_science_information_partners_dmt_2020, title = {{DMT} {Clearinghouse}}, url = {https://dmtclearinghouse.esipfed.org/}, language = {en}, author = {{Earth Science Information Partners}}, year = {2020}, } @article{drefs_faire_2018, title = {{FAIRe} {Forschung}: {Wie} {Wissenschaftliche} {Bibliotheken} den {Herausforderungen} von {Open} {Science} begegnen}, volume = {70}, url = {https://opus4.kobv.de/opus4-bib-info/frontdoor/index/index/searchtype/authorsearch/author/Klaus+Tochtermann/start/3/rows/20/author_facetfq/Tochtermann%2C+Klaus/docId/15869}, doi = {urn:nbn:de:0290-opus4-158696}, language = {de}, number = {11}, urldate = {2020-09-19}, journal = {BuB - Forum Bibliothek und Information}, author = {Drefs, Ines and Linne, Monika and Tochtermann, Klaus}, year = {2018}, pages = {636--639}, file = {Drefs et al. - 2018 - FAIRe Forschung Wie Wissenschaftliche Bibliothek.pdf:C\:\\Users\\carst\\Zotero\\storage\\U9HF3F22\\Drefs et al. 
- 2018 - FAIRe Forschung Wie Wissenschaftliche Bibliothek.pdf:application/pdf}, } @incollection{vancauwenberghe_governance_2018, address = {The Hague}, series = {Information {Technology} and {Law} {Series}}, title = {Governance of {Open} {Data} {Initiatives}}, isbn = {978-94-6265-261-3}, url = {https://doi.org/10.1007/978-94-6265-261-3_5}, abstract = {The effective development and implementation of open data initiatives requires governance in order to avoid gaps, duplications, contradictions and missed opportunities. Appropriate governance instruments should be established to coordinate the activities and contributions of different stakeholders. This chapter reviews the governance of open data initiatives worldwide, using a governance instruments approach as introduced by public administration researchers to analyse coordination and governance in the public sector. Six sets of governance instruments in the governance of open data initiatives are identified: collective decision-making structures, strategic management, allocation of tasks and responsibilities, creation of markets, interorganizational culture and knowledge management, and regulation and formalization of open data initiatives. The chapter shows how each of these six sets of instruments have been applied in various countries in the governance of nation-wide open data initiatives.}, language = {en}, booktitle = {Open {Data} {Exposed}}, publisher = {T.M.C. 
Asser Press}, author = {Vancauwenberghe, Glenn and Crompvoets, Joep}, editor = {van Loenen, Bastiaan and Vancauwenberghe, Glenn and Crompvoets, Joep}, year = {2018}, doi = {10.1007/978-94-6265-261-3_5}, pages = {79--100}, } @article{bertelmann_einstieg_2014, title = {Einstieg ins {Forschungsdatenmanagement} in den {Geowissenschaften}}, url = {https://gfzpublic.gfz-potsdam.de/pubman/item/item_749901}, doi = {10.2312/LIS.14.01}, abstract = {Nur wenn eine Kultur der Nachnutzung und des freien Teilens von Wissen entsteht, werden Wissenschaftler auch die eigenen Forschungsdaten für die Langzeitverfügbarkeit aufbereiten und freigeben. Zu diesem Zweck ist diese Handreichung als Einstieg in das Thema Forschungsdatenmanagement entstanden.}, language = {de}, author = {Bertelmann, Roland and Gebauer, Petra and Hasler, Tim and Kirchner, Ingo and Peters-Kottig, Wolfgang and Razum, Matthias and Recker, Astrid and Ulbricht, D. and Van Gasselt, Stephan}, year = {2014}, file = {Bertelmann et al. - 2014 - Einstieg ins Forschungsdatenmanagement in den Geow.pdf:C\:\\Users\\carst\\Zotero\\storage\\NCTLBTWD\\Bertelmann et al. - 2014 - Einstieg ins Forschungsdatenmanagement in den Geow.pdf:application/pdf}, } @article{becker_lernmodul_2019, title = {Lernmodul {Forschungsdatenmanagement} auf einen {Blick} – eine {Online}-{Einführung}}, copyright = {Creative Commons Attribution Share Alike 4.0 International, Open Access}, url = {https://zenodo.org/record/3381955}, doi = {10.5281/ZENODO.3381955}, abstract = {Hier wird eine Importdatei für ein Lernmodul zum Thema Forschungsdatenmanagement zur Nachnutzung zur Verfügung gestellt. Inhalte: Einführung FDM; FDM-Zyklus; Forschungsdaten-Policies; Gute wissenschaftliche Praxis; Datenmanagementpläne; Metadaten und Metadatenstandards; FAIR-Prinzipien; Datenqualität; Datenorganisation; Datenspeicherung, Backup und Datenarchivierung; Datenschutz; Urheberrecht; Promotionsrecht. 
Die einzelnen Kapitel sind jeweils nach einem gemeinsamen Muster aufgebaut: Einführung - Inhalte-Fragen-Nachweise und Weiterführendes - Handout. Technische Voraussetzungen: Erstellt wurde das Lernmodul in ILIAS v5.2.25. Zur Implementierung des Lernmoduls in unterschiedliche Lehrplattformen liegt eine Scormdatei 2004 4th Edition (1567160847\_\_2007\_\_sahs\_29057.zip) bzw. eine Scormdatei 1.2 (1573726668\_\_2007\_\_sahs\_29057.zip) vor. Bitte diese in der Autorenansicht einspielen, um die Inhalte bearbeiten zu können. Die Präsentationsansicht ermöglicht die Darstellung der Inhalte, aber keine Bearbeitung. Da immer wieder Nachfragen kamen wegen unterschiedlicher Fehler, die beim Import der Dateien angezeigt wurden, sind in der Version 1.2. zwei neue Zip-Daten hochgeladen worden: {\textbackslash}textlessstrong{\textbackslash}textgreaterScormdatei 2004 4th Edition 1580735052\_\_2007\_\_sahs\_29057.zip für Ilias Importe und{\textbackslash}textless/strong{\textbackslash}textgreater {\textbackslash}textlessstrong{\textbackslash}textgreaterScormdatei 1.2 1580735646\_\_2007\_\_sahs\_29057.zip für Moodle Importe{\textbackslash}textless/strong{\textbackslash}textgreater Hintergrund: Als hessisches Verbundprojekt haben wir von 2017 bis 2019 im Projekt FOKUS - Forschungsdatenkurse für Studierende und Graduierte in enger Zusammenarbeit mit Wissenschaftlerinnen und Wissenschaftlern verschiedene Inhalte und Aspekte des Forschungsdatenmanagements in Lehrveranstaltungen behandelt. Um ein möglichst breites Feld an Fachdisziplinen zu erreichen, wurden dabei an den beteiligten Hochschulen unterschiedliche Schwerpunkte gesetzt. 
Zuständig waren folgende Hochschulen: die Philipps-Universität Marburg (Projektleitung) für die Germanistik, Erziehungswissenschaften und Wirtschaftswissenschaften, die Technische Universität Darmstadt für die Chemie und Informatik, die Goethe-Universität Frankfurt am Main für die Filmwissenschaft und die Graduiertenausbildung, die Justus-Liebig-Universität Gießen für die Veterinärmedizin und die Umweltwissenschaften, die Hochschule Fulda für die Graduiertenausbildung. Es hat sich schnell gezeigt, dass gerade bei der Arbeit mit Studierenden und der ersten Heranführung an das Thema eine generische Zugangsweise erforderlich ist. Unsere Erfahrungen haben wir in das vorliegende Web Based Training einfließen lassen. Wir verstehen dieses explizit als erste, keinesfalls vollständige Version eines Angebots, sich mit Forschungsdatenmanagement zu beschäftigen. Daher ermuntern wir dazu, das WBT in allen möglichen Formen nachzunutzen, zu verändern, zu erweitern und an die Bedingungen vor Ort anzupassen. Zudem verstehen wir das WBT nicht nur als Angebot, bei Bedarf individuell diesen Kurs zu absolvieren, sondern insbesondere auch als Grundlage für den Einsatz in der (universitären) Lehre. Einzelne Kapitel oder auch das gesamte WBT können beispielsweise in einem Inverted Classroom Szenario vor einer Lehrveranstaltung durchgearbeitet werden, um es dann in der Präsenzzeit inhaltlich und fachlich zu vertiefen. 
Vorstellbar ist auch, die hier angebotene Einführung als Grundlage für themenspezifische Ausarbeitungen zu nutzen (beispielsweise in Form von Referaten oder online gestützt in Form von Produkten wie einem Datenmanagementplan).}, language = {de}, author = {Becker, Henrike and Einwächter, Sophie and Klein, Benedikt and Krähwinkel, Esther and Mehl, Sebastian and Müller, Janine and Ostsieker, Frederik and Tauchmann, Christopher and Werthmüller, Julia}, month = nov, year = {2019}, } @article{becker_forschungsdatenkurse_2019, title = {Forschungsdatenkurse für {Studierende} und {Graduierte}: {Lehr}- und {Schulungsmaterialien} zur {Nachnutzung}}, copyright = {Creative Commons Attribution Share Alike 4.0 International, Open Access}, shorttitle = {Forschungsdatenkurse für {Studierende} und {Graduierte}}, url = {https://zenodo.org/record/3381974}, doi = {10.5281/ZENODO.3381974}, abstract = {Hier werden Ablaufpläne, didaktische Überlegungen, gegebenenfalls Präsentationen und weitere eingesetzte Materialien zu Forschungsdatenmanagement-Veranstaltungen im SoSe 2018 und im WS 2018/19 zur Nachnutzung zur Verfügung gestellt. Diese Materialien entstanden im Rahmen des hessischen Verbundprojekts FOKUS – Forschungsdatenkurse für Studierende und Graduierte für folgende Fachdisziplinen: Chemie, Ernährungswissenschaften, Erziehungswissenschaften, Filmwissenschaft, Germanistik, Informatik, Umweltwissenschaften, Veterinärmedizin und Wirtschaftswissenschaften. Für diese Fächer waren BA- und MA-Studierende die Zielgruppe, für Graduierte wurde jeweils eine allgemeine Einführung, eine mit dem Fokus auf Biodiversitätsforschung und eine mit der Zielgruppe Naturwissenschaftlerinnen und Naturwissenschaftler konzipiert und umgesetzt. Was war das Ziel von FOKUS? Im Projekt FOKUS kooperierten fünf hessische Hochschulen, um fachbezogene, modulare Schulungseinheiten im Bereich des Forschungsdatenmanagements zu entwickeln. 
Dazu wurden in engem Kontakt mit Fachwissenschaftlerinnen und Fachwissenschaftlern fachspezifische und nachnutzbare Lehrinhalte erarbeitet und Pilot-Lehrveranstaltungen durchgeführt und evaluiert. Gleichzeitig sind die beteiligten Hochschulen arbeitsteilig vorgegangen und haben die jeweils aufgeführten fachlichen Schwerpunkte bearbeitet: Erziehungswissenschaften, Germanistik und Wirtschaftswissenschaften (Philipps-Universität Marburg) Veterinärmedizin, Umweltwissenschaften (Justus-Liebig-Universität Gießen) Chemie, Informatik (Technische Universität Darmstadt) Filmwissenschaften, Graduierte über GRADE - Goethe Research Academy for Early Career Researchers (Goethe-Universität Frankfurt) Graduierte (Hochschule Fulda) Wie war das Vorgehen? Im Ergebnis sind die meisten Veranstaltungen integrativ umgesetzt worden, indem Fragen des Forschungsdatenmanagements in bestehende Lehrveranstaltungen eingebracht wurden. Dazu haben die Projektmitarbeiterinnen und -mitarbeiter jeweils einzelne Sitzungen übernommen. Für Graduierte wurden an den Standorten Fulda und Frankfurt eigenständige Workshops angeboten. An der Universität Marburg konnten Studierende der Wirtschaftswissenschaften ebenfalls eine eigenständige Lehrveranstaltung mit drei Präsenzterminen und 3 ECTS-Punkten besuchen. Entsprechend vielfältig und unterschiedlich sind die Konzepte und Umsetzungen der Veranstaltungen. Sie unterscheiden sich nach Zielgruppe (BA- und MA-Studierende, Graduierte, Semester), Inhalten (z.B. 
eher einführend, praktisch, fachlich oder allgemeiner ausgerichtet) und gewähltem Vorgehen (Präsenzveranstaltungen, Blended Learning, unterschiedliche Aktivierungsgrade der Teilnehmerinnen und Teilnehmer).}, language = {de}, author = {Becker, Henrike and Dorn, Christian and Einwächter, Sophie and Klein, Benedikt and Krähwinkel, Esther and Mehl, Sebastian and Müller, Janine and Ostsieker, Frederik and Tauchmann, Christopher and Werthmüller, Julia}, month = aug, year = {2019}, } @article{otto_designing_2019, title = {Designing a multi-sided data platform: findings from the {International} {Data} {Spaces} case}, volume = {29}, issn = {1422-8890}, shorttitle = {Designing a multi-sided data platform}, url = {https://doi.org/10.1007/s12525-019-00362-x}, doi = {10.1007/s12525-019-00362-x}, abstract = {The paper presents the findings from a 3-year single-case study conducted in connection with the International Data Spaces (IDS) initiative. The IDS represents a multi-sided platform (MSP) for secure and trusted data exchange, which is governed by an institutionalized alliance of different stakeholder organizations. The paper delivers insights gained during the early stages of the platform’s lifecycle (i.e. the platform design process). More specifically, it provides answers to three research questions, namely how alliance-driven MSPs come into existence and evolve, how different stakeholder groups use certain governance mechanisms during the platform design process, and how this process is influenced by regulatory instruments. By contrasting the case of an alliance-driven MSP with the more common approach of the keystone-driven MSP, the results of the case study suggest that different evolutionary paths can be pursued during the early stages of an MSP’s lifecycle. Furthermore, the IDS initiative considers trust and data sovereignty more relevant regulatory instruments compared to pricing, for example. 
Finally, the study advances the body of scientific knowledge with regard to data being a boundary resource on MSPs.}, language = {en}, number = {4}, journal = {Electronic Markets}, author = {Otto, Boris and Jarke, Matthias}, month = dec, year = {2019}, pages = {561--580}, file = {Otto und Jarke - 2019 - Designing a multi-sided data platform findings fr.pdf:C\:\\Users\\carst\\Zotero\\storage\\D8RFGNHN\\Otto und Jarke - 2019 - Designing a multi-sided data platform findings fr.pdf:application/pdf}, } @book{energie_gaia-x_nodate, title = {{GAIA}-{X}}, url = {https://www.bmwi.de/Redaktion/DE/Dossier/gaia-x.html}, abstract = {Eine vernetzte Dateninfrastruktur für ein europäisches digitales Ökosystem. Erfahren Sie mehr über das Projekt GAIA-X.}, language = {de}, author = {Energie, Bundesministerium für Wirtschaft und}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\XZRP82CL\\gaia-x.html:text/html}, } @book{von_der_heyde_open_2019, title = {Open {Research} {Data}: {Landscape} and cost analysis of data repositories currently used by the {Swiss} research community, and requirements for the future}, copyright = {Creative Commons Attribution 4.0 International, Open Access}, shorttitle = {Open {Research} {Data}}, url = {https://zenodo.org/record/2643460}, abstract = {The study was jointly commissioned by the SNSF and swissuniversities in 2018. In light of the SNSF’s decision to make sharing data from funded projects mandatory in 2016, this study examined the sharing and reuse behaviour of researchers in the Swiss community in 2018. Since it was to be conducted across all disciplines throughout Switzerland, the range of the questions was very broadly designed and questions from earlier international studies were used for comparability. Additionally, a second questionnaire addressed international repositories in order to learn about their perspectives and plans for future development. 
The results were analyzed using statistical methods and can be regarded as representative.{\textbackslash}textlessbr{\textbackslash}textgreater Generally, the motivation and concerns for sharing data and reuse in the Swiss community are not different from other scientific communities. Differences in sharing and reuse behaviour are found according to the disciplines of the researchers, which were assessed using the bepress taxonomy. Different methods used by the researchers did not result in different sharing behaviour, but in where the data was shared. While the sharing is done equally in general repositories and smaller disciplinary repositories, of which a great number exist, the researchers prefer to use disciplinary repositories if they want to reuse data.{\textbackslash}textlessbr{\textbackslash}textgreater Overall, about a third of the Swiss research community share data in repositories. The main reason for not sharing was researchers’ plans to publish their results first. Also, many participants claimed to have a different concept of data; while we tried to define terms carefully, apparently there is a need for more discipline-specific information and discussion on the topic. Future requirements for services from the Swiss community are not yet met by the international repositories' plans. Several recommendations on the future SNSF governance on data sharing are proposed to conclude the study. 
{\textbackslash}textlessstrong{\textbackslash}textgreaterContact{\textbackslash}textless/strong{\textbackslash}textgreater Swiss National Science Foundation (SNSF) Open Research Data Group E-mail: ord@snf.ch swissuniversities Program "Scientific Information" Gabi Schneider E-Mail: isci@swissuniversities.ch}, language = {en}, publisher = {Zenodo}, author = {von der Heyde, Markus}, month = may, year = {2019}, doi = {10.5281/ZENODO.2643460}, file = {von der Heyde - 2019 - Open Research Data Landscape and cost analysis of.pdf:C\:\\Users\\carst\\Zotero\\storage\\KGYZNTJ9\\von der Heyde - 2019 - Open Research Data Landscape and cost analysis of.pdf:application/pdf}, } @article{kindling_forschungsdatenmanagement_2013, title = {Forschungsdatenmanagement an {Hochschulen}}, volume = {23}, url = {https://edoc.hu-berlin.de/handle/18452/9693}, doi = {10.18452/9041}, abstract = {Wie nahezu jede wissenschaftliche Einrichtung steht auch die Humboldt-Universität zu Berlin heute vor der Herausforderung mit den zunehmend in digitaler Form vorliegenden Forschungsdaten umzugehen. Basierend auf den Ergebnissen einer im Jahr 2013 durchgeführten Umfrage zum Umgang mit digitalen Forschungsdaten werden die Ausgangssituation charakterisiert, der Status Quo eingeordnet und strategische Überlegungen für das Forschungsdatenmanagement vorgestellt.}, language = {de}, journal = {LIBREAS. Library Ideas}, author = {Kindling, Maxi and Simukovic, Elena and Schirmbacher, Peter}, month = oct, year = {2013}, file = {Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\ATQPRSLM\\Kindling et al. 
- 2013 - Forschungsdatenmanagement an Hochschulen.pdf:application/pdf}, } @article{backer_base_2017, title = {{BASE} ({Bielefeld} {Academic} {Search} {Engine})}, volume = {17}, issn = {1610-1995}, url = {https://doi.org/10.1007/s13222-017-0246-9}, doi = {10.1007/s13222-017-0246-9}, abstract = {Wissenschaftliche Publikationen und ihre beschreibenden Metadaten stehen in stetig zunehmender Anzahl über Plattformen für elektronische Zeitschriften oder digitale Repositorien frei über das Internet zur Verfügung und lassen sich nachnutzen. Die Metadaten können über OAI-PMH (Open Archives Initiative Protocol for Metadata Harvesting) abgerufen werden (Harvesting). Durch Weiterverarbeitung und Indexierung der Metadaten lassen sich Services wie die „Bielefeld Academic Search Engine“ (BASE) entwickeln.}, language = {de}, number = {1}, journal = {Datenbank-Spektrum}, author = {Bäcker, Amelie and Pietsch, Christian and Summann, Friedrich and Wolf, Sebastian}, month = mar, year = {2017}, pages = {5--13}, } @article{lange_datenmarktplatze_2018, title = {Datenmarktplätze in verschiedenen {Forschungsdisziplinen}: {Eine} Übersicht}, volume = {41}, issn = {1432-122X}, shorttitle = {Datenmarktplätze in verschiedenen {Forschungsdisziplinen}}, url = {https://doi.org/10.1007/s00287-017-1044-3}, doi = {10.1007/s00287-017-1044-3}, abstract = {Der Handel mit Daten etabliert sich als immer wichtigerer Wirtschaftsbereich, in dem Datenmarktplätzen als Handelsplattformen eine Schlüsselrolle zukommt. Dementsprechend nimmt auch die Forschung zu Datenmarktplätzen zu und es werden neue Forschungsgebiete und -richtungen identifiziert, welche von verschiedenen Forschungsteams in unterschiedlichen Disziplinen bearbeitet werden. Dieser Artikel gibt zum ersten Mal einen Überblick über die aktuelle Forschung im Bereich Datenmarktplätze in unterschiedlichen Disziplinen. 
Es wird analysiert, welche Themenfelder erforscht werden und welche Forschungsgebiete weitgehend unberührt sind; außerdem werden Forschungsarbeiten aus ähnlichen Bereichen gegenübergestellt und der Gesamtzusammenhang aufgezeigt.}, language = {de}, number = {3}, journal = {Informatik-Spektrum}, author = {Lange, Juliane and Stahl, Florian and Vossen, Gottfried}, month = jun, year = {2018}, pages = {170--180}, file = {Lange et al. - 2018 - Datenmarktplätze in verschiedenen Forschungsdiszip.pdf:C\:\\Users\\carst\\Zotero\\storage\\VPRPXGR5\\Lange et al. - 2018 - Datenmarktplätze in verschiedenen Forschungsdiszip.pdf:application/pdf}, } @book{gassmann_st_2018, address = {München}, title = {Der {St}. {Galler} {Business} {Model} {Navigator}: 55+ {Karten} zur {Entwicklung} von {Geschäftsmodellen}}, isbn = {978-3-446-45555-9 978-3-446-45947-2 3-446-45555-8}, language = {de}, publisher = {Hanser}, author = {Gassmann, Oliver and Frankenberger, Karolin and Csik, Michaela}, year = {2018}, } @article{banerjee_blockchain_2019, title = {Blockchain {Enabled} {Data} {Marketplace} – {Design} and {Challenges}}, url = {http://arxiv.org/abs/1811.11462}, abstract = {Data is of unprecedented importance today. The most valuable companies of today treat data as a commodity, which they trade and earn revenues. To facilitate such trading, data marketplaces have emerged. Present data marketplaces are inadequate as they fail to satisfy all the desirable properties - fairness, efficiency, security, privacy and adherence to regulations. In this article, we propose a blockchain enabled data marketplace solution that fulfills all required properties. 
We outline the design, show how to design such a system and discuss the challenges in building a complete data marketplace.}, language = {en}, journal = {arXiv:1811.11462 [cs]}, author = {Banerjee, Prabal and Ruj, Sushmita}, month = sep, year = {2019}, pages = {7}, file = {Banerjee und Rujy - Blockchain Enabled Data Marketplace – Design and C.pdf:C\:\\Users\\carst\\Zotero\\storage\\5VPTGAZR\\Banerjee und Rujy - Blockchain Enabled Data Marketplace – Design and C.pdf:application/pdf}, } @book{noauthor_forschungsdateninfo_nodate, title = {forschungsdaten.info: {Forschung} und {Daten} managen}, url = {https://www.forschungsdaten.info/}, abstract = {Forschungsdaten.info ist das deutschsprachige Informationsportal zu Forschungsdatenmanagement (FDM). Mit praxisnahen Artikeln führt die Seite ins Forschungsdatenmanagement ein. Die Beiträge umfassen dabei die Schritte von der Antrags­planung eines Forschungs­projekts, die Arbeit mit Forschungsdaten im Forschungsalltag, die Umsetzung des Antrags bis hin zur Publikation und der Nachnutzung von Daten. Auch Rechte und Pflichten im Umgang mit Forschungsdaten werden behandelt. Zusätzlich liefern Best-Practice-Beispiele und Informationsmaterial aus den einzelnen Wissenschaftsbereichen Anregungen, um Daten besser (nach-)nutzbar zu machen. Zudem stellen sich auf forschungsdaten.info FDM-Initiativen und -Projekte aus dem deutschsprachigen Raum vor. 
Redaktionell wird die Plattform von einem überregionalen Team von FDM-Spezialistinnen und -Spezialisten betreut.}, language = {de}, note = {Publication Title: forschungsdaten.info}, file = {Forschungsdaten und Forschungsdatenmanagement:C\:\\Users\\carst\\Zotero\\storage\\9VPIF459\\www.forschungsdaten.info.html:text/html}, } @article{ivanovic_fairness_2019, title = {{FAIRness} of {Repositories} \& {Their} {Data}: {A} {Report} from {LIBER}'s {Research} {Data} {Management} {Working} {Group}}, copyright = {Creative Commons Attribution 4.0 International, Open Access}, shorttitle = {{FAIRness} of {Repositories} \& {Their} {Data}}, url = {https://zenodo.org/record/3251593}, doi = {10.5281/ZENODO.3251593}, author = {Ivanović, Dragan and Schmidt, Birgit and Grim, Rob and Dunning, Alastair}, month = jun, year = {2019}, file = {Ivanović et al. - 2019 - FAIRness of Repositories & Their Data A Report fr.pdf:C\:\\Users\\carst\\Zotero\\storage\\2ZAT44AY\\Ivanović et al. - 2019 - FAIRness of Repositories & Their Data A Report fr.pdf:application/pdf}, } @techreport{deutsche_forschungsgemeinschaft_leitlinien_2015, type = {Leitlinie}, title = {Leitlinien zum {Umgang} mit {Forschungsdaten}}, url = {https://www.dfg.de/download/pdf/foerderung/antragstellung/forschungsdaten/richtlinien_forschungsdaten.pdf}, abstract = {Forschungsdaten sind eine wesentliche Grundlage für das wissenschaftliche Arbeiten. Die Vielfalt solcher Daten entspricht der Vielfalt unterschiedlicher wissenschaftlicher Disziplinen, Erkenntnisinteressen und Forschungsverfahren. Zu Forschungsdaten zählen u.a. Messdaten, Laborwerte, audiovisuelle Informationen, Texte, Surveydaten, Objekte aus Sammlungen oder Proben, die in der wissenschaftlichen Arbeit entstehen, entwickelt oder ausgewertet werden. Methodische Testverfahren, wie Fragebögen, Software und Simulationen können ebenfalls zentrale Ergebnisse wissenschaftlicher Forschung darstellen und sollten daher ebenfalls unter den Begriff Forschungsdaten gefasst werden. 
Die langfristige Sicherung und Bereitstellung der Forschungsdaten leistet einen Beitrag zur Nachvollziehbarkeit und Qualität der wissenschaftlichen Arbeit und eröffnet wichtige Anschlussmöglichkeiten für die weitere Forschung. Die Allianz der Wissenschaftsorganisationen hat sich bereits mit den im Jahr 2010 verabschiedeten „Grundsätze[n] zum Umgang mit Forschungsdaten“ für die langfristige Sicherung von, den grundsätzlich offenen Zugang zu und die Berücksichtigung fachdisziplinärer Regularien im Umgang mit Forschungsdaten ausgesprochen.1 Die „Leitlinien zum Umgang mit Forschungsdaten“ konkretisieren den mit den „Grundsätzen“ vorgegebenen Rahmen im Kontext der DFG-Förderregularien.}, language = {de}, institution = {Deutsche Forschungsgemeinschaft}, author = {{Deutsche Forschungsgemeinschaft}}, month = sep, year = {2015}, pages = {2}, file = {Deutsche Forschungsgemeinschaft - 2015 - Leitlinien zum Umgang mit Forschungsdaten.pdf:C\:\\Users\\carst\\Zotero\\storage\\57PFCUUE\\Deutsche Forschungsgemeinschaft - 2015 - Leitlinien zum Umgang mit Forschungsdaten.pdf:application/pdf}, } @book{bundesministerium_fur_bildung_und_forschung_bmbf_urheberrecht_2019, address = {Berlin}, edition = {Stand August 2019}, title = {Urheberrecht in der {Wissenschaft}: ein Überblick für {Forschung}, {Lehre} und {Bibliotheken}}, shorttitle = {Urheberrecht in der {Wissenschaft}}, publisher = {Bundesministerium für Bildung und Forschung (BMBF), Referat Ethik und Recht, Rahmenbedingungen der Digitalisierung}, editor = {Bundesministerium für Bildung und Forschung (BMBF), Referat Ethik und Recht, Rahmenbedingungen der Digitalisierung}, year = {2019}, file = {Bundesministerium für Bildung und Forschung (BMBF), Referat Ethik und Recht, Rahmenbedingungen der Digitalisierung - 2019 - Urheberrecht in der Wissenschaft ein Überblick fü.pdf:C\:\\Users\\carst\\Zotero\\storage\\EEKE3QAU\\Bundesministerium für Bildung und Forschung (BMBF), Referat Ethik und Recht, Rahmenbedingungen der Digitalisierung - 2019 
- Urheberrecht in der Wissenschaft ein Überblick fü.pdf:application/pdf}, } @book{noauthor_forschungsdatenmanagement_nodate, title = {Forschungsdatenmanagement}, url = {https://www.fwf.ac.at/de/forschungsfoerderung/open-access-policy/forschungsdatenmanagement/}, language = {de}, note = {Publication Title: https://www.fwf.ac.at}, file = {Forschungsdatenmanagement:C\:\\Users\\carst\\Zotero\\storage\\75M63JBA\\forschungsdatenmanagement.html:text/html}, } @article{nosek_promoting_2015, title = {Promoting an open research culture}, volume = {348}, issn = {0036-8075, 1095-9203}, url = {https://science.sciencemag.org/content/348/6242/1422}, doi = {10.1126/science.aab2374}, language = {en}, number = {6242}, journal = {Science}, author = {Nosek, B. A. and Alter, G. and Banks, G. C. and Borsboom, D. and Bowman, S. D. and Breckler, S. J. and Buck, S. and Chambers, C. D. and Chin, G. and Christensen, G. and Contestabile, M. and Dafoe, A. and Eich, E. and Freese, J. and Glennerster, R. and Goroff, D. and Green, D. P. and Hesse, B. and Humphreys, M. and Ishiyama, J. and Karlan, D. and Kraut, A. and Lupia, A. and Mabry, P. and Madon, T. and Malhotra, N. and Mayo-Wilson, E. and McNutt, M. and Miguel, E. and Paluck, E. Levy and Simonsohn, U. and Soderberg, C. and Spellman, B. A. and Turitto, J. and VandenBos, G. and Vazire, S. and Wagenmakers, E. J. and Wilson, R. and Yarkoni, T.}, month = jun, year = {2015}, pages = {1422--1425}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\ZAVD28VE\\1422.html:text/html;Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\TCW74LB9\\Nosek et al. 
- 2015 - Promoting an open research culture.pdf:application/pdf}, } @techreport{mafalda_picarra_monitoring_2015, type = {Briefing {Paper}}, title = {Monitoring {Compliance} with {Open} {Access} policies}, copyright = {CC BY}, url = {http://www.pasteur4oa.eu/sites/pasteur4oa/files/resource/Brief_Monitoring%20compliance%20with%20OA%20policies_0.pdf}, language = {en}, institution = {Pasteur4OA}, author = {Picarra, Mafalda}, note = {Author affiliation: Jisc}, year = {2015}, pages = {14}, file = {Mafalda Picarra - 2015 - Monitoring Compliance with Open Access policies.pdf:C\:\\Users\\carst\\Zotero\\storage\\7UA4EUUW\\Mafalda Picarra - 2015 - Monitoring Compliance with Open Access policies.pdf:application/pdf}, } @book{learn_learn_2017, title = {{LEARN} {Toolkit} of {Best} {Practice} for {Research} {Data} {Management}}, doi = {10.14324/000.learn.00}, url = {http://dx.doi.org/10.14324/000.learn.00}, abstract = {Research data is the new currency of the digital age. From sonnets to statistics, and genes to geodata, the amount of material being created and stored is growing exponentially. The LERU Roadmap for Research Data identifies a serious gap in the level of preparation amongst research performing organisations. This gulf is prominent in areas such as policy development, awareness of current issues, skills development, training, costs, community building, governance, disciplinary/legal/terminological and geographical differences. LEARN will help decision and policy makers identify sound solutions. 
Stakeholders can follow this LEARN Toolkit of Best Practice Case Studies, all of which will help organisations to grapple with the data deluge.}, language = {en}, publisher = {Leaders Activating Research Networks (LEARN)}, editor = {{LEARN}}, year = {2017}, file = {LEARN - 2017 - LEARN Toolkit of Best Practice for Research Data M.pdf:C\:\\Users\\carst\\Zotero\\storage\\ZRA3W5L2\\LEARN - 2017 - LEARN Toolkit of Best Practice for Research Data M.pdf:application/pdf}, } @article{jones_developments_2012, title = {Developments in {Research} {Funder} {Data} {Policy}}, volume = {7}, issn = {1746-8256}, url = {http://www.ijdc.net/article/view/209}, doi = {10.2218/ijdc.v7i1.219}, abstract = {This paper reviews developments in funders’ data management and sharing policies, and explores the extent to which they have affected practice. The Digital Curation Centre has been monitoring UK research funders’ data policies since 2008. There have been significant developments in subsequent years, most notably the joint Research Councils UK’s Common Principles on Data Policy and the Engineering and Physical Sciences Research Council’s Policy Framework on Research Data. This paper charts these changes and highlights shifting emphasises in the policies. Institutional data policies and infrastructure are increasingly being developed as a result of these changes. 
While action is clearly being taken, questions remain about whether the changes are affecting practice on the ground.}, language = {en}, number = {1}, journal = {International Journal of Digital Curation}, author = {Jones, Sarah}, year = {2012}, pages = {114--125}, file = {Jones - 2012 - Developments in Research Funder Data Policy.pdf:C\:\\Users\\carst\\Zotero\\storage\\IKLN5KFJ\\Jones - 2012 - Developments in Research Funder Data Policy.pdf:application/pdf}, } @article{lecarpentier_eudat_2013, title = {{EUDAT}: {A} {New} {Cross}-{Disciplinary} {Data} {Infrastructure} for {Science}}, volume = {8}, issn = {1746-8256}, shorttitle = {{EUDAT}}, url = {http://www.ijdc.net/article/view/8.1.279}, doi = {10.2218/ijdc.v8i1.260}, abstract = {The EUDAT project is a pan-European data initiative that started in October 2011. The project brings together a unique consortium of 25 partners – including research communities, national data and high performance computing (HPC) centres, technology providers, and funding agencies – from 13 countries. EUDAT aims to build a sustainable cross-disciplinary and cross-national data infrastructure that provides a set of shared services for accessing and preserving research data.}, number = {1}, journal = {International Journal of Digital Curation}, author = {Lecarpentier, Damien and Wittenburg, Peter and Elbers, Willem and Michelini, Alberto and Kanso, Riam and Coveney, Peter and Baxter, Rob}, month = jun, year = {2013}, pages = {279--287}, file = {Lecarpentier et al. - 2013 - EUDAT A New Cross-Disciplinary Data Infrastructur.pdf:C\:\\Users\\carst\\Zotero\\storage\\JSLUDXNX\\Lecarpentier et al. 
- 2013 - EUDAT A New Cross-Disciplinary Data Infrastructur.pdf:application/pdf}, } @book{european_commission_turning_2018, address = {Luxembourg}, title = {Turning {FAIR} data into reality: final report and action plan from the {European} {Commission} expert group on {FAIR} data.}, isbn = {978-92-79-96546-3}, shorttitle = {Turning {FAIR} data into reality}, url = {https://ec.europa.eu/info/sites/info/files/turning_fair_into_reality_1.pdf}, abstract = {To take advantage of the digital revolution, to accelerate research, to engage the power of machine analysis at scale while ensuring transparency, reproducibility and societal utility, data and other digital objects created by and used for research need to be FAIR. Advancing the global Open Science movement and the development of the European Open Science Cloud is the unambiguous objective for this report. This document is both a report and an action plan for turning FAIR into reality. It offers a survey and analysis of what is needed to implement FAIR and it provides a set of concrete recommendations and actions for stakeholders in Europe and beyond. 
It is our intention that it should provide a framework that will greatly assist the creation of the European Open Science Cloud, and will be applicable to other comparable initiatives globally.}, language = {en}, publisher = {Publications Office of the European Union}, editor = {{European Commission} and {Directorate-General for Research and Innovation}}, year = {2018}, doi = {10.2777/1524}, file = {European Commission und Directorate-General for Research and Innovation - 2018 - Turning FAIR data into reality final report and a.pdf:C\:\\Users\\carst\\Zotero\\storage\\BEZ5W3F7\\European Commission und Directorate-General for Research and Innovation - 2018 - Turning FAIR data into reality final report and a.pdf:application/pdf}, } @article{wittenburg_digital_2019, title = {Digital {Objects} as {Drivers} towards {Convergence} in {Data} {Infrastructures}}, url = {https://b2share.eudat.eu/records/b605d85809ca45679b110719b6c6cb11}, doi = {10.23728/B2SHARE.B605D85809CA45679B110719B6C6CB11}, author = {Wittenburg, Peter and Strawn, George and Mons, Barend and Boninho, Luiz and Schultes, Erik}, year = {2019}, file = {Wittenburg et al. - 2019 - Digital Objects as Drivers towards Convergence in .pdf:C\:\\Users\\carst\\Zotero\\storage\\J5MKQ87C\\Wittenburg et al. - 2019 - Digital Objects as Drivers towards Convergence in .pdf:application/pdf}, } @article{wittenburg_about_2019, title = {About {Building} {Data} {Infrastructures}}, copyright = {open, Public Domain Dedication (CC Zero)}, url = {https://b2share.eudat.eu/records/6b596f01bc224ff284f80a057212e07f}, doi = {10.23728/B2SHARE.6B596F01BC224FF284F80A057212E07F}, abstract = {The great relevance of data for coming to new scientific insights, making progress in tackling the grand challenges and in making commercial profit has widely been commented. Some of the big questions that are currently being discussed are (a) who will own the relevant data and (b) what kind of facilities need to be developed to make data available. 
With respect to the first question there is no doubt that data from publicly funded research should in principle be open for broad usage. This implies an answer to the second question in so far as data infrastructures (DI) should avoid dependencies on commercial services and interests. This is the reason that huge investments are currently being made, in particular in Europe, towards the development of an eco-system of data infrastructures that build on public investments that have previously been made. Building such data infrastructures should respect a balance between three components: scientific interest (S), technology advancement (T), and organizational form (O); the O dimension should follow the high dynamics in the T and S dimensions. In contrast to the US, where we see a reluctance to invest in DI building, the EU and its member states invest large amounts of funds in DI building. However, we can observe that the three dimensions (S, T, O) are not well balanced, a situation which bears high risks. In this paper some approaches in large data infrastructure initiatives such as EOSC (EC) and NFDI (Germany) are analysed and commented on the risks they take by separating the three dimensions (S, T, O) at least temporarely.}, author = {Wittenburg, Peter and Strawn, George}, month = dec, year = {2019}, file = {Wittenburg und Strawn - 2019 - About Building Data Infrastructures.pdf:C\:\\Users\\carst\\Zotero\\storage\\V2B5N66A\\Wittenburg und Strawn - 2019 - About Building Data Infrastructures.pdf:application/pdf}, } @article{wittenburg_common_2018, title = {Common {Patterns} in {Revolutionary} {Infrastructures} and {Data}}, url = {https://b2share.eudat.eu/records/4e8ac36c0dd343da81fd9e83e72805a0}, doi = {10.23728/B2SHARE.4E8AC36C0DD343DA81FD9E83E72805A0}, abstract = {Large Infrastructures follow certain patterns during their evolution. 
The paper looks into three examples (electrification, Internet, World Wide Web), extracts some recurring patterns and finally compares it with the state and potential of the data domain. Summarising it states that we seem to be close to a convergence step, but that we lack the agreement yet on a fairly simple concept all could agree with and that can serve as commodity to build upon.}, author = {Wittenburg, Peter and Strawn, George}, year = {2018}, file = {Wittenburg und Strawn - 2018 - Common Patterns in Revolutionary Infrastructures a.pdf:C\:\\Users\\carst\\Zotero\\storage\\YWS2G84I\\Wittenburg und Strawn - 2018 - Common Patterns in Revolutionary Infrastructures a.pdf:application/pdf}, } @article{strawn_open_2019, title = {Open {Science}, {Business} {Analytics}, and {FAIR} {Digital} {Objects}}, url = {https://b2share.eudat.eu/records/6ceeed13eb6340fcb132bcb5b5e3d69a}, doi = {10.23728/B2SHARE.6CEEED13EB6340FCB132BCB5B5E3D69A}, abstract = {The lack of data interoperability is hindering the emergence of Open Science and making data analytics considerably more expensive than it should be. A new technology, FAIR Digital Objects, is seeking to solve this problem along with several others. FAIR is an acronym for findable, accessible, interoperable, and reusable. Digital Object Architecture is a very general virtual layer to ride on top of any data system, which can solve the interoperability problem for heterogeneous data, much as the Internet solved the interoperability problem for heterogeneous networks. 
A specific project employed these technology will be described.}, author = {Strawn, George O.}, month = feb, year = {2019}, file = {Strawn - 2019 - Open Science, Business Analytics, and FAIR Digital.pdf:C\:\\Users\\carst\\Zotero\\storage\\HATMQCKE\\Strawn - 2019 - Open Science, Business Analytics, and FAIR Digital.pdf:application/pdf}, } @article{schultes_fair_2019, title = {{FAIR} {Principles} and {Digital} {Objects}: {Accelerating} {Convergence} on a {Data} {Infrastructure}}, shorttitle = {{FAIR} {Principles} and {Digital} {Objects}}, url = {https://b2share.eudat.eu/records/166a074bff614a31b05e9df5bfd9809d}, doi = {10.23728/B2SHARE.166A074BFF614A31B05E9DF5BFD9809D}, abstract = {As Moore’s Law and associated technical advances continue to bulldoze their way through society, both exciting possibilities and severe challenges emerge. The upside is the explosive growth of data and compute resources that promise revolutionary modes of discovery and innovation not only within traditional knowledge disciplines, but especially between them. The challenge, however, is to build the large-scale, widely accessible, persistent and automated infrastructures that will be necessary for navigating and managing the unprecedented complexity of exponentially increasing quantities of distributed and heterogenous data. This will require innovations in both the technical and social domains. Inspired by the successful development of the Internet and leveraging the Digital Object Framework and FAIR Principles (for making data Findable, Accessible, Interoperable and Reusable by machines) the GO FAIR initiative works with voluntary stakeholders to accelerate convergence on minimal standards and working implementations leading to an Internet of FAIR Data and Services (IFDS). 
In close collaboration with GO FAIR and DONA, the RDA GEDE and C2CAMP initiatives will continue its FAIR DO implementation efforts.}, author = {Schultes, Erik and Wittenburg, Peter}, year = {2019}, file = {Schultes und Wittenburg - 2019 - FAIR Principles and Digital Objects Accelerating .pdf:C\:\\Users\\carst\\Zotero\\storage\\LDVTDL4P\\Schultes und Wittenburg - 2019 - FAIR Principles and Digital Objects Accelerating .pdf:application/pdf}, } @book{hughes_networks_1993, address = {Baltimore, Md.}, title = {Networks of power: electrification in western society, 1880--1930}, isbn = {978-0-8018-4614-4 978-0-8018-2873-7}, shorttitle = {Networks of power}, language = {en}, publisher = {Johns Hopkins Univ. Press}, author = {Hughes, Thomas Parke}, year = {1993}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\A9LPR6HG\\Hughes - 1993 - Networks of power electrification in western soci.pdf:application/pdf}, } @inproceedings{rantanen_towards_2019, title = {Towards {Ethical} {Data} {Ecosystems}: {A} {Literature} {Study}}, shorttitle = {Towards {Ethical} {Data} {Ecosystems}}, doi = {10.1109/ICE.2019.8792599}, abstract = {While the importance of data is growing as the fuel of the new data economy, also the role of the data ecosystems is growing. The new data ecosystems enables the use, reuse and enrichment of big data sets by or together with third parties. However, in the context of technology management, the governance of these kinds of data ecosystems raises ethical questions and issues that should be acknowledged by researchers and practitioners. This study reviews the extant literature regarding the given advice about ethical considerations. The method of systematic literature study is used to collect the primary articles (N=20). The selected articles are analyzed and themed according to reoccurring themes: privacy, accountability, ownership, accessibility, and motivation. 
The results show the discussion is fragmented and concrete ethical guidelines are lacking. Thus, this study requires more work for governing data ecosystems in an ethical way.}, booktitle = {2019 {IEEE} {International} {Conference} on {Engineering}, {Technology} and {Innovation} ({ICE}/{ITMC})}, author = {Rantanen, Minna M. and Hyrynsalmi, Sami and Hyrynsalmi, Sonja M.}, month = jun, year = {2019}, pages = {1--9}, file = {IEEE Xplore Abstract Record:C\:\\Users\\carst\\Zotero\\storage\\CPDH8NNN\\8792599.html:text/html}, } @inproceedings{oliveira_what_2018, address = {New York, NY, USA}, series = {dg.o '18}, title = {What is a {Data} {Ecosystem}?}, isbn = {978-1-4503-6526-0}, url = {https://doi.org/10.1145/3209281.3209335}, doi = {10.1145/3209281.3209335}, abstract = {The way that individuals and organizations have produced and consumed data has changed with the advent of new technologies. As a consequence, data has become a tradable and valuable good. There are now Data Ecosystems, in which a number of actors interact with each other to exchange, produce and consume data. Such ecosystems provide an environment for creating, managing and sustaining data sharing initiatives. Despite Data Ecosystems are gaining importance, until now, there is no common agreement on what theories should look like in Data Ecosystems. The evidence is the lack of a well-accepted definition of the term Data Ecosystem. In order to overcome this gap, in this paper, we investigate some theoretical issues that are relevant for Data Ecosystems. Our main focus is on the aspects related to the components of a Data Ecosystem as well as to propose a common definition for a Data Ecosystem term. Therefore, the aim of our work is two-fold. First, we investigate the state of research on the Data Ecosystem field and related kinds of ecosystems, such as Business and Software Ecosystems to enable the development of a common knowledge base. 
Second, we extract constructs from relevant studies in order to build a common and cohesive definition for Data Ecosystems.}, booktitle = {Proceedings of the 19th {Annual} {International} {Conference} on {Digital} {Government} {Research}: {Governance} in the {Data} {Age}}, publisher = {Association for Computing Machinery}, author = {Oliveira, Marcelo Iury S. and Lóscio, Bernadette Farias}, year = {2018}, } @incollection{charalabidis_multiple_2018, address = {Cham}, series = {Public {Administration} and {Information} {Technology}}, title = {The {Multiple} {Life} {Cycles} of {Open} {Data} {Creation} and {Use}}, isbn = {978-3-319-90850-2}, url = {https://doi.org/10.1007/978-3-319-90850-2_2}, abstract = {Open data can be defined as data that is free of charge or provided at marginal cost, under an open licence , machine readable, and provided in an open format}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_2}, pages = {11--31}, } @article{jones_probabilistic_2000, title = {A probabilistic model of information retrieval: development and comparative experiments: {Part} 2}, volume = {36}, issn = {0306-4573}, shorttitle = {A probabilistic model of information retrieval}, url = {http://www.sciencedirect.com/science/article/pii/S0306457300000169}, doi = {10.1016/S0306-4573(00)00016-9}, abstract = {The paper combines a comprehensive account of the probabilistic model of retrieval with new systematic experiments on TREC Programme material. 
It presents the model from its foundations through its logical development to cover more aspects of retrieval data and a wider range of system functions. Each step in the argument is matched by comparative retrieval tests, to provide a single coherent account of a major line of research. The experiments demonstrate, for a large test collection, that the probabilistic model is effective and robust, and that it responds appropriately, with major improvements in performance, to key features of retrieval situations. Part 1 covers the foundations and the model development for document collection and relevance data, along with the test apparatus. Part 2 covers the further development and elaboration of the model, with extensive testing, and briefly considers other environment conditions and tasks, model training, concluding with comparisons with other approaches and an overall assessment. Data and results tables for both parts are given in Part 1. Key results are summarised in Part 2.}, language = {en}, number = {6}, journal = {Information Processing \& Management}, author = {Jones, K Sparck and Walker, S and Robertson, S. E}, month = nov, year = {2000}, pages = {809--840}, } @article{tunkelang_faceted_nodate, title = {Faceted {Search}}, volume = {1}, issn = {1947-945X}, url = {https://www.morganclaypool.com/doi/abs/10.2200/S00190ED1V01Y200904ICR005}, doi = {10.2200/S00190ED1V01Y200904ICR005}, abstract = {We live in an information age that requires us, more than ever, to represent, access, and use information. Over the last several decades, we have developed a modern science and technology for information retrieval, relentlessly pursuing the vision of a "memex" that Vannevar Bush proposed in his seminal article, "As We May Think." Faceted search plays a key role in this program. Faceted search addresses weaknesses of conventional search approaches and has emerged as a foundation for interactive information retrieval. 
User studies demonstrate that faceted search provides more effective information-seeking support to users than best-first search. Indeed, faceted search has become increasingly prevalent in online information access systems, particularly for e-commerce and site search. In this lecture, we explore the history, theory, and practice of faceted search. Although we cannot hope to be exhaustive, our aim is to provide sufficient depth and breadth to offer a useful resource to both researchers and practitioners. Because faceted search is an area of interest to computer scientists, information scientists, interface designers, and usability researchers, we do not assume that the reader is a specialist in any of these fields. Rather, we offer a self-contained treatment of the topic, with an extensive bibliography for those who would like to pursue particular aspects in more depth.}, number = {1}, journal = {Synthesis Lectures on Information Concepts, Retrieval, and Services}, author = {Tunkelang, Daniel}, pages = {1--80}, file = {Tunkelang - Faceted Search.pdf:C\:\\Users\\carst\\Zotero\\storage\\IYLAHNVS\\Tunkelang - Faceted Search.pdf:application/pdf}, } @article{chevrier_use_2019, title = {Use and {Understanding} of {Anonymization} and {De}-{Identification} in the {Biomedical} {Literature}: {Scoping} {Review}}, volume = {21}, shorttitle = {Use and {Understanding} of {Anonymization} and {De}-{Identification} in the {Biomedical} {Literature}}, url = {https://www.jmir.org/2019/5/e13484/}, doi = {10.2196/13484}, abstract = {Background: The secondary use of health data is central to biomedical research in the era of data science and precision medicine. National and international initiatives, such as the Global Open Findable, Accessible, Interoperable, and Reusable (GO FAIR) initiative, are supporting this approach in different ways (eg, making the sharing of research data mandatory or improving the legal and ethical frameworks). 
Preserving patients’ privacy is crucial in this context. De-identification and anonymization are the two most common terms used to refer to the technical approaches that protect privacy and facilitate the secondary use of health data. However, it is difficult to find a consensus on the definitions of the concepts or on the reliability of the techniques used to apply them. A comprehensive review is needed to better understand the domain, its capabilities, its challenges, and the ratio of risk between the data subjects’ privacy on one side, and the benefit of scientific advances on the other. Objective: This work aims at better understanding how the research community comprehends and defines the concepts of de-identification and anonymization. A rich overview should also provide insights into the use and reliability of the methods. Six aspects will be studied: (1) terminology and definitions, (2) backgrounds and places of work of the researchers, (3) reasons for anonymizing or de-identifying health data, (4) limitations of the techniques, (5) legal and ethical aspects, and (6) recommendations of the researchers. Methods: Based on a scoping review protocol designed a priori, MEDLINE was searched for publications discussing de-identification or anonymization and published between 2007 and 2017. The search was restricted to MEDLINE to focus on the life sciences community. The screening process was performed by two reviewers independently. Results: After searching 7972 records that matched at least one search term, 135 publications were screened and 60 full-text articles were included. (1) Terminology: Definitions of the terms de-identification and anonymization were provided in less than half of the articles (29/60, 48\%). When both terms were used (41/60, 68\%), their meanings divided the authors into two equal groups (19/60, 32\%, each) with opposed views. The remaining articles (3/60, 5\%) were equivocal. 
(2) Backgrounds and locations: Research groups were based predominantly in North America (31/60, 52\%) and in the European Union (22/60, 37\%). The authors came from 19 different domains; computer science (91/248, 36.7\%), biomedical informatics (47/248, 19.0\%), and medicine (38/248, 15.3\%) were the most prevalent ones. (3) Purpose: The main reason declared for applying these techniques is to facilitate biomedical research. (4) Limitations: Progress is made on specific techniques but, overall, limitations remain numerous. (5) Legal and ethical aspects: Differences exist between nations in the definitions, approaches, and legal practices. (6) Recommendations: The combination of organizational, legal, ethical, and technical approaches is necessary to protect health data. Conclusions: Interest is growing for privacy-enhancing techniques in the life sciences community. This interest crosses scientific boundaries, involving primarily computer science, biomedical informatics, and medicine. The variability observed in the use of the terms de-identification and anonymization emphasizes the need for clearer definitions as well as for better education and dissemination of information on the subject. The same observation applies to the methods. Several legislations, such as the American Health Insurance Portability and Accountability Act (HIPAA) and the European General Data Protection Regulation (GDPR), regulate the domain. Using the definitions they provide could help address the variable use of these two concepts in the research community. [J Med Internet Res 2019;21(5):e13484]}, language = {en}, number = {5}, journal = {Journal of Medical Internet Research}, author = {Chevrier, Raphaël and Foufi, Vasiliki and Gaudet-Blavignac, Christophe and Robert, Arnaud and Lovis, Christian}, year = {2019}, pages = {e13484}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\Y85ZL73W\\Chevrier et al. 
- 2019 - Use and Understanding of Anonymization and De-Iden.pdf:application/pdf}, } @article{kogut_opensource_2001, title = {Open‐{Source} {Software} {Development} and {Distributed} {Innovation}}, volume = {17}, issn = {0266-903X}, url = {https://academic.oup.com/oxrep/article/17/2/248/336991}, doi = {10.1093/oxrep/17.2.248}, abstract = {Open‐source software development is a production model that exploits the distributed intelligence of participants in Internet communities. This model is efficient because of two related reasons: it avoids the inefficiencies of a strong intellectual property regime and it implements concurrently design and testing of software modules. The hazard of open source is that projects can ‘fork’ into competing versions. However, open‐source communities consist of governance structures that constitutionally minimize this danger. Because open source works in a distributed environment, it presents an opportunity for developing countries to participate in frontier innovation.}, language = {en}, number = {2}, journal = {Oxford Review of Economic Policy}, author = {Kogut, Bruce and Metiu, Anca}, month = jun, year = {2001}, pages = {248--264}, } @incollection{beez_context-aware_2018, address = {Berlin, Heidelberg}, title = {Context-{Aware} {Documentation} in the {Smart} {Factory}}, isbn = {978-3-662-55433-3}, url = {https://doi.org/10.1007/978-3-662-55433-3_12}, abstract = {In every factory environment, errors and maintenance situations may occur. They must be handled quickly and accurately. This article describes a semantic application for automatically retrieving technical documentation for fixing such errors and presenting them to factory personnel. For this, machine raw data is collected and semantically enriched using Complex Event Processing (CEP). Semantic events are mapped to technical documentation via an ontology. 
Particular focus is drawn on the user experience of the semantic application.}, language = {en}, booktitle = {Semantic {Applications}: {Methodology}, {Technology}, {Corporate} {Use}}, publisher = {Springer}, author = {Beez, Ulrich and Kaupp, Lukas and Deuschel, Tilman and Humm, Bernhard G. and Schumann, Fabienne and Bock, Jürgen and Hülsmann, Jens}, editor = {Hoppe, Thomas and Humm, Bernhard and Reibold, Anatol}, year = {2018}, doi = {10.1007/978-3-662-55433-3_12}, pages = {163--180}, file = {Beez et al. - 2018 - Context-Aware Documentation in the Smart Factory.pdf:C\:\\Users\\carst\\Zotero\\storage\\52CXM2U6\\Beez et al. - 2018 - Context-Aware Documentation in the Smart Factory.pdf:application/pdf}, } @inproceedings{kaupp_raw_2017, address = {Karlsruhe}, title = {From {Raw} {Data} to {Smart} {Documentation}: {Introducing} a {Semantic} {Fusion} {Process} for {Cyber}-{Physical} {Systems}}, shorttitle = {From {Raw} {Data} to {Smart} {Documentation}}, url = {https://zenodo.org/record/3581079}, doi = {10.5281/ZENODO.3581079}, abstract = {Machine outage is a considerable problem in smart factories. This paper introduces a novel Semantic Fusion Process for Cyber-Physical Systems (SFP-CPS). It helps reducing machine outage in factories by automatically detecting problems in CPSs and providing suitable documentation to assist factory personnel in resolving the problem. The SFP-CPS operates on raw data collected from the CPS in real-time. Raw data gets normalized, semantically enriched and mapped onto a knowledge base, resulting in a semantic description of the error and its course together with a technical instruction of how to solve the error.}, language = {en}, booktitle = {{CERC2017} {Collaborative} {European} {Research} {Conference}: {Proceedings}}, author = {Kaupp, Lukas and Beez, Ulrich and Humm, Bernhard G. and Hülsmann, Jens}, year = {2017}, pages = {83--97}, file = {Kaupp et al. 
- 2017 - From Raw Data to Smart Documentation Introducing .pdf:C\:\\Users\\carst\\Zotero\\storage\\264G97PD\\Kaupp et al. - 2017 - From Raw Data to Smart Documentation Introducing .pdf:application/pdf}, } @inproceedings{macintosh_characterizing_2004, address = {Los Alamitos, Ca.}, title = {Characterizing e-participation in policy-making}, doi = {10.1109/HICSS.2004.1265300}, abstract = {This paper argues the urgent need to better understand the e-democracy pilots that have taken place so far and that are currently being developed. It addresses the issues of what should be characterized in e-democracy pilots so as to better identify types of citizen participation exercises and the appropriate technology to support them, as such it offers an analytical framework for electronic participation. Over the last decade there has been a gradual awareness of the need to consider the innovative application of ICTs for participation that enables a wider audience to contribute to democratic debate and where contributions themselves are broader and deeper. This awareness has resulted in a number of isolated e-democracy pilots and research studies. 
It is important to consolidate this work and characterizes the level of participation, the technology used, the stage in the policy-making process and various issues and constraints, including the potential benefits.}, language = {en}, booktitle = {Proceedings of the 37th {Annual} {Hawaii} {International} {Conference} on {System} {Sciences}, 2004}, publisher = {IEEE Computer Society Press}, author = {Macintosh, A.}, editor = {Sprague, Ralph H.}, year = {2004}, } @inproceedings{may_combining_2010, address = {Piscataway, NJ}, title = {Combining statistical independence testing, visual attribute selection and automated analysis to find relevant attributes for classification}, url = {https://ieeexplore.ieee.org/abstract/document/5654445}, doi = {10.1109/VAST.2010.5654445}, abstract = {We present an iterative strategy for finding a relevant subset of attributes for the purpose of classification in high-dimensional, heterogeneous data sets. The attribute subset is used for the construction of a classifier function. In order to cope with the challenge of scalability, the analysis is split into an overview of all attributes and a detailed analysis of small groups of attributes. The overview provides generic information on statistical dependencies between attributes. With this information the user can select groups of attributes and an analytical method for their detailed analysis. The detailed analysis involves the identification of redundant attributes (via classification or regression) and the creation of summarizing attributes (via clustering or dimension reduction). Our strategy does not prescribe specific analytical methods. 
Instead, we recursively combine the results of different methods to find or generate a subset of attributes to use for classification.}, booktitle = {2010 {IEEE} {Symposium} on {Visual} {Analytics} {Science} and {Technology}}, publisher = {IEEE Press}, author = {May, Thorsten and Davey, James and Kohlhammer, Jörn}, year = {2010}, pages = {239--240}, } @book{thomas_illuminating_2005, title = {Illuminating the path: the research and development agenda for visual analytics}, isbn = {978-0-7695-2323-1}, shorttitle = {Illuminating the path}, language = {en}, publisher = {IEEE Press}, author = {Thomas, James J. and Cook, Kristin A.}, year = {2005}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\8S82L9A9\\Thomas und IEEE Computer Society - 2005 - Illuminating the path the research and developmen.pdf:application/pdf}, } @article{maher_max_2019, title = {The {Max} {Planck} {Institute} {Grand} {Ensemble}: {Enabling} the {Exploration} of {Climate} {System} {Variability}}, volume = {11}, copyright = {©2019. The Authors.}, issn = {1942-2466}, shorttitle = {The {Max} {Planck} {Institute} {Grand} {Ensemble}}, url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019MS001639}, doi = {10.1029/2019MS001639}, abstract = {The Max Planck Institute Grand Ensemble (MPI-GE) is the largest ensemble of a single comprehensive climate model currently available, with 100 members for the historical simulations (1850–2005) and four forcing scenarios. It is currently the only large ensemble available that includes scenario representative concentration pathway (RCP) 2.6 and a 1\% CO2 scenario. These advantages make MPI-GE a powerful tool. We present an overview of MPI-GE, its components, and detail the experiments completed. We demonstrate how to separate the forced response from internal variability in a large ensemble. This separation allows the quantification of both the forced signal under climate change and the internal variability to unprecedented precision. 
We then demonstrate multiple ways to evaluate MPI-GE and put observations in the context of a large ensemble, including a novel approach for comparing model internal variability with estimated observed variability. Finally, we present four novel analyses, which can only be completed using a large ensemble. First, we address whether temperature and precipitation have a pathway dependence using the forcing scenarios. Second, the forced signal of the highly noisy atmospheric circulation is computed, and different drivers are identified to be important for the North Pacific and North Atlantic regions. Third, we use the ensemble dimension to investigate the time dependency of Atlantic Meridional Overturning Circulation variability changes under global warming. Last, sea level pressure is used as an example to demonstrate how MPI-GE can be utilized to estimate the ensemble size needed for a given scientific problem and provide insights for future ensemble projects.}, language = {en}, number = {7}, journal = {Journal of Advances in Modeling Earth Systems}, author = {Maher, Nicola and Milinski, Sebastian and Suarez‐Gutierrez, Laura and Botzet, Michael and Dobrynin, Mikhail and Kornblueh, Luis and Kröger, Jürgen and Takano, Yohei and Ghosh, Rohit and Hedemann, Christopher and Li, Chao and Li, Hongmei and Manzini, Elisa and Notz, Dirk and Putrasahan, Dian and Boysen, Lena and Claussen, Martin and Ilyina, Tatiana and Olonscheck, Dirk and Raddatz, Thomas and Stevens, Bjorn and Marotzke, Jochem}, year = {2019}, pages = {2050--2069}, file = {Maher et al. - 2019 - The Max Planck Institute Grand Ensemble Enabling .pdf:C\:\\Users\\carst\\Zotero\\storage\\U736S69Y\\Maher et al. 
- 2019 - The Max Planck Institute Grand Ensemble Enabling .pdf:application/pdf}, } @book{kaden_drei_2016, title = {Drei {Gründe} für {Forschungsdatenpublikationen}.}, url = {https://www2.hu-berlin.de/edissplus/2016/09/29/gruende-fuer-forschungsdatenpublikationen/}, language = {de}, author = {Kaden, Ben}, month = sep, year = {2016}, note = {Publication Title: eDissPlus DFG-Projekt: Elektronische Dissertationen Plus}, file = {Drei Gründe für Forschungsdatenpublikationen. · eDissPlus:C\:\\Users\\carst\\Zotero\\storage\\B4RGCN95\\gruende-fuer-forschungsdatenpublikationen.html:text/html}, } @article{willmes_building_2014, title = {Building {Research} {Data} {Management} {Infrastructure} using {Open} {Source} {Software}}, volume = {18}, copyright = {© 2013 John Wiley \& Sons Ltd}, issn = {1467-9671}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/tgis.12060}, doi = {10.1111/tgis.12060}, abstract = {The implementation of a research data management infrastructure for a large interdisciplinary research project is presented here, based on well-established Free and Open Source Software for Geospatial (FOSS4G) products such as MapServer, MapProxy, GeoExt and pyCSW, as well as the (not primarily geospatial) open source technologies Typo3 and CKAN. The presented implementation depends primarily on the demands for research data management infrastructure by the funding research agency. It also aligns to theory and practice in Research Data Management (RDM) and e-Science. After the research project and related work in the field of RDM are introduced, a detailed description of the architecture and its implementation is given. 
The article discusses why Open Source and open standards are chosen to implement the infrastructure and provides some suggestions and examples on how to make it easier and more attractive for researchers to upload and publish their primary research data.}, language = {en}, number = {4}, journal = {Transactions in GIS}, author = {Willmes, Christian and Kürner, Daniel and Bareth, Georg}, year = {2014}, pages = {496--509}, } @article{cambazoglu_scalability_2015, title = {Scalability {Challenges} in {Web} {Search} {Engines}}, volume = {7}, issn = {1947-945X}, url = {https://www.morganclaypool.com/doi/10.2200/S00662ED1V01Y201508ICR045}, doi = {10.2200/S00662ED1V01Y201508ICR045}, abstract = {In this book, we aim to provide a fairly comprehensive overview of the scalability and efficiency challenges in large-scale web search engines. More specifically, we cover the issues involved in the design of three separate systems that are commonly available in every web-scale search engine: web crawling, indexing, and query processing systems. We present the performance challenges encountered in these systems and review a wide range of design alternatives employed as solution to these challenges, specifically focusing on algorithmic and architectural optimizations. We discuss the available optimizations at different computational granularities, ranging from a single computer node to a collection of data centers. We provide some hints to both the practitioners and theoreticians involved in the field about the way large-scale web search engines operate and the adopted design choices. Moreover, we survey the efficiency literature, providing pointers to a large number of relatively important research papers. Finally, we discuss some open research problems in the context of search engine efficiency.}, number = {6}, journal = {Synthesis Lectures on Information Concepts, Retrieval, and Services}, author = {Cambazoglu, B. 
Barla and Baeza-Yates, Ricardo}, month = dec, year = {2015}, pages = {1--138}, } @incollection{flanders_data_2015, address = {Chichester}, title = {Data {Modeling}}, volume = {33}, booktitle = {A {New} {Companion} to {Digital} {Humanities}}, publisher = {John Wiley \& Sons, Ltd}, author = {Flanders, Julia and Jannidis, Fotis}, editor = {Schreibman, Susan and Siemens, Ray and Unsworth, John}, year = {2015}, pages = {229--237}, } @book{frants_automated_1997, address = {San Diego}, series = {Library and information science}, title = {Automated information retrieval: theory and methods}, isbn = {978-0-12-266170-9}, shorttitle = {Automated information retrieval}, publisher = {Academic Press}, author = {Frants, Valery and Shapiro, Jacob and Vojskunskij, Vladimir G.}, year = {1997}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\TSX4FRRJ\\Frants et al. - 1997 - Automated information retrieval theory and method.pdf:application/pdf}, } @book{goodfellow_deep_2016, address = {Cambridge, Massachusetts London, England}, series = {Adaptive computation and machine learning}, title = {Deep learning}, isbn = {978-0-262-03561-3}, language = {en}, publisher = {The MIT Press}, author = {Goodfellow, Ian and Bengio, Yoshua and Courville, Aaron}, year = {2016}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\ERDQJU22\\Goodfellow et al. - 2016 - Deep learning.pdf:application/pdf}, } @inproceedings{gradl_daten_2017, title = {Daten sammeln, modellieren und durchsuchen mit {DARIAH}-{DE}}, url = {https://fis.uni-bamberg.de/handle/uniba/41926}, doi = {10.5281/zenodo.582316}, abstract = {Die sammlungsübergreifende Recherche und Nachnutzung geisteswissenschaftlicher Forschungsdaten stehen im Blickpunkt aktueller Forschung in den Digital Humanities. 
Obwohl das Interesse an einer Zusammenführung digitaler Forschungsdaten bereits kurz nach der Einführung erster digitaler Bibliotheken um die Jahrtausendwende entstand, bleibt die Integration von Forschungsdaten über Sammlungsgrenzen hinweg ein aktuelles Forschungsthema. Bei einer forschungsorientierten Betrachtung von Sammlungen digitaler Daten (also z. B. digitale Texte, Digitalisate, Normdaten, Metadaten) stellt sich die Frage nach den Anforderungen und Erfolgskriterien einer übergreifenden Föderation, Verarbeitung und Visualisierung von Forschungsdaten. Entgegen der in der Praxis üblichen Orientierung an institutionellen Anforderungen stellen die in DARIAH-DE entwickelten Konzepte und Dienste zur Verzeichnung, Korrelation und Zusammenführung von Forschungsdaten die Bedürfnisse von WissenschaftlerInnen im Kontext ihrer Forschungsfragen in den Mittelpunkt. Dies äußert sich beispielsweise darin, dass DARIAH-DE keine strukturellen Bedingungen an Forschungsdaten stellt. Stattdessen können Daten so publiziert, modelliert und integriert werden, dass eine möglichst gute Passung an den jeweiligen geisteswissenschaftlichen Kontext erreicht wird. Dieser Workshop wird zunächst in Form kurzer Referate Hintergrundwissen zu den Konzepten und Diensten der DARIAH-DE Föderationsarchitektur vermitteln. Wichtige Bereiche sind dabei nicht nur die Handhabung der Daten selbst sowie Fragen der Lizensierung von Forschungsdaten, sondern auch die Nachnutzbarkeit einmal erhobener oder gesammelter Daten für weitere Forschungsfragen oder zur Nutzung durch andere WissenschaftlerInnen. 
Ein wesentlicher Anteil des Workshops wird dann insbesondere in der Hands-On-Anwendung der Komponenten durch die TeilnehmerInnen selbst bestehen.}, language = {de}, booktitle = {{DHd} 2017 : {Digitale} {Nachhaltigkeit} : {Konferenzabstracts}}, author = {Gradl, Tobias and Aschauer, Anna and Dogunke, Swantje and Klaffki, Lisa and Schmunk, Stefan and Steyer, Timo}, year = {2017}, pages = {22--27}, file = {Gradl et al. - 2017 - Daten sammeln, modellieren und durchsuchen mit DAR.pdf:C\:\\Users\\carst\\Zotero\\storage\\C5E8KB8V\\Gradl et al. - 2017 - Daten sammeln, modellieren und durchsuchen mit DAR.pdf:application/pdf}, } @book{manning_introduction_2008, address = {Cambridge}, edition = {Reprinted}, title = {Introduction to information retrieval}, isbn = {978-0-521-86571-5}, language = {en}, publisher = {Cambridge Univ. Press}, author = {Manning, Christopher D. and Raghavan, Prabhakar and Schütze, Hinrich}, year = {2008}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\TCNPN2FM\\Manning et al. 
- 2009 - Introduction to information retrieval.pdf:application/pdf}, } @article{marchionini_exploratory_2006, title = {Exploratory search: from finding to understanding}, volume = {49}, issn = {0001-0782}, shorttitle = {Exploratory search}, url = {https://doi.org/10.1145/1121949.1121979}, doi = {10.1145/1121949.1121979}, abstract = {Research tools critical for exploratory search success involve the creation of new interfaces that move the process beyond predictable fact retrieval.}, language = {en}, number = {4}, journal = {Communications of the ACM}, author = {Marchionini, Gary}, month = apr, year = {2006}, pages = {41--46}, } @incollection{neuroth_bibliothek_2017, address = {Stuttgart}, title = {Bibliothek, {Archiv}, {Museum}}, isbn = {978-3-476-05446-3}, url = {https://doi.org/10.1007/978-3-476-05446-3_15}, abstract = {Bibliotheken, Archive und Museen werden unter dem Sammelbegriff Gedächtnisinstitutionen gefasst, da alle drei Organisationstypen die Hauptaufgabe haben, Informationen und Wissen zu sammeln, zu bewahren und zur Verfügung zu stellen. Sie stellen wesentliche Informationseinrichtungen in Deutschland dar und werden überwiegend durch die öffentliche Hand finanziert. Die Gedächtnisinstitutionen sind ein wichtiger Eckpfeiler des kulturellen und wissenschaftlichen Erbes und bieten durch die Bereitstellung und Erforschung der dort gesammelten Informationen den Schlüssel für das Verständnis der Vergangenheit, Gegenwart und Zukunft. Daneben gibt es einige bedeutende, privat finanzierte Einrichtungen zum Beispiel von Firmen oder Privatleuten.}, language = {de}, booktitle = {Digital {Humanities}: {Eine} {Einführung}}, publisher = {J.B. 
Metzler}, author = {Neuroth, Heike}, editor = {Jannidis, Fotis and Kohle, Hubertus and Rehbein, Malte}, year = {2017}, doi = {10.1007/978-3-476-05446-3_15}, pages = {213--222}, } @article{marchionini_find_2007, title = {Find {What} {You} {Need}, {Understand} {What} {You} {Find}}, volume = {23}, issn = {1044-7318}, url = {https://doi.org/10.1080/10447310701702352}, doi = {10.1080/10447310701702352}, abstract = {This article presents a framework for research and development in user interfaces that support information seeking. The information-seeking process is described, and each of the subprocesses are discussed with an eye toward making user interfaces that closely couple support mechanisms. Recent results from studies related to term suggestions for queries, coupling search and examination, and seamless interaction between overviews and previews are used to illustrate highly interactive information-seeking services.}, number = {3}, journal = {International Journal of Human–Computer Interaction}, author = {Marchionini, Gary and White, Ryen}, month = dec, year = {2007}, pages = {205--237}, file = {Marchionini und White - 2007 - Find What You Need, Understand What You Find.pdf:C\:\\Users\\carst\\Zotero\\storage\\FH72CCTC\\Marchionini und White - 2007 - Find What You Need, Understand What You Find.pdf:application/pdf}, } @article{robertson_relevance_1976, title = {Relevance weighting of search terms}, volume = {27}, copyright = {Copyright © 1976 Wiley Periodicals, Inc., A Wiley Company}, issn = {1097-4571}, url = {https://asistdl.onlinelibrary.wiley.com/doi/abs/10.1002/asi.4630270302}, doi = {10.1002/asi.4630270302}, abstract = {This paper examines statistical techniques for exploiting relevance information to weight search terms. These techniques are presented as a natural extension of weighting methods using information about the distribution of index terms in documents in general. 
A series of relevance weighting functions is derived and is justified by theoretical considerations. In particular, it is shown that specific weighted search methods are implied by a general probabilistic theory of retrieval. Different applications of relevance weighting are illustrated by experimental results for test collections.}, language = {en}, number = {3}, journal = {Journal of the American Society for Information Science}, author = {Robertson, S. E. and Jones, K. Sparck}, year = {1976}, pages = {129--146}, } @inproceedings{robertson_okapi_1999, address = {Gaithersburg}, title = {Okapi at {TREC}-7: automatic ad hoc, filtering, {VCL} and interactive track}, booktitle = {The {Seventh} {Text} {REtrieval} {Conference} ({TREC}-7)}, publisher = {National Institute of Standards and Technology}, author = {Robertson, S.E. and Walker, S. and Beaulieu, M.}, year = {1999}, pages = {253--264}, } @article{tuytelaars_local_2008, title = {Local {Invariant} {Feature} {Detectors}: {A} {Survey}}, volume = {3}, issn = {1572-2740, 1572-2759}, shorttitle = {Local {Invariant} {Feature} {Detectors}}, url = {https://www.nowpublishers.com/article/Details/CGV-017}, doi = {10.1561/0600000017}, abstract = {In this survey, we give an overview of invariant interest point detectors, how they evolved over time, how they work, and what their respective strengths and weaknesses are. We begin with defining the properties of the ideal local feature detector. This is followed by an overview of the literature over the past four decades organized in different categories of feature extraction methods. We then provide a more detailed analysis of a selection of methods which had a particularly significant impact on the research field. 
We conclude with a summary and promising future research directions.}, language = {en}, number = {3}, journal = {Foundations and Trends in Computer Graphics and Vision}, author = {Tuytelaars, Tinne and Mikolajczyk, Krystian}, month = jun, year = {2008}, pages = {177--280}, file = {Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\G8BJR62Q\\Tuytelaars und Mikolajczyk - 2008 - Local Invariant Feature Detectors A Survey.pdf:application/pdf}, } @book{ferber_information_2003, address = {Heidelberg}, edition = {1. Aufl}, title = {Information {Retrieval}: {Suchmodelle} und {Data}-{Mining}-{Verfahren} für {Textsammlungen} und das {Web}}, language = {ger}, publisher = {dpunkt-Verl}, author = {Ferber, Reginald}, year = {2003}, } @article{raieli_introducing_2016, title = {Introducing {Multimedia} {Information} {Retrieval} to libraries}, volume = {7}, issn = {2038-1026}, url = {https://doi.org/10.4403/jlis.it-11530}, doi = {10.4403/jlis.it-11530}, number = {3}, journal = {JLIS}, author = {Raieli, Roberto}, year = {2016}, pages = {9--42}, } @article{rixen_zukunftsthema_2018, title = {Zukunftsthema: {Zum} {Umgang} mit {Forschungsdaten}}, volume = {18}, url = {https://www.forschung-und-lehre.de/recht/zukunftsthema-334/}, abstract = {Der Umgang mit Forschungsdaten ist von grundlegender Bedeutung für die Replizierbarkeit von Forschungsergebnissen.
Über Rechtsgrundlagen und Probleme.}, language = {de}, number = {2}, journal = {Forschung \& Lehre}, author = {Rixen, Stephan}, editor = {{Deutscher Hochschulverband}}, year = {2018}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\QU55Y56Y\\zukunftsthema-334.html:text/html}, } @incollection{schoch_aufbau_2017, address = {Stuttgart}, title = {Aufbau von {Datensammlungen}}, isbn = {978-3-476-05446-3}, url = {https://doi.org/10.1007/978-3-476-05446-3_16}, abstract = {Forschung in den digitalen Geisteswissenschaften ist in der Regel datenbasiert, insofern in der Regel eine digitale Repräsentation der Untersuchungsgegenstände vorliegt, die mit Hilfe des Computers bearbeitet, analysiert und/oder verbreitet werden kann. Zudem wird meist nicht nur ein einzelnes Beispiel, sondern eine Gruppe von Gegenständen untersucht, d. h. mit einer Datensammlung gearbeitet.}, language = {de}, booktitle = {Digital {Humanities}: {Eine} {Einführung}}, publisher = {J.B. Metzler}, author = {Schöch, Christof}, editor = {Jannidis, Fotis and Kohle, Hubertus and Rehbein, Malte}, year = {2017}, doi = {10.1007/978-3-476-05446-3_16}, pages = {223--233}, } @incollection{bullin_inhaltsbasierte_2020, address = {Wiesbaden}, series = {Episteme in {Bewegung}}, title = {Die inhaltsbasierte {Bildsuche} und {Bilderschließung}: {Ansätze} und {Problemfelder}}, isbn = {978-3-447-11460-8 3-447-11460-6}, language = {de}, number = {16}, booktitle = {Bilddaten in den {Digitalen} {Geisteswissenschaften}}, publisher = {Harrassowitz Verlag}, author = {Bullin, Martin and Henrich, Andreas}, editor = {Hastik, Canan and Hegel, Philipp}, year = {2020}, } @book{lewandowski_web_2005, address = {Frankfurt am Main}, series = {Reihe {Informationswissenschaft} der {DGI}}, title = {Web {Information} {Retrieval}: {Technologien} zur {Informationssuche} im {Internet}}, isbn = {978-3-925474-55-2 3-925474-55-2}, language = {de}, number = {Bd. 
7}, publisher = {Deutsche Gesellschaft für Informationswissenschaft und Informationspraxis}, author = {Lewandowski, Dirk}, year = {2005}, } @article{zhai_study_2004, title = {A study of smoothing methods for language models applied to information retrieval}, volume = {22}, issn = {1046-8188}, url = {https://doi.org/10.1145/984321.984322}, doi = {10.1145/984321.984322}, abstract = {Language modeling approaches to information retrieval are attractive and promising because they connect the problem of retrieval with that of language model estimation, which has been studied extensively in other application areas such as speech recognition. The basic idea of these approaches is to estimate a language model for each document, and to then rank documents by the likelihood of the query according to the estimated language model. A central issue in language model estimation is smoothing, the problem of adjusting the maximum likelihood estimator to compensate for data sparseness. In this article, we study the problem of language model smoothing and its influence on retrieval performance. We examine the sensitivity of retrieval performance to the smoothing parameters and compare several popular smoothing methods on different test collections. Experimental results show that not only is the retrieval performance generally sensitive to the smoothing parameters, but also the sensitivity pattern is affected by the query type, with performance being more sensitive to smoothing for verbose queries than for keyword queries. Verbose queries also generally require more aggressive smoothing to achieve optimal performance. This suggests that smoothing plays two different role—to make the estimated document language model more accurate and to "explain" the noninformative words in the query. 
In order to decouple these two distinct roles of smoothing, we propose a two-stage smoothing strategy, which yields better sensitivity patterns and facilitates the setting of smoothing parameters automatically. We further propose methods for estimating the smoothing parameters automatically. Evaluation on five different databases and four types of queries indicates that the two-stage smoothing method with the proposed parameter estimation methods consistently gives retrieval performance that is close to—or better than—the best results achieved using a single smoothing method and exhaustive parameter search on the test data.}, number = {2}, journal = {ACM Transactions on Information Systems}, author = {Zhai, Chengxiang and Lafferty, John}, month = apr, year = {2004}, pages = {179--214}, } @inproceedings{gradl_extending_2016, address = {New York, NY, USA}, series = {{DocEng} '16}, title = {Extending {Data} {Models} by {Declaratively} {Specifying} {Contextual} {Knowledge}}, isbn = {978-1-4503-4438-8}, url = {https://doi.org/10.1145/2960811.2967147}, doi = {10.1145/2960811.2967147}, abstract = {The research data landscape of the arts and humanities is characterized by a high degree of heterogeneity. To improve interoperability, recent initiatives and research infrastructures are encouraging the use of standards and best practices. However, custom data models are often considered necessary to exactly reflect the requirements of a particular collection or research project. To address the needs of scholars in the arts and humanities for a composition of research data irrespective of the degree of structuredness and standardization, we propose a concept on the basis of formal languages, which facilitates declarative data modeling by respective domain experts. 
By identifying and defining grammatical patterns and deriving transformation functions, the structure of data is generated or extended in accordance with the particular context and needs of the domain.}, language = {en}, booktitle = {Proceedings of the 2016 {ACM} {Symposium} on {Document} {Engineering}}, publisher = {Association for Computing Machinery}, author = {Gradl, Tobias and Henrich, Andreas}, editor = {Sablatnig, Robert and Hassan, Tamir}, month = sep, year = {2016}, pages = {123--126}, } @inproceedings{ogilvie_combining_2003, address = {New York, NY, USA}, series = {{SIGIR} '03}, title = {Combining document representations for known-item search}, isbn = {978-1-58113-646-3}, url = {https://doi.org/10.1145/860435.860463}, doi = {10.1145/860435.860463}, abstract = {This paper investigates the pre-conditions for successful combination of document representations formed from structural markup for the task of known-item search. As this task is very similar to work in meta-search and data fusion, we adapt several hypotheses from those research areas and investigate them in this context. To investigate these hypotheses, we present a mixture-based language model and also examine many of the current meta-search algorithms. We find that compatible output from systems is important for successful combination of document representations. We also demonstrate that combining low performing document representations can improve performance, but not consistently. We find that the techniques best suited for this task are robust to the inclusion of poorly performing document representations. 
We also explore the role of variance of results across systems and its impact on the performance of fusion, with the surprising result that the correct documents have higher variance across document representations than highly ranking incorrect documents.}, booktitle = {Proceedings of the 26th annual international {ACM} {SIGIR} conference on {Research} and development in informaion retrieval}, publisher = {Association for Computing Machinery}, author = {Ogilvie, Paul and Callan, Jamie}, editor = {Clarke, Charles and Cormack, Gordon and Callan, Jamie and Hawking, David and Smeaton, Alan}, month = jun, year = {2003}, pages = {143--150}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\L6FL9HRA\\Ogilvie und Callan - 2003 - Combining document representations for known-item .pdf:application/pdf}, } @book{witten_managing_1999, address = {San Francisco, Calif.}, edition = {2. ed}, series = {The {Morgan} {Kaufmann} series in multimedia information and systems}, title = {Managing gigabytes : compressing and indexing documents and images}, isbn = {1-55860-570-3}, language = {en}, publisher = {Morgan Kaufmann Publishers}, author = {Witten, Ian H. and Moffat, Alistair and Bell, Timothy C.}, year = {1999}, } @article{brin_anatomy_1998, series = {Proceedings of the {Seventh} {International} {World} {Wide} {Web} {Conference}}, title = {The anatomy of a large-scale hypertextual {Web} search engine}, volume = {30}, issn = {0169-7552}, url = {http://www.sciencedirect.com/science/article/pii/S016975529800110X}, doi = {10.1016/S0169-7552(98)00110-X}, abstract = {In this paper, we present Google, a prototype of a large-scale search engine which makes heavy use of the structure present in hypertext. Google is designed to crawl and index the Web efficiently and produce much more satisfying search results than existing systems. 
The prototype with a full text and hyperlink database of at least 24 million pages is available at http://google.stanford.edu/ To engineer a search engine is a challenging task. Search engines index tens to hundreds of millions of Web pages involving a comparable number of distinct terms. They answer tens of millions of queries every day. Despite the importance of large-scale search engines on the Web, very little academic research has been done on them. Furthermore, due to rapid advance in technology and Web proliferation, creating a Web search engine today is very different from three years ago. This paper provides an in-depth description of our large-scale Web search engine — the first such detailed public description we know of to date. Apart from the problems of scaling traditional search techniques to data of this magnitude, there are new technical challenges involved with using the additional information present in hypertext to produce better search results. This paper addresses this question of how to build a practical large-scale system which can exploit the additional information present in hypertext. Also we look at the problem of how to effectively deal with uncontrolled hypertext collections where anyone can publish anything they want.}, language = {en}, number = {1}, journal = {Computer Networks and ISDN Systems}, author = {Brin, Sergey and Page, Lawrence}, month = apr, year = {1998}, pages = {107--117}, file = {Brin und Page - 1998 - The anatomy of a large-scale hypertextual Web sear.pdf:C\:\\Users\\carst\\Zotero\\storage\\LUXEM8W8\\Brin und Page - 1998 - The anatomy of a large-scale hypertextual Web sear.pdf:application/pdf}, } @incollection{ponceleon_multimedia_2011, address = {Harlow}, edition = {2. 
ed.}, title = {Multimedia {Information} {Retrieval}}, isbn = {978-0-321-41691-9}, language = {en}, booktitle = {Modern information retrieval : the concepts and technology behind search}, publisher = {Pearson Addison-Wesley}, author = {Ponceleon, Dulce B and Slaney, Malcolm}, year = {2011}, pages = {587--639}, } @book{croft_search_2010, address = {Boston}, title = {Search engines : information retrieval in practice}, isbn = {978-0-13-607224-9}, publisher = {Pearson}, author = {Croft, W. Bruce and Metzler, Donald and Strohman, Trevor}, year = {2010}, } @book{buttcher_information_2010, address = {Cambridge, Mass.}, title = {Information retrieval : implementing and evaluating search engines}, isbn = {978-0-262-02651-2}, publisher = {The MIT Press}, author = {Büttcher, Stefan and Clarke, Charles L. A. and Cormack, Gordon V.}, year = {2010}, } @incollection{gray_jim_2009, address = {Redmond}, title = {Jim {Gray} on {eScience}: {A} {Transformed} {Scientific} {Method}: {Based} on the transcript of a talk given by {Jim} {Gray} to the {NRC}-{CSTB} in {Mountain} {View}, {CA}, on {January} 11, 2007}, isbn = {978-0-9825442-0-4}, language = {en}, booktitle = {The fourth paradigm : {Data}-intensive scientific discovery}, publisher = {Microsoft Research}, author = {Gray, Jim}, editor = {Hey, Tony}, year = {2009}, pages = {xvii--xxxi}, file = {Gray - 2009 - Jim Gray on eScience A Transformed Scientific Met.pdf:C\:\\Users\\carst\\Zotero\\storage\\EJF6F3UX\\Gray - 2009 - Jim Gray on eScience A Transformed Scientific Met.pdf:application/pdf}, } @article{donaldson_user_2015, title = {User conceptions of trustworthiness for digital archival documents}, volume = {66}, copyright = {© 2015 ASIS\&T}, issn = {2330-1643}, url = {https://asistdl.onlinelibrary.wiley.com/doi/abs/10.1002/asi.23330}, doi = {10.1002/asi.23330}, abstract = {Trust is the most important characteristic of digital repositories designed to hold and deliver archival documents that have persistent value to stakeholders. 
In theoretical models of trust in information, the concept of trustworthiness is emerging as both fundamentally important and understudied, particularly in the domain of digital repositories. This article reports on a qualitative study designed to elicit from groups of end users components of trustworthiness and to assess their relative importance. The study draws on interview data from 3 focus groups with experienced users of the Washington State Digital Archives. Utilizing thematic analysis and micro-interlocutor analysis to examine a combination of interview transcripts and video recordings, the study provides a realistic picture of the strength and character of emergent themes that underpin the more general concept of trustworthiness. The study reinforces the centrality of trustworthiness at the individual document level, but calls into question the formulation of trustworthiness as a concept in Kelton, Fleischmann, and Wallace's (2008) Integrated Model of Trust in Information.}, language = {en}, number = {12}, journal = {Journal of the Association for Information Science and Technology}, author = {Donaldson, Devan Ray and Conway, Paul}, year = {2015}, pages = {2427--2444}, file = {Donaldson und Conway - 2015 - User conceptions of trustworthiness for digital ar.pdf:C\:\\Users\\carst\\Zotero\\storage\\JJJBDIH4\\Donaldson und Conway - 2015 - User conceptions of trustworthiness for digital ar.pdf:application/pdf}, } @article{yoon_role_2017, title = {Role of {Communication} in {Data} {Reuse}}, volume = {54}, copyright = {Copyright © 2017 by Association for Information Science and Technology}, issn = {2373-9231}, url = {https://asistdl.onlinelibrary.wiley.com/doi/abs/10.1002/pra2.2017.14505401050}, doi = {10.1002/pra2.2017.14505401050}, abstract = {In acknowledging the potentials of existing data, researchers' interests in sharing and reusing data have recently emerged. However, sharing and reusing data is not a simple one-step process for researchers. 
Because data reusers build their work on other researchers' findings, the process of data reuse involves various interactions and communications with other relevant parties. Exploring the nature of communications around data is thus important to fully understand data reuse practices and to support smoother processes of data reuse. This study investigates communications occurring around data during data reusers' experiences through qualitative interview studies involving this group. This study's results show that the communications with different stakeholders mainly support data reuse in three areas: searching, learning, and problem solving. The findings provide valuable insights into the domain of scholarly communication, data reuse, and data services.}, language = {en}, number = {1}, journal = {Proceedings of the Association for Information Science and Technology}, author = {Yoon, Ayoung}, year = {2017}, pages = {463--471}, file = {Yoon - 2017 - Role of communication in data reuse.pdf:C\:\\Users\\carst\\Zotero\\storage\\T7GC33N8\\Yoon - 2017 - Role of communication in data reuse.pdf:application/pdf}, } @article{wu_data_2019, title = {Data {Discovery} {Paradigms}: {User} {Requirements} and {Recommendations} for {Data} {Repositories}}, volume = {18}, issn = {1683-1470}, shorttitle = {Data {Discovery} {Paradigms}}, url = {http://datascience.codata.org/articles/10.5334/dsj-2019-003/}, doi = {10.5334/dsj-2019-003}, abstract = {As data repositories make more data openly available it becomes challenging for researchers to find what they need either from a repository or through web search engines. This study attempts to investigate data users’ requirements and the role that data repositories can play in supporting data discoverability by meeting those requirements. We collected 79 data discovery use cases (or data search scenarios), from which we derived nine functional requirements for data repositories through qualitative analysis. 
We then applied usability heuristic evaluation and expert review methods to identify best practices that data repositories can implement to meet each functional requirement. We propose the following ten recommendations for data repository operators to consider for improving data discoverability and user’s data search experience: 1. Provide a range of query interfaces to accommodate various data search behaviours. 2. Provide multiple access points to find data. 3. Make it easier for researchers to judge relevance, accessibility and reusability of a data collection from a search summary. 4. Make individual metadata records readable and analysable. 5. Enable sharing and downloading of bibliographic references. 6. Expose data usage statistics. 7. Strive for consistency with other repositories. 8. Identify and aggregate metadata records that describe the same data object. 9. Make metadata records easily indexed and searchable by major web search engines. 10. Follow API search standards and community adopted vocabularies for interoperability.}, language = {en}, number = {1}, journal = {Data Science Journal}, author = {Wu, Mingfang and Psomopoulos, Fotis and Khalsa, Siri Jodha and Waard, Anita de}, month = aug, year = {2019}, pages = {1--13}, file = {Wu et al. - 2019 - Data Discovery Paradigms User Requirements and Re.pdf:C\:\\Users\\carst\\Zotero\\storage\\4WYA769F\\Wu et al. - 2019 - Data Discovery Paradigms User Requirements and Re.pdf:application/pdf}, } @inproceedings{kern_are_2015, address = {Cham}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Are {There} {Any} {Differences} in {Data} {Set} {Retrieval} {Compared} to {Well}-{Known} {Literature} {Retrieval}?}, isbn = {978-3-319-24592-8}, doi = {10.1007/978-3-319-24592-8_15}, abstract = {Digital libraries are nowadays expected to contain more than books and articles. All relevant sources of information for a scholar should be available, including research data. 
However, does literature retrieval work for data sets as well? In the context of a requirement analysis of a data catalogue for quantitative Social Science research data, we tried to find answers to this question. We conducted two user studies with a total of 53 participants and found similarities and important differences in the users’ needs when searching for data sets in comparison to those already known in literature search. In particular, quantity and quality of metadata are far more important in data set search than in literature search, where convenience is most important. In this paper, we present the methodology of these two user studies, their results and challenges for data set retrieval system that can be derived thereof. One of our key findings is that for empirical social scientists, the choice of research data is more relevant than the choice of literature; therefore they are willing to put more effort into the retrieval process. Due to our choice of use case, our initial findings are limited to the field of Social Sciences. 
However, because of the similar characteristics for data sets also in other research areas, such as Economics, we assume that our results are applicable for them as well.}, language = {en}, booktitle = {Research and {Advanced} {Technology} for {Digital} {Libraries}}, publisher = {Springer International Publishing}, author = {Kern, Dagmar and Mathiak, Brigitte}, editor = {Kapidakis, Sarantos and Mazurek, Cezary and Werla, Marcin}, year = {2015}, pages = {197--208}, file = {Kern und Mathiak - 2015 - Are There Any Differences in Data Set Retrieval Co.pdf:C\:\\Users\\carst\\Zotero\\storage\\S6PYZUH7\\Kern und Mathiak - 2015 - Are There Any Differences in Data Set Retrieval Co.pdf:application/pdf}, } @article{thanos_research_2017, title = {Research {Data} {Reusability}: {Conceptual} {Foundations}, {Barriers} and {Enabling} {Technologies}}, volume = {5}, copyright = {http://creativecommons.org/licenses/by/3.0/}, shorttitle = {Research {Data} {Reusability}}, url = {https://www.mdpi.com/2304-6775/5/1/2}, doi = {10.3390/publications5010002}, abstract = {High-throughput scientific instruments are generating massive amounts of data. Today, one of the main challenges faced by researchers is to make the best use of the world’s growing wealth of data. Data (re)usability is becoming a distinct characteristic of modern scientific practice. By data (re)usability, we mean the ease of using data for legitimate scientific research by one or more communities of research (consumer communities) that is produced by other communities of research (producer communities). Data (re)usability allows the reanalysis of evidence, reproduction and verification of results, minimizing duplication of effort, and building on the work of others. It has four main dimensions: policy, legal, economic and technological. The paper addresses the technological dimension of data reusability. The conceptual foundations of data reuse as well as the barriers that hamper data reuse are presented and discussed. 
The data publication process is proposed as a bridge between the data author and user and the relevant technologies enabling this process are presented.}, language = {en}, number = {1}, journal = {Publications}, author = {Thanos, Costantino}, year = {2017}, pages = {2}, file = {Thanos - 2017 - Research Data Reusability Conceptual Foundations,.pdf:C\:\\Users\\carst\\Zotero\\storage\\KXWCV9KP\\Thanos - 2017 - Research Data Reusability Conceptual Foundations,.pdf:application/pdf}, } @article{shen_research_2015, title = {Research {Data} {Sharing} and {Reuse} {Practices} of {Academic} {Faculty} {Researchers}: {A} {Study} of the {Virginia} {Tech} {Data} {Landscape}}, volume = {10}, issn = {1746-8256}, shorttitle = {Research {Data} {Sharing} and {Reuse} {Practices} of {Academic} {Faculty} {Researchers}}, url = {http://www.ijdc.net/article/view/10.2.157}, doi = {10.2218/ijdc.v10i2.359}, abstract = {This paper presents the results of a research data assessment and landscape study in the institutional context of Virginia Tech to determine the data sharing and reuse practices of academic faculty researchers. Through mapping the level of user engagement in “openness of data,” “openness of methodologies and workflows,” and “reuse of existing data,” this study contributes to the current knowledge in data sharing and open access, and supports the strategic development of institutional data stewardship. Asking faculty researchers to self-reflect sharing and reuse from both data producers’ and data users’ perspectives, the study reveals a significant gap between the rather limited sharing activities and the highly perceived reuse or repurpose values regarding data, indicating that potential values of data for future research are lost right after the original work is done. 
The localized and sporadic data management and documentation practices of researchers also contribute to the obstacles they themselves often encounter when reusing existing data.}, language = {en}, number = {2}, journal = {International Journal of Digital Curation}, author = {Shen, Yi}, month = jun, year = {2015}, pages = {157--175}, file = {Shen - 2015 - Research Data Sharing and Reuse Practices of Acade.pdf:C\:\\Users\\carst\\Zotero\\storage\\CGB2PV7L\\Shen - 2015 - Research Data Sharing and Reuse Practices of Acade.pdf:application/pdf}, } @article{gregory_eleven_2018, title = {Eleven quick tips for finding research data}, volume = {14}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1006038}, language = {en}, number = {4}, journal = {PLOS Computational Biology}, author = {Gregory, Kathleen and Khalsa, Siri Jodha and Michener, William K. and Psomopoulos, Fotis E. and Waard, Anita de and Wu, Mingfang}, month = apr, year = {2018}, pages = {e1006038}, file = {Gregory et al. - 2018 - Eleven quick tips for finding research data.pdf:C\:\\Users\\carst\\Zotero\\storage\\BPG89PGS\\Gregory et al. - 2018 - Eleven quick tips for finding research data.pdf:application/pdf}, } @book{kitchin_data_2014, address = {Thousand Oaks}, title = {The data revolution : big data, open data, data infrastructures and their consequences}, isbn = {978-1-4462-8747-7 978-1-4462-8748-4}, publisher = {SAGE}, author = {Kitchin, Rob}, year = {2014}, } @article{tenopir_changes_2015, title = {Changes in {Data} {Sharing} and {Data} {Reuse} {Practices} and {Perceptions} among {Scientists} {Worldwide}}, volume = {10}, issn = {1932-6203}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0134826}, doi = {10.1371/journal.pone.0134826}, abstract = {The incorporation of data sharing into the research lifecycle is an important part of modern scholarly debate. 
In this study, the DataONE Usability and Assessment working group addresses two primary goals: To examine the current state of data sharing and reuse perceptions and practices among research scientists as they compare to the 2009/2010 baseline study, and to examine differences in practices and perceptions across age groups, geographic regions, and subject disciplines. We distributed surveys to a multinational sample of scientific researchers at two different time periods (October 2009 to July 2010 and October 2013 to March 2014) to observe current states of data sharing and to see what, if any, changes have occurred in the past 3–4 years. We also looked at differences across age, geographic, and discipline-based groups as they currently exist in the 2013/2014 survey. Results point to increased acceptance of and willingness to engage in data sharing, as well as an increase in actual data sharing behaviors. However, there is also increased perceived risk associated with data sharing, and specific barriers to data sharing persist. There are also differences across age groups, with younger respondents feeling more favorably toward data sharing and reuse, yet making less of their data available than older respondents. Geographic differences exist as well, which can in part be understood in terms of collectivist and individualist cultural differences. An examination of subject disciplines shows that the constraints and enablers of data sharing and reuse manifest differently across disciplines. Implications of these findings include the continued need to build infrastructure that promotes data sharing while recognizing the needs of different research communities. Moving into the future, organizations such as DataONE will continue to assess, monitor, educate, and provide the infrastructure necessary to support such complex grand science challenges.}, language = {en}, number = {8}, journal = {PLOS ONE}, author = {Tenopir, Carol and Dalton, Elizabeth D. 
and Allard, Suzie and Frame, Mike and Pjesivac, Ivanka and Birch, Ben and Pollock, Danielle and Dorsett, Kristina}, month = aug, year = {2015}, pages = {e0134826}, file = {Tenopir et al. - 2015 - Changes in Data Sharing and Data Reuse Practices a.PDF:C\:\\Users\\carst\\Zotero\\storage\\CIQJMLLL\\Tenopir et al. - 2015 - Changes in Data Sharing and Data Reuse Practices a.PDF:application/pdf}, } @book{bauer_data-warehouse-systeme_2013, address = {Heidelberg}, edition = {4., überarb. und erw. Aufl}, title = {Data-{Warehouse}-{Systeme}: {Architektur}, {Entwicklung}, {Anwendung}}, isbn = {978-3-89864-785-4 3-89864-785-4}, language = {de}, publisher = {dpunkt-Verl}, editor = {Bauer, Andreas and Günzel, Holger}, year = {2013}, } @book{kommission_zukunft_der_informationsinfrastruktur_gesamtkonzept_2011, title = {Gesamtkonzept für die {Informationsinfrastruktur} in {Deutschland}: {Empfehlungen} der {Kommission} {Zukunft} der {Informationsinfrastruktur} im {Auftrag} der {Gemeinsamen} {Wissenschaftskonferenz} des {Bundes} und der {Länder}}, url = {https://www.hof.uni-halle.de/web/dateien/KII_Gesamtkonzept_2011.pdf}, language = {de}, editor = {{Kommission Zukunft der Informationsinfrastruktur}}, year = {2011}, file = {Kommission Zukunft der Informationsinfrastruktur - 2011 - Gesamtkonzept für die Informationsinfrastruktur in.pdf:C\:\\Users\\carst\\Zotero\\storage\\DBDABIPW\\Kommission Zukunft der Informationsinfrastruktur - 2011 - Gesamtkonzept für die Informationsinfrastruktur in.pdf:application/pdf}, } @article{atici_other_2013, title = {Other {People}’s {Data}: {A} {Demonstration} of the {Imperative} of {Publishing} {Primary} {Data}}, volume = {20}, issn = {1573-7764}, shorttitle = {Other {People}’s {Data}}, url = {https://doi.org/10.1007/s10816-012-9132-9}, doi = {10.1007/s10816-012-9132-9}, abstract = {This study explores issues in using data generated by other analysts. 
Three researchers independently analyzed an orphaned, decades-old zooarchaeological dataset and then compared their analytical approaches and results. Although they took a similar initial approach to determine the dataset’s suitability for analysis, the three researchers generated markedly different interpretive conclusions. In examining how researchers use legacy data, this paper highlights interpretive issues, data integrity concerns, and data documentation needs. In order to meet these needs, we propose greater professional recognition for data dissemination, favoring models of “data publication” over “data sharing” or “data archiving.”}, language = {en}, number = {4}, journal = {Journal of Archaeological Method and Theory}, author = {Atici, Levent and Kansa, Sarah Whitcher and Lev-Tov, Justin and Kansa, Eric C.}, month = dec, year = {2013}, pages = {663--681}, file = {Atici et al. - 2013 - Other People’s Data A Demonstration of the Impera.pdf:C\:\\Users\\carst\\Zotero\\storage\\EBFGNHJF\\Atici et al. - 2013 - Other People’s Data A Demonstration of the Impera.pdf:application/pdf}, } @article{chapman_dataset_2020, title = {Dataset search: a survey}, volume = {29}, issn = {0949-877X}, shorttitle = {Dataset search}, url = {https://doi.org/10.1007/s00778-019-00564-x}, doi = {10.1007/s00778-019-00564-x}, abstract = {Generating value from data requires the ability to find, access and make sense of datasets. There are many efforts underway to encourage data sharing and reuse, from scientific publishers asking authors to submit data alongside manuscripts to data marketplaces, open data portals and data communities. Google recently beta-released a search service for datasets, which allows users to discover data stored in various online repositories via keyword queries. 
These developments foreshadow an emerging research field around dataset search or retrieval that broadly encompasses frameworks, methods and tools that help match a user data need against a collection of datasets. Here, we survey the state of the art of research and commercial systems and discuss what makes dataset search a field in its own right, with unique challenges and open questions. We look at approaches and implementations from related areas dataset search is drawing upon, including information retrieval, databases, entity-centric and tabular search in order to identify possible paths to tackle these questions as well as immediate next steps that will take the field forward.}, language = {en}, number = {1}, journal = {The VLDB Journal}, author = {Chapman, Adriane and Simperl, Elena and Koesten, Laura and Konstantinidis, George and Ibáñez, Luis-Daniel and Kacprzak, Emilia and Groth, Paul}, month = jan, year = {2020}, pages = {251--272}, file = {Chapman et al. - 2020 - Dataset search a survey.pdf:C\:\\Users\\carst\\Zotero\\storage\\G5GKL69D\\Chapman et al. - 2020 - Dataset search a survey.pdf:application/pdf}, } @book{chapman_guide_2008, address = {Copenhagen}, title = {Guide to best practices for generalising sensitive species occurence data: version 1.0}, url = {https://www.gbif.org/document/80512}, language = {en}, publisher = {Global Biodiversity Information Facility}, author = {Chapman, Arthur D. 
and Grafton, Oliver}, year = {2008}, file = {Chapman und Grafton - 2008 - Guide to best practices for generalising sensitive.pdf:C\:\\Users\\carst\\Zotero\\storage\\YNBIQA5A\\Chapman und Grafton - 2008 - Guide to best practices for generalising sensitive.pdf:application/pdf}, } @book{noauthor_frequently_2019, title = {Frequently {Asked} {Questions}: {Can} {I} combine material under different {Creative} {Commons} licenses in my work?}, url = {https://creativecommons.org/faq/#can-i-combine-material-under-different-creative-commons-licenses-in-my-work}, year = {2019}, note = {Publication Title: Creative Commons}, file = {Frequently Asked Questions - Creative Commons:C\:\\Users\\carst\\Zotero\\storage\\QUT8EWU4\\faq.html:text/html}, } @book{noauthor_empfehlungen_2007, title = {Empfehlungen für {Forschungsdaten}, {Tools} und {Metadaten} in der {DARIAH}-{DE} {Infrastruktur}}, url = {https://wiki.de.dariah.eu/pages/viewpage.action?pageId=38080370}, language = {de}, year = {2007}, note = {Publication Title: DARIAH-DE. 2017}, file = {Empfehlungen für Forschungsdaten, Tools und Metadaten in der DARIAH-DE Infrastruktur - DARIAH-DE public - DARIAH Wiki:C\:\\Users\\carst\\Zotero\\storage\\KG6Q392E\\viewpage.html:text/html}, } @book{ershova_software_2018, title = {Software updates: the “unknown unknown” of the replication crisis}, shorttitle = {Software updates}, url = {https://blogs.lse.ac.uk/impactofsocialsciences/2018/06/07/software-updates-the-unknown-unknown-of-the-replication-crisis/}, abstract = {The replication crisis is largely concerned with known problems, such as the lack of replication standards, non-availability of data, or p-hacking. 
One hitherto unknown problem is the potential for…}, language = {en}, author = {Ershova, Anastasia and Schneider, Gerald}, month = jun, year = {2018}, note = {Publication Title: Impact of Social Sciences}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\2AL9RA4T\\software-updates-the-unknown-unknown-of-the-replication-crisis.html:text/html}, } @incollection{faniel_practices_2017, address = {Chicago}, series = {Curating {Research} {Data}}, title = {Practices {Do} {Not} {Make} {Perfect}: {Disciplinary} {Data} {Sharing} and {Reuse} {Practices} and {Their} {Implications} for {Repository} {Data} {Curation}}, volume = {Practical Strategies for Your Digital Repository}, isbn = {978-0-8389-8858-9}, abstract = {As sharing and reusing research data have become commonplace, library, museum, archive, and data repository staff have had to evolve and meet new needs. It is challenging, given the varied data sharing and reuse practices and traditions at play within and across the different research communities. In this book chapter, we discuss the differences among the social science, archaeological, and zoological communities. Based on interviews and observations with 105 researchers reusing data within these disciplines, we also report 1) how they develop trust in the data they reuse and 2) what sources they engage with to gather contextual information about the data.}, language = {en}, number = {1}, booktitle = {Curating {Research} {Data} {Volume} 1: {Practical} {Strategies} for {Your} {Digital} {Repository}}, publisher = {Association of College and Research Libraries Press}, author = {Faniel, Ixchel M. 
and Yakel, Elizabeth}, editor = {Johnston, Lisa R}, year = {2017}, pages = {103--126}, file = {Faniel und Yakel - 2017 - Practices Do Not Make Perfect Disciplinary Data S.pdf:C\:\\Users\\carst\\Zotero\\storage\\Q4R7WMXE\\Faniel und Yakel - 2017 - Practices Do Not Make Perfect Disciplinary Data S.pdf:application/pdf}, } @book{noauthor_cc_2019, title = {{CC} {Wiki}: {Data}}, url = {https://wiki.creativecommons.org/wiki/data}, language = {en}, year = {2019}, note = {Publication Title: Creative Commons}, file = {Data - Creative Commons:C\:\\Users\\carst\\Zotero\\storage\\NU8RB7SP\\data.html:text/html}, } @book{deutsche_initiative_fur_netzwerkinformation_thesen_2018, address = {Göttingen}, title = {Thesen zur {Informations}- und {Kommunikationsinfrastruktur} der {Zukunft}}, copyright = {(CC BY-NC-ND 4.0) Attribution 4.0 International}, url = {https://edoc.hu-berlin.de/handle/18452/19876}, language = {de}, editor = {{Deutsche Initiative für Netzwerkinformation}}, month = apr, year = {2018}, doi = {10.18452/19126}, file = {V - 2018 - Thesen zur Informations- und Kommunikationsinfrast.pdf:C\:\\Users\\carst\\Zotero\\storage\\9857JLSG\\V - 2018 - Thesen zur Informations- und Kommunikationsinfrast.pdf:application/pdf}, } @book{noauthor_open_nodate-5, title = {Open {Data} {Handbook}: {File} {Formats}}, url = {http://opendatahandbook.org/guide/en/appendices/file-formats/}, file = {File Formats:C\:\\Users\\carst\\Zotero\\storage\\VKTTPQQ7\\file-formats.html:text/html}, } @article{erdfelder_zur_2018, title = {Zur {Methodologie} von {Replikationsstudien}}, volume = {69}, issn = {0033-3042}, url = {https://econtent.hogrefe.com/doi/10.1026/0033-3042/a000387}, doi = {10.1026/0033-3042/a000387}, abstract = {Zusammenfassung. Replikationsstudien sind in den empirischen Wissenschaften mit unterschiedlichen Zielen verbunden, abhängig davon, ob wir uns im Kontext der Theorieentwicklung oder im Kontext der Theorieüberprüfung bewegen (Entdeckungs- vs. 
Begründungszusammenhang sensu Reichenbach, 1938). Konzeptuelle Replikationsstudien zielen auf Generalisierung ab und können im Entdeckungszusammenhang nützlich sein. Direkte Replikationsstudien zielen demgegenüber auf den Nachweis der Replizierbarkeit eines bestimmten Forschungsergebnisses unter unabhängigen Bedingungen ab und sind im Begründungszusammenhang unverzichtbar. Ohne die Annahme der direkten Replizierbarkeit wird man sich kaum auf allgemein akzeptierte empirische Tatbestände einigen können, die eine notwendige Voraussetzung für Theorieüberprüfungen in den empirischen Wissenschaften sind. Vor diesem Hintergrund werden Standards für Replikationsstudien vorgeschlagen und begründet. Eine Besonderheit in der Psychologie besteht darin, dass das Replikandum in aller Regel eine statistische Hypothese ist, über die lediglich probabilistisch entschieden werden kann. Dies wirft Folgeprobleme in Bezug auf die Formulierung der Replizierbarkeitshypothese, die Kontrolle statistischer Fehlerwahrscheinlichkeiten bei der Entscheidung über die Replizierbarkeitshypothese, die Bestimmung der zu entdeckenden Effektgröße bei Verzerrung vorliegender Ergebnisse durch Publication Bias, die Festlegung des Stichprobenumfangs und die korrekte Interpretation der Replikationsquote auf, für die Lösungsvorschläge unterbreitet und diskutiert werden.}, number = {1}, journal = {Psychologische Rundschau}, author = {Erdfelder, Edgar and Ulrich, Rolf}, month = jan, year = {2018}, pages = {3--21}, } @article{fecher_uber_2015, title = {Über die {Grenzen} der {Offenheit} in der {Wissenschaft} – {Anspruch} und {Wirklichkeit} bei der {Bereitstellung} und {Nachnutzung} von {Forschungsdaten}}, volume = {66}, issn = {1619-4292, 1434-4653}, doi = {10.1515/iwp-2015-0026}, abstract = {Offene Forschungsdaten tragen zum wissenschaftlichen Fortschritt bei. Sie erlauben es, alte Daten für neue Fragestellungen zu nutzen und Ergebnisse zu überprüfen. 
Es ist kaum verwunderlich, dass politische Entscheidungsträger, Forschungsförderer, Fachzeitschriften und Forschende selbst zunehmend die Bereitstellung und Nachnutzung von Daten fordern. Allerdings wird dies von Forschenden selbst kaum praktiziert. Wir versuchen die mangelnde Bereitschaft, Forschungsdaten bereitzustellen und nachzunutzen einzuordnen und kommen zu dem Schluss, dass es für Forschende kaum Anreize gibt, Daten zu teilen. Die Nachnutzung scheitert zudem häufig an mangelhafter Datendokumentation.}, language = {de}, number = {2-3}, journal = {Information - Wissenschaft \& Praxis}, author = {Fecher, Benedikt and Puschmann, Cornelius}, month = apr, year = {2015}, pages = {146--150}, file = {Fecher und Puschmann - 2015 - Über die Grenzen der Offenheit in der Wissenschaft.pdf:C\:\\Users\\carst\\Zotero\\storage\\RQMVLI3T\\Fecher und Puschmann - 2015 - Über die Grenzen der Offenheit in der Wissenschaft.pdf:application/pdf}, } @article{kelton_trust_2008, title = {Trust in digital information}, volume = {59}, copyright = {Copyright © 2007 Wiley Periodicals, Inc., A Wiley Company}, issn = {1532-2890}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/asi.20722}, doi = {10.1002/asi.20722}, abstract = {Trust in information is developing into a vitally important topic as the Internet becomes increasingly ubiquitous within society. Although many discussions of trust in this environment focus on issues like security, technical reliability, or e-commerce, few address the problem of trust in the information obtained from the Internet. The authors assert that there is a strong need for theoretical and empirical research on trust within the field of information science. As an initial step, the present study develops a model of trust in digital information by integrating the research on trust from the behavioral and social sciences with the research on information quality and human– computer interaction. 
The model positions trust as a key mediating variable between information quality and information usage, with important consequences for both the producers and consumers of digital information. The authors close by outlining important directions for future research on trust in information science and technology.}, language = {en}, number = {3}, journal = {Journal of the American Society for Information Science and Technology}, author = {Kelton, Kari and Fleischmann, Kenneth R. and Wallace, William A.}, year = {2008}, pages = {363--374}, } @book{fedkenhauer_datenaustausch_2017, address = {Düsseldorf}, title = {Datenaustausch als wesentlicher {Bestandteil} der {Digitalisierung}}, url = {https://www.pwc.de/de/digitale-transformation/studie-datenaustausch-digitalisierung.pdf}, language = {de}, author = {Fedkenhauer, Thomas and Fritzsche-Sterr, Yvonne and Nagel, Lars and Pauer, Angelika and Resetko, Aleksei}, editor = {{PricewaterhouseCoopers GmbH}}, year = {2017}, file = {Fedkenhauer et al. - 2017 - Datenaustausch als wesentlicher Bestandteil der Di.pdf:C\:\\Users\\carst\\Zotero\\storage\\UJJ4SZZK\\Fedkenhauer et al. - 2017 - Datenaustausch als wesentlicher Bestandteil der Di.pdf:application/pdf}, } @article{gregory_understanding_2019, title = {Understanding data search as a socio-technical practice:}, volume = {46}, copyright = {CC BY}, shorttitle = {Understanding data search as a socio-technical practice}, url = {https://journals.sagepub.com/doi/10.1177/0165551519837182}, doi = {10.1177/0165551519837182}, abstract = {Open research data are heralded as having the potential to increase effectiveness, productivity and reproducibility in science, but little is known about the actual practices involved in data search. The socio-technical problem of locating data for reuse is often reduced to the technological dimension of designing data search systems. We combine a bibliometric study of the current academic discourse around data search with interviews with data seekers. 
In this article, we explore how adopting a contextual, socio-technical perspective can help to understand user practices and behaviour and ultimately help to improve the design of data discovery systems.}, language = {en}, number = {4}, journal = {Journal of Information Science}, author = {Gregory, Kathleen M. and Cousijn, Helena and Groth, Paul and Scharnhorst, Andrea and Wyatt, Sally}, month = apr, year = {2019}, pages = {459--475}, file = {Gregory et al. - 2019 - Understanding data search as a socio-technical pra.pdf:C\:\\Users\\carst\\Zotero\\storage\\DEA4NQZB\\Gregory et al. - 2019 - Understanding data search as a socio-technical pra.pdf:application/pdf}, } @article{gregory_searching_2019, title = {Searching {Data}: {A} {Review} of {Observational} {Data} {Retrieval} {Practices} in {Selected} {Disciplines}}, volume = {70}, issn = {2330-1643}, shorttitle = {Searching {Data}}, url = {https://asistdl.onlinelibrary.wiley.com/doi/abs/10.1002/asi.24165}, doi = {10.1002/asi.24165}, abstract = {A cross-disciplinary examination of the user behaviors involved in seeking and evaluating data is surprisingly absent from the research data discussion. This review explores the data retrieval literature to identify commonalities in how users search for and evaluate observational research data in selected disciplines. Two analytical frameworks, rooted in information retrieval and science and technology studies, are used to identify key similarities in practices as a first step toward developing a model describing data retrieval.}, language = {en}, number = {5}, journal = {Journal of the Association for Information Science and Technology}, author = {Gregory, Kathleen and Groth, Paul and Cousijn, Helena and Scharnhorst, Andrea and Wyatt, Sally}, year = {2019}, pages = {419--432}, file = {Gregory et al. - 2019 - Searching Data A Review of Observational Data Ret.pdf:C\:\\Users\\carst\\Zotero\\storage\\YW42PQPE\\Gregory et al. 
- 2019 - Searching Data A Review of Observational Data Ret.pdf:application/pdf}, } @book{hoyle_qualitative_2012, title = {A {Qualitative} {Data} {Model} for {DDI}}, url = {https://ddialliance.org/sites/default/files/AQualitativeDataModelForDDI.pdf}, abstract = {The Qualitative Data Model Working Group was established in January 2010 with the charge “To develop a robust XML-based schema for qualitative data exchange (compliant with DDI) and encourage tools development based upon these needs.” This report describes the preliminary model developed by that group via online meetings, and working meetings in Gothenburg (2011) and Bergen (2012). This model, described in UML, was developed to cover three main scenarios: 1.Qualitative data collections needing metadata at the object level only 2.Qualitative data collections where segments of objects need to be delineated and described and where segments of different physical representations of the same logical objects possibly need to be linked3.Qualitative data collections as in the second case where related quantitative data have been generated through techniques such as text mining.}, language = {en}, author = {Hoyle, Larry and {DDI Qualitative Data Working Group}}, year = {2012}, file = {Hoyle und DDI Qualitative Data Working Group - .pdf:C\:\\Users\\carst\\Zotero\\storage\\NSNPH76E\\Hoyle und DDI Qualitative Data Working Group - .pdf:application/pdf}, } @article{kim_experimenting_2018, title = {Experimenting with reproducibility: a case study of robustness in bioinformatics}, volume = {7}, shorttitle = {Experimenting with reproducibility}, url = {https://academic.oup.com/gigascience/article/7/7/giy077/5046609}, doi = {10.1093/gigascience/giy077}, abstract = {Reproducibility has been shown to be limited in many scientific fields. This question is a fundamental tenet of scientific activity, but the related issues of reusability of scientific data are poorly documented. 
Here, we present a case study of our difficulties in reproducing a published bioinformatics method even though code and data were available. First, we tried to re-run the analysis with the code and data provided by the authors. Second, we reimplemented the whole method in a Python package to avoid dependency on a MATLAB license and ease the execution of the code on a high-performance computing cluster. Third, we assessed reusability of our reimplementation and the quality of our documentation, testing how easy it would be to start from our implementation to reproduce the results. In a second section, we propose solutions from this case study and other observations to improve reproducibility and research efficiency at the individual and collective levels. While finalizing our code, we created case-specific documentation and tutorials for the associated Python package StratiPy. Readers are invited to experiment with our reproducibility case study by generating the two confusion matrices (see more in section “Robustness: from MATLAB to Python, language and organization"). Here, we propose two options: a step-by-step process to follow in a Jupyter/IPython notebook or a Docker container ready to be built and run.}, language = {en}, number = {7}, journal = {GigaScience}, author = {Kim, Yang-Min and Poline, Jean-Baptiste and Dumas, Guillaume}, year = {2018}, pages = {1--8}, file = {Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\RR3GGZ74\\Kim et al. 
- 2018 - Experimenting with reproducibility a case study o.pdf:application/pdf}, } @misc{noauthor_open_nodate-6, title = {Open {Data} {Training} {Primers}: {Primer} 5.3: {License} {Stacking}}, url = {https://mozillascience.github.io/open-data-primers/5.3-license-stacking.html}, language = {en}, note = {Publication Title: Mozilla Science Labs}, file = {License Stacking | Welcome to Mozilla Science Lab's Open Data Primers!:C\:\\Users\\carst\\Zotero\\storage\\WHA5VDPZ\\5.3-license-stacking.html:text/html}, } @article{mannheimer_discovery_2016, title = {Discovery and {Reuse} of {Open} {Datasets}: {An} {Exploratory} {Study}}, volume = {5}, issn = {2161-3974}, shorttitle = {Discovery and {Reuse} of {Open} {Datasets}}, url = {https://escholarship.umassmed.edu/jeslib/vol5/iss1/5}, doi = {10.7191/jeslib.2016.1091}, abstract = {Objective: This article analyzes twenty cited or downloaded datasets and the repositories that house them, in order to produce insights that can be used by academic libraries to encourage discovery and reuse of research data in institutional repositories. Methods: Using Thomson Reuters’ Data Citation Index and repository download statistics, we identified twenty cited/downloaded datasets. We documented the characteristics of the cited/downloaded datasets and their corresponding repositories in a self-designed rubric. The rubric includes six major categories: basic information; funding agency and journal information; linking and sharing; factors to encourage reuse; repository characteristics; and data description. Results: Our small-scale study suggests that cited/downloaded datasets generally comply with basic recommendations for facilitating reuse: data are documented well; formatted for use with a variety of software; and shared in established, open access repositories. 
Three significant factors also appear to contribute to dataset discovery: publishing in discipline-specific repositories; indexing in more than one location on the web; and using persistent identifiers. The cited/downloaded datasets in our analysis came from a few specific disciplines, and tended to be funded by agencies with data publication mandates. Conclusions: The results of this exploratory research provide insights that can inform academic librarians as they work to encourage discovery and reuse of institutional datasets. Our analysis also suggests areas in which academic librarians can target open data advocacy in their communities in order to begin to build open data success stories that will fuel future advocacy efforts}, number = {1}, journal = {Journal of eScience Librarianship}, author = {Mannheimer, Sara and Sterman, Leila and Borda, Susan}, month = jul, year = {2016}, pages = {e1091}, file = {Mannheimer et al. - 2016 - Discovery and Reuse of Open Datasets An Explorato.pdf:C\:\\Users\\carst\\Zotero\\storage\\BRRI6DXI\\Mannheimer et al. - 2016 - Discovery and Reuse of Open Datasets An Explorato.pdf:application/pdf}, } @book{national_academy_of_sciences_engineering_and_medicine_reproducibility_2019, title = {Reproducibility and {Replicability} in {Science}}, isbn = {978-0-309-48616-3}, url = {https://www.nap.edu/catalog/25303/reproducibility-and-replicability-in-science}, abstract = {One of the pathways by which the scientific community confirms the validity of a new scientific discovery is by repeating the research that produced it. When a scientific effort fails to independently confirm the computations or results of a previous study, some fear that it may be a symptom of a lack of rigor in science, while others argue that such an observed inconsistency can be an important precursor to new discovery. Concerns about reproducibility and replicability have been expressed in both scientific and popular media. 
As these concerns came to light, Congress requested that the National Academies of Sciences, Engineering, and Medicine conduct a study to assess the extent of issues related to reproducibility and replicability and to offer recommendations for improving rigor and transparency in scientific research. Reproducibility and Replicability in Science defines reproducibility and replicability and examines the factors that may lead to non-reproducibility and non-replicability in research. Unlike the typical expectation of reproducibility between two computations, expectations about replicability are more nuanced, and in some cases a lack of replicability can aid the process of scientific discovery. This report provides recommendations to researchers, academic institutions, journals, and funders on steps they can take to improve reproducibility and replicability in science.}, language = {en}, author = {{National Academy of Sciences, Engineering, and Medicine}}, month = may, year = {2019}, doi = {10.17226/25303}, file = {National Academy of Sciences - 2019 - Reproducibility and Replicability in Science.pdf:C\:\\Users\\carst\\Zotero\\storage\\JPT4URDN\\National Academy of Sciences - 2019 - Reproducibility and Replicability in Science.pdf:application/pdf}, } @article{pasquetto_uses_2019, title = {Uses and {Reuses} of {Scientific} {Data}: {The} {Data} {Creators}’ {Advantage}}, volume = {1}, shorttitle = {Uses and {Reuses} of {Scientific} {Data}}, url = {https://hdsr.mitpress.mit.edu/pub/jduhd7og/release/7}, doi = {10.1162/99608f92.fc14bf2d}, abstract = {Open access to data, as a core principle of open science, is predicated on assumptions that scientific data can be reused by other researchers. We test those assumptions by asking where scientists find reusable data, how they reuse those data, and how they interpret data they did not collect themselves. 
By conducting a qualitative meta-analysis of evidence on two long-term, distributed, interdisciplinary consortia, we found that scientists frequently sought data from public collections and from other researchers for comparative purposes such as “ground-truthing” and calibration. When they sought others’ data for reanalysis or for combining with their own data, which was relatively rare, most preferred to collaborate with the data creators. We propose a typology of data reuses ranging from comparative to integrative. Comparative data reuse requires interactional expertise, which involves knowing enough about the data to assess their quality and value for a specific comparison such as calibrating an instrument in a lab experiment. Integrative reuse requires contributory expertise, which involves the ability to perform the action, such as reusing data in a new experiment. Data integration requires more specialized scientific knowledge and deeper levels of epistemic trust in the knowledge products. Metadata, ontologies, and other forms of curation benefit interpretation for any kind of data reuse. Based on these findings, we theorize the data creators’ advantage, that those who create data have intimate and tacit knowledge that can be used as barter to form collaborations for mutual advantage. Data reuse is a process that occurs within knowledge infrastructures that evolve over time, encompassing expertise, trust, communities, technologies, policies, resources, and institutions. (See Supplementary Materials {\textbackslash}textlesshttps://hdsr.mitpress.mit.edu/pub/tn4j86t1{\textbackslash}textgreater for methodological and other details, including a full bibliography.)}, language = {en}, number = {2}, journal = {Harvard Data Science Review}, author = {Pasquetto, Irene V. and Borgman, Christine L. and Wofford, Morgan F.}, month = nov, year = {2019}, file = {Pasquetto et al. 
- 2019 - Uses and Reuses of Scientific Data The Data Creat.pdf:C\:\\Users\\carst\\Zotero\\storage\\4V4XW8KN\\Pasquetto et al. - 2019 - Uses and Reuses of Scientific Data The Data Creat.pdf:application/pdf}, } @techreport{noauthor_empfehlungen_2012, address = {Berlin}, title = {Empfehlungen zur {Weiterentwicklung} der wissenschaftlichen {Informationsinfrastrukturen} in {Deutschland} bis 2020}, url = {https://www.wissenschaftsrat.de/download/archiv/2359-12.pdf}, language = {de}, number = {Drs. 2359-12}, institution = {Wissenschaftsrat}, year = {2012}, pages = {90}, file = {2012 - Empfehlungen zur Weiterentwicklung der wissenschaf.pdf:C\:\\Users\\carst\\Zotero\\storage\\8IHM8TNS\\2012 - Empfehlungen zur Weiterentwicklung der wissenschaf.pdf:application/pdf}, } @article{zimmerman_not_2007, title = {Not by metadata alone: the use of diverse forms of knowledge to locate data for reuse}, volume = {7}, issn = {1432-1300}, shorttitle = {Not by metadata alone}, url = {https://doi.org/10.1007/s00799-007-0015-8}, doi = {10.1007/s00799-007-0015-8}, abstract = {An important set of challenges for eScience initiatives and digital libraries concern the need to provide scientists with the ability to access data from multiple sources. This paper argues that an analysis of scientists‘ reuse of data prior to the advent of eScience can illuminate the requirements and design of digital libraries and cyberinfrastructure. As part of a larger study on data sharing and reuse, I investigated the processes by which ecologists locate data that were initially collected by others. Ecological data are unusually complex and present daunting problems of interpretation and analysis that must be considered in the design of cyberinfrastructure. The ecologists that I interviewed found ways to overcome many of these difficulties. 
One part of my results shows that ecologists use formal and informal knowledge that they have gained through disciplinary training and through their own data-gathering experiences to help them overcome hurdles related to finding, acquiring, and validating data collected by others. A second part of my findings reveals that ecologists rely on formal notions of scientific practice that emphasize objectivity to justify the methods they use to collect data for reuse. I discuss the implications of these findings for digital libraries and eScience initiatives.}, language = {en}, number = {1}, journal = {International Journal on Digital Libraries}, author = {Zimmerman, Ann}, month = oct, year = {2007}, pages = {5--16}, } @article{zimmerman_new_2008, title = {New {Knowledge} from {Old} {Data}: {The} {Role} of {Standards} in the {Sharing} and {Reuse} of {Ecological} {Data}}, volume = {33}, shorttitle = {New {Knowledge} from {Old} {Data}}, url = {https://journals.sagepub.com/doi/10.1177/0162243907306704}, doi = {10.1177/0162243907306704}, abstract = {This article analyzes the experiences of ecologists who used data they did not collect themselves. Specifically, the author examines the processes by which ecologists understand and assess the quality of the data they reuse, and investigates the role that standard methods of data collection play in these processes. Standardization is one means by which scientific knowledge is transported from local to public spheres. While standards can be helpful, the results show that knowledge of the local context is critical to ecologists' reuse of data. Yet, this information is often left behind as data move from the private to the public world. The knowledge that ecologists acquire through fieldwork enables them to recover the local details that are so critical to their comprehension of data collected by others. 
Social processes also play a role in ecologists' efforts to judge the quality of data they reuse.}, language = {en}, number = {5}, journal = {Science, Technology, \& Human Values}, author = {Zimmerman, Ann S.}, month = sep, year = {2008}, pages = {631--652}, } @article{pasquetto_reuse_2017, title = {On the {Reuse} of {Scientific} {Data}}, volume = {16}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/10.5334/dsj-2017-008/}, doi = {10.5334/dsj-2017-008}, abstract = {While science policy promotes data sharing and open data, these are not ends in themselves. Arguments for data sharing are to reproduce research, to make public assets available to the public, to leverage investments in research, and to advance research and innovation. To achieve these expected benefits of data sharing, data must actually be reused by others. Data sharing practices, especially motivations and incentives, have received far more study than has data reuse, perhaps because of the array of contested concepts on which reuse rests and the disparate contexts in which it occurs. Here we explicate concepts of data, sharing, and open data as a means to examine data reuse. We explore distinctions between use and reuse of data. Lastly we propose six research questions on data reuse worthy of pursuit by the community: How can uses of data be distinguished from reuses? When is reproducibility an essential goal? When is data integration an essential goal? What are the tradeoffs between collecting new data and reusing existing data? How do motivations for data collection influence the ability to reuse data? How do standards and formats for data release influence reuse opportunities? 
We conclude by summarizing the implications of these questions for science policy and for investments in data reuse.}, language = {eng}, journal = {Data Science Journal}, author = {Pasquetto, Irene and Randles, Bernadette and Borgman, Christine}, month = mar, year = {2017}, pages = {1--9}, file = {Pasquetto et al. - 2017 - On the Reuse of Scientific Data.pdf:C\:\\Users\\carst\\Zotero\\storage\\82BHJ4Y4\\Pasquetto et al. - 2017 - On the Reuse of Scientific Data.pdf:application/pdf}, } @book{fowler_concise_1995, address = {Oxford}, edition = {9. Auflage}, title = {The concise {Oxford} dictionary of current {English}}, isbn = {0-19-861319-9 0-19-861320-2}, publisher = {Clarendon Press}, editor = {Fowler, Henry W. and Thompson, Della}, year = {1995}, } @book{lemaire_betriebs-und_2019, address = {Trier}, title = {Das {Betriebs}-und {Geschäftsmodell} der {Virtuellen} {Forschungsumgebung} {FuD}}, copyright = {CC BY 4.0}, url = {https://www.forschungsdaten.org/images/e/e5/01-Lemaire-FuD-Geschaeftsmodell.pdf}, language = {de}, author = {Lemaire, Marina}, month = jun, year = {2019}, file = {Lemaire - 2019 - Das Betriebs-und Geschäftsmodell der Virtuellen Fo.pdf:C\:\\Users\\carst\\Zotero\\storage\\VHGV8FGB\\Lemaire - 2019 - Das Betriebs-und Geschäftsmodell der Virtuellen Fo.pdf:application/pdf}, } @book{neuroth_aktives_2018-1, address = {Berlin}, title = {Aktives {Forschungsdatenmanagement}: {Das} {DFG}-{Projekt} {Research} {Data} {Management} {Organiser} ({RDMO})}, url = {https://opus4.kobv.de/opus4-bib-info/frontdoor/index/index/docId/3688}, abstract = {Forschungsdatenmanagement (FDM) und damit einhergehend Forschungsdatenmanagementpläne (DMP) nehmen national und international an Bedeutung zu. 
So verlangen verschiedene Förderorganisationen wie die National Science Foundation (USA), der Schweizerische Nationalfonds (SNF), die Deutsche Forschungsgemeinschaft sowie die Europäische Kommission mit Horizon 2020 (H2020) bereits bei Projektanträgen Auskunft über den Umgang mit den nachgenutzten oder erstellten Forschungsdaten. Auch beschäftigen sich zahlreiche Initiativen wie zum Beispiel international die Research Data Alliance (RDA) oder in Deutschland die DINI/nestor-Arbeitsgruppe Forschungsdaten mit dem Thema. International setzt sich dabei die Erkenntnis durch, dass es im Umgang mit Forschungsdaten nicht mit einem einmaligen Erstellen eines Forschungsdatenmanagementplans getan ist, sondern dass sich die Pläne aktiv und dynamisch dem Verlauf des Forschungsprozesses anpassen und entsprechend aktualisiert werden müssen. Darüber hinaus sollen DMP für verschiedene Bedarfe zur Verfügung gestellt werden, um die spezifischen Sichtweisen unterschiedlicher Beteiligter abbilden zu können. Ein DMP-Werkzeug, das solche Pläne unterstützt, kann so etwa verschiedene Bedarfserhebungen (z.B. Speicher-Bedarf, Kosten etc.) für unterschiedliche Rollen (z.B. Forschungsreferat, Projektleitung, IT-Support) ermöglichen. Der Research Data Management Organiser (RDMO) ist ein solches Werkzeug, das im Rahmen eines DFG-Projektes entwickelt und mit Hilfe unterschiedlichen Zielgruppen getestet wurde. 
Das RDMO-Tool ist multilingual, flexibel an Community- und Organisationsanforderungen anpassbar, als eigene RDMO-Instanz nachnutzbar und unterstützt verschiedene Aufgaben wie zum Beispiel unterschiedliche Export-Funktionen oder die Erledigung zeitlich gebundener Tasks.}, language = {de}, author = {Neuroth, Heike and Engelhardt, Claudia and Vierheller, Janine}, year = {2018}, note = {Type: Konferenzfolien}, file = {Neuroth und Engelhardt - 2018 - Aktives Forschungsdatenmanagement - das DFG-Projek.pdf:C\:\\Users\\carst\\Zotero\\storage\\M69FBGUK\\Neuroth und Engelhardt - 2018 - Aktives Forschungsdatenmanagement - das DFG-Projek.pdf:application/pdf}, } @inproceedings{gartner_preserving_2018, address = {Miyazaki, Japan}, title = {Preserving {Workflow} {Reproducibility}: {The} {RePlay}-{DH} {Client} as a {Tool} for {Process} {Documentation}}, isbn = {979-10-95546-00-9}, url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/707.pdf}, abstract = {In this paper we present a software tool for elicitation and management of process metadata. It follows our previously publisheddesign idea of an assistant for researchers that aims at minimizing the additional effort required for producing a sustainable workflowdocumentation. With the ever-growing number of linguistic resources available, it also becomes increasingly important to provideproper documentation to make them comparable and to allow meaningful evaluations for specific use cases. The often prevailingpractice of post hoc documentation of resource generation or research processes bears the risk of information loss. Not only doesdetailed documentation of a process aid in achieving reproducibility, it also increases usefulness of the documented work for others asa cornerstone of good scientific practice. Time pressure together with the lack of simple documentation methods leads to workflowdocumentation in practice being an arduous and often neglected task. 
Our tool ensures a clean documentation for common workflows in natural language processing and digital humanities. Additionally, it can easily be integrated into existing institutional infrastructure.}, language = {en}, booktitle = {Proceedings of the {Eleventh} {International} {Conference} on {Language} {Resources} and {Evaluation} ({LREC} 2018)}, publisher = {European Language Resources Association (ELRA)}, author = {Gärtner, Markus and Hahn, Uli and Hermann, Sibylle}, year = {2018}, file = {Gärtner et al. - 2018 - Preserving Workflow Reproducibility The RePlay-DH.pdf:C\:\\Users\\carst\\Zotero\\storage\\BQNDAW9P\\Gärtner et al. - 2018 - Preserving Workflow Reproducibility The RePlay-DH.pdf:application/pdf}, } @inproceedings{schembera_genesis_2019, address = {Cham}, series = {Communications in {Computer} and {Information} {Science}}, title = {The {Genesis} of {EngMeta} - {A} {Metadata} {Model} for {Research} {Data} in {Computational} {Engineering}}, isbn = {978-3-030-14401-2}, doi = {10.1007/978-3-030-14401-2_12}, abstract = {In computational engineering, numerical simulations produce huge amounts of data. To keep this research data findable, accessible, inter-operable and reusable, a structured description of the data is indispensable. This paper outlines the genesis of EngMeta – a metadata model designed to describe engineering simulation data with a focus on thermodynamics and aerodynamics. The metadata model, developed in close collaboration with engineers, is based on existing standards and adds discipline-specific information as the main contribution. Characteristics of the observed system offer researchers important search criteria. Information on the hardware and software used and the processing steps involved helps to understand and replicate the data. 
Such metadata are crucial to keeping the data FAIR and bridging the gap to a sustainable research data management in computational engineering.}, language = {en}, booktitle = {Metadata and {Semantic} {Research}}, publisher = {Springer International Publishing}, author = {Schembera, Björn and Iglezakis, Dorothea}, editor = {Garoufallou, Emmanouel and Sartori, Fabio and Siatri, Rania and Zervas, Marios}, year = {2019}, pages = {127--132}, } @article{hess_synergieeffekte_2019, title = {Synergieeffekte durch {Kooperation}: {Hintergründe}, {Aufgaben} und {Potentiale} des {Projekts} {FoDaKo}}, volume = {43}, issn = {0341-4183, 1865-7648}, shorttitle = {Synergieeffekte durch {Kooperation}}, url = {https://www.degruyter.com/view/journals/bfup/43/1/article-p98.xml}, doi = {10.1515/bfp-2019-2009}, abstract = {Vor dem Hintergrund der zunehmenden Bedeutung von Open-Data-Ansätzen und neuen Anforderungen an das Forschungsdatenmanagement (FDM) vonseiten der Förderinstitutionen, streben Forschende und Institutionen einen Wandel bislang praktizierter Vorgehensweisen beim FDM an. In einer universitären Kooperation von drei Rechenzentren und drei Universitätsbibliotheken zielt das Projekt Forschungsdatenmanagement in Kooperation (FoDaKo)1 darauf ab, Synergien nutzbar zu machen und Best Practices für das FDM zu entwickeln und auszutauschen. Der Beitrag gibt einen Einblick in Struktur, Maßnahmen und bislang gesammelte Erfahrungen eines Projekts zur Erforschung des Managements von Forschungsdaten in ihrem Lebenszyklus an den Universitäten Düsseldorf, Siegen und Wuppertal und ermöglicht Dritten damit die Adaption von Ansätzen für ein möglichst kosteneffizientes und nachhaltiges FDM.}, language = {de}, number = {1}, journal = {Bibliothek Forschung und Praxis}, author = {Hess, Volker and Rekowski, Thomas von and Roller, Sabine and Walger, Nicole}, month = apr, year = {2019}, pages = {98--104}, file = {Hess et al. 
- 2019 - Synergieeffekte durch Kooperation Hintergründe, A.pdf:C\:\\Users\\carst\\Zotero\\storage\\9S72K7C8\\Hess et al. - 2019 - Synergieeffekte durch Kooperation Hintergründe, A.pdf:application/pdf}, } @article{kraft_radar_2016, title = {The {RADAR} {Project} - {A} {Service} for {Research} {Data} {Archival} and {Publication}}, volume = {5}, copyright = {CC BY}, url = {https://doi.org/10.3390/ijgi5030028}, doi = {10.3390/ijgi5030028}, abstract = {The aim of the RADAR (Research Data Repository) project is to set up and establish an infrastructure that facilitates research data management: the infrastructure will allow researchers to store, manage, annotate, cite, curate, search and find scientific data in a digital platform available at any time that can be used by multiple (specialized) disciplines. While appropriate and innovative preservation strategies and systems are in place for the big data communities (e.g., environmental sciences, space, and climate), the stewardship for many other disciplines, often called the “long tail research domains”, is uncertain. Funded by the German Research Foundation (DFG), the RADAR collaboration project develops a service oriented infrastructure for the preservation, publication and traceability of (independent) research data. The key aspect of RADAR is the implementation of a two-stage business model for data preservation and publication: clients may preserve research results for up to 15 years and assign well-graded access rights, or to publish data with a DOI assignment for an unlimited period of time. Potential clients include libraries, research institutions, publishers and open platforms that desire an adaptable digital infrastructure to archive and publish data according to their institutional requirements and workflows}, number = {3}, journal = {ISPRS Int. J. 
Geo Inf.}, author = {Kraft, Angelina and Razum, Matthias and Potthoff, Jan and Porzel, Andrea and Engel, Thomas and Lange, Frank and van den Broek, Karina and Furtado, Filipe}, year = {2016}, pages = {28}, file = {Kraft et al. - 2016 - The RADAR Project - A Service for Research Data Ar.pdf:C\:\\Users\\carst\\Zotero\\storage\\2LTQA3FH\\Kraft et al. - 2016 - The RADAR Project - A Service for Research Data Ar.pdf:application/pdf}, } @techreport{lauberronsberg_gutachten_2018, title = {Gutachten zu den rechtlichen {Rahmenbedingungen} des {Forschungsdatenmanagements}}, copyright = {CC BY SA}, url = {https://tu-dresden.de/gsw/jura/igetem/jfbimd13/ressourcen/dateien/dateien/DataJus/DataJus_Zusammenfassung_Gutachten_12-07-18.pdf?lang=de}, author = {Lauber-Rönsberg, Anne and Krahn, Philipp and Baumann, Paul}, month = jul, year = {2018}, pages = {20}, file = {Gutachten zu den rechtlichen Rahmenbedingungen des.pdf:C\:\\Users\\carst\\Zotero\\storage\\W66BULUQ\\Gutachten zu den rechtlichen Rahmenbedingungen des.pdf:application/pdf}, } @article{grunzke_masi_2019, title = {The {MASi} repository service — {Comprehensive}, metadata-driven and multi-community research data management}, volume = {94}, issn = {0167-739X}, url = {http://www.sciencedirect.com/science/article/pii/S0167739X17305344}, doi = {10.1016/j.future.2017.12.023}, abstract = {Nowadays, the daily work of many research communities is characterized by an increasing amount and complexity of data. This makes it increasingly difficult to manage, access and utilize the data to ultimately gain scientific insights based on it. At the same time, domain scientists want to focus on their science instead of IT. The solution is research data management to store data in a structured way enabling easy discovery for future reference and usage. An integral part is the use of metadata. With it, data becomes accessible by its content and context instead of its name and location only. 
The use of metadata shall be as automatic and seamless as possible in order to foster a high usability. Here, we present the architecture and developments of the Metadata Management for Applied Sciences (MASi) project that is currently building a comprehensive research data management service. MASi extends the existing KIT Data Manager framework by a generic metadata programming interface and a generic graphical web interface. Furthermore, MASi is OAI compliant and supports the OAI-PMH protocol while providing support for provenance information using ProvONE, a well-established and accepted provenance model. To illustrate the practical applicability of the MASi service, we present the adoption of initial use cases within geography, chemistry and digital humanities. The MASi research data management service is currently being prepared to go into production to satisfy the complex and varying requirements in an efficient, useable and sustainable way.}, language = {en}, journal = {Future Generation Computer Systems}, author = {Grunzke, Richard and Hartmann, Volker and Jejkal, Thomas and Kollai, Helen and Prabhune, Ajinkya and Herold, Hendrik and Deicke, Aline and Dressler, Christiane and Dolhoff, Julia and Stanek, Julia and Hoffmann, Alexander and Müller-Pfefferkorn, Ralph and Schrade, Torsten and Meinel, Gotthard and Herres-Pawlis, Sonja and Nagel, Wolfgang E.}, month = may, year = {2019}, pages = {879--894}, file = {ScienceDirect Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\WD4R4M94\\Grunzke et al. - 2019 - The MASi repository service — Comprehensive, metad.pdf:application/pdf}, } @article{diepenbroek_pangaeainformation_2002, series = {Shareware and freeware in the {Geosciences} {II}. 
{A} special issue in honour of {John} {Butler}}, title = {{PANGAEA}—an information system for environmental sciences}, volume = {28}, issn = {0098-3004}, url = {http://www.sciencedirect.com/science/article/pii/S0098300402000390}, doi = {10.1016/S0098-3004(02)00039-0}, abstract = {PANGAEA is an information system for processing, long-term storage, and publication of georeferenced data related to earth science fields. Essential services supplied by PANGAEA are project data management and the distribution of visualization and analysis software. Organization of data management includes quality control and publication of data and the dissemination of metadata according to international standards. Data managers are responsible for acquisition and maintenance of data. The data model used reflect the information processing steps in the earth science fields and can handle any related analytical data. The basic technical structure corresponds to a three tiered client/server architecture with a number of comprehensive clients and middleware components controlling the information flow and quality. On the server side a relational database management system (RDBMS) is used for information storage. The web-based clients include a simple search engine (PangaVista) and a data mining tool (ART). The client used for maintenance of information contents is optimized for data management purposes. Analysis and visualization of metainformation and analytical data is supported by a number of software tools, which can either be used as ‘plug-ins’ of the PANGAEA clients or as standalone applications, distributed as freeware from the PANGAEA website. Established and well-documented software tools are the mini-GIS PanMap, the plotting tool PanPlot, and Ocean Data View (ODV) for the exploration of oceanographic data. PANGAEA operates on a long-term basis. 
The available resources are sufficient not only for the acquisition of new data and the maintenance of the system but also for further technical and organizational developments.}, language = {en}, number = {10}, journal = {Computers \& Geosciences}, author = {Diepenbroek, Michael and Grobe, Hannes and Reinke, Manfred and Schindler, Uwe and Schlitzer, Reiner and Sieger, Rainer and Wefer, Gerold}, month = dec, year = {2002}, pages = {1201--1210}, file = {Diepenbroek et al. - 2002 - PANGAEA—an information system for environmental sc.pdf:C\:\\Users\\carst\\Zotero\\storage\\LDW98V4D\\Diepenbroek et al. - 2002 - PANGAEA—an information system for environmental sc.pdf:application/pdf}, } @article{kaminski_institutionelle_2018, title = {Das institutionelle {Forschungsdatenrepositorium} {FDAT} der {Universität} {Tübingen}}, volume = {5}, issn = {2363-9814}, url = {https://www.o-bib.de/article/view/5324}, doi = {10.5282/o-bib/2018H3S61-75}, abstract = {Das eScience-Center der Universität Tübingen bietet mit dem Forschungsdatenrepositorium FDAT lokalen Forschungsprojekten und Forschenden diverse Dienstleistungen sowie die nötige technische Ausstattung für die Langzeitarchivierung und Nachnutzung ihrer Forschungsdaten an. Dabei folgt FDAT den Richtlinien eines offenen Archivinformationssystems OAIS und wurde von unabhängiger Stelle zertifiziert. Ziel ist es, wissenschaftliche Daten sicher aufzubewahren und der breiten Öffentlichkeit nach Möglichkeit Open Access5 zur Verfügung zu stellen. Darüber hinaus sollen Wissenschaftlerinnen und Wissenschaftler in allen Phasen des Lebenszyklus ihrer Forschungsdaten durch die Betreiber des Repositoriums beraten und technisch unterstützt werden. Das Repositorium wird legitimiert durch die von der Universität Tübingen verabschiedeten Leitlinien zum Umgang mit Forschungsdaten, und neben dem eScience-Center konkret betreut durch die Universitätsbibliothek Tübingen und das ansässige Zentrum für Datenverarbeitung. 
Das Repositorium hat am 01.01.2017 seinen produktiven Betrieb aufgenommen und hält derzeit (15.08.2018) 6741 digitale Objekte vor.}, language = {de}, number = {3}, journal = {o-bib. Das offene Bibliotheksjournal}, author = {Kaminski, Steve and Brandt, Olaf}, month = sep, year = {2018}, pages = {61--75}, file = {Kaminski und Brandt - 2018 - Das institutionelle Forschungsdatenrepositorium FD.pdf:C\:\\Users\\carst\\Zotero\\storage\\4A7NCHIT\\Kaminski und Brandt - 2018 - Das institutionelle Forschungsdatenrepositorium FD.pdf:application/pdf}, } @article{lee_aimq_2002, title = {{AIMQ}: a methodology for information quality assessment}, volume = {40}, issn = {0378-7206}, shorttitle = {{AIMQ}}, url = {http://www.sciencedirect.com/science/article/pii/S0378720602000435}, doi = {10.1016/S0378-7206(02)00043-5}, abstract = {Information quality (IQ) is critical in organizations. Yet, despite a decade of active research and practice, the field lacks comprehensive methodologies for its assessment and improvement. Here, we develop such a methodology, which we call AIM quality (AIMQ) to form a basis for IQ assessment and benchmarking. The methodology is illustrated through its application to five major organizations. The methodology encompasses a model of IQ, a questionnaire to measure IQ, and analysis techniques for interpreting the IQ measures. We develop and validate the questionnaire and use it to collect data on the status of organizational IQ. These data are used to assess and benchmark IQ for four quadrants of the model. These analysis techniques are applied to analyze the gap between an organization and best practices. They are also applied to analyze gaps between IS professionals and information consumers. The results of the techniques are useful for determining the best area for IQ improvement activities.}, language = {en}, number = {2}, journal = {Information \& Management}, author = {Lee, Yang W. and Strong, Diane M. and Kahn, Beverly K. 
and Wang, Richard Y.}, month = dec, year = {2002}, pages = {133--146}, } @techreport{meyer_impacts_2016, address = {Rochester, NY}, type = {{SSRN} {Scholarly} {Paper}}, title = {The {Impacts} of {Digital} {Collections}: {Early} {English} {Books} {Online} \& {House} of {Commons} {Parliamentary} {Papers}}, copyright = {CC BY-NC}, shorttitle = {The {Impacts} of {Digital} {Collections}}, url = {https://papers.ssrn.com/abstract=2740299}, abstract = {In 2015, in cooperation with ProQuest, Jisc commissioned this study of the Impacts of Digital Collections focused on two particular collections: Early English Books Online (EEBO) and House of Commons Parliamentary Papers (HCPP). These two collections are just a fraction of the number of collections that Jisc has purchased on behalf of its member institutions. While an understanding of these two collections is not necessarily generalizable to all digital collections (or even all Jisc-provided collections), they were selected because they are both relatively mature in the sense of having been available to users for over a decade, were thought to be well-known in the research community, and also appeal to users from multiple disciplines.}, language = {en}, number = {ID 2740299}, institution = {Social Science Research Network}, author = {Meyer, Eric T. and Eccles, Kathryn}, month = mar, year = {2016}, doi = {10.2139/ssrn.2740299}, pages = {56}, file = {Meyer und Eccles - 2016 - The Impacts of Digital Collections Early English .pdf:C\:\\Users\\carst\\Zotero\\storage\\46DINMW9\\Meyer und Eccles - 2016 - The Impacts of Digital Collections Early English .pdf:application/pdf}, } @article{fowler_frictionless_2018, title = {Frictionless {Data}: {Making} {Research} {Data} {Quality} {Visible}}, volume = {12}, issn = {1746-8256}, shorttitle = {Frictionless {Data}}, url = {http://www.ijdc.net/article/view/577}, doi = {10.2218/ijdc.v12i2.577}, abstract = {There is significant friction in the acquisition, sharing, and reuse of research data. 
It is estimated that eighty percent of data analysis is invested in the cleaning and mapping of data (Dasu and Johnson,2003). This friction hampers researchers not well versed in data preparation techniques from reusing an ever-increasing amount of data available within research data repositories. Frictionless Data is an ongoing project at Open Knowledge International focused on removing this friction. We are doing this by developing a set of tools, specifications, and best practices for describing, publishing, and validating data. The heart of this project is the “Data Package”, a containerization format for data based on existing practices for publishing open source software. This paper will report on current progress toward that goal.}, language = {en}, number = {2}, journal = {International Journal of Digital Curation}, author = {Fowler, Dan and Barratt, Jo and Walsh, Paul}, month = may, year = {2018}, pages = {274--285}, file = {Fowler et al. - 2018 - Frictionless Data Making Research Data Quality Vi.pdf:C\:\\Users\\carst\\Zotero\\storage\\QTPZK8FL\\Fowler et al. - 2018 - Frictionless Data Making Research Data Quality Vi.pdf:application/pdf}, } @inproceedings{gavrilis_measuring_2015, address = {Cham}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Measuring {Quality} in {Metadata} {Repositories}}, isbn = {978-3-319-24592-8}, doi = {10.1007/978-3-319-24592-8_5}, abstract = {The need for good quality metadata records becomes a necessity given the large quantities of digital content that is available through digital repositories and the increasing number of web services that use this content. The context in which metadata are generated and used affects the problem in question and therefore a flexible metadata quality evaluation model that can be easily and widely used has yet to be presented. 
This paper proposes a robust multidimensional metadata quality evaluation model that measures metadata quality based on five metrics and by taking into account contextual parameters concerning metadata generation and use. An implementation of this metadata quality evaluation model is presented and tested against a large number of real metadata records from the humanities domain and for different applications.}, language = {en}, booktitle = {Research and {Advanced} {Technology} for {Digital} {Libraries}}, publisher = {Springer International Publishing}, author = {Gavrilis, Dimitris and Makri, Dimitra-Nefeli and Papachristopoulos, Leonidas and Angelis, Stavros and Kravvaritis, Konstantinos and Papatheodorou, Christos and Constantopoulos, Panos}, editor = {Kapidakis, Sarantos and Mazurek, Cezary and Werla, Marcin}, year = {2015}, pages = {56--67}, } @inproceedings{suominen_improving_2012, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Improving the {Quality} of {SKOS} {Vocabularies} with {Skosify}}, isbn = {978-3-642-33876-2}, doi = {10.1007/978-3-642-33876-2_34}, abstract = {Simple Knowledge Organization System (SKOS) vocabularies are commonly used to represent lightweight conceptual vocabularies such as taxonomies, classifications and thesauri on the Web of Data. We identified 11 criteria for evaluating the validity and quality of SKOS vocabularies. We then analyzed 14 such vocabularies against the identified criteria and found most of them to contain structural errors. 
Our tool, Skosify, can be used to automatically validate SKOS vocabularies and correct many problems, helping to improve their quality and validity.}, language = {en}, booktitle = {Knowledge {Engineering} and {Knowledge} {Management}}, publisher = {Springer}, author = {Suominen, Osma and Hyvönen, Eero}, editor = {ten Teije, Annette and Völker, Johanna and Handschuh, Siegfried and Stuckenschmidt, Heiner and d’Acquin, Mathieu and Nikolov, Andriy and Aussenac-Gilles, Nathalie and Hernandez, Nathalie}, year = {2012}, pages = {383--397}, } @article{strezoski_omniart_2017, title = {{OmniArt}: {Multi}-task {Deep} {Learning} for {Artistic} {Data} {Analysis}}, shorttitle = {{OmniArt}}, url = {http://arxiv.org/abs/1708.00684}, abstract = {Vast amounts of artistic data is scattered on-line from both museums and art applications. Collecting, processing and studying it with respect to all accompanying attributes is an expensive process. With a motivation to speed up and improve the quality of categorical analysis in the artistic domain, in this paper we propose an efficient and accurate method for multi-task learning with a shared representation applied in the artistic domain. We continue to show how different multi-task configurations of our method behave on artistic data and outperform handcrafted feature approaches as well as convolutional neural networks. 
In addition to the method and analysis, we propose a challenge like nature to the new aggregated data set with almost half a million samples and structured meta-data to encourage further research and societal engagement.}, journal = {arXiv:1708.00684 [cs]}, author = {Strezoski, Gjorgji and Worring, Marcel}, month = aug, year = {2017}, file = {arXiv Fulltext PDF:C\:\\Users\\carst\\Zotero\\storage\\2S2IMMZC\\Strezoski und Worring - 2017 - OmniArt Multi-task Deep Learning for Artistic Dat.pdf:application/pdf}, } @inproceedings{kiraly_validating_2019, address = {New York, NY, USA}, series = {{DATeCH2019}}, title = {Validating 126 million {MARC} records}, isbn = {978-1-4503-7194-0}, url = {https://doi.org/10.1145/3322905.3322929}, doi = {10.1145/3322905.3322929}, abstract = {The paper describes the method and results of validation of 14 library catalogues. The format of the catalog record is Machine Readable Catalog (MARC21) which is the most popular metadata standards for describing books. The research investigates the structural features of the record and as a result finds and classifies different commonly found issues. 
The most frequent issue types are usage of undocumented schema elements, then improper values in places where a value should be taken from a dictionary, or should match to other strict requirements.}, booktitle = {Proceedings of the 3rd {International} {Conference} on {Digital} {Access} to {Textual} {Cultural} {Heritage}}, publisher = {Association for Computing Machinery}, author = {Király, Péter}, month = may, year = {2019}, pages = {161--168}, } @inproceedings{manguinhas_quality_2006, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Quality {Control} of {Metadata}: {A} {Case} with {UNIMARC}}, isbn = {978-3-540-44638-5}, shorttitle = {Quality {Control} of {Metadata}}, doi = {10.1007/11863878_21}, abstract = {UNIMARC is a family of bibliographic metadata schemas with formats for descriptive information, classification, authorities and holdings. This paper describes the automation of quality control processes required in order to monitor and enforce quality of UNIMARC records. The results are accomplished by format schemas expressed in XML. This paper also describes the tools that take advantage of this technology to support the quality control processes, as also its actual applications in services at the National Library of Portugal.}, language = {en}, booktitle = {Research and {Advanced} {Technology} for {Digital} {Libraries}}, publisher = {Springer}, author = {Manguinhas, Hugo and Borbinha, José}, editor = {Gonzalo, Julio and Thanos, Costantino and Verdejo, M. 
Felisa and Carrasco, Rafael C.}, year = {2006}, pages = {244--255}, } @article{gueguen_metadata_2019, title = {Metadata quality at scale: {Metadata} quality control at the {Digital} {Public} {Library} of {America}}, volume = {7}, shorttitle = {Metadata quality at scale}, url = {https://www.ingentaconnect.com/content/hsp/jdmm/2019/00000007/00000002/art00003}, abstract = {The Digital Public Library of America (DPLA) began aggregating data in 2012 and launched its public interface and website in April 2013. That initial set of 2 million records from 16 providers (some of which represented state or community-based aggregations themselves) has since grown to more than 20 million records from 40 providers, who collectively represent around 3,000 individual institutions across the USA. Over the last five years, work on metadata quality at DPLA has shown that to make good decisions about content, coherence and conformance to standards, providers must understand the context of the aggregation with which their records are being shared. This paper reviews the existing literature on metadata quality analysis, and provides an analysis of the metadata quality initiatives at DPLA. DPLA’s work shows that it is more effective to use a combination of automated and community-driven methods to improve data quality than to use either approach in isolation.}, number = {2}, journal = {Journal of Digital Media Management}, author = {Gueguen, Gretchen}, month = jan, year = {2019}, pages = {115--126}, } @incollection{bruce_continuum_2004, title = {The {Continuum} of {Metadata} {Quality}: {Defining}, {Expressing}, {Exploiting}}, isbn = {978-0-8389-0882-2}, shorttitle = {The {Continuum} of {Metadata} {Quality}}, url = {https://ecommons.cornell.edu/handle/1813/7895}, abstract = {Like pornography, metadata quality is difficult to define. We know it when we see it, but conveying the full bundle of assumptions and experience that allow us to identify it is a different matter. 
For this reason, among others, few outside the library community have written about defining metadata quality. Still less has been said about enforcing quality in ways that do not require unacceptable levels of human effort.}, language = {en}, booktitle = {Metadata in {Practice}}, publisher = {ALA Editions}, author = {Bruce, Thomas R. and Hillmann, Diane I.}, editor = {Hillmann, Diane I. and Westbrooks, E.}, year = {2004}, pages = {238--256}, } @book{groskopf_quartz_2015, title = {The {Quartz} guide to bad data}, url = {https://qz.com/572338/the-quartz-guide-to-bad-data/}, abstract = {An exhaustive reference to problems seen in real-world data along with suggestions on how to resolve them.}, language = {en}, author = {Groskopf, Christopher}, month = dec, year = {2015}, note = {Publication Title: Quartz}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\VVVCD89N\\the-quartz-guide-to-bad-data.html:text/html}, } @book{ifla_study_group_on_the_functional_requirements_for_bibliographic_records_functional_1998, address = {München}, series = {{UBCIM} {Publications}. 
{New} {Series}}, title = {Functional {Requirements} for {Bibliographic} {Records}: {Final} {Report}}, isbn = {978-3-11-096245-1}, shorttitle = {Functional {Requirements} for {Bibliographic} {Records}}, abstract = {Functional Requirements for Bibliographic Records von wurde am 07.02.2013 von De Gruyter Saur veröffentlicht.}, language = {en}, number = {19}, publisher = {De Gruyter Saur}, editor = {{IFLA Study Group on the Functional Requirements for Bibliographic Records}}, year = {1998}, doi = {10.1515/9783110962451}, } @book{delsey_functional_2002, title = {Functional {Analysis} of the {MARC} 21 {Bibliographic} and {Holdings} {Formats}}, url = {https://www.loc.gov/marc/marc-functional-analysis/original_source/analysis.pdf}, language = {en}, author = {Delsey, Tom}, month = jan, year = {2002}, file = {Delsey - 2002 - Functional Analysis of the MARC 21 Bibliographic a.pdf:C\:\\Users\\carst\\Zotero\\storage\\REJA2VTK\\Delsey - 2002 - Functional Analysis of the MARC 21 Bibliographic a.pdf:application/pdf}, } @book{noauthor_fair_2019-2, title = {{FAIR} {Metrics}}, url = {http://fairmetrics.org}, month = apr, year = {2019}, note = {Publication Title: GO FAIR Metrics Group}, } @phdthesis{kiraly_measuring_2019, address = {Göttingen}, type = {Dissertation}, title = {Measuring {Metadata} {Quality}}, url = {http://rgdoi.net/10.13140/RG.2.2.33177.77920}, language = {en}, school = {Georg-August-Universität Göttingen, Faculty of Humanities}, author = {Király, Péter}, year = {2019}, doi = {10.13140/RG.2.2.33177.77920}, file = {Király - 2019 - Measuring Metadata Quality.pdf:C\:\\Users\\carst\\Zotero\\storage\\BHQ3UFSN\\Király - 2019 - Measuring Metadata Quality.pdf:application/pdf}, } @book{national_information_standards_organization_niso_framework_2007, address = {Baltimore, MD}, edition = {3rd edition}, title = {A framework of guidance for building good digital collections}, copyright = {Copyright © 2007 by the National Information Standards Organization}, isbn = 
{978-1-880124-74-1}, url = {https://www.niso.org/sites/default/files/2017-08/framework3.pdf}, language = {en}, publisher = {National Information Standards Organization (NISO)}, editor = {{National Information Standards Organization (NISO)}}, year = {2007}, file = {National Information Standards Organization (NISO) - 2007 - A framework of guidance for building good digital .pdf:C\:\\Users\\carst\\Zotero\\storage\\JC4UUTEP\\National Information Standards Organization (NISO) - 2007 - A framework of guidance for building good digital .pdf:application/pdf}, } @book{network_development_and_marc_standards_office_functional_nodate, title = {Functional {Analysis} of {MARC} 21 {Bibliographic} and {Holdings} {Formats}}, url = {https://www.loc.gov/marc/marc-functional-analysis/functional-analysis.html}, language = {en}, author = {{Network Development and MARC Standards Office} and {Library of Congress}}, file = {Functional Analysis of MARC 21 (Library of Congress):C\:\\Users\\carst\\Zotero\\storage\\TAYRUQ53\\functional-analysis.html:text/html}, } @article{ochoa_automatic_2009, title = {Automatic evaluation of metadata quality in digital repositories}, volume = {10}, issn = {1432-1300}, url = {https://doi.org/10.1007/s00799-009-0054-4}, doi = {10.1007/s00799-009-0054-4}, abstract = {Owing to the recent developments in automatic metadata generation and interoperability between digital repositories, the production of metadata is now vastly surpassing manual quality control capabilities. Abandoning quality control altogether is problematic, because low-quality metadata compromise the effectiveness of services that repositories provide to their users. To address this problem, we present a set of scalable quality metrics for metadata based on the Bruce \& Hillman framework for metadata quality control. 
We perform three experiments to evaluate our metrics: (1) the degree of correlation between the metrics and manual quality reviews, (2) the discriminatory power between metadata sets and (3) the usefulness of the metrics as low-quality filters. Through statistical analysis, we found that several metrics, especially Text Information Content, correlate well with human evaluation and that the average of all the metrics are roughly as effective as people to flag low-quality instances. The implications of this finding are discussed. Finally, we propose possible applications of the metrics to improve tools for the administration of digital repositories.}, language = {en}, number = {2}, journal = {International Journal on Digital Libraries}, author = {Ochoa, Xavier and Duval, Erik}, month = aug, year = {2009}, pages = {67--91}, } @phdthesis{palavitsinis_metadata_2013, type = {Dissertation}, title = {Metadata {Quality} {Issues} in {Learning} {Repositories}}, url = {https://www.researchgate.net/publication/260424499_Metadata-_Quality_Issues_in_Learning_Repositories}, abstract = {Metadata lies at the heart of every digital repository project in the sense that it defines and drives the description of digital content stored in the repositories. Metadata allows content to be successfully stored, managed and retrieved but also preserved in the long-term. Despite the enormous importance of metadata in digital repositories, one that is widely recognized, studies indicate that what is defined as metadata quality, is relatively low in most cases of digital repositories. Metadata quality is loosely defined as “fitness for purpose” meaning that low quality of metadata means that metadata cannot fulfill its purpose which is to allow for the successful storage, management and retrieval of resources. 
In practice, low metadata quality leads to ineffective searches for content, ones that recall the wrong resources or even worse, no resources which makes them invisible to the intended user, that is the “client” of each digital repository. The present dissertation approaches this problem by proposing a comprehensive metadata quality assurance method, namely the Metadata Quality Assurance Certification Process (MQACP). The basic idea of this dissertation is to propose a set of methods that can be deployed throughout the lifecycle of a repository to ensure that metadata generated from content providers are of high quality. These methods have to be straightforward, simple to apply with measurable results. They also have to be adaptable with minimum effort so that they can be used in different contexts easily. This set of methods was described analytically, taking into account the actors needed to apply them, describing the tools needed and defining the anticipated outcomes. In order to test our proposal, we applied it on a Learning Federation of repositories, from day 1 of its existence until it reached its maturity and regular operation. We supported the metadata creation process throughout the different phases of the repositories involved by setting up specific experiments using the methods and tools of the MQACP. Throughout each phase, we measured the resulting metadata quality to certify that the anticipated improvement in metadata quality actually took place. Lastly, through these different phases, the cost of the MQACP application was measured to provide a comparison basis for future applications. Based on the success of this first application, we decided to validate the MQACP approach by applying it on another two cases of a Cultural and a Research Federation of repositories. This would allow us to prove the transferability of the approach to other cases the present some similarities with the initial one but mainly significant differences. 
The results showed that the MQACP was successfully adapted to the new contexts, with minimum adaptations needed, with similar results produced and also with comparable costs. In addition, looking closer at the common experiments carried out in each phase of each use case, we were able to identify interesting patterns in the behavior of content providers that can be further researched. The dissertation is completed with a set of future research directions that came out of the cases examined. These research directions can be explored in order to support the next version of the MQACP in terms of the methods deployed, the tools used to assess metadata quality as well as the cost analysis of the MQACP methods.}, language = {en}, school = {Universidad de Alcalá Departamento de Ciencias de la Computación}, author = {Palavitsinis, Nikos}, year = {2013}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\MK45YP8R\\_.pdf:application/pdf}, } @inproceedings{phillips_experiments_2018, title = {Experiments in {Operationalizing} {Metadata} {Quality} {Interfaces}: {A} {Case} {Study} at the {University} of {North} {Texas} {Libraries}}, copyright = {Copyright (c) 2018 International Conference on Dublin Core and Metadata Applications}, shorttitle = {Experiments in {Operationalizing} {Metadata} {Quality} {Interfaces}}, url = {https://dcpapers.dublincore.org/pubs/article/view/3967}, abstract = {This case study presents work underway at the University of North Texas (UNT) Libraries to design and implement interfaces and tools for analyzing metadata quality in their local metadata editing environment. 
It discusses the rationale for including these kinds of tools in locally-developed systems and discusses several interfaces currently being used at UNT to improve the quality of metadata managed within the Digital Collections.}, language = {en-US}, booktitle = {International {Conference} on {Dublin} {Core} and {Metadata} {Applications} 2018}, author = {Phillips, Mark Edward and Tarver, Hannah}, year = {2018}, pages = {15--23}, file = {Phillips und Tarver - 2018 - Experiments in Operationalizing Metadata Quality I.pdf:C\:\\Users\\carst\\Zotero\\storage\\WI7QSZQK\\Phillips und Tarver - 2018 - Experiments in Operationalizing Metadata Quality I.pdf:application/pdf}, } @book{ben_sapping_2017, title = {Sapping {Attention}: {A} brief visual history of {MARC} cataloging at the {Library} of {Congress}.}, shorttitle = {Sapping {Attention}}, url = {http://sappingattention.blogspot.com/2017/05/a-brief-visual-history-of-marc.html}, author = {{Ben}}, month = may, year = {2017}, note = {Publication Title: Sapping Attention}, file = {Blogspot Snapshot:C\:\\Users\\carst\\Zotero\\storage\\MTZI5JLU\\a-brief-visual-history-of-marc.html:text/html}, } @book{w3c_working_group_note_data_nodate, title = {Data on the {Web} {Best} {Practices}: {Data} {Quality} {Vocabulary}}, shorttitle = {Data on the {Web} {Best} {Practices}}, url = {https://www.w3.org/TR/vocab-dqv/}, language = {en}, author = {{W3C Working Group Note}}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\C5AN4HMB\\NOTE-vocab-dqv-20161215.html:text/html}, } @article{zaveri_quality_2016, title = {Quality assessment for {Linked} {Data}: {A} {Survey}}, volume = {7}, issn = {1570-0844}, shorttitle = {Quality assessment for {Linked} {Data}}, url = {https://content.iospress.com/articles/semantic-web/sw175}, doi = {10.3233/SW-150175}, abstract = {The development and standardization of Semantic Web technologies has resulted in an unprecedented volume of data being published on the Web as Linked Data (LD). 
However, we observe widely varying data quality ranging from extensively curated datasets to crowdsourced and extracted data of relatively low quality. In this article, we present the results of a systematic review of approaches for assessing the quality of LD. We gather existing approaches and analyze them qualitatively. In particular, we unify and formalize commonly used terminologies across papers related to data quality and provide a comprehensive list of 18 quality dimensions and 69 metrics. Additionally, we qualitatively analyze the 30 core approaches and 12 tools using a set of attributes. The aim of this article is to provide researchers and data curators a comprehensive understanding of existing work, thereby encouraging further experimentation and development of new approaches focused towards data quality, specifically for LD.}, language = {en}, number = {1}, journal = {Semantic Web}, author = {Zaveri, Amrapali and Rula, Anisa and Maurino, Andrea and Pietrobon, Ricardo and Lehmann, Jens and Auer, Sören}, month = jan, year = {2016}, pages = {63--93}, } @article{wilkinson_design_2018, title = {A design framework and exemplar metrics for {FAIRness}}, volume = {5}, issn = {2052-4463}, url = {https://www.nature.com/articles/sdata2018118}, doi = {10.1038/sdata.2018.118}, language = {en}, number = {1}, journal = {Scientific Data}, author = {Wilkinson, Mark D. and Sansone, Susanna-Assunta and Schultes, Erik and Doorn, Peter and Bonino da Silva Santos, Luiz Olavo and Dumontier, Michel}, month = jun, year = {2018}, pages = {180118}, file = {Wilkinson et al. - 2018 - A design framework and exemplar metrics for FAIRne.pdf:C\:\\Users\\carst\\Zotero\\storage\\PVHSQQG3\\Wilkinson et al. 
- 2018 - A design framework and exemplar metrics for FAIRne.pdf:application/pdf}, } @book{noauthor_license_2019, title = {License}, url = {https://wiki.pangaea.de/wiki/License}, language = {en}, year = {2019}, note = {Publication Title: PangaWiki}, file = {License - PangaWiki:C\:\\Users\\carst\\Zotero\\storage\\R4TSZ8QT\\License.html:text/html}, } @book{noauthor_about_nodate, title = {About {Zenodo}}, url = {https://about.zenodo.org/}, abstract = {Zenodo is a free and open digital archive built by CERN and OpenAIRE, enabling researchers to share and preserve research output in any size, format and from all fields of research.}, language = {en}, note = {Publication Title: Zenodo}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\RZ3GM4ND\\about.zenodo.org.html:text/html}, } @book{noauthor_kit_nodate, title = {{KIT}: {Karlsruher} {Institut} für {Technologie}}, copyright = {Alle Rechte liegen beim Autor siehe Impressum}, url = {https://www.kit.edu/index.php}, language = {de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\NMVI7SUH\\index.html:text/html}, } @techreport{commission_european_2016, address = {Brussels}, type = {Communication from the {Commission} to the {European} {Parliament}, the {Council}, the {European} {Economic} and {Social} {Committee} of the {Committee} of the {Regions}}, title = {European {Cloud} {Initiative} - {Building} a competitive data and knowledge economy in {Europe}}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:52016DC0178&from=EN}, language = {en}, institution = {European Commission}, author = {{European Commission}}, month = apr, year = {2016}, pages = {13}, file = {2016 - European Cloud Initiative -Building a competitive .pdf:C\:\\Users\\carst\\Zotero\\storage\\UJADZ9RI\\2016 - European Cloud Initiative -Building a competitive .pdf:application/pdf}, } @book{neuroth_langzeitarchivierung_2012, address = {Boizenburg Göttingen}, title = {Langzeitarchivierung von {Forschungsdaten}: eine {Bestandsaufnahme}}, 
copyright = {CC BY-NC-SA}, isbn = {978-3-86488-008-7}, url = {https://nbn-resolving.org/urn:nbn:de:0008-2010071949}, language = {de}, publisher = {Hülsbusch Univ.-Verl. Göttingen}, editor = {Neuroth, Heike and Strathmann, Stefan and Oßwald, Achim and Scheffel, Regine and Klump, Jens and Ludwig, Jens and {nestor-Kompetenznetzwerk Langzeitarchivierung und Langzeitverfügbarkeit Digitaler Ressourcen für Deutschland}}, year = {2012}, file = {Neuroth et al. - 2012 - Langzeitarchivierung von Forschungsdaten eine Bes.pdf:C\:\\Users\\carst\\Zotero\\storage\\IVX7EBUW\\Neuroth et al. - 2012 - Langzeitarchivierung von Forschungsdaten eine Bes.pdf:application/pdf;Neuroth et al. - 2012 - Langzeitarchivierung von Forschungsdaten eine Bes.pdf:C\:\\Users\\carst\\Zotero\\storage\\TDUQNZ4A\\Neuroth et al. - 2012 - Langzeitarchivierung von Forschungsdaten eine Bes.pdf:application/pdf}, } @article{curdt_etablierung_2019, title = {Etablierung von {Forschungsdatenmanagement}-{Services} in geowissenschaftlichen {Sonderforschungsbereichen} am {Beispiel} des {SFB}/{Transregio} 32, {SFB} 1211 und {SFB}/ {Transregio} 228}, copyright = {Copyright (c) 2019 Constanze Curdt, Dirk Hoffmeister, Tanja Kramm, Ulrich Lang, Georg Bareth}, url = {https://bausteine-fdm.de/article/view/8103}, doi = {10.17192/bfdm.2019.2.8103}, language = {de}, number = {2}, journal = {Bausteine Forschungsdatenmanagement}, author = {Curdt, Constanze and Hoffmeister, Dirk and Kramm, Tanja and Lang, Ulrich and Bareth, Georg}, month = sep, year = {2019}, pages = {61--67}, file = {Curdt et al. - 2019 - Etablierung von Forschungsdatenmanagement-Services.pdf:C\:\\Users\\carst\\Zotero\\storage\\IF5J9AQM\\Curdt et al. - 2019 - Etablierung von Forschungsdatenmanagement-Services.pdf:application/pdf;Curdt et al. - 2019 - Etablierung von Forschungsdatenmanagement-Services.pdf:C\:\\Users\\carst\\Zotero\\storage\\524G8XEK\\Curdt et al. 
- 2019 - Etablierung von Forschungsdatenmanagement-Services.pdf:application/pdf}, } @article{forschungsdaten_forschungsdatenmanagement_2018, title = {Forschungsdatenmanagement: {Eine} {Handreichung} [{Arbeitsgruppe} {Forschungsdaten} der {Schwerpunktinitiative} „{Digitale} {Information}“ der {Allianz} der deutschen {Wissenschaftsorganisationen}]}, copyright = {Creative Commons Attribution 4.0 International (CC BY 4.0)}, url = {https://gfzpublic.gfz-potsdam.de/pubman/faces/ViewItemOverviewPage.jsp?itemId=item_3055893}, doi = {10.2312/allianzoa.029}, abstract = {Diese Handreichung soll als Einstieg für Wissenschaftlerinnen und Wissenschaftler, die mit digitalen Daten arbeiten, sowie für alle an dieser Thematik Interessierten dienen und bietet darüber hinaus Hinweise zu weiterführender Information. Sie wurde von der Arbeitsgruppe „Forschungsdaten“ der Schwerpunktinitiative „Digitale Information“ der Allianz der deutschen Wissenschaftsorganisationen1 verfasst.}, language = {de}, author = {{Arbeitsgruppe Forschungsdaten}}, year = {2018}, pages = {14}, file = {handreichung forschungsdatenmanagement.pdf:C\:\\Users\\carst\\Zotero\\storage\\RYMKYW8U\\handreichung forschungsdatenmanagement.pdf:application/pdf;Arbeitsgruppe Forschungsdaten - 2018 - Forschungsdatenmanagement Eine Handreichung [Arbe.pdf:C\:\\Users\\carst\\Zotero\\storage\\EZKJQ3EN\\Arbeitsgruppe Forschungsdaten - 2018 - Forschungsdatenmanagement Eine Handreichung [Arbe.pdf:application/pdf}, } @techreport{noauthor_leistung_2016, address = {Göttingen}, title = {Leistung aus {Vielfalt}: {Empfehlungen} zu {Strukturen}, {Prozessen} und {Finanzierung} des {Forschungsdatenmanagements} in {Deutschland}}, copyright = {Creative Commons Namensnennung –Weitergabe unter gleichen Bedingungen 4.0 International}, url = {https://d-nb.info/1104292440/34}, language = {de}, institution = {Rat für Informationsinfrastrukturen}, year = {2016}, pages = {160}, file = {2016 - Leistung aus Vielfalt Empfehlungen zu 
Strukturen,.pdf:C\:\\Users\\carst\\Zotero\\storage\\VKQ7I4QY\\2016 - Leistung aus Vielfalt Empfehlungen zu Strukturen,.pdf:application/pdf}, } @article{eyring_overview_2016, title = {Overview of the {Coupled} {Model} {Intercomparison} {Project} {Phase} 6 ({CMIP6}) experimental design and organization}, volume = {9}, copyright = {CC BY}, issn = {1991-959X}, url = {https://gmd.copernicus.org/articles/9/1937/2016/}, doi = {10.5194/gmd-9-1937-2016}, abstract = {By coordinating the design and distribution of global climate model simulations of the past, current, and future climate, the Coupled Model Intercomparison Project (CMIP) has become one of the foundational elements of climate science. However, the need to address an ever-expanding range of scientific questions arising from more and more research communities has made it necessary to revise the organization of CMIP. After a long and wide community consultation, a new and more federated structure has been put in place. It consists of three major elements: (1) a handful of common experiments, the DECK (Diagnostic, Evaluation and Characterization of Klima) and CMIP historical simulations (1850–near present) that will maintain continuity and help document basic characteristics of models across different phases of CMIP; (2) common standards, coordination, infrastructure, and documentation that will facilitate the distribution of model outputs and the characterization of the model ensemble; and (3) an ensemble of CMIP-Endorsed Model Intercomparison Projects (MIPs) that will be specific to a particular phase of CMIP (now CMIP6) and that will build on the DECK and CMIP historical simulations to address a large range of specific questions and fill the scientific gaps of the previous CMIP phases. The DECK and CMIP historical simulations, together with the use of CMIP data standards, will be the entry cards for models participating in CMIP. 
Participation in CMIP6-Endorsed MIPs by individual modelling groups will be at their own discretion and will depend on their scientific interests and priorities. With the Grand Science Challenges of the World Climate Research Programme (WCRP) as its scientific backdrop, CMIP6 will address three broad questions: – How does the Earth system respond to forcing? – What are the origins and consequences of systematic model biases? – How can we assess future climate changes given internal climate variability, predictability, and uncertainties in scenarios? This CMIP6 overview paper presents the background and rationale for the new structure of CMIP, provides a detailed description of the DECK and CMIP6 historical simulations, and includes a brief introduction to the 21 CMIP6-Endorsed MIPs}, language = {en}, number = {5}, journal = {Geoscientific Model Development}, author = {Eyring, Veronika and Bony, Sandrine and Meehl, Gerald A. and Senior, Catherine A. and Stevens, Bjorn and Stouffer, Ronald J. and Taylor, Karl E.}, month = may, year = {2016}, pages = {1937--1958}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\T2EHKJ2W\\Eyring et al. - 2016 - Overview of the Coupled Model Intercomparison Proj.pdf:application/pdf;Eyring et al. - 2016 - Overview of the Coupled Model Intercomparison Proj.pdf:C\:\\Users\\carst\\Zotero\\storage\\MUL6XKRW\\Eyring et al. - 2016 - Overview of the Coupled Model Intercomparison Proj.pdf:application/pdf}, } @inproceedings{virkar_investigating_2019, address = {Cham}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Investigating the {Social}, {Political}, {Economic} and {Cultural} {Implications} of {Data} {Trading}}, isbn = {978-3-030-27325-5}, doi = {10.1007/978-3-030-27325-5_17}, abstract = {Data market initiatives have, by assigning monetary value to data, and connecting the various actors responsible for its efficient production and consumption, far reaching consequences for national economies. 
The Data Market Austria (DMA) project represents a unique opportunity for Austria to leverage the enormous potential socio-economic benefits accruing from increased trade of data. At the same time, however, a number of key challenges to the successful uptake of the project needs to be considered, and new problems emerging from this new form of digital commercial infrastructure need to be anticipated and addressed. This study aims to examine how the benefits accruing to increased participation in a data-driven ecosystem can be applied to tackle the long-standing socio-cultural challenges and the possible societal and cultural impediments to the successful unfolding out of a data market. Theoretical discussions framed from arguments obtained through a systematic review of academic and scholarly literature are juxtaposed with empirical data obtained from data science experts and DMA project personnel to test whether they stand up to real-world practicalities and to narrow the focus onto the Austria-specific context. Our findings reveal that data is a dual-purpose commodity that has both commercial value and social application. 
To amplify the benefits accruing from increased data trading, it is vital that a country establishes a sound open data strategy and a balanced regulatory framework for data trading.}, language = {en}, booktitle = {Electronic {Government}}, publisher = {Springer International Publishing}, author = {Virkar, Shefali and Viale Pereira, Gabriela and Vignoli, Michela}, editor = {Lindgren, Ida and Janssen, Marijn and Lee, Habin and Polini, Andrea and Rodríguez Bolívar, Manuel Pedro and Scholl, Hans Jochen and Tambouris, Efthimios}, year = {2019}, pages = {215--229}, } @article{pampel_making_2013, title = {Making {Research} {Data} {Repositories} {Visible}: {The} re3data.org {Registry}}, volume = {8}, issn = {1932-6203}, shorttitle = {Making {Research} {Data} {Repositories} {Visible}}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0078080}, doi = {10.1371/journal.pone.0078080}, abstract = {Researchers require infrastructures that ensure a maximum of accessibility, stability and reliability to facilitate working with and sharing of research data. Such infrastructures are being increasingly summarized under the term Research Data Repositories (RDR). The project re3data.org–Registry of Research Data Repositories–has begun to index research data repositories in 2012 and offers researchers, funding organizations, libraries and publishers an overview of the heterogeneous research data repository landscape. In July 2013 re3data.org lists 400 research data repositories and counting. 288 of these are described in detail using the re3data.org vocabulary. Information icons help researchers to easily identify an adequate repository for the storage and reuse of their data. This article describes the heterogeneous RDR landscape and presents a typology of institutional, disciplinary, multidisciplinary and project-specific RDR. 
Further the article outlines the features of re3data.org, and shows how this registry helps to identify appropriate repositories for storage and search of research data.}, language = {en}, number = {11}, journal = {PLOS ONE}, author = {Pampel, Heinz and Vierkant, Paul and Scholze, Frank and Bertelmann, Roland and Kindling, Maxi and Klump, Jens and Goebelbecker, Hans-Jürgen and Gundlach, Jens and Schirmbacher, Peter and Dierolf, Uwe}, month = nov, year = {2013}, pages = {e78080}, file = {Pampel et al. - 2013 - Making Research Data Repositories Visible The re3.PDF:C\:\\Users\\carst\\Zotero\\storage\\V2WN5QNI\\Pampel et al. - 2013 - Making Research Data Repositories Visible The re3.PDF:application/pdf;Pampel et al. - 2013 - Making Research Data Repositories Visible The re3.PDF:C\:\\Users\\carst\\Zotero\\storage\\YLYS97CC\\Pampel et al. - 2013 - Making Research Data Repositories Visible The re3.PDF:application/pdf}, } @book{noauthor_nationale_nodate, title = {Nationale {Forschungsdateninfrastruktur}}, url = {https://www.dfg.de/foerderung/programme/nfdi/}, abstract = {Die nationale Forschungsdateninfrastruktur (NFDI) soll die Datenbestände von Wissenschaft und Forschung systematisch erschließen, nachhaltig sichern und zugänglich machen sowie (inter-)national vernetzen. 
Sie wird in einem aus der Wissenschaft getriebenen Prozess als vernetzte Struktur eigeninitiativ agierender Konsortien aufgebaut werden.}, language = {de}, note = {Publication Title: DFG: Deutsche Forschungsgemeinschaft}, file = {DFG - Deutsche Forschungsgemeinschaft - Nationale Forschungsdateninfrastruktur:C\:\\Users\\carst\\Zotero\\storage\\629KHEDG\\nfdi.html:text/html;DFG - Deutsche Forschungsgemeinschaft - Nationale Forschungsdateninfrastruktur:C\:\\Users\\carst\\Zotero\\storage\\C6NPG9GY\\nfdi.html:text/html}, } @book{noauthor_informationsinfrastrukturen_nodate, title = {Informationsinfrastrukturen / {NFDI}}, url = {https://www.gwk-bonn.de/themen/weitere-arbeitsgebiete/informationsinfrastrukturen-nfdi/}, language = {de}, note = {Publication Title: Gemeinsame Wissenschaftskonfernz (GWK)}, file = {Informationsinfrastrukturen / NFDI | GWK-Bonn:C\:\\Users\\carst\\Zotero\\storage\\S2A535RL\\informationsinfrastrukturen-nfdi.html:text/html}, } @book{noauthor_rda_nodate, title = {{RDA}: {Research} {Data} {Alliance}}, url = {https://rd-alliance.org/}, abstract = {The Research Data Alliance (RDA) is an international organization aimed to sharing and promote the acceleration of data driven innovation worldwide.}, language = {en}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\IAA5WBHA\\rd-alliance.org.html:text/html}, } @book{noauthor_go_nodate-1, title = {{GO} {FAIR} initiative: {Make} your data \& services {FAIR}}, shorttitle = {{GO} {FAIR} initiative}, url = {https://www.go-fair.org/}, abstract = {A bottom-up international approach for the practical implementation of the European Open Science Cloud (EOSC) as part of a global Internet of FAIR Data \& Services}, language = {en}, note = {Publication Title: GO FAIR}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\6PQUFUWX\\www.go-fair.org.html:text/html}, } @book{noauthor_rfii_nodate, title = {{RfII}: {Rat} für {Informationsinfrastrukturen}}, url = {http://www.rfii.de/de/start/}, abstract = {Der als 
Beratungsgremium von Bund und Ländern berufene Rat für Informationsinfrastrukturen (RfII) begleitet die Entwicklung des deutschen Wissenschaftssystems im Zuge des digitalen Wandels.}, language = {de-DE}, note = {Publication Title: RfII}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\4R5E6ATG\\start.html:text/html}, } @book{noauthor_forderprogramm_nodate, title = {Förderprogramm „{Informationsinfrastrukturen} für {Forschungsdaten}“}, url = {https://www.dfg.de/foerderung/programme/infrastruktur/lis/lis_foerderangebote/forschungsdaten/}, note = {Publication Title: DFG: Deutsche Forschungsgemeinschaft}, file = {DFG - Deutsche Forschungsgemeinschaft - Förderprogramm „Informationsinfrastrukturen für Forschungsdaten“:C\:\\Users\\carst\\Zotero\\storage\\XWJJRLVZ\\forschungsdaten.html:text/html}, } @book{noauthor_nationale_nodate-1, title = {Nationale {Forschungsdateninfrastruktur} ({NFDI})}, url = {https://www.nfdi.de/}, abstract = {In der Nationalen Forschungsdateninfrastruktur (NFDI) sollen die wertvollen Datenbestände von Wissenschaft und Forschung für das gesamte deutsche Wissenschaftssystem systematisch erschlossen, vernetzt und nachhaltig sowie qualitativ nutzbar gemacht werden.}, language = {de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\828KXVQJ\\www.nfdi.de.html:text/html}, } @book{noauthor_expertengremium_nodate, title = {Expertengremium {Nationale} {Forschungsdateninfrastruktur} ({NFDI})}, url = {https://www.dfg.de/dfg_profil/gremien/gremium/index.jsp?id=426076674}, abstract = {Liste der Gremienmitglieder}, language = {de}, note = {Publication Title: DFG: Deutsche Forschungsgemeinschaft}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\M5JMQ8PE\\index.html:text/html}, } @techreport{noauthor_nationale_2020, type = {Satzung}, title = {Nationale {Forschungsdateninfrastruktur} ({NFDI}) e.{V}.: {Satzung}}, url = {https://cdn.website-editor.net/25abfc2078d74313bbe63818c335df0e/files/uploaded/Satzung%2520NFDI%2520eV_final.pdf}, abstract = {Die 
Gemeinsame Wissenschaftskonferenz (GWK) hat die vorliegende Fassung am 26. Juni 2020 gebilligt.}, language = {de}, institution = {Gemeinsame Wissenschaftskonferenz (GWK)}, month = jun, year = {2020}, pages = {15}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\V9TFRHB9\\_.pdf:application/pdf}, } @book{noauthor_nfdi-konferenz_nodate, title = {{NFDI}-{Konferenz} 2019}, url = {https://www.dfg.de/foerderung/programme/nfdi/konferenz_2019/index.html}, abstract = {Die erste NFDI-Konferenz fand am 13. und 14. Mai 2019 im Maritim-Hotel in Bonn statt.}, language = {de}, note = {Publication Title: DFG: Deutsche Forschungsgemeinschaft}, file = {DFG - Deutsche Forschungsgemeinschaft - NFDI-Konferenz 2019:C\:\\Users\\carst\\Zotero\\storage\\FJLZDT58\\index.html:text/html}, } @book{noauthor_nfdi-konferenz_nodate-1, title = {{NFDI}-{Konferenz} 2020}, url = {https://www.dfg.de/foerderung/programme/nfdi/konferenz_2020/index.html}, abstract = {Die zweite NFDI-Konferenz fand am 8./9. Juli 2020 als Webinar statt.}, language = {de}, note = {Publication Title: DFG: Deutsche Forschungsgemeinschaft}, file = {DFG - Deutsche Forschungsgemeinschaft - NFDI-Konferenz 2020:C\:\\Users\\carst\\Zotero\\storage\\8MLM5DGE\\index.html:text/html}, } @techreport{deutsche_forschungsgemeinschaft_nationale_2019, address = {Bonn}, title = {Nationale {Forschungsdateninfrastruktur}: {Statistische} Übersichten zum {Antragseingang}}, url = {https://www.dfg.de/download/pdf/foerderung/programme/nfdi/191212_nfdi_statistik_antragseingang.pdf}, language = {de}, number = {Version: 1.0}, institution = {DFG: Deutsche Forschungsgemeinschaft}, author = {{Deutsche Forschungsgemeinschaft}}, month = nov, year = {2019}, pages = {18}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\G9SZ79U3\\_.pdf:application/pdf}, } @techreport{noauthor_nationale_2020-1, address = {Bonn}, title = {Nationale {Forschungsdateninfrastruktur}: {Statistische} Übersicht zu den {Förderentscheidungen} in der ersten {Ausschreibungsrunde}}, url = 
{https://www.dfg.de/download/pdf/foerderung/programme/nfdi/20200626_nfdi_foerderentscheidungen.pdf}, language = {de}, year = {2020}, pages = {10}, file = {2020 - Nationale Forschungsdateninfrastruktur Statistisc.pdf:C\:\\Users\\carst\\Zotero\\storage\\QPR9LGXG\\2020 - Nationale Forschungsdateninfrastruktur Statistisc.pdf:application/pdf}, } @book{noauthor_landesinitiative_nodate, title = {Landesinitiative für {Forschungsdatenmanagement} (fdm.nrw)}, url = {https://www.fdm.nrw/}, abstract = {Koordination, Vernetzung, Unterstützung: die zentrale Kontaktstelle für institutionelles Forschungsdatenmanagement (FDM) in NRW}, language = {de-DE}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\A542Q3BU\\www.fdm.nrw.html:text/html}, } @techreport{eickhoff_anmerkungen_2020, title = {Anmerkungen zu den {Abstracts} und {Ausblick} auf das {Verfahren}}, url = {https://www.dfg.de/download/pdf/foerderung/programme/nfdi/nfdi_konferenz_2020/vortrag_eickhoff.pdf}, language = {de}, author = {Eickhoff, Ulrike}, month = jul, year = {2020}, pages = {11}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\BSM3PJGG\\_.pdf:application/pdf}, } @book{noauthor_publikationen_nodate-1, title = {Publikationen}, url = {https://mwk.baden-wuerttemberg.de/de/service/publikation/did/e-science/}, abstract = {Barrierefrei gestalteter Internetauftritt des Ministerium für Wissenschaft, Forschung und Kunst Baden-Württemberg.}, language = {de}, note = {Publication Title: Baden-Württemberg.de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\D3FPN52Z\\publikationen.html:text/html}, } @book{noauthor_landesprogramm_nodate, title = {Landesprogramm {LOEWE}}, url = {https://wissenschaft.hessen.de/wissenschaft/landesprogramm-loewe}, language = {de}, note = {Publication Title: Hessisches Ministerium für Wissenschaft und Kunst}, file = {Landesprogramm LOEWE | Hessisches Ministerium für Wissenschaft und Kunst:C\:\\Users\\carst\\Zotero\\storage\\PG4JX79K\\landesprogramm-loewe.html:text/html}, } 
@techreport{noauthor_kooperationsvereinbarung_nodate, type = {Kooperationsvereinbarung}, title = {Kooperationsvereinbarung der {Mitgliedshochschulen} und des {Ministeriums} für {Kultur} und {Wissenschaft} des {Landes} {Nordrhein}-{Westfalen} zur {Begründung} der {Digitalen} {Hochschule} {NRW}}, shorttitle = {{DH}.{NRW} {Kooperationsvereinbarung}}, url = {https://www.dh.nrw/fileadmin/user_upload/dh-nrw/pdf_word_Dokumente/DH.NRW_Kooperationsvereinbarung.pdf}, abstract = {Die„DigitaleHochschuleNRW“(DH.NRW) isteineKooperationsplattformvonderzeit42Hochschulen des Landes Nordrhein-Westfalen und des Ministeriums für Kultur und Wissenschaft des Landes Nordrhein-Westfalen (MKW) zur kooperativen Weiterentwicklung von Digitalisierungsprozessen und hochschulergreifendenMaßnahmen im Hochschulwesen. Die DH.NRW umfasst alle drei in Nordrhein- Westfalen bestehenden Hochschularten.DieDH.NRWgeht ausdemArbeitskreises„Datenverarbeitungs-Infrastrukturausschu ss“(DV-ISA) hervor,deraufviele Jahre konstruktiverZusammenarbeit imHandlungsfeld„Infrastrukturen\& Management“ zurückblickt.Mit Gründ ung der DH.NRW wird der bisherige Handlungsrahmen nun um die Felder „Studium\&Lehre“ sowie„Forschung“ erweitert.Davonausgenommen istdasThema Digitalisierung als Forschungsfeld}, language = {de}, pages = {8}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\ALDE9T3L\\_.pdf:application/pdf}, } @book{noauthor_dhnrw_nodate, title = {{DH}.{NRW}: {Startseite}}, url = {https://www.dh.nrw/}, language = {de}, file = {DH.NRW\: Startseite:C\:\\Users\\carst\\Zotero\\storage\\GMAGRC69\\www.dh.nrw.html:text/html}, } @techreport{noauthor_verfahrensordnung_2018, address = {Hagen}, type = {Verfahrensordnung}, title = {Verfahrensordnung: {Digitale} {Hochschule} {NRW} ({DH}.{NRW})}, url = {https://www.dh.nrw/fileadmin/user_upload/dh-nrw/pdf_word_Dokumente/DH.NRW_Verfahrensordnung.pdf}, abstract = {Auf Grundlage der Zielsetzungen gemäß § 1 der Kooperationsvereinbarung der Mitgliedshochschulen (Mitglieder) und des 
Ministeriums für Kultur und Wissenschaft des Landes Nordrhein-Westfalen (MKW) zur Begründung der Digitalen Hochschule NRW (Kooperationsvereinbarung) und auf Basis der Ermächtigung in § 5 dieser Kooperationsvereinbarung gibt sich die Digitale Hochschule NRW (DH.NRW) zur Organisation der kooperativen Zusammenarbeit ihrer Gremien diese Verfahrensordnung.Die Verfahrensordnung regelt gemäß § 5 der Kooperationsvereinbarung die Zuständigkeiten und Aufgaben der entscheidungsbeteiligten Gremien und Organisationseinheiten der DH.NRW mit Ausnahme des bereits in § 6 der Kooperationsvereinbarung definierten Vorstandes.}, language = {de}, number = {Fassung vom 14.12.2018}, month = dec, year = {2018}, pages = {8}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\WDD8MN7H\\_.pdf:application/pdf}, } @book{noauthor_doi-service_nodate, title = {{DOI}-{Service}}, url = {https://www.tib.eu/de/publizieren-archivieren/doi-service}, abstract = {Die Vergabe eines Digital Object Identifier (DOI) ermöglicht es, auf Objekte nachhaltig und eindeutig zuzugreifen. Ein DOI kann ähnlich einer ISBN zur Identifizierung eines Objektes eingesetzt werden und besitzt die Funktion, dieses zu lokalisieren. Auf diese Weise können wissenschaftliche Ergebnisse zuverlässig und in standardisierter Form zitiert werden. Die TIB vergibt DOI-Namen für Forschungsdaten, nicht-textuelle Materialien wie Videos, Bilder oder 3D-Modelle, graue Literatur und Artikel in Open-Access-Zeitschriften. Der DOI-Service der TIB steht wissenschaftlichen Einrichtungen, Publikationsservices sowie dem Hochschul- und Forschungsbereich zur Verfügung.}, language = {de}, note = {Publication Title: Technische Informationsbibliothek (TIB)}, file = {DOI-Service - Technische Informationsbibliothek (TIB):C\:\\Users\\carst\\Zotero\\storage\\XYUW9KLB\\doi-service.html:text/html}, } @article{noauthor_grundsatze_2001, title = {Grundsätze der {Universität} des {Saarlandes} zur {Sicherung} guter wissenschaftlicher {Praxis}: {Vom} 06. 
{Juni} 2001}, url = {https://www.uni-saarland.de/fileadmin/upload/verwaltung/fundstellen/Forschungsangelegenheiten/DB01-342.pdf}, language = {de}, number = {18}, journal = {Dienstblatt der Hochschulen des Saarlandes}, month = jun, year = {2001}, pages = {341--344}, file = {2001 - Grundsätze der Universität des Saarlandes zur Sich.pdf:C\:\\Users\\carst\\Zotero\\storage\\CRMDPWLK\\2001 - Grundsätze der Universität des Saarlandes zur Sich.pdf:application/pdf}, } @book{noauthor_historisches_nodate, title = {Historisches {Datenzentrum} {Sachsen}-{Anhalt}}, url = {https://www.geschichte.uni-halle.de/struktur/hist-data/}, language = {de}, note = {Publication Title: Martin-Luther-Universität Halle-Wittenberg}, file = {Historisches Datenzentrum Sachsen-Anhalt:C\:\\Users\\carst\\Zotero\\storage\\IH44UTKR\\hist-data.html:text/html}, } @article{bierwirth_leipzig-berlin-erklarung_2020, title = {Leipzig-{Berlin}-{Erklärung} zu {NFDI}-{Querschnittsthemen} der {Infrastrukturentwicklung}}, copyright = {Creative Commons Attribution 4.0 International, Open Access}, url = {https://zenodo.org/record/3895209}, doi = {10.5281/ZENODO.3895209}, abstract = {Für den wissenschaftsgeleiteten Aufbau der Nationalen Forschungsdaten-Infrastruktur (NFDI) muss sich die Infrastruktur gemeinsam mit der Forschung weiterentwickeln. Die dafür notwendigen, wechselseitigen Abstimmungen müssen auf Basis tragfähiger Prozesse und Strukturen sichergestellt werden. Themen, die für mehrere Fachkonsortien relevant sind, müssen im Sinne einer nachhaltigen Funktionalität kooperativ und über einzelne Konsortien hinweg bearbeitet werden. Dieses Dokument identifiziert solche Querschnittsthemen und Wege zu ihrer Bearbeitung in der NFDI.{\textbackslash}textlessbr{\textbackslash}textgreater {\textbackslash}textlessbr{\textbackslash}textgreater Um diese Herausforderung abgestimmt zu adressieren, hat sich die Mehrzahl der Fachkonsortien im Sommer 2019 auf die “Berlin Declaration on NFDI Cross-Cutting Topics” verständigt. 
Auf einer gemeinsamen Veranstaltung am 25. Februar 2020 in Berlin haben sich Vertreterinnen und Vertreter von Fachkonsortien und Querschnittsinitiativen erneut über die Handlungsfelder der NFDI-übergreifenden Infrastrukturentwicklung ausgetauscht. Dabei haben Fachkonsortien und Querschnittsinitiativen vier modellhafte Vorschläge erarbeitet, um diese Handlungsfelder zu erweitern und im Rahmen der NFDI belastbar und nachhaltig umzusetzen. Diese „Leipzig-Berlin-Erklärung zu NFDI-Querschnittsthemen der Infrastrukturentwicklung“ dient als Diskussionsimpuls und richtet sich an alle Konsortien und am Aufbau der NFDI Beteiligten, sowie diejenigen Fachgruppen, die näher mit Forschungsdatenmanagement befasst sind.{\textbackslash}textlessbr{\textbackslash}textgreater {\textbackslash}textlessbr{\textbackslash}textgreater Mit der Unterzeichnung dieser Erklärung bestätigen die 27 Konsortien, dass sie gemeinschaftlich und im Einklang mit dem Direktorat und den Gremien der NFDI die benannten Querschnittsthemen und Handlungsfelder weiterentwickeln und im Sinne einer NFDI bearbeiten werden.}, language = {de}, author = {Bierwirth, Maik and Glöckner, Frank Oliver and Grimm, Christian and Schimmler, Sonja and Boehm, Franziska and Busse, Christian and Degkwitz, Andreas and Koepler, Oliver and Neuroth, Heike}, month = jun, year = {2020}, pages = {11}, file = {Bierwirth et al. - 2020 - Leipzig-Berlin-Erklärung zu NFDI-Querschnittstheme.pdf:C\:\\Users\\carst\\Zotero\\storage\\J29E75FB\\Bierwirth et al. 
- 2020 - Leipzig-Berlin-Erklärung zu NFDI-Querschnittstheme.pdf:application/pdf}, } @book{noauthor_eresearch_nodate, title = {{eResearch} {Alliance}}, url = {https://www.eresearch.uni-goettingen.de/}, abstract = {Wir unterstützen Forschende am Göttingen Campus mit Diensten, Schulungen und Beratungen zu Forschungsdatenmanagement und eResearch.}, language = {de-DE}, note = {Publication Title: eResearch Alliance}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\MCWRN7WZ\\de.html:text/html}, } @book{noauthor_wissenschaftliche_nodate, title = {Wissenschaftliche {Kommission} {Niedersachsen}}, url = {https://www.wk.niedersachsen.de}, abstract = {Die Wissenschaftliche Kommission in Niedersachsen (WKN) ist ein unabhängiges Expertengremium, das 1997 auf Kabinettsbeschluss dauerhaft eingerichtet wurde und das die niedersächsische Landesregierung in Fragen der Wissenschafts- und Forschungspolitik berät.}, language = {de}, file = {Wissenschaftliche Kommission Niedersachsen:C\:\\Users\\carst\\Zotero\\storage\\HUC357LR\\startseite.html:text/html}, } @techreport{noauthor_leitlinie_2015, address = {Kiel}, type = {Beschluss}, title = {Leitlinie zum {Umgang} mit {Forschungsdaten}: {Empfehlungen} zum {Umgang} mit {Forschungsdaten} der {Christian}-{Albrechts}-{Universität} zu {Kiel}}, url = {http://www.uni-kiel.de/download/pm/2015/2015-408-leitlinie-forschungsdaten.pdf}, language = {de}, number = {Beschluss des Präsidiums vom 14.07.2015}, institution = {Christian-Albrechts-Universität zu Kiel, Präsidium}, month = jul, year = {2015}, pages = {1}, file = {_.pdf:C\:\\Users\\carst\\Zotero\\storage\\VI3H288P\\_.pdf:application/pdf}, } @book{noauthor_gaus-allianz_nodate, title = {Gauß-{Allianz}}, url = {https://gauss-allianz.de/}, language = {de}, } @book{hyndman_2018_2019, title = {2018 year in review!}, url = {https://figshare.com/blog/2018_year_in_review_/464}, language = {en}, author = {Hyndman, Alan}, month = jan, year = {2019}, note = {Publication Title: 
https://figshare.com/blog}, file = {2018 year in review!:C\:\\Users\\carst\\Zotero\\storage\\EW3AZRQN\\464.html:text/html}, } @article{klump_principles_2020, title = {Principles and best practices in data versioning for all data sets big and small}, copyright = {CC BY-SA 4.0}, url = {https://www.rd-alliance.org/group/data-versioning-wg/outcomes/principles-and-best-practices-data-versioning-all-data-sets-big}, doi = {10.15497/RDA00042}, abstract = {The demand for better reproducibility of research results is growing. More and more data is becoming available online. In some cases, the datasets have become so large that downloading the data is no longer feasible. Data can also be offered through web services and accessed on demand. This means that parts of the data are accessed at a remote source when needed. In this scenario, it will become increasingly important for a researcher to be able to cite the exact extract of the data set that was used to underpin their research publication. However, while the means to identify datasets using persistent identifiers have been in place for more than a decade, systematic data versioning practices are currently not available. Versioning procedures and best practices are well established for scientific software. The related Wikipedia article gives an overview of software versioning practices. The codebase of large software projects does bear some semblance to large dynamic datasets. Are therefore versioning practices for code also suitable for data sets or do we need a separate suite of practices for data versioning? How can we apply our knowledge of versioning code to improve data versioning practices? This Working Group investigated to which extent these practices can be used to enhance the reproducibility of scientific results. The Research Data Alliance (RDA) Data Versioning Working Group produced this white paper to document use cases and practices, and to make recommendations for the versioning of research data. 
To further adoption of the outcomes, the Working Group contributed selected use cases and recommended data versioning practices to other groups in RDA and W3C. The outcomes of the RDA Data Versioning Working Group add a central element to the systematic management of research data at any scale by providing recommendations for standard practices in the versioning of research data. These practice guidelines are illustrated by a collection of use cases. Version information: The version 1.0 was submitted to RDA for community review. This version has 38 use cases. Changes in the version 1.1 include: add the google use case (\#Google, No. 39), add figure captures, and revised the ASTER (\#ASTER) use case}, language = {en}, journal = {Research Data Alliance}, author = {Klump, Jens and Wyborn, Lesley and Downs, Robert and Asmi, Ari and Wu, Mingfang and Ryder, Gerry and Martin, Julia}, year = {2020}, } @article{klump_criteria_2011, title = {Criteria for the {Trustworthiness} of {Data} {Centres}}, volume = {17}, issn = {1082-9873}, url = {http://www.dlib.org/dlib/january11/klump/01klump.html}, doi = {10.1045/january2011-klump}, language = {en}, number = {1/2}, journal = {D-Lib Magazine}, author = {Klump, Jens}, year = {2011}, file = {Klump - Criteria for the Trustworthiness of Data Centres.pdf:C\:\\Users\\carst\\Zotero\\storage\\6ZQQSUA4\\Klump - Criteria for the Trustworthiness of Data Centres.pdf:application/pdf}, } @article{klump_data_2006, title = {Data publication in the open access initiative}, volume = {5}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/abstract/10.2481/dsj.5.79/}, doi = {10.2481/dsj.5.79}, abstract = {The 'Berlin Declaration' was published in 2003 as a guideline to policy makers to promote the Internet as a functional instrument for a global scientific knowledge base. Because knowledge is derived from data, the principles of the 'Berlin Declaration' should apply to data as well. 
Today, access to scientific data is hampered by structural deficits in the publication process. Data publication needs to offer authors an incentive to publish data through long-term repositories. Data publication also requires an adequate licence model that protects the intellectual property rights of the author while allowing further use of the data by the scientific community.}, language = {en}, journal = {Data Science Journal}, author = {Klump, Jens and Bertelmann, Roland and Brase, Jan and Diepenbroek, Michael and Grobe, Hannes and Höck, Heinke and Lautenschlager, Michael and Schindler, Uwe and Sens, Irina and Wächter, Joachim}, month = jun, year = {2006}, pages = {79--83}, file = {Klump et al. - 2006 - Data publication in the open access initiative.pdf:C\:\\Users\\carst\\Zotero\\storage\\TZ5MDI28\\Klump et al. - 2006 - Data publication in the open access initiative.pdf:application/pdf}, } @article{klump_doi_2016, title = {{DOI} for geoscience data - how early practices shape present perceptions}, volume = {9}, issn = {1865-0481}, url = {https://doi.org/10.1007/s12145-015-0231-5}, doi = {10.1007/s12145-015-0231-5}, abstract = {The first minting of Digital Object Identifiers (DOI) for research data happened in 2004 in the context of the project “Publication and citation of primary scientific data” (STD-DOI). Some of the concepts and perceptions about DOI for data today have their roots in the way this project implemented DOI for research data and the decisions made in those early days still shape the discussion about the use of persistent identifiers for research data today. This project also laid the foundation for a tighter integration of journal publications and data. Promoted by early adopters, such as PANGAEA, DOI registration for data has reached a high level of maturity and has become an integral part of scientific publishing. 
This paper discusses the fundamental concepts applied in the identification of DOI for research data and how these can be interpreted for alternative and future applications of persistent identifiers for research data.}, language = {en}, number = {1}, journal = {Earth Science Informatics}, author = {Klump, Jens and Huber, Robert and Diepenbroek, Michael}, month = mar, year = {2016}, pages = {123--136}, } @book{noauthor_berlin_2003, title = {Berlin {Declaration} on {Open} {Access} to {Knowledge} in the {Sciences} and {Humanities}}, url = {https://openaccess.mpg.de/}, abstract = {Startseite}, year = {2003}, note = {Publication Title: Max Planck Society}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\PF3CU9WV\\openaccess.mpg.de.html:text/html}, } @book{mcadoo_how_2013, title = {How to {Cite} a {Data} {Set} in {APA} {Style}}, url = {https://blog.apastyle.org/apastyle/2013/12/how-to-cite-a-data-set-in-apa-style.html}, language = {en}, author = {McAdoo, Timothy}, month = dec, year = {2013}, note = {Publication Title: APA Style Blog}, } @book{noauthor_reporting_2019, title = {Reporting standards and availability of data, materials, code and protocols}, url = {https://www.nature.com/nature-research/editorial-policies/reporting-standards}, year = {2019}, note = {Publication Title: Nature}, } @incollection{pampel_re3dataorg_2012, address = {Jülich}, series = {Schriften des {Forschungszentrums} {Jülich} : {Bibliothek} / {Library}}, title = {re3data.org: {Aufbau} eines {Verzeichnisses} von {Forschungsdaten}-{Repositorien}. {Ein} {Werkstattbericht}}, isbn = {978-3-89336-821-1}, abstract = {Der sprichwörtliche „Gelehrte im stillen Kämmerlein“ ist fast völlig verschwunden.Allenthalben gibt es interdisziplinäre Arbeitsgruppen, internationale Kooperationenund virtuelle Forschungsumgebungen. Grundlage solcher Arbeitsformen ist stetseine Vernetzung, die unter vielen verschiedenen Aspekten erfolgen kann. 
AllenAnsätzen ist gemeinsam, dass die Verknüpfung von Fragmenten etwas Neueshervorbringt, das in den isolierten Teilsystemen noch nicht vorhanden war.Mit der WissKom2012 „Vernetztes Wissen – Daten, Menschen, Systeme“ greift dieZentralbibliothek des Forschungszentrums Jülich erneut Themen im Spannungsfeldvon „Bibliothek - Information - Wissenschaft“ in einer Konferenz interdisziplinär aufund versucht, neue Handlungsfelder für Bibliotheken aufzuzeigen. Diese sechsteKonferenz der Zentralbibliothek thematisiert den immer wichtiger werdenden Bereichder Forschungsdaten und den nachhaltigen Umgang mit ihnen. Sie zeigt auf, wasInterdisziplinarität konkret bedeutet und wie bislang isolierte Systeme vernetztwerden können und so Mehrwert entsteht. Der Konferenzband enthält neben denAusführungen der Referenten zudem die Beiträge der Poster Session sowie denFestvortrag von Prof. Viktor Mayer-Schönberger mit dem Titel „Delete: Die Tugenddes Vergessens in digitalen Zeiten“.Ich danke allen herzlich, die zum Gelingen der Konferenz beigetragen haben:Vortragende und Moderatoren, Aussteller, Mitglieder des Programmkomitees,Organisatoren und last but not least die Teilnehmerinnen und Teilnehmer. Einbesonderer Dank gilt dem Forschungszentrum Jülich und den Sponsoren, die dieseKonferenz erst möglich gemacht haben}, language = {de}, number = {21}, booktitle = {Vernetztes {Wissen} – {Daten}, {Menschen}, {Systeme}: 6. 
{Konferenz} der {Zentralbibliothek} {Forschungszentrum} {Jülich}}, publisher = {Forschungszentrum Jülich GmbH Zentralbibliothek}, author = {Pampel, Heinz and Goebelbecker, Hans-Jürgen and Vierkant, Paul}, editor = {Mittermaier, Bernhard}, year = {2012}, pages = {61--73}, } @article{paskin_digital_2005, title = {Digital {Object} {Identifiers} for scientific data}, volume = {4}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/abstract/10.2481/dsj.4.12/}, doi = {10.2481/dsj.4.12}, abstract = {The Digital Object Identifier (DOI) is a system for identifying content objects in the digital environment. DOIs are names assigned to any entity for use on Internet digital networks. Scientific data sets may be identified by DOIs, and several efforts are now underway in this area. This paper outlines the underlying architecture of the DOI system, and two such efforts which are applying DOIs to content objects of scientific data.}, language = {en}, journal = {Data Science Journal}, author = {Paskin, Norman}, year = {2005}, pages = {12--20}, file = {Paskin - 2006 - Digital Object Identifiers for scientific data.pdf:C\:\\Users\\carst\\Zotero\\storage\\TNWL2VAP\\Paskin - 2006 - Digital Object Identifiers for scientific data.pdf:application/pdf}, } @article{smith_international_1996, title = {International {Large}-{Scale} {Sequencing} {Meeting}}, volume = {7}, url = {https://web.ornl.gov/sci/techresources/Human_Genome/publicat/hgn/v7n6/19intern.shtml}, number = {6}, journal = {Human Genome News}, author = {Smith, David and Carrano, Anthony}, year = {1996}, } @article{stall_advancing_2018, title = {Advancing {FAIR} {Data} in {Earth}, {Space}, and {Environmental} {Science}}, volume = {99}, issn = {2324-9250}, url = {https://eos.org/agu-news/advancing-fair-data-in-earth-space-and-environmental-science}, doi = {10.1029/2018EO109301}, abstract = {The Enabling FAIR Data project has brought together a broad spectrum of Earth, space, and environmental science leaders to 
ensure that data are findable, accessible, interoperable, and reusable.}, journal = {Eos}, author = {Stall, Shelley and Yarmey, Lynn and Boehm, Reid and Cousijn, Helena and Cruse, Patricia and Cutcher-Gershenfeld, Joel and Dasler, Robin and de Waard, Anita and Duerr, Ruth and Elger, Kirsten and Fenner, Martin and Glaves, Helen and Hanson, Brooks and Hausman, Jessica and Heber, Joerg and Hills, Denise and Hoebelheinrich, Nancy and Hou, Sophie and Kinkade, Danie and Koskela, Rebecca and Martin, Raleigh and Lehnert, Kerstin and Murphy, Fiona and Nosek, Brian and Parsons, Mark and Petters, Jonathan and Plante, Raymond and Robinson, Erin and Samors, Robert and Servilla, Mark and Ulrich, Robert and Witt, Michael and Wyborn, Lesley}, month = nov, year = {2018}, file = {Volltext:C\:\\Users\\carst\\Zotero\\storage\\NBXTQ24C\\Stall et al. - 2018 - Advancing FAIR Data in Earth, Space, and Environme.pdf:application/pdf}, } @book{the_university_of_chicago_press_editorial_staff_chicago_2017, edition = {17th Edition}, title = {The {Chicago} {Manual} of {Style}}, isbn = {978-0-226-28705-8}, url = {https://www.bibliovault.org/BV.landing.epl?ISBN=9780226287058}, publisher = {University of Chicago Press}, editor = {{The University of Chicago Press Editorial Staff}}, year = {2017}, doi = {10.7208/cmos17}, } @article{piwowar_data_2013, title = {Data reuse and the open data citation advantage}, volume = {1}, issn = {2167-8359}, url = {https://peerj.com/articles/175}, doi = {10.7717/peerj.175}, abstract = {Background. Attribution to the original contributor upon reuse of published data is important both as a reward for data creators and to document the provenance of research findings. Previous studies have found that papers with publicly available datasets receive a higher number of citations than similar studies without available data. 
However, few previous analyses have had the statistical power to control for the many variables known to predict citation rate, which has led to uncertain estimates of the “citation benefit”. Furthermore, little is known about patterns in data reuse over time and across datasets. Method and Results. Here, we look at citation rates while controlling for many known citation predictors and investigate the variability of data reuse. In a multivariate regression on 10,555 studies that created gene expression microarray data, we found that studies that made data available in a public repository received 9\% (95\% confidence interval: 5\% to 13\%) more citations than similar studies for which the data was not made available. Date of publication, journal impact factor, open access status, number of authors, first and last author publication history, corresponding author country, institution citation history, and study topic were included as covariates. The citation benefit varied with date of dataset deposition: a citation benefit was most clear for papers published in 2004 and 2005, at about 30\%. Authors published most papers using their own datasets within two years of their first publication on the dataset, whereas data reuse papers published by third-party investigators continued to accumulate for at least six years. To study patterns of data reuse directly, we compiled 9,724 instances of third party data reuse via mention of GEO or ArrayExpress accession numbers in the full text of papers. The level of third-party data use was high: for 100 datasets deposited in year 0, we estimated that 40 papers in PubMed reused a dataset by year 2, 100 by year 4, and more than 150 data reuse papers had been published by year 5. Data reuse was distributed across a broad base of datasets: a very conservative estimate found that 20\% of the datasets deposited between 2003 and 2007 had been reused at least once by third parties. Conclusion. 
After accounting for other factors affecting citation rate, we find a robust citation benefit from open data, although a smaller one than previously reported. We conclude there is a direct effect of third-party data reuse that persists for years beyond the time when researchers have published most of the papers reusing their own data. Other factors that may also contribute to the citation benefit are considered. We further conclude that, at least for gene expression microarray data, a substantial fraction of archived datasets are reused, and that the intensity of dataset reuse has been steadily increasing since 2003.}, language = {en}, journal = {PeerJ}, author = {Piwowar, Heather A. and Vision, Todd J.}, month = oct, year = {2013}, pages = {e175}, file = {Piwowar und Vision - 2013 - Data reuse and the open data citation advantage.pdf:C\:\\Users\\carst\\Zotero\\storage\\LGLBBDC7\\Piwowar und Vision - 2013 - Data reuse and the open data citation advantage.pdf:application/pdf}, } @book{noauthor_data_2019, title = {Data {Availability}}, url = {https://journals.plos.org/plosbiology/s/data-availability}, year = {2019}, note = {Publication Title: PLOS}, } @techreport{rucknagel_metadata_2015, title = {Metadata {Schema} for the {Description} of {Research} {Data} {Repositories}}, url = {https://gfzpublic.gfz-potsdam.de/pubman/item/item_1397899}, language = {en}, number = {Version 3.0}, institution = {re3data}, author = {Rücknagel, Jessika and Vierkant, Paul and Ulrich, Robert and Kloska, Gabriele and Schnepf, Edeltraud and Fichtmüller, David and Reuter, Evelyn and Semrau, Angelika and Kindling, Maxi and Pampel, H. and Witt, Michael and Fritze, Florian and Van De Sandt, Stephanie and Klump, Jens and Goebelbecker, Hans-Jürgen and Skarupianski, Michael and Bertelmann, Roland and Schirmbacher, Peter and Scholze, Frank and Kramer, Claudia and Fuchs, Claudio and Spier, Shaked and Kirchhoff, Agnes}, year = {2015}, doi = {10.2312/RE3.008}, pages = {29}, file = {Rücknagel et al. 
- 2015 - Metadata Schema for the Description of Research Da.pdf:C\:\\Users\\carst\\Zotero\\storage\\NHQT6KFM\\Rücknagel et al. - 2015 - Metadata Schema for the Description of Research Da.pdf:application/pdf}, } @article{sayers_genbank_2019, title = {{GenBank}}, volume = {47}, issn = {0305-1048}, url = {https://academic.oup.com/nar/article/47/D1/D94/5144964}, doi = {10.1093/nar/gky989}, abstract = {GenBank® (www.ncbi.nlm.nih.gov/genbank/) is a comprehensive database that contains publicly available nucleotide sequences for 420 000 formally described species. Most GenBank submissions are made using BankIt, the NCBI Submission Portal, or the tool tbl2asn, and are obtained from individual laboratories and batch submissions from large-scale sequencing projects, including whole genome shotgun (WGS) and environmental sampling projects. Daily data exchange with the European Nucleotide Archive (ENA) and the DNA Data Bank of Japan (DDBJ) ensures worldwide coverage. GenBank is accessible through the NCBI Nucleotide database, which links to related information such as taxonomy, genomes, protein sequences and structures, and biomedical journal literature in PubMed. BLAST provides sequence similarity searches of GenBank and other sequence databases. Complete bimonthly releases and daily updates of the GenBank database are available by FTP. Recent updates include an expansion of sequence identifier formats to accommodate expected database growth, submission wizards for ribosomal RNA, and the transfer of Expressed Sequence Tag (EST) and Genome Survey Sequence (GSS) data into the Nucleotide database.}, language = {en}, number = {D1}, journal = {Nucleic Acids Research}, author = {Sayers, Eric W. and Cavanaugh, Mark and Clark, Karen and Ostell, James and Pruitt, Kim D. and Karsch-Mizrachi, Ilene}, month = jan, year = {2019}, pages = {D94--D99}, file = {Sayers et al. - 2019 - GenBank.pdf:C\:\\Users\\carst\\Zotero\\storage\\GUHCTF6F\\Sayers et al. 
- 2019 - GenBank.pdf:application/pdf}, } @book{shepherdson_new_2018, title = {The new {CESSDA} data catalogue}, url = {https://zenodo.org/record/2530106#.X22fc-3gpoU}, abstract = {The new CESSDA Data Catalogue provides a unified multilingual search/browse interface to Social Science and Humanities (SSH) data stored in decentralised and specialised data centres operated by CESSDA Service Providers. The metadata describing the Service Providers’ holdings is harvested via Open APIs and indexed (by language) to provide an aggregated collection covering a wide range of institutions. This work relates to two of CESSDA’s four pillars, namely Technology and Tools \& Services (the other two being Training and Trust). By building a modern cloud-based infrastructure, CESSDA is able to operate its services efficiently and effectively without dependency on any single national infrastructure. The adoption of state-of-the containerisation methods ensures the scalability needed to serve the European SSH research community. Secondly, by mapping existing DDI metadata formats to the CESSDA Metadata Model, existing well established (e.g. OAI-PMH) interfaces and exchange methods can be seamlessly harnessed to underpin this addition to CESSDA’s suite of user friendly tools \& services.}, language = {en}, author = {Shepherdson, John and Thiel, Carsten}, month = dec, year = {2018}, doi = {10.5281/zenodo.2530106}, file = {Shepherdson und Thiel - 2018 - The new CESSDA data catalogue.pdf:C\:\\Users\\carst\\Zotero\\storage\\BHLNF5L5\\Shepherdson und Thiel - 2018 - The new CESSDA data catalogue.pdf:application/pdf}, } @book{noauthor_about_nodate-1, title = {About {\textbackslash}textbar {Scientific} {Data}}, copyright = {©2020 Macmillan Publishers Limited. 
All Rights Reserved.}, url = {https://www.nature.com/sdata/about}, language = {en}, note = {Publication Title: Springer Nature}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\58ZZ6WLX\\about.html:text/html}, } @techreport{voigt_report_2016, address = {Potsdam}, title = {Report on the {Data} {Base} of the {International} {Geodynamics} and {Earth} {Tide} {Service} ({IGETS})}, copyright = {CC BY SA}, url = {https://gfzpublic.gfz-potsdam.de/pubman/item/item_1870888}, abstract = {The International Geodynamics and Earth Tide Service (IGETS) was established in 2015 by the International Association of Geodesy (IAG). IGETS continues the activities of the Global Geodynamics Project (GGP, 1997-2015) to provide support to geodetic and geophysical research activities using superconducting gravimeter data within the context of an international network. The primary objective of IGETS is to provide a service to monitor temporal variations of the Earth’s gravity field through long-term records from ground gravimeters, tiltmeters, strainmeters and other geodynamic sensors. IGETS also continues the activities of the International Center for Earth Tides, in particular, in collecting, archiving and distributing Earth tide records from long series of the various geodynamic sensors. This report is a compilation of data descriptions originating to a large part from GGP but including updates and extensions for IGETS.}, language = {en}, institution = {Deutsches GeoForschungsZentrum GFZ}, author = {Voigt, Christian and Förste, C. and Wziontek, Hartmut and Crossley, David and Meurers, Bruno and Pálinkáš, Vojtech and Hinderer, Jacques and Boy, Jean-Paul and Barriot, Jean-Pierre and Sun, Heping}, year = {2016}, doi = {10.2312/GFZ.b103-16087}, pages = {24}, file = {Voigt et al. - 2016 - Report on the Data Base of the International Geody.pdf:C\:\\Users\\carst\\Zotero\\storage\\RIB2CSPI\\Voigt et al. 
- 2016 - Report on the Data Base of the International Geody.pdf:application/pdf}, } @article{piwowar_sharing_2007, title = {Sharing {Detailed} {Research} {Data} {Is} {Associated} with {Increased} {Citation} {Rate}}, volume = {2}, issn = {1932-6203}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0000308}, doi = {10.1371/journal.pone.0000308}, abstract = {BackgroundSharing research data provides benefit to the general scientific community, but the benefit is less obvious for the investigator who makes his or her data available.Principal FindingsWe examined the citation history of 85 cancer microarray clinical trial publications with respect to the availability of their data. The 48\% of trials with publicly available microarray data received 85\% of the aggregate citations. Publicly available data was significantly (p = 0.006) associated with a 69\% increase in citations, independently of journal impact factor, date of publication, and author country of origin using linear regression.SignificanceThis correlation between publicly available data and increased literature impact may further motivate investigators to share their detailed research data.}, language = {en}, number = {3}, journal = {PLOS ONE}, author = {Piwowar, Heather A. and Day, Roger S. and Fridsma, Douglas B.}, month = mar, year = {2007}, pages = {e308}, file = {Piwowar et al. - 2007 - Sharing Detailed Research Data Is Associated with .PDF:C\:\\Users\\carst\\Zotero\\storage\\GJ8UZR3J\\Piwowar et al. 
- 2007 - Sharing Detailed Research Data Is Associated with .PDF:application/pdf}, } @article{rauber_data_2015, title = {Data {Citation} of {Evolving} {Data}: {Recommendations} of the {Working} {Group} on {Data} {Citation} ({WGDC})}, shorttitle = {Data {Citation} of {Evolving} {Data}}, url = {https://zenodo.org/record/1406002#.X22eRu3gpoV}, doi = {10.15497/RDA00016}, abstract = {The WGDC recommendations enable researchers and data centers to identify and cite data used in experiments and studies. Instead of providing static data exports or textual descriptions of data subsets, we support a dynamic, query centric view of data sets. The proposed solution enables precise identification of the very subset and version of data used, supporting reproducibility of processes, sharing and reuse of data. The goal of the WG were to create identification mechanisms that (a) allow us to identify and cite arbitrary views of data, from a single record to an entire data set in a precise, machine-actionable manner; (b) allow us to cite and retrieve that data as it existed at a certain point in time, whether the database is static or highly dynamic; and (c) is stable across different technologies and technological changes. The WG recommends solving this challenge by (1) ensuring that data is stored in a versioned and timestamped manner and (2) identifying data sets by storing and assigning persistent identifiers (PIDs) to timestamped queries that can be re-executed against the timestamped data store.}, language = {en}, author = {Rauber, Andreas and Asmi, Ari and van Uytvanck, Dieter and Proell, Stefan}, month = oct, year = {2015}, file = {Zenodo Full Text PDF:C\:\\Users\\carst\\Zotero\\storage\\VMKM6DIG\\Rauber et al. 
- 2015 - Data Citation of Evolving Data Recommendations of.pdf:application/pdf}, } @book{wziontek_superconducting_2017, title = {Superconducting {Gravimeter} {Data} from {Bad} {Homburg} - {Level} 1}, copyright = {CC BY 4.0}, url = {http://dataservices.gfz-potsdam.de/igets/showshort.php?id=escidoc:2387890}, abstract = {The International Geodynamics and Earth Tide Service (IGETS) was established in 2015 by the International Association of Geodesy (IAG). IGETS continues the activities of the Global Geodynamics Project (GGP, 1997-2015) to provide support to geodetic and geophysical research activities using superconducting gravimeter (SG) data within the context of an international network. The gravimetric reference station Bad Homburg (Germany) is operated by the Federal Agency for Cartography and Geodesy (BKG) and was established in 1979. Continuous temporal gravity and atmospheric pressure time series from the different SGs is made available to the IGETS data base hosted by ISDC (Information System and Data Center) at GFZ. The district town Bad Homburg is situated about 25 km north-east of Frankfurt/Main (longitude: 8.61 E, latitude: 50.23 N, height above MSL: 188 m) on the southern slope of the Taunus mountains. The gravity station is located inside a cellar of the Landgraves' Castle and is characterized by low environmental noise. Inside the vault a separated housing with insulated walls enables controlled environment. All rooms are thermally stabilized by an air-conditioning system. The station provides 5 separate piers of concrete (2 for SG, 3 for AG) which are grounded on bedrock (approx. 1.8 m deep, on greenschist) and are decoupled from the wooden floor. Since 1981, an almost uninterrupted time series of gravity and barometric pressure variations was acquired with different SGs. In 1993, a series of repeated measurements with different absolute gravimeters (AG) was started. 
From December 1999 to April 2007 the dual sphere SG CD030 was operated and since February 2007 OSG044 is recording. These data are available at the IGETS database. The first SG installed at the station was TT40 (1981/04-1989/06, later at Richmond/Florida, USA) and TT60 (1985/06-1988/11, later at Wettzell/Germany). Several shorter test registrations were performed with SG103 (1998/12-1999/04), as well as SG030 (2008/02-2010/06) and SG029 (2011/03-2012/04) after upgrades. In November 2003 and May 2004 two ground water wells within a distance of 200 m were constructed to support the monitoring of local water storage changes. Due to the excellent stability of the station and the ability of facilities to inter-compare absolute and superconducting gravimeters, the station Bad Homburg was developed as a reference site for the national German gravity reference system. In Addition, data from the superconducting gravimeter iGrav006 operated by GFZ Potsdam from March 2015 until March 2017 are available (Güntner et al., 2017, http://doi.org/10.5880/igets.we.gfz.l1.001).}, language = {en}, publisher = {BKG Federal Agency for Cartography and Geodesy}, author = {Wziontek, Hartmut and Wolf, Peter and Nowak, Ilona and Richter, Bernd and Rülke, Axel and Wilmes, Herbert}, year = {2017}, doi = {10.5880/IGETS.BH.L1.001}, } @techreport{wellcome_trust_sharing_2003, address = {Fort Lauderdale, USA}, type = {Report}, title = {Sharing {Data} from {Large}-scale {Biological} {Research} {Projects}: {A} {System} of {Tripartite} {Responsibility}: {Report} of a meeting organized by the {Wellcome} {Trust} and held on 14–15 {January} 2003 at {Fort} {Lauderdale}, {USA}.}, url = {https://www.genome.gov/Pages/Research/WellcomeReport0303.pdf}, language = {en}, institution = {Wellcome Trust}, author = {{Wellcome Trust}}, year = {2003}, pages = {6}, file = {Wellcome Trust - 2003 - Sharing Data from Large-scale Biological Research .pdf:C\:\\Users\\carst\\Zotero\\storage\\TJYFS7KU\\Wellcome Trust - 2003 - 
Sharing Data from Large-scale Biological Research .pdf:application/pdf}, } @book{grashoff_digital_2016, title = {Digital {Pantheon}: {Results} of the complete scan of the {Pantheon}}, shorttitle = {Digital {Pantheon}}, url = {http://repository.edition-topoi.org/collection/BDPP}, abstract = {The collection 'Digital Pantheon' is based on research data of the Bern Digital Pantheon project. This project - directed by Gerd Graßhoff, Michael Heinzelmann and Markus Wäfler of the University of Bern - created a digital 3d scan of the Pantheon in Rome using a laser scanner in several scanning campaigns in the years 2005 to 2008. On the basis of these data, the registered papers were published and the website www.digitalpantheon.ch established. Since 2010, this website is no longer being maintained. The collection 'Digital Pantheon' is further processing the research data. It provides long-term archiving of the data, which has been further analysed within the framework of the Excellence Cluster TOPOI, and makes it available to interested researchers and the public.}, publisher = {Edition Topoi}, author = {Graßhoff, Gerd and Heinzelmann, Michael and Wäfler, Markus and Berndt, Christian and Albers, Jon and Kaelin, Oskar and Kulawik, Bernd and Rosenbauer, Ralph and Theocharis, Nikolaos and Lustenberger, Michael and Fritsch, Bernhard}, year = {2016}, doi = {10.17171/1-4}, } @article{achterberg_fachinformationsdienst_2018, title = {Der {Fachinformationsdienst} {Geowissenschaften} der festen {Erde} ({FID} {GEO}):: {The} {Specialised} {Information} {Service} for {Solid} {Earth} {Geosciences} ({FID} {GEO}):}, volume = {52}, issn = {0006-1972, 2194-9646}, shorttitle = {Der {Fachinformationsdienst} {Geowissenschaften} der festen {Erde} ({FID} {GEO})}, url = {https://www.degruyter.com/view/journals/bd/52/5/article-p391.xml}, doi = {10.1515/bd-2018-0045}, abstract = {Der Fachinformationsdienst Geowissenschaften der festen Erde (FID GEO), betrieben durch die Niedersächsische Staats- und 
Universitätsbibliothek (SUB) Göttingen und die Bibliothek des Wissenschaftsparks Albert Einstein am GeoForschungsZentrum (GFZ) Potsdam, entwickelt diverse Angebote in den Bereichen E-Publizieren, Forschungsdaten und Digitalisierung. Auf der Homepage www.fidgeo.de sind die Services zusammengestellt. Auf dem Publikationsserver des FID GEO, GEO-LEOe-docs, werden Inhalte dauerhaft frei zugänglich angeboten. Für die Publikation von Forschungsdaten über den FID GEO steht das Repositorium GFZ Data Services zur Verfügung. FID GEO kooperiert eng mit den geowissenschaftlichen Fachgesellschaften.}, language = {de}, number = {5}, journal = {Bibliotheksdienst}, author = {Achterberg, Inke and Bertelmann, Roland and Elger, Kirsten and Hübner, Andreas and Pfurr, Norbert and Schüler, Mechthild}, month = may, year = {2018}, pages = {391--405}, file = {Achterberg et al. - 2018 - Der Fachinformationsdienst Geowissenschaften der f.pdf:C\:\\Users\\carst\\Zotero\\storage\\Q9RE4VN9\\Achterberg et al. - 2018 - Der Fachinformationsdienst Geowissenschaften der f.pdf:application/pdf}, } @article{bertelmann_discovery_2012, title = {Discovery jenseits von “all you can eat” und “one size fits all”}, volume = {36}, issn = {0341-4183, 1865-7648}, url = {https://www.degruyter.com/view/journals/bfup/36/3/article-p369.xml}, doi = {10.1515/bfp-2012-0050}, abstract = {Der Artikel Discovery jenseits von “all you can eat” und “one size fits all” wurde am 01.12 in der Zeitschrift Bibliothek Forschung und Praxis (Band 36, Heft 3) veröffentlicht.}, language = {de}, number = {3}, journal = {Bibliothek Forschung und Praxis}, author = {Bertelmann, Roland and Szott, Sascha and Höhnow, Tobias}, month = dec, year = {2012}, pages = {369--376}, file = {Bertelmann et al. - 2012 - Discovery jenseits von “all you can eat” und “one .pdf:C\:\\Users\\carst\\Zotero\\storage\\4B49LWFG\\Bertelmann et al. 
- 2012 - Discovery jenseits von “all you can eat” und “one .pdf:application/pdf}, } @article{burton_scholix_2017, title = {The {Scholix} {Framework} for {Interoperability} in {Data}-{Literature} {Information} {Exchange}}, volume = {23}, issn = {1082-9873}, url = {http://www.dlib.org/dlib/january17/burton/01burton.html}, doi = {10.1045/january2017-burton}, abstract = {The Scholix Framework (SCHOlarly LInk eXchange) is a high level interoperability framework for exchanging information about the links between scholarly literature and data, as well as between datasets. Over the past decade, publishers, data centers, and indexing services have agreed on and implemented numerous bilateral agreements to establish bidirectional links between research data and the scholarly literature. However, because of the considerable differences inherent to these many agreements, there is very limited interoperability between the various solutions. This situation is fueling systemic inefficiencies and limiting the value of these, separated, sets of links. Scholix, a framework proposed by the RDA/WDS Publishing Data Services working group, envisions a universal interlinking service and proposes the technical guidelines of a multi-hub interoperability framework. Hubs are natural collection and aggregation points for data-literature information from their respective communities. Relevant hubs for the communities of data centers, repositories, and journals include DataCite, OpenAIRE, and Crossref, respectively. The framework respects existing community-specific practices while enabling interoperability among the hubs through a common conceptual model, an information model and open exchange protocols. 
The proposed framework will make research data, and the related literature, easier to find and easier to interpret and reuse, and will provide additional incentives for researchers to share their data}, language = {en}, number = {1/2}, journal = {D-Lib Magazine}, author = {Burton, Adrian and Aryani, Amir and Koers, Hylke and Manghi, Paolo and La Bruzzo, Sandro and Stocker, Markus and Diepenbroek, Michael and Schindler, Uwe and Fenner, Martin}, year = {2017}, } @book{noauthor_mandatory_nodate, title = {Mandatory {Data} {Deposition}}, url = {https://www.cell.com/cell/authors}, note = {Publication Title: Cell}, } @article{toronto_international_data_release_workshop_authors_prepublication_2009, title = {Prepublication data sharing}, volume = {461}, issn = {0028-0836, 1476-4687}, url = {http://www.nature.com/articles/461168a}, doi = {10.1038/461168a}, abstract = {Rapid release of prepublication data has served the field of genomics well. Attendees at a workshop in Toronto recommend extending the practice to other biological data sets.}, language = {en}, number = {7261}, journal = {Nature}, editor = {{Toronto International Data Release Workshop Authors}}, year = {2009}, pages = {168--170}, file = {Akzeptierte Version:C\:\\Users\\carst\\Zotero\\storage\\64YWLFCI\\Toronto International Data Release Workshop Authors - 2009 - Prepublication data sharing.pdf:application/pdf}, } @book{burgess_building_2018, title = {Building {Google} {Dataset} {Search} and {Fostering} an {Open} {Data} {Ecosystem}}, url = {http://ai.googleblog.com/2018/09/building-google-dataset-search-and.html}, abstract = {Posted by Matthew Burgess and Natasha Noy, Google AI Earlier this month we launched Google Dataset Search , a tool designed to make it eas...}, language = {en}, author = {Burgess, Matthew and Noy, Natasha}, month = sep, year = {2018}, note = {Publication Title: Google AI Blog}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\9VTFYLAU\\building-google-dataset-search-and.html:text/html}, 
} @book{noauthor_genbank_2015, title = {{GenBank} {Celebrates} 25 {Years} of {Service} with {Two}-{Day} {Conference}; {Leading} {Scientists} {Will} {Discuss} the {DNA} {Database} at {April} 7-8 {Meeting}}, url = {https://www.nih.gov/news-events/news-releases/genbank-celebrates-25-years-service-two-day-conference-leading-scientists-will-discuss-dna-database-april-7-8-meeting}, abstract = {For a quarter century, GenBank has helped advance scientific discovery worldwide. Established by the National Institutes of Health (NIH) in 1982, the database of nucleic acid sequences is one of the key tools that scientists use to conduct biomedical and biologic research. Since its creation, GenBank has grown at an exponential rate, doubling in size every 18 months. In celebration of this vital resource and its contribution to science over the last 25 years, the National Center for Biotechnology Information, National Library of Medicine (NLM), NIH, is holding a two-day conference on GenBank.}, language = {en}, month = sep, year = {2015}, note = {Publication Title: National Institutes of Health (NIH)}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\653X8NYN\\genbank-celebrates-25-years-service-two-day-conference-leading-scientists-will-discuss-dna-data.html:text/html}, } @techreport{dallmeier-tiessen_strategien_2011, type = {Working {Paper}}, title = {Strategien bei der {Veröffentlichung} von {Forschungsdaten}}, url = {https://www.econstor.eu/handle/10419/75349}, abstract = {Forschungsdaten liegen in Abhängigkeit der Disziplinen in vielfältigen Formen und Formaten vor. Sie sind in allen Disziplinen Teil des wissenschaftlichen Erkenntnisprozesses. Als digitales Informationsobjekt sind sie komplex und bislang wenig untersucht. Mit den Möglichkeiten neuer Informationstechnologien werden in den letzten Jahren neue Wege in der Publikation von Forschungsdaten beschritten. 
Mit Blick auf die Naturwissenschaften werden im Folgenden drei Publikationsmodelle beschrieben: Die Veröffentlichung von Forschungsdaten als eigenständiges Objekt in einem Forschungsdatenrepositorium, die Veröffentlichung von Forschungsdaten mit textueller Dokumentation und die Veröffentlichung von Forschungsdaten als Anreicherung einer interpretativen Text-Publikation.}, language = {de}, number = {173}, institution = {RatSWD}, author = {Dallmeier-Tiessen, Sünje}, year = {2011}, file = {Dallmeier-Tiessen - 2011 - Strategien bei der Veröffentlichung von Forschungs.pdf:C\:\\Users\\carst\\Zotero\\storage\\AY4DHEQ8\\Dallmeier-Tiessen - 2011 - Strategien bei der Veröffentlichung von Forschungs.pdf:application/pdf}, } @article{datacite_metadata_working_group_datacite_2019, title = {{DataCite} {Metadata} {Schema} {Documentation} for the {Publication} and {Citation} of {Research} {Data} v4.3}, url = {https://schema.datacite.org/meta/kernel-4.3/}, doi = {10.14454/7XQ3-ZF69}, language = {en}, editor = {{DataCite Metadata Working Group}}, year = {2019}, file = {DataCite Metadata Working Group - 2019 - DataCite Metadata Schema Documentation for the Pub.pdf:C\:\\Users\\carst\\Zotero\\storage\\56ZZ7IXB\\DataCite Metadata Working Group - 2019 - DataCite Metadata Schema Documentation for the Pub.pdf:application/pdf}, } @book{noauthor_about_2017, title = {About}, url = {https://www.coretrustseal.org/about/}, abstract = {The World Data System of the International Science Council (WDS) and the Data Seal of Approval (DSA) are pleased to announce the launch of a new certification organization: CoreTrustSeal. 
CoreTrust…}, language = {en-GB}, month = jun, year = {2017}, note = {Publication Title: CoreTrustSeal}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\99GYFX2H\\about.html:text/html}, } @book{noauthor_aims_nodate, title = {Aims and scope}, url = {https://www.earth-system-science-data.net/about/aims_and_scope.htmlAims and scope}, language = {en}, note = {Publication Title: ESSD}, } @book{noauthor_statement_2015, title = {Statement of {Commitment} from {Earth} and {Space} {Science} {Publishers} and {Data} {Facilities}}, url = {http://www.copdess.org/statement-of-commitment/}, language = {en-US}, month = jan, year = {2015}, note = {Publication Title: COPDESS Statement of Commitment}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\IYR4N7IB\\statement-of-commitment.html:text/html}, } @techreport{group_joint_2014, title = {Joint {Declaration} of {Data} {Citation} {Principles}}, url = {https://www.force11.org/group/joint-declaration-data-citation-principles-final}, abstract = {Sound, reproducible scholarship rests upon a foundation of robust, accessible data. For this to be so in practice as well as theory, data must be accorded due importance in the practice of scholarship and in the enduring scholarly record. In other words, data should be considered legitimate, citable products of research. Data citation, like the citation of other evidence and sources, is good research practice and is part of the scholarly ecosystem supporting data reuse. 
In support of this assertion, and to encourage good practice, we offer a set of guiding principles for data within scholarly literature, another dataset, or any other research object.}, institution = {Force11}, author = {Group, Data Citation Synthesis}, year = {2014}, doi = {10.25490/A97F-EGYK}, } @article{drachen_sharing_2016, title = {Sharing data increases citations}, volume = {26}, issn = {2213-056X}, url = {http://www.liberquarterly.eu/article/10.18352/lq.10149/}, doi = {10.18352/lq.10149}, abstract = {This paper presents some indications to the existence of a citation advantage related to sharing data using astrophysics as a case. Through bibliometric analyses we find a citation advantage for astrophysical papers in core journals. The advantage arises as indexed papers are associated with data by bibliographical links, and consists of papers receiving on average significantly more citations per paper per year, than do papers not associated with links to data.}, language = {en}, number = {2}, journal = {LIBER Quarterly}, author = {Drachen, Thea and Ellegaard, Ole and Larsen, Asger and Dorch, Søren}, month = aug, year = {2016}, pages = {67--82}, file = {Drachen et al. - 2016 - Sharing data increases citations.pdf:C\:\\Users\\carst\\Zotero\\storage\\43UU2LWW\\Drachen et al. 
- 2016 - Sharing data increases citations.pdf:application/pdf}, } @book{noauthor_g8_2013, title = {G8 {Science} {Ministers} {Statement}}, url = {https://www.gov.uk/government/news/g8-science-ministers-statement}, abstract = {On 12 June the Royal Society hosted the first ever G8 joint Science Ministers and national science academies meeting in London}, language = {en}, year = {2013}, note = {Publication Title: GOV.UK}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\5N9GXNLX\\g8-science-ministers-statement.html:text/html}, } @book{noauthor_datensatz_nodate, title = {Datensatz}, url = {https://developers.google.com/search/docs/data-types/dataset?hl=de}, language = {de}, note = {Publication Title: Google Developers}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\42DFK3A3\\dataset.html:text/html}, } @article{hanson_committing_2015, title = {Committing to {Publishing} {Data} in the {Earth} and {Space} {Sciences}}, volume = {96}, issn = {2324-9250}, url = {https://eos.org/agu-news/committing-publishing-data-earth-space-sciences}, doi = {10.1029/2015EO022207}, abstract = {A new initiative joins together publishers and data facilities to enable data stewardship.}, journal = {Eos}, author = {Hanson, Brooks and Kerstin Lehnert, Kerstin Lehnert and Cutcher-Gershenfeld, Joel}, month = jan, year = {2015}, } @article{dorch_data_2015, title = {The data sharing advantage in astrophysics}, url = {http://arxiv.org/abs/1511.02512}, abstract = {We present here evidence for the existence of a citation advantage within astrophysics for papers that link to data. Using simple measures based on publication data from NASA Astrophysics Data System we find a citation advantage for papers with links to data receiving on the average significantly more citations per paper than papers without links to data. 
Furthermore, using INSPEC and Web of Science databases we investigate whether either papers of an experimental or theoretical nature display different citation behavior.}, journal = {arXiv:1511.02512 [astro-ph]}, author = {Dorch, S. B. F. and Drachen, T. M. and Ellegaard, O.}, month = nov, year = {2015}, file = {Dorch et al. - 2015 - The data sharing advantage in astrophysics.pdf:C\:\\Users\\carst\\Zotero\\storage\\R4U4ALC9\\Dorch et al. - 2015 - The data sharing advantage in astrophysics.pdf:application/pdf}, } @article{bloom_data_2014, title = {Data {Access} for the {Open} {Access} {Literature}: {PLOS}'s {Data} {Policy}}, volume = {12}, issn = {1545-7885}, shorttitle = {Data {Access} for the {Open} {Access} {Literature}}, url = {https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1001797}, doi = {10.1371/journal.pbio.1001797}, language = {en}, number = {2}, journal = {PLOS Biology}, author = {Bloom, Theodora and Ganley, Emma and Winker, Margaret}, month = feb, year = {2014}, pages = {e1001797}, file = {Bloom et al. - 2014 - Data Access for the Open Access Literature PLOS's.PDF:C\:\\Users\\carst\\Zotero\\storage\\YUW3J9KA\\Bloom et al. - 2014 - Data Access for the Open Access Literature PLOS's.PDF:application/pdf}, } @incollection{diepenbroek_data_1999, address = {Berlin, Heidelberg}, title = {Data {Management} of {Proxy} {Parameters} with {PANGAEA}}, isbn = {978-3-642-58646-0}, url = {https://doi.org/10.1007/978-3-642-58646-0_29}, abstract = {Specific parameters determined from marine sediments can be used as proxy data to calculate former ocean properties. To use this scientific resource effectively an information system is needed which guarantees consistent longtime storage of the proxy data and provides easy access for the scientific community. An information system to archive proxy data of paleoclimatic relevance, together with the related meta-information, raw data and evaluated paleoclimatic data, is presented here. 
The system provides standardized import and export routines, easy access with uniform retrieval functions, and tools for the visualization of data. The network is designed as a client/server system providing access through the Internet.}, language = {en}, booktitle = {Use of {Proxies} in {Paleoceanography}: {Examples} from the {South} {Atlantic}}, publisher = {Springer}, author = {Diepenbroek, M. and Grobe, H. and Reinke, M. and Schlitzer, R. and Sieger, R.}, editor = {Fischer, Gerhard and Wefer, Gerold}, year = {1999}, doi = {10.1007/978-3-642-58646-0_29}, pages = {715--727}, file = {Diepenbroek et al. - 1999 - Data Management of Proxy Parameters with PANGAEA.pdf:C\:\\Users\\carst\\Zotero\\storage\\F6E8HXMD\\Diepenbroek et al. - 1999 - Data Management of Proxy Parameters with PANGAEA.pdf:application/pdf}, } @article{belter_measuring_2014, title = {Measuring the {Value} of {Research} {Data}: {A} {Citation} {Analysis} of {Oceanographic} {Data} {Sets}}, volume = {9}, issn = {1932-6203}, shorttitle = {Measuring the {Value} of {Research} {Data}}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0092590}, doi = {10.1371/journal.pone.0092590}, abstract = {Evaluation of scientific research is becoming increasingly reliant on publication-based bibliometric indicators, which may result in the devaluation of other scientific activities - such as data curation – that do not necessarily result in the production of scientific publications. This issue may undermine the movement to openly share and cite data sets in scientific publications because researchers are unlikely to devote the effort necessary to curate their research data if they are unlikely to receive credit for doing so. This analysis attempts to demonstrate the bibliometric impact of properly curated and openly accessible data sets by attempting to generate citation counts for three data sets archived at the National Oceanographic Data Center. 
My findings suggest that all three data sets are highly cited, with estimated citation counts in most cases higher than 99\% of all the journal articles published in Oceanography during the same years. I also find that methods of citing and referring to these data sets in scientific publications are highly inconsistent, despite the fact that a formal citation format is suggested for each data set. These findings have important implications for developing a data citation format, encouraging researchers to properly curate their research data, and evaluating the bibliometric impact of individuals and institutions.}, language = {en}, number = {3}, journal = {PLOS ONE}, author = {Belter, Christopher W.}, month = mar, year = {2014}, pages = {e92590}, file = {Belter - 2014 - Measuring the Value of Research Data A Citation A.PDF:C\:\\Users\\carst\\Zotero\\storage\\V7AR475G\\Belter - 2014 - Measuring the Value of Research Data A Citation A.PDF:application/pdf}, } @article{colavizza_citation_2020, title = {The citation advantage of linking publications to research data}, volume = {15}, issn = {1932-6203}, url = {http://arxiv.org/abs/1907.02565}, doi = {10.1371/journal.pone.0230416}, abstract = {Efforts to make research results open and reproducible are increasingly reflected by journal policies encouraging or mandating authors to provide data availability statements. As a consequence of this, there has been a strong uptake of data availability statements in recent literature. Nevertheless, it is still unclear what proportion of these statements actually contain well-formed links to data, for example via a URL or permanent identifier, and if there is an added value in providing such links. 
We consider 531,889 journal articles published by PLOS and BMC, develop an automatic system for labelling their data availability statements according to four categories based on their content and the type of data availability they display, and finally analyze the citation advantage of different statement categories via regression. We find that, following mandated publisher policies, data availability statements become very common. In 2018 93.7\% of 21,793 PLOS articles and 88.2\% of 31,956 BMC articles had data availability statements. Data availability statements containing a link to data in a repository – rather than being available on request or included as supporting information files – are a fraction of the total. In 2017 and 2018, 20.8\% of PLOS publications and 12.2\% of BMC publications provided DAS containing a link to data in a repository. We also find an association between articles that include statements that link to data in a repository and up to 25.36\% (\${\textbackslash}textbackslashpm\${\textbackslash}textasciitilde1.07\%) higher citation impact on average, using a citation prediction model. We discuss the potential implications of these results for authors (researchers) and journal publishers who make the effort of sharing their data in repositories. All our data and code are made available in order to reproduce and extend our results.}, language = {en}, number = {4}, journal = {PLOS ONE}, author = {Colavizza, Giovanni and Hrynaszkiewicz, Iain and Staden, Isla and Whitaker, Kirstie and McGillivray, Barbara}, month = apr, year = {2020}, pages = {e0230416}, file = {Colavizza et al. - 2020 - The citation advantage of linking publications to .pdf:C\:\\Users\\carst\\Zotero\\storage\\CVFJZBP7\\Colavizza et al. 
- 2020 - The citation advantage of linking publications to .pdf:application/pdf}, } @book{fenner_datacites_2019, title = {{DataCite}'s {New} {Search}}, url = {https://blog.datacite.org/improving-search/}, abstract = {Today we are announcing our first new functionality of 2019, a much improved search for DataCite DOIs and metadata. While the DataCite Search user interface has not changed, changes under the hood bring many important improvements and are our biggest...}, author = {Fenner, Martin}, month = jan, year = {2019}, doi = {10.5438/VYD9-TY64}, } @article{weigel_framework_nodate, title = {A {Framework} for {Extended} {Persistent} {Identification} of {Scientific} {Assets}}, volume = {12}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/abstract/10.2481/dsj.12-036/}, doi = {10.2481/dsj.12-036}, abstract = {Several scientific communities relying on e-science infrastructures are in need of persistent identifiers for data and contextual information. In this article, we present a framework for persistent identification that fundamentally supports context information. It is installed as a number of low-level requirements and abstract data type descriptions, flexible enough to envelope context information while remaining compatible with existing definitions and infrastructures. The abstract data type definitions we draw from the requirements and exemplary use cases can act as an evaluation tool for existing implementations or as a blueprint for future persistent identification infrastructures. A prototypic implementation based on the Handle System is briefly introduced. We also lay the groundwork for establishing a graph of persistent entities that can act as a base layer for more sophisticated information schemas to preserve context information.}, language = {en}, journal = {Data Science Journal}, author = {Weigel, Tobias and Lautenschlager, Michael and Toussaint, Frank and Kindermann, Stephan}, pages = {10--22}, file = {Weigel et al. 
- A Framework for Extended Persistent Identification.pdf:C\:\\Users\\carst\\Zotero\\storage\\IN9IX5TA\\Weigel et al. - A Framework for Extended Persistent Identification.pdf:application/pdf}, } @techreport{eyring_esmvaltool_2019, type = {preprint}, title = {{ESMValTool} v2.0 – {Extended} set of large-scale diagnostics for quasi-operational and comprehensive evaluation of {Earth} system models in {CMIP}}, url = {https://gmd.copernicus.org/preprints/gmd-2019-291/gmd-2019-291.pdf}, abstract = {The Earth System Model Evaluation Tool (ESMValTool) is a community diagnostics and performance metrics tool designed to improve comprehensive and routine evaluation of Earth System Models (ESMs) participating in the Coupled Model Intercomparison Project (CMIP). It has undergone rapid development since the first release in 2016 and is now a well-tested tool that provides end-to-end provenance tracking to ensure reproducibility. It consists of an easy-to-install, well documented Python package providing the core functionalities (ESMValCore) that performs common pre-processing operations and a diagnostic part that includes tailored diagnostics and performance metrics for specific scientific applications. Here we describe large-scale diagnostics of the second major release of the tool that supports the evaluation of ESMs participating in CMIP Phase 6 (CMIP6). ESMValTool v2.0 includes a large collection of diagnostics and performance metrics for atmospheric, oceanic, and terrestrial variables for the mean state, trends, and variability. 
ESMValTool v2.0 also successfully reproduces figures from the evaluation and projections chapters of the Intergovernmental Panel on Climate Change (IPCC) Fifth Assessment Report (AR5) and incorporates updates from targeted analysis packages, such as the NCAR Climate Variability Diagnostics Package for the evaluation of modes of variability the Thermodynamic Diagnostic Tool (TheDiaTo) to evaluate the energetics of the climate system, as well as parts of AutoAssess that contains a mix of top-down performance metrics. The tool has been fully integrated into the Earth System Grid Federation (ESGF) infrastructure at the Deutsches Klima Rechenzentrum (DKRZ) to provide evaluation results from CMIP6 model simulations shortly after the output is published to the CMIP archive. A result browser has been implemented that enables advanced monitoring of the evaluation results by a broad user community at much faster timescales than what was possible in CMIP5.}, institution = {Climate and Earth System Modeling}, author = {Eyring, Veronika and Bock, Lisa and Lauer, Axel and Righi, Mattia and Schlund, Manuel and Andela, Bouwe and Arnone, Enrico and Bellprat, Omar and Brötz, Björn and Caron, Louis-Phillippe and Carvalhais, Nuno and Cionni, Irene and Cortesi, Nicola and Crezee, Bas and Davin, Edouard and Davini, Paolo and Debeire, Kevin and de Mora, Lee and Deser, Clara and Docquier, David and Earnshaw, Paul and Ehbrecht, Carsten and Gier, Bettina K. and Gonzalez-Reviriego, Nube and Goodman, Paul and Hagemann, Stefan and Hardiman, Steven and Hassler, Birgit and Hunter, Alasdair and Kadow, Christopher and Kindermann, Stephan and Koirala, Sujan and Koldunov, Nikolay V. 
and Lejeune, Quentin and Lembo, Valerio and Lovato, Tomas and Lucarini, Valerio and Massonnet, Francois and Müller, Benjamin and Pandde, Amarjiit and Pérez-Zanón, Nuria and Phillips, Adam and Predoi, Valeriu and Russell, Joellen and Sellar, Alistair and Serva, Federico and Stacke, Tobias and Swaminathan, Ranjini and Torralba, Verónica and Vegas-Regidor, Javier and von Hardenberg, Jost and Weigel, Katja and Zimmermann, Klaus}, month = nov, year = {2019}, doi = {10.5194/gmd-2019-291}, file = {Eyring et al. - 2019 - ESMValTool v2.0 – Extended set of large-scale diag.pdf:C\:\\Users\\carst\\Zotero\\storage\\SRRE9MMT\\Eyring et al. - 2019 - ESMValTool v2.0 – Extended set of large-scale diag.pdf:application/pdf}, } @article{balaji_requirements_2018, title = {Requirements for a global data infrastructure in support of {CMIP6}}, volume = {11}, issn = {1991-959X}, url = {https://gmd.copernicus.org/articles/11/3659/2018/}, doi = {https://doi.org/10.5194/gmd-11-3659-2018}, abstract = {The World Climate Research Programme(WCRP)’s Working Group on Climate Modelling (WGCM)Infrastructure Panel (WIP) was formed in 2014 in responseto the explosive growth in size and complexity of CoupledModel Intercomparison Projects (CMIPs) between CMIP3(2005–2006) and CMIP5 (2011–2012). This article presentsthe WIP recommendations for the global data infrastruc-ture needed to support CMIP design, future growth, andevolution. Developed in close coordination with those whobuild and run the existing infrastructure (the Earth SystemGrid Federation; ESGF), the recommendations are basedon several principles beginning with the need to separaterequirements, implementation, and operations. 
Other im-portant principles include the consideration of the diversityof community needs around data – a data ecosystem – theimportance of provenance, the need for automation, and theobligation to measure costs and benefits.This paper concentrates on requirements, recognizing thediversity of communities involved (modelers, analysts, soft-ware developers, and downstream users). Such requirementsinclude the need for scientific reproducibility and account-ability alongside the need to record and track data usage.One key element is to generate a dataset-centric rather thansystem-centric focus, with an aim to making the infrastruc-ture less prone to systemic failure.With these overarching principles and requirements, theWIP has produced a set of position papers, which are summa-rized in the latter pages of this document. They provide spec-ifications for managing and delivering model output, includ-ing strategies for replication and versioning, licensing, dataquality assurance, citation, long-term archiving, and datasettracking. They also describe a new and more formal approachfor specifying what data, and associated metadata, should besaved, which enables future data volumes to be estimated,particularly for well-defined projects such as CMIP6.The paper concludes with a future facing consideration ofthe global data infrastructure evolution that follows from theblurring of boundaries between climate and weather, and thechanging nature of published scientific results in the digitalage.}, language = {en}, number = {9}, journal = {Geoscientific Model Development}, author = {Balaji, Venkatramani and Taylor, Karl E. and Juckes, Martin and Lawrence, Bryan N. and Durack, Paul J. 
and Lautenschlager, Michael and Blanton, Chris and Cinquini, Luca and Denvil, Sébastien and Elkington, Mark and Guglielmo, Francesca and Guilyardi, Eric and Hassell, David and Kharin, Slava and Kindermann, Stefan and Nikonov, Sergey and Radhakrishnan, Aparna and Stockhause, Martina and Weigel, Tobias and Williams, Dean}, month = sep, year = {2018}, pages = {3659--3680}, file = {Balaji et al. - 2018 - Requirements for a global data infrastructure in s.pdf:C\:\\Users\\carst\\Zotero\\storage\\X7PKAVMJ\\Balaji et al. - 2018 - Requirements for a global data infrastructure in s.pdf:application/pdf}, } @inproceedings{ehbrecht_projects_2018, title = {{PROJECTS} {BASED} {ON} {THE} {WEB} {PROCESSING} {SERVICE} {FRAMEWORK} {BIRDHOUSE}}, volume = {XLII-4-W8}, copyright = {CC BY}, url = {https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XLII-4-W8/43/2018/}, doi = {https://doi.org/10.5194/isprs-archives-XLII-4-W8-43-2018}, abstract = {Birdhouse is a collaborative project open for the community to participate. It is a software framework containing a collection of Web Processing Services (WPS). The deployed algorithms are focusing on Earth Systems and environmental data processing with the philosophy of streamlining the software development and deployment. By supporting climate, earth observation and biodiversity data and processes, Birdhouse can be used in a wide array of Earth sciences projects and workflows. The core benefit of this project is to allow the seamless use of climate services developed by a diverse network of national meteorological offices, regional climate service providers, academics, not-for-profit research centers and private industry. As governments move toward open-data policies, there will be a need for analytical services that extract value out of the deluge of information. 
Using an interoperable software architecture, institutions can provide both data and services allowing users to process the data remotely from a laptop, instead of having to acquire and maintain large storage infrastructures.}, language = {en}, booktitle = {{ISPRS} - {International} {Archives} of the {Photogrammetry}, {Remote} {Sensing} and {Spatial} {Information} {Sciences}}, publisher = {Copernicus GmbH}, author = {Ehbrecht, C. and Landry, T. and Hempelmann, N. and Huard, D. and Kindermann, S.}, month = jul, year = {2018}, pages = {43--47}, file = {Ehbrecht et al. - 2018 - PROJECTS BASED ON THE WEB PROCESSING SERVICE FRAME.pdf:C\:\\Users\\carst\\Zotero\\storage\\EZB42BLK\\Ehbrecht et al. - 2018 - PROJECTS BASED ON THE WEB PROCESSING SERVICE FRAME.pdf:application/pdf}, } @article{weigel_actionable_2014, title = {Actionable {Persistent} {Identifier} {Collections}}, volume = {12}, issn = {1683-1470}, url = {http://datascience.codata.org/articles/abstract/10.2481/dsj.12-058/}, doi = {10.2481/dsj.12-058}, abstract = {Persistent Identifiers (PIDs) have lately received a lot of attention from scientific infrastructure projects and communities that aim to employ them for management of massive amounts of research data and metadata objects. Such usage scenarios, however, require additional facilities to enable automated data management with PIDs. In this article, we present a conceptual framework that is based on the idea of using common abstract data types (ADTs) in combination with PIDs. This provides a well-defined interface layer that abstracts from both underlying PID systems and higher-level applications. 
Our practical implementation is based on the Handle System, yet the fundamental concept of PID-based ADTs is transferable to other infrastructures, and it is well suited to achieve interoperability between them.}, language = {en}, journal = {Data Science Journal}, author = {Weigel, Tobias and Kindermann, Stephan and Lautenschlager, Michael}, month = jan, year = {2014}, pages = {191--206}, file = {Weigel et al. - 2014 - Actionable Persistent Identifier Collections.pdf:C\:\\Users\\carst\\Zotero\\storage\\CLWLJRDT\\Weigel et al. - 2014 - Actionable Persistent Identifier Collections.pdf:application/pdf}, } @book{aggarwal_outlier_2017, edition = {2}, title = {Outlier {Analysis}}, isbn = {978-3-319-47577-6}, url = {https://www.springer.com/de/book/9783319475776}, abstract = {This book provides comprehensive coverage of the field of outlier analysis from a computer science point of view. It integrates methods from data mining, machine learning, and statistics within the computational framework and therefore appeals to multiple communities. The chapters of this book can be organized into three categories:Basic algorithms: Chapters 1 through 7 discuss the fundamental algorithms for outlier analysis, including probabilistic and statistical methods, linear methods, proximity-based methods, high-dimensional (subspace) methods, ensemble methods, and supervised methods.Domain-specific methods: Chapters 8 through 12 discuss outlier detection algorithms for various domains of data, such as text, categorical data, time-series data, discrete sequence data, spatial data, and network data.Applications: Chapter 13 is devoted to various applications of outlier analysis. Some guidance is also provided for the practitioner.The second edition of this book is more detailed and is written to appeal to both researchers and practitioners. 
Significant new material has been added on topics such as kernel methods, one-class support-vector machines, matrix factorization, neural networks, outlier ensembles, time-series methods, and subspace methods. It is written as a textbook and can be used for classroom teaching.}, language = {en}, publisher = {Springer International Publishing}, editor = {Aggarwal, Charu C.}, year = {2017}, doi = {10.1007/978-3-319-47578-3}, } @article{blei_latent_2003, title = {Latent dirichlet allocation}, volume = {3}, issn = {1532-4435}, abstract = {We describe latent Dirichlet allocation (LDA), a generative probabilistic model for collections of discrete data such as text corpora. LDA is a three-level hierarchical Bayesian model, in which each item of a collection is modeled as a finite mixture over an underlying set of topics. Each topic is, in turn, modeled as an infinite mixture over an underlying set of topic probabilities. In the context of text modeling, the topic probabilities provide an explicit representation of a document. We present efficient approximate inference techniques based on variational methods and an EM algorithm for empirical Bayes parameter estimation. We report results in document modeling, text classification, and collaborative filtering, comparing to a mixture of unigrams model and the probabilistic LSI model.}, number = {null}, journal = {The Journal of Machine Learning Research}, author = {Blei, David M. and Ng, Andrew Y. and Jordan, Michael I.}, month = mar, year = {2003}, pages = {993--1022}, file = {Blei et al. - 2003 - Latent dirichlet allocation.pdf:C\:\\Users\\carst\\Zotero\\storage\\65SRKC66\\Blei et al. 
- 2003 - Latent dirichlet allocation.pdf:application/pdf}, } @incollection{burkhardt_fundamental_2014, series = {Advances in {Electronic} {Government}, {Digital} {Divide}, and {Regional} {Development}}, title = {Fundamental {Aspects} for {E}-{Government}}, isbn = {978-1-4666-6236-0 978-1-4666-6237-7}, url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-4666-6236-0}, language = {en}, booktitle = {Handbook of {Research} on {Advanced} {ICT} {Integration} for {Governance} and {Policy} {Modeling}}, publisher = {IGI Global}, author = {Burkhardt, Dirk and Nazemi, Kawa and Zilke, Jan R. and Kohlhammer, Jörn and Kuijper, Arjan}, editor = {Sonntagbauer, Peter and Nazemi, Kawa and Sonntagbauer, Susanne and Prister, Giorgio and Burkhardt, Dirk and Mahmood, Zaigham}, year = {2014}, doi = {10.4018/978-1-4666-6236-0}, pages = {1--18}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\DSDUKXPS\\Sonntagbauer et al. - 2014 - Handbook of Research on Advanced ICT Integration f.pdf:application/pdf}, } @book{keim_mastering_2010, address = {Goslar}, title = {Mastering the information age: solving problems with visual analytics}, isbn = {978-3-905673-77-7}, shorttitle = {Mastering the information age}, language = {en}, publisher = {Eurographics Association}, editor = {Keim, Daniel}, year = {2010}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\RGIVHTVP\\Keim - 2010 - Mastering the information age solving problems wi.pdf:application/pdf}, } @book{ward_interactive_2010, address = {Hoboken}, title = {Interactive data visualization: foundations, techniques, and applications}, isbn = {978-1-56881-473-5}, shorttitle = {Interactive data visualization}, language = {en}, publisher = {360 Degree Business}, author = {Ward, Matthew and Grinstein, Georges G. and Keim, Daniel}, year = {2010}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\J57FL2C5\\Ward et al. 
- 2010 - Interactive data visualization foundations, techn.pdf:application/pdf}, } @inproceedings{nazemi_visual_2019-1, address = {Cham}, series = {Lecture {Notes} in {Computer} {Science}}, title = {A {Visual} {Analytics} {Approach} for {Analyzing} {Technological} {Trends} in {Technology} and {Innovation} {Management}}, isbn = {978-3-030-33723-0}, doi = {10.1007/978-3-030-33723-0_23}, abstract = {Visual Analytics provides with a combination of automated techniques and interactive visualizations huge analysis possibilities in technology and innovation management. Thereby not only the use of machine learning data mining methods plays an important role. Due to the high interaction capabilities, it provides a more user-centered approach, where users are able to manipulate the entire analysis process and get the most valuable information. Existing Visual Analytics systems for Trend Analytics and technology and innovation management do not really make use of this unique feature and almost neglect the human in the analysis process. Outcomes from research in information search, information visualization and technology management can lead to more sophisticated Visual Analytics systems that involved the human in the entire analysis process. 
We propose in this paper a new interaction approach for Visual Analytics in technology and innovation management with a special focus on technological trend analytics.}, language = {en}, booktitle = {Advances in {Visual} {Computing}}, publisher = {Springer International Publishing}, author = {Nazemi, Kawa and Burkhardt, Dirk}, editor = {Bebis, George and Boyle, Richard and Parvin, Bahram and Koracin, Darko and Ushizima, Daniela and Chai, Sek and Sueda, Shinjiro and Lin, Xin and Lu, Aidong and Thalmann, Daniel and Wang, Chaoli and Xu, Panpan}, year = {2019}, pages = {283--294}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\FF9D3Q7P\\Nazemi und Burkhardt - 2019 - A Visual Analytics Approach for Analyzing Technolo.pdf:application/pdf}, } @article{el-assady_semantic_2020, title = {Semantic {Concept} {Spaces}: {Guided} {Topic} {Model} {Refinement} using {Word}-{Embedding} {Projections}}, volume = {26}, issn = {1941-0506}, shorttitle = {Semantic {Concept} {Spaces}}, doi = {10.1109/TVCG.2019.2934654}, abstract = {We present a framework that allows users to incorporate the semantics of their domain knowledge for topic model refinement while remaining model-agnostic. Our approach enables users to (1) understand the semantic space of the model, (2) identify regions of potential conflicts and problems, and (3) readjust the semantic relation of concepts based on their understanding, directly influencing the topic modeling. These tasks are supported by an interactive visual analytics workspace that uses word-embedding projections to define concept regions which can then be refined. The user-refined concepts are independent of a particular document collection and can be transferred to related corpora. All user interactions within the concept space directly affect the semantic relations of the underlying vector space model, which, in turn, change the topic modeling. 
In addition to direct manipulation, our system guides the users' decisionmaking process through recommended interactions that point out potential improvements. This targeted refinement aims at minimizing the feedback required for an efficient human-in-the-loop process. We confirm the improvements achieved through our approach in two user studies that show topic model quality improvements through our visual knowledge externalization and learning process.}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {El-Assady, Mennatallah and Kehlbeck, Rebecca and Collins, Christopher and Keim, Daniel and Deussen, Oliver}, year = {2020}, pages = {1001--1011}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\EYFZT4IQ\\El-Assady et al. - 2020 - Semantic Concept Spaces Guided Topic Model Refine.pdf:application/pdf}, } @inproceedings{burkhardt_visualizing_2018, title = {Visualizing {Law} - {A} {Norm}-{Graph} {Visualization} {Approach} based on {Semantic} {Legal} {Data}}, copyright = {Creative Commons Attribution 4.0 International, Open Access}, url = {https://zenodo.org/record/2543729}, doi = {10.5281/ZENODO.2543729}, abstract = {Laws or in general legal documents regulate a wide range of our daily life and also define the borders of business models and commercial services. However, legal text and laws are almost hard to understand. From other domains it is already known that visualizations can help understanding complex aspects easier. In fact, in this paper we introduce a new approach to visualize legal texts in a Norm-graph visualization. In the developed Norm-graph visualization it is possible to show major aspects of laws and make it easier for users to understand it. 
The Norm-graph is based on semantic legal data, a so called Legal-Concept-Ontology.}, language = {en}, author = {Burkhardt, Dirk and Nazemi, Kawa}, year = {2018}, file = {Burkhardt und Nazemi - 2018 - Visualizing Law - A Norm-Graph Visualization Appro.pdf:C\:\\Users\\carst\\Zotero\\storage\\B3EN2DI4\\Burkhardt und Nazemi - 2018 - Visualizing Law - A Norm-Graph Visualization Appro.pdf:application/pdf}, } @inproceedings{stahl_data_2016-1, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Data {Quality} {Scores} for {Pricing} on {Data} {Marketplaces}}, isbn = {978-3-662-49381-6}, doi = {10.1007/978-3-662-49381-6_21}, abstract = {Data and data-related services are increasingly being traded on data marketplaces. However, value attribution of data is still not well-understood, in particular when two competing offers are to be compared. This paper discusses the role data quality can play in this context and suggests a weighted quality score that allows for ‘quality for money’ comparisons of different offerings.}, language = {en}, booktitle = {Intelligent {Information} and {Database} {Systems}}, publisher = {Springer}, author = {Stahl, Florian and Vossen, Gottfried}, editor = {Nguyen, Ngoc Thanh and Trawiński, Bogdan and Fujita, Hamido and Hong, Tzung-Pei}, year = {2016}, pages = {215--224}, } @techreport{stahl_data_2014-1, type = {{ERCIS} {Working} {Paper}}, title = {The data marketplace survey revisited}, url = {https://econpapers.repec.org/paper/zbwercisw/18.htm}, abstract = {Trading data as a commodity is increasingly popular. To get a better understanding of emerging data marketplaces, we have conducted two surveys to systematically gather and evaluate their characteristics. This paper is a continuation of a survey we conducted in 2012; it describes our findings from a second round done in 2013. Our study shows that the market is vivid with numerous exits and changes in its core business. 
We try to identify trends in this young field and explain them. Notably, there is a definite trend towards high quality data.}, number = {18}, institution = {University of Münster, European Research Center for Information Systems (ERCIS)}, author = {Stahl, Florian and Schomm, Fabian and Vossen, Gottfried}, year = {2014}, file = {Stahl et al. - 2014 - The data marketplace survey revisited.pdf:C\:\\Users\\carst\\Zotero\\storage\\AF92579T\\Stahl et al. - 2014 - The data marketplace survey revisited.pdf:application/pdf}, } @inproceedings{muschalle_pricing_2013-1, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Business} {Information} {Processing}}, title = {Pricing {Approaches} for {Data} {Markets}}, isbn = {978-3-642-39872-8}, doi = {10.1007/978-3-642-39872-8_10}, abstract = {Currently, multiple data vendors utilize the cloud-computing paradigm for trading raw data, associated analytical services, and analytic results as a commodity good. We observe that these vendors often move the functionality of data warehouses to cloud-based platforms. On such platforms, vendors provide services for integrating and analyzing data from public and commercial data sources. 
We present insights from interviews with seven established vendors about their key challenges with regard to pricing strategies in different market situations and derive associated research problems for the business intelligence community.}, language = {en}, booktitle = {Enabling {Real}-{Time} {Business} {Intelligence}}, publisher = {Springer}, author = {Muschalle, Alexander and Stahl, Florian and Löser, Alexander and Vossen, Gottfried}, editor = {Castellanos, Malu and Dayal, Umeshwar and Rundensteiner, Elke A.}, year = {2013}, pages = {129--144}, } @book{meisel_datenmarktplatze_2019-1, address = {Dortmund}, series = {{ISST}-{Bericht}}, title = {Datenmarktplätze - {Plattformen} für {Datenaustausch} und {Datenmonetarisierung} in der {Data} {Economy}}, language = {de}, publisher = {Fraunhofer Institut für Software- und Systemtechnik}, author = {Meisel, Lukas and Spiekermann, Markus}, editor = {Otto, Boris and Rehof, Jakob}, year = {2019}, file = {Meisel und Spiekermann - 2019 - Datenmarktplätze - Plattformen für Datenaustausch .pdf:C\:\\Users\\carst\\Zotero\\storage\\46RDF2UY\\Meisel und Spiekermann - 2019 - Datenmarktplätze - Plattformen für Datenaustausch .pdf:application/pdf}, } @inproceedings{chen_towards_2019-1, address = {New York}, series = {{SIGMOD} '19}, title = {Towards {Model}-{Based} {Pricing} for {Machine} {Learning} in a {Data} {Marketplace}}, isbn = {978-1-4503-5643-5}, url = {https://doi.org/10.1145/3299869.3300078}, doi = {10.1145/3299869.3300078}, abstract = {Data analytics using machine learning (ML) has become ubiquitous in science, business intelligence, journalism and many other domains. While a lot of work focuses on reducing the training cost, inference runtime and storage cost of ML models, little work studies how to reduce the cost of data acquisition, which potentially leads to a loss of sellers' revenue and buyers' affordability and efficiency. 
In this paper, we propose a model-based pricing (MBP) framework, which instead of pricing the data, directly prices ML model instances. We first formally describe the desired properties of the MBP framework, with a focus on avoiding arbitrage. Next, we show a concrete realization of the MBP framework via a noise injection approach, which provably satisfies the desired formal properties. Based on the proposed framework, we then provide algorithmic solutions on how the seller can assign prices to models under different market scenarios (such as to maximize revenue). Finally, we conduct extensive experiments, which validate that the MBP framework can provide high revenue to the seller, high affordability to the buyer, and also operate on low runtime cost.}, booktitle = {Proceedings of the 2019 {International} {Conference} on {Management} of {Data}}, publisher = {Association for Computing Machinery}, author = {Chen, Lingjiao and Koutris, Paraschos and Kumar, Arun}, year = {2019}, pages = {1535--1552}, } @inproceedings{attard_data_2016-1, title = {Data {Value} {Networks}: {Enabling} a {New} {Data} {Ecosystem}}, isbn = {978-1-5090-4470-2}, url = {https://ieeexplore.ieee.org/document/7817090}, doi = {10.1109/WI.2016.0073}, abstract = {With the increasing permeation of data into all dimensions of our information society, data is progressively becoming the basis for many products and services. It is hence becoming more and more vital to identify the means and methods how to exploit the value of this data. In this paper we provide our definition of the Data Value Network, where we specifically cater for non-tangible data products. We also propose a Demand and Supply Distribution Model with the aim of providing insight on how an entity can participate in the global data market by producing a data product, as well as a concrete implementation through the Demand and Supply as a Service. 
Through our contributions we project our vision of generating a new Economic Data Ecosystem that has the Web of Data as its core.}, booktitle = {2016 {IEEE}/{WIC}/{ACM} {International} {Conference} on {Web} {Intelligence} ({WI})}, author = {Attard, Judie and Orlandi, Fabrizio and Auer, Sören}, editor = {{IEEE}}, year = {2016}, pages = {453--456}, } @incollection{charalabidis_organizational_2018-1, address = {Cham}, series = {Public {Administration} and {Information} {Technology}}, title = {Organizational {Issues}: {How} to {Open} {Up} {Government} {Data}?}, isbn = {978-3-319-90850-2}, shorttitle = {Organizational {Issues}}, url = {https://doi.org/10.1007/978-3-319-90850-2_4}, abstract = {Governments create and collect enormous amounts of data, for instance concerning voting results, transport, energy, education, and employment. These datasets are often stored in an archive that is not accessible for others than the organization’s employees. To attain benefits such as transparency, engagement, and innovation, many governmental organizations are now also providing public access to this data. However, in opening up their data, these organizations face many issues, including the lack of standard procedures, the threat of privacy violations when releasing data, accidentally releasing policy-sensitive data, the risk of data misuse, challenges regarding the ownership of data and required changes at different organizational layers. 
These issues often hinder the easy publication of government data.}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_4}, pages = {57--73}, } @article{dai_open_2018-1, title = {Open and inclusive collaboration in science: {A} framework}, copyright = {© OECD/OCDE 2018}, shorttitle = {Open and inclusive collaboration in science}, url = {https://www.oecd-ilibrary.org/industry-and-services/open-and-inclusive-collaboration-in-science_2dbff737-en}, doi = {10.1787/2dbff737-en}, abstract = {Digitalisation is fundamentally changing science and the paper lays out some of the opportunities, risks and major policy challenges associated with these changes. More specifically, the paper lays out a conceptual framework for open science. This framework incorporates access to data and information, as well as civil society engagement, in the different stages of the scientific research process. It is not meant to be prescriptive but should help different communities to decide on their own priorities within the open science space and to better visualise how these priorities link to different stage of the scientific process and to different actors. Such a framework can be useful also in considering how best to incentivise and measure different aspects of open science.}, language = {en}, journal = {OECD Science, Technology and Industry Working Papers 2018/07}, author = {Dai, Qian and Shin, Eunjung and Smith, Carthage}, month = mar, year = {2018}, pages = {29}, file = {Dai et al. 
- 2018 - Open and inclusive collaboration in science A fra.pdf:C\:\\Users\\carst\\Zotero\\storage\\33L7SLTF\\Dai et al. - 2018 - Open and inclusive collaboration in science A fra.pdf:application/pdf}, } @incollection{ghosh_data_2018-1, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Data {Marketplace} as a {Platform} for {Sharing} {Scientific} {Data}}, isbn = {978-981-10-7515-5}, url = {https://doi.org/10.1007/978-981-10-7515-5_7}, abstract = {Data marketplace is an emerging service model to facilitate data exchange between its producers and consumers. While the service has been motivated by a business model for data and has established itself in the commercial sector over the last few years, it is possible to build a data sharing platform for the scientific community on this model. This article analyzes the motivational and technical challenges for scientific data exchange and proposes use of data marketplace service model to address them.}, language = {en}, booktitle = {Data {Science} {Landscape}: {Towards} {Research} {Standards} and {Protocols}}, publisher = {Springer}, author = {Ghosh, Hiranmay}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5_7}, pages = {99--105}, } @incollection{charalabidis_open_2018-2, address = {Cham}, series = {Public {Administration} and {Information} {Technology}}, title = {Open {Data} {Evaluation} {Models}: {Theory} and {Practice}}, isbn = {978-3-319-90850-2}, shorttitle = {Open {Data} {Evaluation} {Models}}, url = {https://doi.org/10.1007/978-3-319-90850-2_8}, abstract = {Evaluation of Open Data is a systematic determination of open data merit, worth and significance, using criteria governed by a set of standards (Farbey, Land, \& Targett, 1999). It is an essential procedure trying to ignite a learning and innovation process leading to a more effective data exploitation. 
Examples of questions to be answered by open data evaluation could be: what is the current status of published data against the best practices identified, how effectively these data are published or used, what are the most valuable data for users, what are the problems and barriers discouraging the publication and use of open data and in which extend these barriers affects users’ behaviour towards data usage. The answers on these questions will affect the next developments of an open data portal or initiative and the publication procedure.}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_8}, pages = {137--172}, } @incollection{charalabidis_open_2018-3, address = {Cham}, series = {Public {Administration} and {Information} {Technology}}, title = {Open {Data} {Value} and {Business} {Models}}, isbn = {978-3-319-90850-2}, url = {https://doi.org/10.1007/978-3-319-90850-2_7}, abstract = {The chapter focuses on innovation processes aspiring to generate value through a purposeful and effective exploitation of data released in an open format. On the one hand, such processes represent a great opportunity for private and public organizations while, on the other, they pose a number of challenges having to do with creating the technical, legal and procedural preconditions as well as identifying appropriate business models that may guarantee the long term financial viability of such activities. 
As a matter of fact, while information sharing is widely recognized as a value multiplier, the release of information in an open data format through creative common licenses generates information-based common goods characterized by nonrivalry and nonexcludability in fruition. An aspect posing significant challenges for the pursuit of sustainable competitive advantages.}, language = {en}, booktitle = {The {World} of {Open} {Data}: {Concepts}, {Methods}, {Tools} and {Experiences}}, publisher = {Springer International Publishing}, author = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, editor = {Charalabidis, Yannis and Zuiderwijk, Anneke and Alexopoulos, Charalampos and Janssen, Marijn and Lampoltshammer, Thomas and Ferro, Enrico}, year = {2018}, doi = {10.1007/978-3-319-90850-2_7}, pages = {115--136}, } @book{munshi_data_2018-2, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Data {Science} {Landscape}}, volume = {38}, isbn = {978-981-10-7514-8 978-981-10-7515-5}, language = {en}, publisher = {Springer Singapore}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5}, } @book{ivanschitz_data_2018-1, title = {A {Data} {Market} with {Decentralized} {Repositories}}, url = {https://openreview.net/pdf?id=rkgzBg7yeX}, abstract = {In the current era of ever growing data volumes and increased commercialization of data, an interest for data markets is on the rise. When the participants in this markets need access to large amounts of data, as necessary for big data applications, a centralized approach becomes unfeasible. In this paper, we argue for a data market based on decentralized data repositories and outline an implementation approach currently being undertaken by the Data Market Austria project}, author = {Ivanschitz, Bernd-Peter and Lampoltshammer, Thomas J. 
and Mireles, Victor and Revenko, Artem and Schlarb, Sven and Thurnay, Lorinc}, year = {2018}, file = {Ivanschitz et al. - 2018 - A Data Market with Decentralized Repositories.pdf:C\:\\Users\\carst\\Zotero\\storage\\Y5VQFVNI\\Ivanschitz et al. - 2018 - A Data Market with Decentralized Repositories.pdf:application/pdf}, } @incollection{munshi_data_2018-3, address = {Singapore}, series = {Studies in {Big} {Data}}, title = {Data {Science} {Landscape}: {Tracking} the {Ecosystem}}, isbn = {978-981-10-7515-5}, shorttitle = {Data {Science} {Landscape}}, url = {https://doi.org/10.1007/978-981-10-7515-5_1}, abstract = {The big data phenomenon is continuously evolving, so is its entire ecosystem. In the recent past due to the advancing technologies/resources cropping up on all fronts, we have moved from data deficit to data deluge. The real challenge is in deriving benefits from the data tsunami for public good. Thus, it is imperative to build infrastructure to store and process humongous data. It is equally important to evolve innovative mechanisms for data analytics to draw inferences that can facilitate smart research and good decision making landscape. 
The paper dwells on some of the core elements of the big data ecosystem and endeavors to present the current scenario by identifying and portraying various initiatives to address big data boom.}, language = {en}, booktitle = {Data {Science} {Landscape}: {Towards} {Research} {Standards} and {Protocols}}, publisher = {Springer}, author = {Munshi, Usha Mujoo}, editor = {Munshi, Usha Mujoo and Verma, Neeta}, year = {2018}, doi = {10.1007/978-981-10-7515-5_1}, pages = {1--31}, } @incollection{welle_donker_funding_2018-1, address = {The Hague}, series = {Information {Technology} and {Law} {Series}}, title = {Funding {Open} {Data}}, isbn = {978-94-6265-261-3}, url = {https://doi.org/10.1007/978-94-6265-261-3_4}, abstract = {Open government data are fast becoming entrenched in our society. However, even though open government data may be “free”, it is not “gratis”. It takes substantial human and financial resources not only to collect and maintain government data, but also to process the data to be suitable for distribution as open data. Those resources need to be funded. In this chapter, we identify potential funding models for open data. We also explore the costs of implementing open data policies, and the benefits of open data, both for the open data organisation and for society. We demonstrate that the once-off operational costs of open data supply are marginal compared to the total operational costs of the open data organisation. Open data leads to efficiency gains within the open data organisation and to societal benefits. However, to reap those benefits, it is essential that organisations switching to open data, receive compensation, at least in the short-term. The compensation may be found in a new paid role in the information value chain.}, language = {en}, booktitle = {Open {Data} {Exposed}}, publisher = {T.M.C. 
Asser Press}, author = {Welle Donker, Frederika}, editor = {van Loenen, Bastiaan and Vancauwenberghe, Glenn and Crompvoets, Joep}, year = {2018}, doi = {10.1007/978-94-6265-261-3_4}, pages = {55--78}, file = {Welle Donker - 2018 - Funding Open Data.pdf:C\:\\Users\\carst\\Zotero\\storage\\NZE9L6DL\\Welle Donker - 2018 - Funding Open Data.pdf:application/pdf}, } @book{the_british_standards_institution_pas_nodate, title = {{PAS} 182 {Smart} city concept model – {Guide} to establishing a model for data interoperability}, url = {https://www.bsigroup.com/en-GB/smart-cities/Smart-Cities-Standards-and-Publication/PAS-182-smart-cities-data-concept-model/}, language = {en}, author = {{The British Standards Institution}}, } @book{both_berliner_2011-1, address = {Berlin}, title = {Berliner {Open} {Data} {Strategie}}, publisher = {Fraunhofer-Verl}, author = {Both, Wolfgang and Schieferdecker, Ina Kathrin}, year = {2011}, } @book{noauthor_plattform_nodate-1, title = {Die {Plattform} für {Citizen} {Science}}, url = {https://www.buergerschaffenwissen.de/}, language = {de}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\29I5QY5J\\www.buergerschaffenwissen.de.html:text/html}, } @incollection{catal_urbane_2018-1, address = {Berlin}, edition = {1. Auflage}, series = {Beuth {Innovation}}, title = {Urbane {Datenplattformen} in der {Cloud}}, isbn = {978-3-410-27681-4}, language = {de}, booktitle = {Mensch und {Technik} in der {Smart} {City}: {Die} menschliche {Smart} {City}}, publisher = {Beuth Verlag GmbH}, author = {Catal, Faruk and Lämmel, Philipp and Schieferdecker, Ina and Tcholtchev, Nikolay}, year = {2018}, pages = {143--158}, } @book{catal_mensch_2018-1, address = {Berlin}, edition = {1. 
Auflage}, series = {Beuth {Innovation}}, title = {Mensch und {Technik} in der {Smart} {City}: {Die} menschliche {Smart} {City}}, isbn = {978-3-410-27681-4}, language = {de}, publisher = {Beuth Verlag GmbH}, author = {Catal, Faruk and Drescher, Burkhard and Eickhoff, Antje and Fehling, Thomas and Haist, Karin and Hellweg, Uli and Jursch, Ulrich and Kahl, Holger and Kemmerzehl, Richard and Klaus, Agata and Kleewein, Klaus and Kreitsch, Thomas and Lämmel, Philipp and Mader, Michael and Mienkus, Rolf and Möhlendick, Barbara and Müller, Christian and Pahl-Weber, Elke and Schieferdecker, Ina and Schonowski, Joachim and Tank, Ralf and Tcholtchev, Nikolay and Weis, Matthias}, editor = {Hertzsch, Eckhart and Heuser, Lutz}, year = {2018}, } @book{bundesnetzagentur_fur_elektrizitat_gas_telekommunikation_post_und_eisenbahne_daten_2018, address = {Bonn}, title = {Daten als {Wettbewerbs}-und {Wertschöpfungsfaktor} in den {Netzsektoren}: {Eine} {Analyse} vor dem {Hintergrund} der digitalen {Transformation}, {Bonn}: {Bundesnetzagentur} für {Elektrizität}, {Gas}, {Telekommunikation}, {Post} und {Eisenbahnen}}, url = {https://www.bundesnetzagentur.de/SharedDocs/Downloads/DE/Allgemeines/Bundesnetzagentur/Publikationen/Berichte/2018/Digitalisierung.pdf?__blob=publicationFile&v=4}, language = {de}, author = {{Bundesnetzagentur für Elektrizität, Gas, Telekommunikation, Post und Eisenbahnen}}, year = {2018}, file = {Bundesnetzagentur für Elektrizität, Gas, Telekommunikation, Post und Eisenbahne - 2018 - Daten als Wettbewerbs-und Wertschöpfungsfaktor in .pdf:C\:\\Users\\carst\\Zotero\\storage\\R55ZB4IE\\Bundesnetzagentur für Elektrizität, Gas, Telekommunikation, Post und Eisenbahne - 2018 - Daten als Wettbewerbs-und Wertschöpfungsfaktor in .pdf:application/pdf}, } @book{noauthor_cc_nodate-1, title = {{CC} creative commons}, url = {https://creativecommons.org/}, language = {en-US}, note = {Publication Title: Creative Commons}, file = 
{Snapshot:C\:\\Users\\carst\\Zotero\\storage\\MDYI2382\\creativecommons.org.html:text/html}, } @book{noauthor_european_2017-2, title = {European {Data} {Portal}}, url = {https://www.europeandataportal.eu/en}, year = {2017}, file = {Home | European Data Portal:C\:\\Users\\carst\\Zotero\\storage\\XV6CSBG6\\en.html:text/html}, } @article{noauthor_richtlinie_nodate-1, title = {Richtlinie ({EU}) 2019/1024 des {Europäischen} {Parlaments} und des {Rats} vom 20. {Juni} 2019 über offene {Daten} und die {Weiterverwendung} von {Informationen} des öffentlichen {Sektors}}, url = {https://eur-lex.europa.eu/legal-content/DE/TXT/HTML/?uri=CELEX:32019L1024&from=DE}, language = {de}, number = {L 172/56}, journal = {Amtsblatt der Europäischen Union}, } @techreport{european_commission_building_2017, address = {Brussels}, type = {Communication from the {Commission} to the {European} {Parliament}, the {Council}, the {European} {Economic} and {Social} {Committee} and the {Committee} of the {Regions}}, title = {Building a {European} {Data} {Economy}}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:52017DC0009}, language = {en}, institution = {European Commission}, author = {{European Commission}}, month = jan, year = {2017}, pages = {18}, file = {2017 - Building a European Data Economy.pdf:C\:\\Users\\carst\\Zotero\\storage\\57236PCS\\2017 - Building a European Data Economy.pdf:application/pdf}, } @techreport{european_commission_towards_2018-1, address = {Brussels}, type = {Communication from the {Commission} to the {European} {Parliament}, the {Council}, the {European} {Economic} and {Social} {Committee} and the {Committee} of the {Regions}}, title = {Towards a common {European} data space}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:52018DC0232}, language = {en}, institution = {European Commission}, author = {{European Commission}}, month = apr, year = {2018}, pages = {14}, file = {2018 - Towards a common European data 
space.pdf:C\:\\Users\\carst\\Zotero\\storage\\F6AV4GFW\\2018 - Towards a common European data space.pdf:application/pdf}, } @book{das_fraunhofer-institut_fur_offene_kommunikationssysteme_fokus_piveau_nodate-1, title = {piveau}, url = {https://www.piveau.de/}, abstract = {piveau ist ein Datenmanagement-Ökosystem für den öffentlichen Sektor. Es bietet Komponenten und Werkzeuge für die Unterstützung der kompletten Verarbeitungskette von der Erfassung, Aggregation, Bereitstellung bis zur Nutzung der Daten.}, language = {de ; en}, author = {{Das Fraunhofer-Institut für Offene Kommunikationssysteme FOKUS}}, file = {piveau:C\:\\Users\\carst\\Zotero\\storage\\6KLFKSIF\\www.piveau.de.html:text/html}, } @article{geiger_open_2012-1, title = {Open {Government} and ({Linked}) ({Open}) ({Government}) ({Data})}, volume = {4}, issn = {2075-9517}, url = {https://www.jedem.org/index.php/jedem/article/view/143}, doi = {10.29379/jedem.v4i2.143}, language = {en}, number = {2}, journal = {JeDEM - eJournal of eDemocracy and Open Government}, author = {Geiger, Christian Philipp and Lucke, Jörn von}, month = dec, year = {2012}, pages = {265--278}, file = {Geiger und Lucke - 2012 - Open Government and (Linked) (Open) (Government) (.pdf:C\:\\Users\\carst\\Zotero\\storage\\SS4R685L\\Geiger und Lucke - 2012 - Open Government and (Linked) (Open) (Government) (.pdf:application/pdf}, } @incollection{helene_govdata_2014-1, title = {{GovData} - {Das} {Datenportal} für {Deutschland}}, isbn = {978-3-8487-1131-4}, booktitle = {Transparenz, {Partizipation}, {Kollaboration}}, publisher = {Nomos Verlagsgesellschaft mbH \& Co. 
KG}, author = {Helene, Maria}, editor = {Hill, Hermann and Martini, Mario and Wagner, Edgar}, year = {2014}, doi = {10.5771/9783845252636-109}, pages = {109--116}, } @book{noauthor_infrastructure_nodate-1, title = {Infrastructure for spatial information in {Europe}}, url = {https://inspire.ec.europa.eu/}, note = {Publication Title: INSPIRE Knowledge Base}, file = {INSPIRE | Welcome to INSPIRE:C\:\\Users\\carst\\Zotero\\storage\\X8INR5KR\\inspire.ec.europa.eu.html:text/html}, } @article{kim_creative_2017-1, title = {The {Creative} {Commons} and {Copyright} {Protection} in the {Digital} {Era}: {Uses} of {Creative} {Commons} {Licenses}}, volume = {13}, shorttitle = {The {Creative} {Commons} and {Copyright} {Protection} in the {Digital} {Era}}, url = {https://academic.oup.com/jcmc/article/13/1/187/4583060}, doi = {10.1111/j.1083-6101.2007.00392.x}, abstract = {As digital technology thrusts complexity upon copyright law, conflict has escalated between copyright holders desperate to institute a vigorous enforcement mechanism against copying in order to protect their ownership and others who underscore the importance of public interests in accessing and using copyrighted works. This study explores whether Creative Commons (CC) licenses are a viable solution for copyright protection in the digital era. Through a mixed-methods approach involving a web-based survey of CC licensors, a content analysis of CC-licensed works, and interviews, the study characterizes CC licensors, the ways that CC licensors produce creative works, the private interests that CC licenses serve, and the public interests that CC licenses serve. 
The findings suggest that the Creative Commons can alleviate some of the problems caused by the copyright conflict.}, language = {en}, number = {1}, journal = {Journal of Computer-Mediated Communication}, author = {Kim, Minjeong}, month = oct, year = {2007}, pages = {187--209}, file = {Kim - 2017 - The Creative Commons and Copyright Protection in t.pdf:C\:\\Users\\carst\\Zotero\\storage\\THXDA9AD\\Kim - 2017 - The Creative Commons and Copyright Protection in t.pdf:application/pdf}, } @book{klessmann_open_2012-1, title = {Open {Government} {Data} {Deutschland}: eine {Studie} zu {Open} {Government} in {Deutschland} im {Auftrag} des {Bundesministerium} des {Innern}}, shorttitle = {Open {Government} {Data} {Deutschland}}, url = {https://www.bmi.bund.de/SharedDocs/Downloads/DE/Themen/OED_Verwaltung/ModerneVerwaltung/opengovernment.pdf?__blob=publicationFile.}, language = {de}, publisher = {Deutschland / Bundesministerium}, author = {Klessmann, Jens and Denker, Philipp and Schieferdecker, Ina and Schulz, Sönke E and Hoepner, Petra and {Evanela Lapi} and Marienfeld, Florian and {Lena-Sophie Müller} and Tcholtchev, Nikolay and Rein-Fischböck, Katharina}, year = {2012}, doi = {10.13140/RG.2.1.4506.6321}, file = {Klessmann et al. 
- 2012 - Open Government Data Deutschland eine Studie zu O.pdf:application/pdf}, } @book{krcmar_informationsmanagement_2015-1, address = {Berlin, Heidelberg}, title = {Informationsmanagement}, isbn = {978-3-662-45862-4 978-3-662-45863-1}, language = {de}, publisher = {Springer Berlin Heidelberg}, author = {Krcmar, Helmut}, year = {2015}, doi = {10.1007/978-3-662-45863-1}, } @book{kuzev_open_2016-1, address = {Berlin}, title = {Open {Data}: die wichtigsten {Fakten} zu offenen {Daten}: {Grundlagen}, {Rahmenbedingungen} und {Beispiele} zur {Nutzung} von {Open} {Data}}, url = {https://www.kas.de/de/einzeltitel/-/content/open-data1}, language = {de}, publisher = {Konrad-Adenauer-Stiftung}, author = {Kuzev, Pencho}, year = {2016}, file = {Kuzev - 2016 - Open Data die wichtigsten Fakten zu offenen Daten.pdf:C\:\\Users\\carst\\Zotero\\storage\\A93IQRLQ\\Kuzev - 2016 - Open Data die wichtigsten Fakten zu offenen Daten.pdf:application/pdf}, } @book{noauthor_openstreetmap_nodate-1, title = {{OpenStreetMap}}, url = {https://www.openstreetmap.org/}, abstract = {OpenStreetMap ist eine Karte der Welt, erstellt von Menschen wie dir und frei verwendbar unter einer offenen Lizenz.}, language = {de}, note = {Publication Title: OpenStreetMap}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\7S5N8MDB\\www.openstreetmap.org.html:text/html}, } @inproceedings{schieferdecker_urban_2016-1, address = {New York, NY, USA}, series = {{OpenSym} '16}, title = {Urban {Data} {Platforms}: {An} {Overview}}, isbn = {978-1-4503-4481-4}, shorttitle = {Urban {Data} {Platforms}}, url = {https://doi.org/10.1145/2962132.2984894}, doi = {10.1145/2962132.2984894}, abstract = {Along the increasing digitization and interconnection in almost every domain in society or business, data is growing exponentially. It is expected that the worldwide Internet traffic will triple until 2020 in comparison to 2015. 
In the same time, the transmitted data volume will move from 53,2 Exabytes per months to 161 Exabytes per months [Cisco, 2016]. Cities and communities can support the provisioning and usage of urban data and benefit from resulting new services for the monitoring, understanding, decision making, steering, and control. Providing urban data is also supported by the ongoing movement of opening governmental data, but goes beyond. Urban data can include data from public, industrial, scientific or private sources. Yet, the design of urban data is still ongoing and numerous initiatives and standardization efforts on smart cities and communities put the grounds for the uptake and interoperability of urban data.}, language = {en}, booktitle = {Proceedings of the 12th {International} {Symposium} on {Open} {Collaboration} {Companion}}, publisher = {Association for Computing Machinery}, author = {Schieferdecker, Ina and Tcholtchev, Nikolay and Lämmel, Philipp}, month = aug, year = {2016}, pages = {1--4}, } @techreport{schieferdecker_handreichung_2019-1, address = {Berlin}, title = {Handreichung zur {Studie}: {Urbane} {Datenräume} - {Möglichkeiten} von {Datenaustausch} und {Zusammenarbeit} im urbanen {Raum}}, copyright = {© Fraunhofer FOKUS, Berlin, 2019}, url = {https://cdn0.scrvt.com/fokus/702aa1480e55b335/bc8c65c81a42/190311_Handreichung_UDR_02.pdf}, language = {de}, institution = {Fraunhofer FOKUS}, author = {Schieferdecker, Ina and Bruns, Lina and Cuno, Silke and Flügge, Matthias and Isakovic, Karsten and Klessmann, Jens and Lämmel, Philipp and Stadtkewit, Dustin and Tcholtchev, Nikolay and Lange, Christoph and Imbusch, Benedikt T. and Strauß, Leonie and Vastag, Alex and Flocke, Florian and Kraft, Volker}, year = {2019}, pages = {24}, file = {Schieferdecker et al. - 2019 - Handreichung zur Studie Urbane Datenräume - Mögli.pdf:C\:\\Users\\carst\\Zotero\\storage\\KDJHRAMD\\Schieferdecker et al. 
- 2019 - Handreichung zur Studie Urbane Datenräume - Mögli.pdf:application/pdf}, } @techreport{schieferdecker_urbane_2018-1, address = {Berlin}, title = {Urbane {Datenräume} - {Möglichkeiten} von {Datenaustausch} und {Zusammenarbeit} im urbanen {Raum}}, copyright = {© Fraunhofer FOKUS, Berlin, 2018}, url = {https://cdn0.scrvt.com/fokus/774af17bdc0a18cd/69f7a401c168/UDR_Studie_062018.pdf}, language = {de}, institution = {Fraunhofer FOKUS}, author = {Schieferdecker, Ina and Bruns, Lina and Cuno, Silke and Flügge, Matthias and Isakovic, Karsten and Klessmann, Jens and Lämmel, Philipp and Stadtkewit, Dustin and Tcholtchev, Nikolay and Lange, Christoph and Imbusch, Benedikt T. and Strauß, Leonie and Vastag, Alex and Flocke, Florian and Kraft, Volker}, year = {2018}, pages = {250}, file = {Schieferdecker et al. - 2018 - Urbane Datenräume - Möglichkeiten von Datenaustaus.pdf:C\:\\Users\\carst\\Zotero\\storage\\ETTMS22K\\Schieferdecker et al. - 2018 - Urbane Datenräume - Möglichkeiten von Datenaustaus.pdf:application/pdf}, } @incollection{niedbal_smart_2020-1, address = {Wiesbaden}, title = {„{Smart} {Cities}“ als Überbegriff für eine lebenswerte, komfortable und {Teilhabe} ermöglichende {Umgebung}}, isbn = {978-3-658-27232-6}, url = {https://doi.org/10.1007/978-3-658-27232-6_49}, abstract = {Der Verkehr in Städten steht vor einer fundamentalen Transformation. Das Internet der Dinge und die vielfach gewonnenen Daten bilden dabei das Rückgrat der Städte. Sharing- und On-Demand-Angebote, die in den öffentlichen Verkehr integriert sind, reduzieren die verkehrliche Belastung. Intelligente Schließfächer und Lastenfahrräder fungieren als wichtiger Bestandteil der innerstädtischen Logistik von morgen. Bahnhöfe entwickeln sich zur multimodalen Mobilitätsplattform weiter und sind ein zentraler Ort in Städten. 
Sie bieten Aufenthaltsqualität und dienen Menschen im Rahmen von Coworking-Angeboten als flexibler, dezentraler Arbeitsplatz.}, language = {de}, booktitle = {Smart {City} – {Made} in {Germany}: {Die} {Smart}-{City}-{Bewegung} als {Treiber} einer gesellschaftlichen {Transformation}}, publisher = {Springer Fachmedien}, author = {Niedbal, Meike}, editor = {Etezadzadeh, Chirine}, year = {2020}, doi = {10.1007/978-3-658-27232-6_49}, pages = {469--484}, } @book{noauthor_openseamap_nodate-1, title = {{OpenSeaMap} - die freie {Seekarte}}, url = {https://www.openseamap.org}, file = {OpenSeaMap\: Startseite:C\:\\Users\\carst\\Zotero\\storage\\NSNMRRPA\\index.html:text/html}, } @book{noauthor_data_nodate-3, title = {Data {Platform} – {Open} {Power} {System} {Data}}, url = {https://data.open-power-system-data.org/}, abstract = {This is the Open Power System Data platform. We provide European power system data in five packages.}, language = {en}, file = {Data Platform – Open Power System Data:C\:\\Users\\carst\\Zotero\\storage\\CQHK4XQ7\\data.open-power-system-data.org.html:text/html}, } @book{senatsverwaltung_fur_wirtschaft_energie_und_betriebe_berlin_nodate-1, title = {Berlin {Open} {Data} {Portal}}, url = {https://daten.berlin.de/}, language = {de}, author = {{Senatsverwaltung für Wirtschaft, Energie und Betriebe}}, file = {Offene Daten Berlin | Offene Daten lesbar für Mensch und Maschine. 
Das ist das Ziel.:C\:\\Users\\carst\\Zotero\\storage\\RDWLCZ3G\\daten.berlin.de.html:text/html}, } @book{noauthor_stromnetz_2012-1, title = {Stromnetz {Berlin}}, url = {http://www.netzdaten-berlin.de}, year = {2012}, note = {Publication Title: Netzdaten Berlin – das Pilotportal}, } @book{preische_digitales_2014-1, address = {Berlin}, edition = {Redaktionsschluss: Oktober 2013}, series = {Daten und {Fakten}}, title = {Digitales {Gold}}, language = {de}, author = {Preische, Jens}, editor = {{TSB Technologiestiftung Berlin}}, year = {2014}, file = {Preische - 2014 - Digitales Gold.pdf:C\:\\Users\\carst\\Zotero\\storage\\SZ5RAZ7W\\Preische - 2014 - Digitales Gold.pdf:application/pdf}, } @book{wissenschaftlicher_beirat_der_bundesregierung_globale_umweltveranderungen_unsere_2019-1, title = {Unsere gemeinsame digitale {Zukunft}}, isbn = {978-3-946830-02-3}, url = {https://www.wbgu.de/de/publikationen/publikation/unsere-gemeinsame-digitale-zukunft}, language = {de}, author = {{Wissenschaftlicher Beirat der Bundesregierung Globale Umweltveränderungen}}, year = {2019}, } @book{sunlight_foundation_ten_2010-1, address = {Washington}, title = {Ten principles for opening up government information}, url = {https://sunlightfoundation.com/policy/documents/ten-open-data-principles/}, language = {en}, publisher = {Sunlight Foundation}, author = {{Sunlight Foundation}}, year = {2010}, } @book{noauthor_wheelmap_nodate-1, title = {Wheelmap}, url = {https://wheelmap.org}, abstract = {Wheelmap is an online map to search, find and mark wheelchair-accessible places. 
Get involved by marking public places like bars, restaurants, cinemas or supermarkets.}, language = {de}, note = {Publication Title: Wheelmap}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\8L2RFG7C\\wheelmap.org.html:text/html}, } @article{lerner_revolution_2018-1, title = {Revolution in {Health} {Care}: {How} {Will} {Data} {Science} {Impact} {Doctor}–{Patient} {Relationships}?}, volume = {6}, issn = {2296-2565}, shorttitle = {Revolution in {Health} {Care}}, url = {https://www.frontiersin.org/articles/10.3389/fpubh.2018.00099/full}, doi = {10.3389/fpubh.2018.00099}, abstract = {Revolution in Health Care: How Will Data Science Impact Doctor–Patient Relationships?}, language = {en}, journal = {Frontiers in Public Health}, author = {Lerner, Ivan and Veil, Raphaël and Nguyen, Dinh-Phong and Luu, Vinh Phuc and Jantzen, Rodolphe}, year = {2018}, file = {Lerner et al. - 2018 - Revolution in Health Care How Will Data Science I.pdf:C\:\\Users\\carst\\Zotero\\storage\\BDJ2652Z\\Lerner et al. - 2018 - Revolution in Health Care How Will Data Science I.pdf:application/pdf}, } @inproceedings{schieferdecker_towards_2017-1, title = {Towards an {Open} {Data} {Based} {ICT} {Reference} {Architecture} for {Smart} {Cities}}, doi = {10.1109/CeDEM.2017.18}, abstract = {Given that ICT is at the heart of today's Smart City approach, it is of paramount importance to investigate concepts, which would enable the unification, the common understanding and the replication of ICT architectures/solutions/models across multiple cities. This unified and replicable approach can be best achieved by a very abstract model, aiming to capture the taxonomy and high-level structure of complex integrative ICT solutions for Smart Cities. The approach should be based on the idea of openness with respect to interfaces, software components and especially data, which is to be seen as the main ingredient of an ICT eco-system for Smart Cities. 
This paper presents an Open Data based ICT Reference Architecture for Smart Cities, which is developed within the EU project Triangulum [1].}, booktitle = {2017 {Conference} for {E}-{Democracy} and {Open} {Government} ({CeDEM})}, author = {Schieferdecker, Ina and Tcholtchev, Nikolay and Lämmel, Philipp and Scholz, Robert and Lapi, Evanela}, month = may, year = {2017}, pages = {184--193}, } @book{das_fraunhofer-institut_fur_offene_kommunikationssysteme_fokus_wikipedia_nodate-1, title = {Wikipedia}, url = {https://www.wikipedia.org/}, author = {{Das Fraunhofer-Institut für Offene Kommunikationssysteme FOKUS}}, file = {Wikipedia:C\:\\Users\\carst\\Zotero\\storage\\P2TK2JEM\\www.wikipedia.org.html:text/html}, } @article{kindling_landscape_2017-1, title = {The {Landscape} of {Research} {Data} {Repositories} in 2015: {A} re3data {Analysis}}, volume = {23}, issn = {1082-9873}, shorttitle = {The {Landscape} of {Research} {Data} {Repositories} in 2015}, url = {http://www.dlib.org/dlib/march17/kindling/03kindling.html}, doi = {10.1045/march2017-kindling}, abstract = {This article provides a comprehensive descriptive and statistical analysis of metadata information on 1,381 research data repositories worldwide and across all research disciplines. The analyzed metadata is derived from the re3data database, enabling search and browse functionalities for the global registry of research data repositories. The analysis focuses mainly on institutions that operate research data repositories, types and subjects of research data repositories (RDR), access conditions as well as services provided by the research data repositories. RDR differ in terms of the service levels they offer, languages they support or standards they comply with. These statements are commonly acknowledged by saying the RDR landscape is heterogeneous. 
As expected, we found a heterogeneous RDR landscape that is mostly influenced by the repositories' disciplinary background for which they offer services.}, language = {en}, number = {3/4}, journal = {D-Lib Magazine}, author = {Kindling, Maxi and Pampel, Heinz and van de Sandt, Stephanie and Rücknagel, Jessika and Vierkant, Paul and Kloska, Gabriele and Witt, Michael and Schirmbacher, Peter and Bertelmann, Roland and Scholze, Frank}, year = {2017}, file = {Eingereichte Version:C\:\\Users\\carst\\Zotero\\storage\\D7K9CZ5X\\Kindling et al. - 2017 - The Landscape of Research Data Repositories in 201.pdf:application/pdf}, } @article{kaden_warum_2018-1, title = {Warum {Forschungsdaten} nicht publiziert werden}, copyright = {Creative Commons BY 3.0}, issn = {1860-7950}, url = {https://edoc.hu-berlin.de/bitstream/handle/18452/20046/kaden-fd.pdf?sequence=1&isAllowed=y}, doi = {10.18452/19284}, language = {de}, journal = {LIBREAS. Library Ideas}, author = {Kaden, Ben}, year = {2018}, pages = {8}, file = {Kaden - 2018 - Warum Forschungsdaten nicht publiziert werden.pdf:C\:\\Users\\carst\\Zotero\\storage\\R6AQVZ8Q\\Kaden - 2018 - Warum Forschungsdaten nicht publiziert werden.pdf:application/pdf}, } @incollection{hagendorff_open_2016-1, address = {Stuttgart}, title = {Open {Data}}, isbn = {978-3-476-05394-7}, booktitle = {Handbuch {Medien}- und {Informationsethik}}, publisher = {J.B. 
Metzler}, author = {Hagendorff, Thilo}, editor = {Heesen, Jessica}, year = {2016}, pages = {227--233}, } @book{herb_uberwachungskapitalismus_nodate-1, title = {Überwachungskapitalismus und {Wissenschaftssteuerung}}, url = {https://www.heise.de/tp/features/Ueberwachungskapitalismus-und-Wissenschaftssteuerung-4480357.html}, abstract = {Die Metamorphose des Wissenschaftsverlags Elsevier zum Research Intelligence Dienstleister ist paradigmatisch für die neuen Möglichkeiten der Protokollierung und Steuerung von Wissenschaft}, language = {de}, author = {Herb, Ulrich}, note = {Publication Title: Telepolis}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\VXUU7UKT\\Ueberwachungskapitalismus-und-Wissenschaftssteuerung-4480357.html:text/html}, } @incollection{immenhauser_habent_2018-1, title = {habent sua fata data: der {Beitrag} der {Schweizerischen} {Akademie} der {Geistes}- und {Sozialwissenschaften} zur nachhaltigen {Sicherung} von {Forschungsdaten}}, isbn = {978-3-11-055379-6}, abstract = {Der vorliegende Band versammelt eine repräsentative Auswahl an Initiativen der Wissenschaftlichen Bibliotheken der Schweiz, die als Resultat erfolgreicher Kooperationen ein hohes Innovationspotential aufweisen. Im Mittelpunkt stehen wissenschafts-, hochschul- und förderpolitische Themen in Verbindung mit Services zur Forschungsunterstützung wie nationale Open Access-Strategie, Forschungsdatenmanagement, Digitalisierung sowie Präsentations- und Serviceplattformen. Ebenfalls aufgegriffen werden Fragen der baulichen Entwicklung und strategischen Standortplanung, Aus- und Weiterbildung oder die Bedeutung überregional und landesweit agierender Verbände. 
Der Band wird als Festschrift für Susanna Bliggenstorfer anlässlich ihres Rücktrittes als Direktorin der Zentralbibliothek Zürich herausgegeben.}, language = {de}, booktitle = {Bibliotheken der {Schweiz}: {Innovation} durch {Kooperation}: {Festschrift} für {Susanna} {Bliggenstorfer} anlässlich ihres {Rücktrittes} als {Direktorin} der {Zentralbibliothek} {Zürich}}, publisher = {De Gruyter Saur}, author = {Immenhauser, Beat}, editor = {{Zentralbibliothek Zürich} and Keller, Alice and Uhl, Susanne}, year = {2018}, file = {Immenhauser - 2018 - habent sua fata data der Beitrag der Schweizerisc.pdf:C\:\\Users\\carst\\Zotero\\storage\\P3QN9BQ5\\Immenhauser - 2018 - habent sua fata data der Beitrag der Schweizerisc.pdf:application/pdf}, } @incollection{pampel_stand_2015-1, address = {Bonn}, title = {Stand und {Perspektive} des globalen {Verzeichnisses} von {Forschungsdaten}-{Repositorien} re3data.org}, isbn = {978-3-88579-637-4}, abstract = {Das Projekt re3data.org – Registry of Research Data Repositories macht Forschungsdaten-Repositorien in einem web-basierten Verzeichnis auffindbar. Das Ziel von re3data.org ist es, Forschenden eine Orientierung über bestehende Repositorien zur dauerhaften Zugänglichmachung von digitalen Forschungsdaten zu bieten, um „data sharing“ und „data re-use“ in der Wissenschaft zu fördern. Der Beitrag ordnet den Dienst in aktuelle Diskussionen um den offenen Zugang zu Forschungsdaten ein und beschreibt Stand und Perspektive von re3data.org.}, language = {Beitr. teilw. in dt., teilw. in engl. Sprache}, booktitle = {8. {DFN}-{Forum} {Kommunikationstechnologien} {Beiträge} der {Fachtagung} 08.06.-09.06.2015 in {Lübeck}}, publisher = {Gesellschaft für Informatik e.V.}, author = {Pampel, Heinz and Bertelmann, Roland and Scholze, Frank and Vierkant, Paul and Kindling, Maxi}, editor = {Müller, Paul}, year = {2015}, pages = {13--22}, file = {Pampel et al. 
- 2015 - Stand und Perspektive des globalen Verzeichnisses .pdf:C\:\\Users\\carst\\Zotero\\storage\\KM3YCMPE\\Pampel et al. - 2015 - Stand und Perspektive des globalen Verzeichnisses .pdf:application/pdf}, } @book{eynden_managing_2011-1, address = {Colchester}, edition = {Third edition, fully revised}, title = {Managing and sharing data: a best practice guide for researchers}, copyright = {CC BY NC SA}, isbn = {978-1-904059-78-3}, shorttitle = {Managing and sharing data}, url = {https://ukdataservice.ac.uk/media/622417/managingsharing.pdf}, language = {en}, publisher = {Print Essex at the University of Essex}, author = {Eynden, Veerle van den and Corti, Louise and Woollard, Matthew and Bishop, Libby and Horton, Laurence}, year = {2011}, file = {Eynden et al. - 2011 - Managing and sharing data a best practice guide f.pdf:C\:\\Users\\carst\\Zotero\\storage\\IHFISHHA\\Eynden et al. - 2011 - Managing and sharing data a best practice guide f.pdf:application/pdf}, } @book{goldhammer_okonomischer_2017-1, address = {Berlin}, title = {Ökonomischer {Wert} von {Verbraucherdaten} für {Adress}- und {Datenhändler}: {Studie} im {Auftrag} des {Bundesministeriums} der {Justiz} und für {Verbraucherschutz}:}, url = {https://www.goldmedia.com/fileadmin/goldmedia/2015/Studien/2017/Verbraucherdaten_BMJV/Studie_Wert_Daten_Adresshaendler_Goldmedia_BMJV_2017.pdf}, language = {de}, author = {Goldhammer, Klaus and Wiegand, André}, editor = {{Goldmedia GmbH Strategy Consulting}}, month = apr, year = {2017}, file = {Goldhammer und Wiegand - 2017 - Ökonomischer Wert von Verbraucherdaten für Adress-.pdf:C\:\\Users\\carst\\Zotero\\storage\\PD76LLKF\\Goldhammer und Wiegand - 2017 - Ökonomischer Wert von Verbraucherdaten für Adress-.pdf:application/pdf}, } @article{cusumano_business_2019-1, title = {The {Business} of {Platforms}: {Strategy} in the {Age} of {Digital} {Competition}, {Innovation}, and {Power}}, shorttitle = {The {Business} of {Platforms}}, url = 
{https://www.hbs.edu/faculty/Pages/item.aspx?num=56021}, abstract = {The Business of Platforms explores the strategic, economic, and technology management challenges of digital platform businesses. We have five major themes in the book: 1) The world’s most valuable companies are all platforms, in part because platforms have network effects, with the potential for a winner-take-all or winner-take-most outcome. 2) Platforms come in 3 flavors: innovation platforms, transaction platforms, and hybrid platforms. We suggest that the world is moving towards more and more hybrids, and we identify the key steps in building a successful platform. 3) Failure is more likely than winner-take-all: mispricing, mistrust, mistiming, and hubris lead to hundreds of failures. 4) Old “dogs” can learn new tricks: conventional companies can adapt to a platform world with a buy, build, or belong strategy. And 5) Platforms are a double-edge sword: abuse of power, bullying poor labor practices, and bad actors can undermine even the most successful platforms. The book concludes with an exploration of platform battles of the future, including voice wars (Alexa vs. Hey Google vs. Siri), ridesharing and autonomous car platforms, quantum computing, and CRISPR.}, language = {en-us}, author = {Cusumano, Michael A. 
and Gawer, Annabelle and Yoffie, David B.}, month = may, year = {2019}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\X575M7X2\\item.html:text/html}, } @book{mayer-schonberger_digital_2017-1, address = {Berlin}, title = {Das {Digital}: {Markt}, {Wertschöpfung} und {Gerechtigkeit} im {Datenkapitalismus}}, isbn = {978-3-430-20233-6}, shorttitle = {Das {Digital}}, language = {de}, publisher = {Econ}, author = {Mayer-Schönberger, Viktor and Ramge, Thomas}, year = {2017}, file = {Table of Contents PDF:C\:\\Users\\carst\\Zotero\\storage\\MVGA5HWY\\Mayer-Schönberger und Ramge - 2017 - Das Digital Markt, Wertschöpfung und Gerechtigkei.pdf:application/pdf}, } @article{schomm_marketplaces_2013-1, title = {Marketplaces for {Data}: {An} {Initial} {Survey}}, volume = {42}, issn = {0163-5808}, url = {https://doi.org/10.1145/2481528.2481532}, doi = {10.1145/2481528.2481532}, abstract = {Data is becoming more and more of a commodity, so that it is not surprising that data has reached the status of tradable goods. An increasing number of data providers is recognizing this and is consequently setting up platforms for selling, buying, or trading data. 
We identify several categories and dimensions of data marketplaces and data vendors and provide a snapshot of the situation as of Summer 2012.}, language = {en}, number = {1}, journal = {SIGMOD Rec.}, author = {Schomm, Fabian and Stahl, Florian and Vossen, Gottfried}, year = {2013}, pages = {15--26}, } @article{stahl_marketplaces_2017-1, title = {Marketplaces for {Digital} {Data}: {Quo} {Vadis}?}, volume = {10}, copyright = {Copyright (c) 2017 Florian Stahl,Fabian Schomm,Lara Vomfell,Gottfried Vossen}, issn = {1913-8989}, shorttitle = {Marketplaces for {Digital} {Data}}, url = {http://www.ccsenet.org/journal/index.php/cis/article/view/70439}, doi = {10.5539/cis.v10n4p22}, abstract = {The survey presented in this work investigates emerging markets for data and is the third of its kind, providing a deeper understanding of this emerging type of market. The findings indicate that data providers focus on limited business models and that data remains individualized and differentiated. Nevertheless, a trend towards commoditization for certain types of data can be foreseen, which allows an outlook to further developments in this area.}, language = {en}, number = {4}, journal = {Computer and Information Science}, author = {Stahl, Florian and Schomm, Fabian and Vomfell, Lara and Vossen, Gottfried}, month = oct, year = {2017}, pages = {22}, file = {Stahl et al. - 2017 - Marketplaces for Digital Data Quo Vadis.pdf:C\:\\Users\\carst\\Zotero\\storage\\DVJ23QF6\\Stahl et al. 
- 2017 - Marketplaces for Digital Data Quo Vadis.pdf:application/pdf}, } @article{stahl_classification_2016-1, title = {A classification framework for data marketplaces}, volume = {3}, issn = {2196-8896}, url = {https://doi.org/10.1007/s40595-016-0064-2}, doi = {10.1007/s40595-016-0064-2}, abstract = {Trading data as a commodity has become increasingly popular in recent years, and data marketplaces have emerged as a new business model where data from a variety of sources can be collected, aggregated, processed, enriched, bought, and sold. They are effectively changing the way data are distributed and managed on the Internet. To get a better understanding of the emergence of data marketplaces, we have conducted several surveys in recent years to systematically gather and evaluate their characteristics. This paper takes a broader perspective and relates data marketplaces as currently discussed in computer science to the neoclassical notions of market and marketplace from economics. Specifically, we provide a typology of electronic marketplaces and discuss their approaches to the distribution of data. Finally, we provide a distinct definition of data marketplaces, leading to a classification framework that can provide structure for the emerging field of data marketplace research.}, language = {en}, number = {3}, journal = {Vietnam Journal of Computer Science}, author = {Stahl, Florian and Schomm, Fabian and Vossen, Gottfried and Vomfell, Lara}, month = aug, year = {2016}, pages = {137--143}, file = {Stahl et al. - 2016 - A classification framework for data marketplaces.pdf:C\:\\Users\\carst\\Zotero\\storage\\24RWYFJB\\Stahl et al. 
- 2016 - A classification framework for data marketplaces.pdf:application/pdf}, } @article{stahl_name_2017-1, title = {Name {Your} {Own} {Price} on {Data} {Marketplaces}}, volume = {28}, issn = {0868-4952}, url = {https://content.iospress.com/articles/informatica/inf1134}, abstract = {A novel approach to pricing on data marketplaces is proposed, which is based on the Name Your Own Price (NYOP) principle: customers suggest their own price for a (relational) data product and in return receive a custom-tailored one. The result is a f}, language = {en}, number = {1}, journal = {Informatica}, author = {Stahl, Florian and Vossen, Gottfried}, month = jan, year = {2017}, pages = {155--180}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\AW8UE8CR\\inf1134.html:text/html}, } @inproceedings{koutris_query-based_2012-1, address = {New York}, series = {{PODS} '12}, title = {Query-based data pricing}, isbn = {978-1-4503-1248-6}, url = {https://doi.org/10.1145/2213556.2213582}, doi = {10.1145/2213556.2213582}, abstract = {Data is increasingly being bought and sold online, and Web-based marketplace services have emerged to facilitate these activities. However, current mechanisms for pricing data are very simple: buyers can choose only from a set of explicit views, each with a specific price. In this paper, we propose a framework for pricing data on the Internet that, given the price of a few views, allows the price of any query to be derived automatically. We call this capability "query-based pricing." We first identify two important properties that the pricing function must satisfy, called arbitrage-free and discount-free. Then, we prove that there exists a unique function that satisfies these properties and extends the seller's explicit prices to all queries. When both the views and the query are Unions of Conjunctive Queries, the complexity of computing the price is high. 
To ensure tractability, we restrict the explicit prices to be defined only on selection views (which is the common practice today). We give an algorithm with polynomial time data complexity for computing the price of any chain query by reducing the problem to network flow. Furthermore, we completely characterize the class of Conjunctive Queries without self-joins that have PTIME data complexity (this class is slightly larger than chain queries), and prove that pricing all other queries is NP-complete, thus establishing a dichotomy on the complexity of the pricing problem when all views are selection queries.}, booktitle = {Proceedings of the 31st {ACM} {SIGMOD}-{SIGACT}-{SIGAI} symposium on {Principles} of {Database} {Systems}}, publisher = {Association for Computing Machinery}, author = {Koutris, Paraschos and Upadhyaya, Prasang and Balazinska, Magdalena and Howe, Bill and Suciu, Dan}, month = may, year = {2012}, pages = {167--178}, file = {Koutris et al. - 2012 - Query-based data pricing.pdf:C\:\\Users\\carst\\Zotero\\storage\\97ZGAFI4\\Koutris et al. - 2012 - Query-based data pricing.pdf:application/pdf}, } @article{stahl_preismodelle_2015-1, title = {Preismodelle für {Datenmarktplätze}}, volume = {38}, issn = {1432-122X}, url = {https://doi.org/10.1007/s00287-013-0751-7}, doi = {10.1007/s00287-013-0751-7}, abstract = {Eine zunehmende Zahl von Anbietern nutzt das Cloud-Computing-Paradigma für einen Handel mit Daten und analytischen Dienstleistungen. In dieser qualitativen Studie präsentieren wir die Ergebnisse aus Interviews mit zwölf etablierten Anbietern. Unsere Ergebnisse zeigen insbesondere eine große Unsicherheit bezüglich der Preissetzung und Preismodellwahl. Ferner erlauben sie eine Abstraktion der betrachteten Marktplätze auf ein einheitliches Schema mit sieben Akteuren sowie sechs atomaren und zwei hybriden Preisstrategien abstrahieren. 
Darüber hinaus bietet diese Papier erstmals eine strukturierte Entscheidungshilfe für die Wahl eines geeigneten Preismodells für Datenmarktplätze und legt somit den Grundstein für eine algorithmische Unterstützung bei Preismodellwahl und Preisfindung.}, language = {de}, number = {2}, journal = {Informatik-Spektrum}, author = {Stahl, Florian and Löser, Alexander and Vossen, Gottfried}, month = apr, year = {2015}, pages = {133--141}, } @inproceedings{martins_supporting_2019-1, title = {Supporting {Customers} with {Limited} {Budget} in {Data} {Marketplaces}}, doi = {10.1109/LA-CCI47412.2019.9037038}, abstract = {As the competitiveness and dynamics of current markets intensify, companies and organizations see opportunities to optimize their strategies and increase their business advantage in data-driven decision-making. This has led to an emergence of data marketplaces, where providers can sell data, while consumers can purchase it. However, the process of acquiring data from a marketplace involves issuing queries with an associated monetary cost, and data consumers often struggle to purchase the targeted data set of appropriate volume and content within their budget. Two issues need to be considered: One is querying itself, which may require API calls, structured queries written in SQL, graph queries written in Neo4J, or any other language framework. Querying is often a stepwise process that starts from generic queries and gets refined as the user learns about the data that results. The other issue is the cost involved, which consists of the price a consumer has to pay for the data and that of processing the various queries. In this paper, the second issue is studied from a computational perspective; in particular, we propose a novel framework for data-purchase support that considers data purchase from a marketplace as a sequence of interactions between the data provider (or the marketplace) and the consumer. 
This allows us to deal with scenarios in which the consumer has a limited budget, insufficient to embrace the complete data set he or she targets. We formalize the problem setting and the characteristics of available queries offered by the data provider so that efficient (approximation) algorithms can be devised. Our empirical results demonstrate that intelligent algorithms can aid the data consumer with near-optimum solutions that consider her preferences about the queries to be issue to the data provider.}, booktitle = {2019 {IEEE} {Latin} {American} {Conference} on {Computational} {Intelligence} ({LA}-{CCI})}, author = {Martins, Denis Mayr Lima and Lechtenbörger, Jens and Vossen, Gottfried}, month = nov, year = {2019}, pages = {1--6}, } @misc{travizano_wibson_2018-1, title = {Wibson: {A} {Decentralized} {Data} {Marketplace}}, shorttitle = {Wibson}, url = {http://arxiv.org/abs/1812.09966}, eprint = {1812.09966}, eprinttype = {arXiv}, abstract = {Our aim is for Wibson to be a blockchain-based, decentralized data marketplace that provides individuals a way to securely and anonymously sell information in a trusted environment. The combination of the Wibson token and blockchain-enabled smart contracts hopes to allow Data Sellers and Data Buyers to transact with each other directly while providing individuals the ability to maintain anonymity as desired. Wibson intends that its data marketplace will provide infrastructure and financial incentives for individuals to securely sell personal information without sacrificing personal privacy. Data Buyers receive information from willing and actively participating individuals with the benefit of knowing that the personal information should be accurate and current.}, author = {Travizano, Matias and Sarraute, Carlos and Ajzenman, Gustavo and Minnoni, Martin}, year = {2018}, file = {Travizano et al. - 2018 - Wibson A Decentralized Data Marketplace.pdf:C\:\\Users\\carst\\Zotero\\storage\\X2I7IRUX\\Travizano et al. 
- 2018 - Wibson A Decentralized Data Marketplace.pdf:application/pdf}, } @article{bracher_fashion_2016-1, title = {Fashion {DNA}: {Merging} {Content} and {Sales} {Data} for {Recommendation} and {Article} {Mapping}}, shorttitle = {Fashion {DNA}}, url = {http://arxiv.org/abs/1609.02489}, abstract = {We present a method to determine Fashion DNA, coordinate vectors locating fashion items in an abstract space. Our approach is based on a deep neural network architecture that ingests curated article information such as tags and images, and is trained to predict sales for a large set of frequent customers. In the process, a dual space of customer style preferences naturally arises. Interpretation of the metric of these spaces is straightforward: The product of Fashion DNA and customer style vectors yields the forecast purchase likelihood for the customer-item pair, while the angle between Fashion DNA vectors is a measure of item similarity. Importantly, our models are able to generate unbiased purchase probabilities for fashion items based solely on article information, even in absence of sales data, thus circumventing the "cold-start problem" of collaborative recommendation approaches. Likewise, it generalizes easily and reliably to customers outside the training set. We experiment with Fashion DNA models based on visual and/or tag item data, evaluate their recommendation power, and discuss the resulting article similarities.}, eprint = {1609.02489}, eprinttype = {arXiv}, author = {Bracher, Christian and Heinz, Sebastian and Vollgraf, Roland}, month = sep, year = {2016}, pages = {[10]}, file = {Bracher et al. - 2016 - Fashion DNA Merging Content and Sales Data for Re.pdf:C\:\\Users\\carst\\Zotero\\storage\\3HEMCVV2\\Bracher et al. 
- 2016 - Fashion DNA Merging Content and Sales Data for Re.pdf:application/pdf}, } @article{devlin_bert_2019-1, title = {{BERT}: {Pre}-training of {Deep} {Bidirectional} {Transformers} for {Language} {Understanding}}, shorttitle = {{BERT}}, url = {http://arxiv.org/abs/1810.04805}, abstract = {We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5\% (7.7\% point absolute improvement), MultiNLI accuracy to 86.7\% (4.6\% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).}, eprint = {1810.04805}, eprinttype = {arXiv}, author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina}, month = may, year = {2019}, file = {Devlin et al. - 2019 - BERT Pre-training of Deep Bidirectional Transform.pdf:C\:\\Users\\carst\\Zotero\\storage\\LDXL3ZTS\\Devlin et al. 
- 2019 - BERT Pre-training of Deep Bidirectional Transform.pdf:application/pdf}, } @inproceedings{tang_get_2014-1, address = {Cham}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Get a {Sample} for a {Discount}}, isbn = {978-3-319-10073-9}, doi = {10.1007/978-3-319-10073-9_3}, abstract = {While price and data quality should define the major trade-off for consumers in data markets, prices are usually prescribed by vendors and data quality is not negotiable. In this paper we study a model where data quality can be traded for a discount. We focus on the case of XML documents and consider completeness as the quality dimension. In our setting, the data provider offers an XML document, and sets both the price of the document and a weight to each node of the document, depending on its potential worth. The data consumer proposes a price. If the proposed price is lower than that of the entire document, then the data consumer receives a sample, i.e., a random rooted subtree of the document whose selection depends on the discounted price and the weight of nodes. By requesting several samples, the data consumer can iteratively explore the data in the document. We show that the uniform random sampling of a rooted subtree with prescribed weight is unfortunately intractable. However, we are able to identify several practical cases that are tractable. The first case is uniform random sampling of a rooted subtree with prescribed size; the second case restricts to binary weights. 
For both these practical cases we present polynomial-time algorithms and explain how they can be integrated into an iterative exploratory sampling approach.}, language = {en}, booktitle = {Database and {Expert} {Systems} {Applications}}, publisher = {Springer International Publishing}, author = {Tang, Ruiming and Amarilli, Antoine and Senellart, Pierre and Bressan, Stéphane}, editor = {Decker, Hendrik and Lhotská, Lenka and Link, Sebastian and Spies, Marcus and Wagner, Roland R.}, year = {2014}, pages = {20--34}, } @inproceedings{agarwal_marketplace_2019-1, address = {New York, NY, USA}, series = {{EC} '19}, title = {A {Marketplace} for {Data}: {An} {Algorithmic} {Solution}}, isbn = {978-1-4503-6792-9}, url = {https://doi.org/10.1145/3328526.3329589}, doi = {10.1145/3328526.3329589}, abstract = {In this work, we aim to design a data marketplace; a robust real-time matching mechanism to efficiently buy and sell training data for Machine Learning tasks. While the monetization of data and pre-trained models is an essential focus of industry today, there does not exist a market mechanism to price training data and match buyers to sellers while still addressing the associated (computational and other) complexity. The challenge in creating such a market stems from the very nature of data as an asset: (i) it is freely replicable; (ii) its value is inherently combinatorial due to correlation with signal in other data; (iii) prediction tasks and the value of accuracy vary widely; (iv) usefulness of training data is difficult to verify a priori without first applying it to a prediction task. As our main contributions we: (i) propose a mathematical model for a two-sided data market and formally define the key associated challenges; (ii) construct algorithms for such a market to function and analyze how they meet the challenges defined. 
We highlight two technical contributions: (i) a new notion of "fairness" required for cooperative games with freely replicable goods; (ii) a truthful, zero regret mechanism to auction a class of combinatorial goods based on utilizing Myerson's payment function and the Multiplicative Weights algorithm. These might be of independent interest.}, booktitle = {Proceedings of the 2019 {ACM} {Conference} on {Economics} and {Computation}}, publisher = {Association for Computing Machinery}, author = {Agarwal, Anish and Dahleh, Munther and Sarkar, Tuhin}, year = {2019}, pages = {701--726}, file = {Agarwal et al. - 2019 - A Marketplace for Data An Algorithmic Solution.pdf:C\:\\Users\\carst\\Zotero\\storage\\IRAL86YW\\Agarwal et al. - 2019 - A Marketplace for Data An Algorithmic Solution.pdf:application/pdf}, } @article{wilkinson_fair_2016-1, title = {The {FAIR} {Guiding} {Principles} for scientific data management and stewardship}, volume = {3}, issn = {2052-4463}, url = {https://doi.org/10.1038/sdata.2016.18}, doi = {10.1038/sdata.2016.18}, abstract = {There is an urgent need to improve the infrastructure supporting the reuse of scholarly data. A diverse set of stakeholders—representing academia, industry, funding agencies, and scholarly publishers—have come together to design and jointly endorse a concise and measureable set of principles that we refer to as the FAIR Data Principles. The intent is that these may act as a guideline for those wishing to enhance the reusability of their data holdings. Distinct from peer initiatives that focus on the human scholar, the FAIR Principles put specific emphasis on enhancing the ability of machines to automatically find and use the data, in addition to supporting its reuse by individuals. This Comment is the first formal publication of the FAIR Principles, and includes the rationale behind them, and some exemplar implementations in the community.}, number = {1}, journal = {Scientific Data}, author = {Wilkinson, Mark D. 
and Dumontier, Michel and Aalbersberg, IJsbrand Jan and Appleton, Gabrielle and Axton, Myles and Baak, Arie and Blomberg, Niklas and Boiten, Jan-Willem and da Silva Santos, Luiz Bonino and Bourne, Philip E. and Bouwman, Jildau and Brookes, Anthony J. and Clark, Tim and Crosas, Mercè and Dillo, Ingrid and Dumon, Olivier and Edmunds, Scott and Evelo, Chris T. and Finkers, Richard and Gonzalez-Beltran, Alejandra and Gray, Alasdair J. G. and Groth, Paul and Goble, Carole and Grethe, Jeffrey S. and Heringa, Jaap and 't Hoen, Peter A. C. and Hooft, Rob and Kuhn, Tobias and Kok, Ruben and Kok, Joost and Lusher, Scott J. and Martone, Maryann E. and Mons, Albert and Packer, Abel L. and Persson, Bengt and Rocca-Serra, Philippe and Roos, Marco and van Schaik, Rene and Sansone, Susanna-Assunta and Schultes, Erik and Sengstag, Thierry and Slater, Ted and Strawn, George and Swertz, Morris A. and Thompson, Mark and van der Lei, Johan and van Mulligen, Erik and Velterop, Jan and Waagmeester, Andra and Wittenburg, Peter and Wolstencroft, Katherine and Zhao, Jun and Mons, Barend}, month = mar, year = {2016}, } @incollection{klump_langzeiterhaltung_2011-1, address = {Bad Honnef}, title = {Langzeiterhaltung digitaler {Forschungsdaten}}, isbn = {978-3-88347-283-6}, abstract = {Das Handbuch Forschungsdatenmanagement ist konzipiert als Leitfaden für das Selbststudium sowie zur Unterstützung der Aus- und Weiterbildung auf dem aktuellen Stand der Diskussion. Sie richtet sich insbesondere an Einsteiger im Forschungsdatenmanagement, aber gleichermaßen auch an wissenschaftliche Datenkuratoren, IT-Administratoren und Informationswissenschaftler, die ihre Aufgaben im Forschungsdatenmanagement nicht mehr nur einzelfall- oder disziplinorientiert, sondern in Hinblick auf die Arbeit in und an Forschungsdateninfrastrukturen wahrnehmen wollen. Und so war die Aufgabe für die Autorinnen und Autoren in ihrem Kapitel nicht nur den State-of-the-Art darzustellen, sondern das Thema so aufzubereiten, dass z. B. 
über die Referenzen das weitere Einarbeiten in die Themenfelder erleichtert wird. Zentrale Aspekte des Forschungsdatenmanagements werden in der Publikation aus informationswissenschaftlicher und anwendungsbezogener Perspektive disziplinübergreifend eingeführt.}, language = {de}, booktitle = {Handbuch {Forschungsdatenmanagement}}, publisher = {Bock + Herchen}, author = {Klump, Jens}, editor = {Büttner, Stephan and Hobohm, Hans-Christoph and Müller, Lars}, year = {2011}, pages = {115--119}, } @book{senatskanzlei_geschafts-_und_koordinierungsstelle_govdata_datenportal_nodate-1, title = {Das {Datenportal} für {Deutschland}: {Open} {Government}: {Verwaltungsdaten} transparent, offen und frei nutzbar}, url = {https://www.govdata.de/}, author = {{Senatskanzlei, Geschäfts- und Koordinierungsstelle GovData}}, file = {GovData | Datenportal für Deutschland - GovData:C\:\\Users\\carst\\Zotero\\storage\\PG9AQE3N\\www.govdata.de.html:text/html}, } @techreport{noauthor_din_nodate-1, title = {{DIN} {SPEC} 91357:2017-12, {Referenzarchitekturmodell} {Offene} {Urbane} {Plattform} ({OUP})}, shorttitle = {{DIN} {SPEC} 91357}, url = {https://www.beuth.de/de/-/-/281077528}, institution = {Beuth Verlag GmbH}, doi = {10.31030/2780217}, } @article{davies_open_2014-1, title = {Open {Data}: {Growing} {Up} and {Getting} {Specific}}, volume = {6}, issn = {2075-9517}, shorttitle = {Open {Data}}, url = {https://jedem.org/index.php/jedem/article/view/344}, doi = {10.29379/jedem.v6i1.344}, language = {en}, number = {1}, journal = {JeDEM - eJournal of eDemocracy and Open Government}, author = {Davies, Tim and Janssen, Marijn and Schieferdecker, Ina and Höchtl, Jan}, month = nov, year = {2014}, pages = {i--iii}, file = {Daviees et al. - 2014 - Open Data Growing Up and Getting Specific.pdf:C\:\\Users\\carst\\Zotero\\storage\\TUTMKIW8\\Daviees et al. 
- 2014 - Open Data Growing Up and Getting Specific.pdf:application/pdf}, } @techreport{bauer_zukunftsfahige_2020-1, title = {Zukunftsfähige {Städte} und {Regionen}. {Eine} neue {Strategie} für die breite {Umsetzung} nachhaltiger {Stadtentwicklung} in {Deutschland}}, author = {Bauer, Wilhelm and von Radecki, Alanus and Ottendörfer, Eva}, year = {2020}, } @book{noauthor_hightech_nodate-1, title = {Hightech {Forum}: {Offene} {Wissenschaft} und {Innovation}}, url = {https://www.hightech-forum.de/beratungsthemen/offene-wissenschaft-und-innovation/}, language = {de}, note = {Publication Title: Hightech Forum}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\WRG6MKVU\\offene-wissenschaft-und-innovation.html:text/html}, } @article{oliveira_investigations_2019-1, title = {Investigations into {Data} {Ecosystems}: a systematic mapping study}, volume = {61}, issn = {0219-3116}, shorttitle = {Investigations into {Data} {Ecosystems}}, url = {https://doi.org/10.1007/s10115-018-1323-6}, doi = {10.1007/s10115-018-1323-6}, abstract = {Data Ecosystems are socio-technical complex networks in which actors interact and collaborate with each other to find, archive, publish, consume, or reuse data as well as to foster innovation, create value, and support new businesses. While the Data Ecosystem field is thus arguably gaining in importance, research on this subject is still in its early stages of development. Up until now, not many academic papers related to Data Ecosystems have been published. Furthermore, to the best of our knowledge, there has been no systematic review of the literature on Data Ecosystems. In this study, we provide an overview of the current literature on Data Ecosystems by conducting a systematic mapping study. This study is intended to function as a snapshot of the research in the field and by doing so identifies the different definitions of Data Ecosystem and analyzes the evolution of Data Ecosystem research. 
The studies selected have been classified into categories related to the study method, contribution, research topic, and ecosystem domains. Finally, we analyze how Data Ecosystems are structured and organized, and what benefits can be expected from Data Ecosystems and what their limitations are.}, language = {en}, number = {2}, journal = {Knowledge and Information Systems}, author = {Oliveira, Marcelo Iury S. and Barros Lima, Glória de Fátima and Farias Lóscio, Bernadette}, month = jan, year = {2019}, pages = {589--630}, } @phdthesis{nwatchock_a_koul_framework_2019-1, type = {{PhD} {Thesis}}, title = {A framework for fair and responsible data market ecosystems}, url = {https://archive-ouverte.unige.ch/unige:121388}, abstract = {As access to information has become critically important in our society, we are witnessing an “information race” where many initiatives for data access are proliferating. Recently, a new economy around data has emerged with a growing number of data markets. The term data market covers a whole range of activities where value is derived from data, thus providing benefits to many stakeholders. The data market ecosystem is for its most part uncontrolled, and the actions for creating a secure space are highly fragmented. These main issues undermine the emergence and the development of this critically important ecosystem for the future. Hence, this dissertation addresses the question of the design of fair and responsible data market ecosystems. 
We study the constituents of data markets and propose a global approach towards the design of a framework for fair and responsible data market ecosystems enabling transparency, trust, fairness and accountability.}, language = {en}, school = {University of Geneva}, author = {Nwatchock A Koul, Aman Sabrina}, year = {2019}, doi = {10.13097/archive-ouverte/unige:121388}, file = {Snapshot:C\:\\Users\\carst\\Zotero\\storage\\8LIDSV8K\\unige121388.html:text/html;unige_121388_attachment01.pdf:C\:\\Users\\carst\\Zotero\\storage\\8L39PT6K\\unige_121388_attachment01.pdf:application/pdf}, }