@article{dumphart_robust_2023, title = {Robust deep learning-based gait event detection across various pathologies}, volume = {18}, copyright = {CC-BY}, issn = {1932-6203}, url = {https://dx.plos.org/10.1371/journal.pone.0288555}, doi = {10.1371/journal.pone.0288555}, abstract = {The correct estimation of gait events is essential for the interpretation and calculation of 3D gait analysis (3DGA) data. Depending on the severity of the underlying pathology and the availability of force plates, gait events can be set either manually by trained clinicians or detected by automated event detection algorithms. The downside of manually estimated events is the tedious and time-intensive work which leads to subjective assessments. For automated event detection algorithms, the drawback is, that there is no standardized method available. Algorithms show varying robustness and accuracy on different pathologies and are often dependent on setup or pathology-specific thresholds. In this paper, we aim at closing this gap by introducing a novel deep learning-based gait event detection algorithm called IntellEvent , which shows to be accurate and robust across multiple pathologies. For this study, we utilized a retrospective clinical 3DGA dataset of 1211 patients with four different pathologies (malrotation deformities of the lower limbs, club foot, infantile cerebral palsy (ICP), and ICP with only drop foot characteristics) and 61 healthy controls. We propose a recurrent neural network architecture based on long-short term memory (LSTM) and trained it with 3D position and velocity information to predict initial contact (IC) and foot off (FO) events. We compared IntellEvent to a state-of-the-art heuristic approach and a machine learning method called DeepEvent. IntellEvent outperforms both methods and detects IC events on average within 5.4 ms and FO events within 11.3 ms with a detection rate of ≥ 99\% and ≥ 95\%, respectively. 
Our investigation on generalizability across laboratories suggests that models trained on data from a different laboratory need to be applied with care due to setup variations or differences in capturing frequencies.}, language = {en}, number = {8}, urldate = {2023-08-17}, journal = {PLOS ONE}, author = {Dumphart, Bernhard and Slijepcevic, Djordje and Zeppelzauer, Matthias and Kranzl, Andreas and Unglaube, Fabian and Baca, Arnold and Horsak, Brian}, editor = {Srinivasan, Kathiravan}, month = aug, year = {2023}, keywords = {Artificial intelligence, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, Department Medien und Digitale Technologien, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine learning, Phaidra, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Wiss. 
Beitrag, best, best-bdumphart, peer-reviewed}, pages = {e0288555}, } @inproceedings{slijepcevic_towards_2023, address = {Heidelberg}, series = {{GAMMA} 2023 {Abstracts}}, title = {Towards more transparency: {The} utility of {Grad}-{CAM} in tracing back deep learning based classification decisions in children with cerebral palsy}, volume = {100}, copyright = {Copyright}, shorttitle = {Towards more transparency}, url = {https://www.sciencedirect.com/science/article/pii/S0966636222006828}, doi = {10.1016/j.gaitpost.2022.11.045}, abstract = {GAMMA Conference}, language = {en}, urldate = {2023-03-10}, booktitle = {Gait \& {Posture}}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Unglaube, Fabian and Kranzl, Andreas and Breiteneder, Christian and Horsak, Brian}, month = mar, year = {2023}, note = {Projekt: I3D}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Department Gesundheit, Department Medien und Digitale Technologien, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, SP CDHSI Motor Rehabilitation, Vortrag, Wiss. 
Beitrag, best, peer-reviewed}, pages = {32--33}, } @inproceedings{zeppelzauer_automatic_2018, address = {Yokohama, Japan}, title = {Automatic {Prediction} of {Building} {Age} from {Photographs}}, isbn = {978-1-4503-5046-4}, url = {https://arxiv.org/pdf/1804.02205}, doi = {10/ghpp2k}, language = {en}, urldate = {2018-10-10}, booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia} {Retrieval} ({ICMR} '18)}, publisher = {ACM Press}, author = {Zeppelzauer, Matthias and Despotovic, Miroslav and Sakeena, Muntaha and Koch, David and Döller, Mario}, year = {2018}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Visual Computing, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {126--134}, } @inproceedings{luh_penquest_2022, address = {Tunis, Tunisia}, title = {{PenQuest} {Reloaded}: {A} {Digital} {Cyber} {Defense} {Game} for {Technical} {Education}}, isbn = {978-1-66544-434-7}, shorttitle = {{PenQuest} {Reloaded}}, url = {https://ieeexplore.ieee.org/document/9766700/}, doi = {10.1109/EDUCON52537.2022.9766700}, abstract = {Today’s IT and OT infrastructure is threatened by a plethora of cyber-attacks conducted by actors with different motivations and means. Furthermore, the complexity of these exposed systems as well as the adversaries’ sophisticated technical arsenal makes it increasingly difficult to plan and implement an organization’s defense. Understanding the link between specific attacks and effective mitigating measures is particularly challenging – as is understanding the underlying information security concepts. 
To support the training of current, and more importantly, nascent security engineers, we propose PenQuest, a digital attack and defense game where an attacker attempts to compromise an abstracted IT infrastructure and the defender works to prevent or mitigate the threat. The game is based on MITRE ATT\&CK, D3FEND, and the NIST SP 800-53 security standard and incorporates a multitude of concepts such as cyber kill chains, attack vectors, network segmentation, and more. PenQuest is built to support security education and risk assessment and was evaluated with a class of engineering students as well as independent security experts. Initial results show a significant increase in knowledge retention and attest to the game’s feasibility for educational use.}, urldate = {2023-01-25}, booktitle = {2022 {IEEE} {Global} {Engineering} {Education} {Conference} ({EDUCON})}, publisher = {IEEE}, author = {Luh, Robert and Eresheim, Sebastian and Größbacher, Stefanie and Petelin, Thomas and Mayr, Florian and Tavolato, Paul and Schrittwieser, Sebastian}, month = mar, year = {2022}, note = {Projekt: PenQuest}, keywords = {Department Medien und Digitale Technologien, Education / Computers \& Technology, Forschungsgruppe Media Computing, Games, Institut für Creative Media Technologies, Paper, Security, Vortrag, best, peer-reviewed}, pages = {906--914}, } @inproceedings{judmaier_untersuchungsmethoden_2014, address = {Berlin}, title = {Untersuchungsmethoden zur {Gendersensibilität} von {Arbeitsplätzen} im {Umfeld} sicherheitskritischer {Systeme}}, booktitle = {Gender {UseT}}, publisher = {Kompetenzzentrum Technik – Diversity – Chancengleichheit e.V.}, author = {Judmaier, Peter and Pohl, Margit and Michelberger, Frank and Bichler, Romana and Erharter, Dorothea and Fränzl, Thomas and Kunz, Angelika}, year = {2014}, keywords = {Creative Industries, Department Gesundheit, Department Technologie, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für 
Gesundheitswissenschaften, Institut für Mobilitätsforschung, Publikationstyp Schriftpublikation, SP IGW Health Promotion \& Healthy Ageing, Studiengang Physiotherapie, best, peer-reviewed, user centered design, ⛔ No DOI found}, } @inproceedings{blumenstein_evaluating_2016, address = {Baltimore, MD, USA}, title = {Evaluating {Information} {Visualization} on {Mobile} {Devices}: {Gaps} and {Challenges} in the {Empirical} {Evaluation} {Design} {Space}}, isbn = {978-1-4503-4818-8}, url = {https://phaidra.fhstp.ac.at/o:4873}, doi = {10/cwc6}, abstract = {With their increasingly widespread use, mobile devices have become a highly relevant target environment for Information Visualization. However, far too little attention has been paid to evaluation of interactive visualization techniques on mobile devices. To fill this gap, this paper provides a structured overview of the commonly used evaluation approaches for mobile visualization. For this, it systematically reviews the scientific literature of major InfoVis and HCI venues and categorizes the relevant work based on six dimensions circumscribing the design and evaluation space for visualization on mobile devices. Based on the 21 evaluations reviewed, reproducibility, device variety and usage environment surface as the three main issues in evaluation of information visualization on mobile devices. 
To overcome these issues, we argue for a transparent description of all research aspects and propose to focus more on context of usage and technology.}, booktitle = {Proceedings of 2016 {Workshop} on {Beyond} {Time} {And} {Errors}: {Novel} {Evaluation} {Methods} {For} {Visualization}}, publisher = {ACM}, author = {Blumenstein, Kerstin and Niederer, Christina and Wagner, Markus and Schmiedl, Grischa and Rind, Alexander and Aigner, Wolfgang}, year = {2016}, note = {Projekt: KAVA-Time Projekt: Couragierte Gemeinde Projekt: VALID Projekt: VisOnFire}, keywords = {Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, best, best-kblumenstein, best-lbaigner, best-lbwagnerm, evaluation, information visualization, mobile, peer-reviewed}, pages = {125--132}, } @inproceedings{wagner_problem_2014, address = {Paris}, title = {Problem {Characterization} and {Abstraction} for {Visual} {Analytics} in {Behavior}-{Based} {Malware} {Pattern} {Analysis}}, url = {https://ifs.tuwien.ac.at/~rind/preprint/wagner_2014_VizSec_problem.pdf}, doi = {10/cv8p}, abstract = {Behavior-based analysis of emerging malware families involves finding suspicious patterns in large collections of execution traces. This activity cannot be automated for previously unknown malware families and thus malware analysts would benefit greatly from integrating visual analytics methods in their process. However existing approaches are limited to fairly static representations of data and there is no systematic characterization and abstraction of this problem domain. Therefore we performed a systematic literature study, conducted a focus group as well as semi-structured interviews with 10 malware analysts to elicit a problem abstraction along the lines of data, users, and tasks. 
The requirements emerging from this work can serve as basis for future design proposals to visual analytics-supported malware pattern analysis.}, booktitle = {Proceedings of the {Eleventh} {Workshop} on {Visualization} for {Cyber} {Security}}, publisher = {ACM}, author = {Wagner, Markus and Aigner, Wolfgang and Rind, Alexander and Dornhackl, Hermann and Kadletz, Konstantin and Luh, Robert and Tavolato, Paul}, editor = {Harrison, Lane}, month = nov, year = {2014}, note = {Projekt: TARGET Projekt: KAVA-Time}, keywords = {2014, Creative Industries, Department Technologie, FH SP Cyber Security, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Forschungsgruppe Secure Societies, Institut für Creative Media Technologies, Institut für IT Sicherheitsforschung, KAVA-Time, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Visual analytics, best, best-lbwagnerm, evaluation, malicious software, malware analysis, peer-reviewed, problem characterization and abstraction, user centered design, visualization}, pages = {9--16}, } @inproceedings{bogl_visual_2014, title = {Visual {Analytics} {Methods} to {Guide} {Diagnostics} for {Time} {Series} {Model} {Predictions}}, url = {https://publik.tuwien.ac.at/files/PubDat_232994.pdf}, abstract = {Visual Analytics methods are used to guide domain experts in the task of model selection through an interactive visual exploration environment with short feedback cycles. Evaluation showed the benefits of this approach. However, experts also expressed the demand for prediction capabilities as being already important during the model selection process. Furthermore, good model candidates might show only small variations in the information criteria and structures which are not easily recognizable in the residual plots. To achieve this, we propose TiMoVA-Predict to close the gap and to support different types of predictions with a Visual Analytics approach. 
Providing prediction capabilities in addition to the information criteria and the residual plots, allows for interactively assessing the predictions during the model selection process via an visual exploration environment.}, urldate = {2022-05-24}, booktitle = {Proceedings of the {IEEE} {VIS} 2014 {Workshop} {Visualization} for {Predictive} {Analytics}, {VPA}}, author = {Bögl, Markus and Aigner, Wolfgang and Filzmoser, Peter and Gschwandtner, Theresia and Lammarsch, Tim and Miksch, Silvia and Rind, Alexander}, editor = {Perer, Adam and Bertini, Enrico and Maciejewski, Ross and Sun, Jimeng}, year = {2014}, note = {Projekt: KAVA-Time}, keywords = {Creative Industries, Department Technologie, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, best, peer-reviewed, visualization, ⛔ No DOI found}, } @article{zielinski_persistence_2021, title = {Persistence {Codebooks} for {Topological} {Data} {Analysis}}, volume = {54}, copyright = {Open Access}, issn = {0269-2821}, url = {https://rdcu.be/b6ENZ}, doi = {10.1007/s10462-020-09897-4}, journal = {Journal of Artificial Intelligence Review}, author = {Zielinski, Bartosz and Lipinski, Michal and Juda, Mateusz and Zeppelzauer, Matthias and Dlotko, Pawel}, year = {2021}, keywords = {3D surface classification, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Green OA, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Open Access, Surface texture analysis, Visual Computing, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis}, pages = {1969--2009}, } @inproceedings{von_suess_schwein_2016, address = {Wien}, title = {Ein {Schwein} für einen {Tag} – {Alternate} {Reality} {Game} aus dem {Forschungslab}}, url = {http://www.fh-vie.ac.at/Forschung/FH-Forschungsforum-2016}, booktitle = {Forschungsforum der österreichischen {Fachhochschulkonferenz}}, author = {von Suess, Rosa and Biechele, Jennifer and Prochaska, Harald and Blumenstein, Kerstin and Ederer, Thomas and Gebesmair, Andreas}, year = {2016}, keywords = {2016, Beitrag in Tagungsband, Department Technologie, Diversität und Demokratieentwicklung, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, SP IlseA Partizipation, SP MW Global Media Markets \& Local Practices, Wiss. Beitrag, best, best lbvonsuess, peer-reviewed, ⛔ No DOI found}, } @inproceedings{iber_auditory_2019, address = {Nottingham, United Kingdom}, title = {Auditory {Augmented} {Reality} for {Cyber} {Physical} {Production} {Systems}}, isbn = {978-1-4503-7297-8}, doi = {10.1145/3356590.3356600}, abstract = {We describe a proof-of-concept approach on the sonification of estimated operation states of 3D printing processes. The results of this study form the basis for the development of an “intelligent” noise protection headphone as part of Cyber Physical Production Systems, which provides auditorily augmented information to machine operators and enables radio communication between them. Further application areas are implementations in control rooms (equipped with multichannel loudspeaker systems) and utilization for training purposes. The focus of our research lies on situation-specific acoustic processing of conditioned machine sounds and operation related data with high information content, considering the often highly auditorily influenced working knowledge of skilled workers. 
As a proof-of-concept the data stream of error probability estimations regarding partly manipulated 3D printing processes was mapped to three sonification models, giving evidence about momentary operation states. The neural network applied indicates a high accuracy ({\textgreater}93\%) concerning error estimation distinguishing between normal and manipulated operation states. None of the manipulated states could be identified by listening. An auditory augmentation, respectively sonification of these error estimations provides a considerable benefit to process monitoring.}, booktitle = {{AudioMostly} ({AM}'19)}, publisher = {ACM New York, NY, USA}, author = {Iber, Michael and Lechner, Patrik and Jandl, Christian and Mader, Manuel and Reichmann, Michael}, year = {2019}, note = {Projekt: IML}, keywords = {Auditory Display, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Forschungsgruppe Media Creation, Immersive Media (AR, VR, 360°), Institut für Creative Media Technologies, Smart Manufacturing, Vortrag, best, best-lbiber, peer-reviewed, ⚠️ Invalid DOI}, } @article{roider_recognition_2015, title = {Recognition of sustainable mobility as basis for the transition into a digitial gaming concept for young people}, url = {http://www.iatbr2015.org.uk/index.php/iatbr/iatbr2015/paper/view/399}, abstract = {Mobility behaviour is often shaped through social habituation in childhood and adolescence. Persons who are accustomed to use sustainable transport modes will stick to their manner later on in life too. Since children and young adults undergo drastic physical and mental developmental steps in a short period of time, awareness raising campaigns must exactly match with different age groups and their current mobility behavior. 
Against this background of behavioral development of children, the research project BewusstMobil developed a concept for a smartphone application which automatically collects individual travel behaviour patterns, quantifies current environmental-friendly and health promoting behaviour and acts as platform for a game which young people can play online. The basic idea is that children and young people aged 12 to 18 years collect points by their current mobility behavior and chosen transport mode. Depending on how they travel (bus, tram, walking, etc.) and how environmental-friendly or healthy this mode of transport is, they collect more or less points i.e. for example if a person walks or travels by train, he/she gets more points as if traveling by car. Recognition of trip data (start and end points, mileage, time used, transport mode) is activated after the start of the application in the background and as soon as the person moves. Data are assessed by using GPS positioning and movement data such as speed and acceleration. In most cases, indicators for sustainable mobility are available on a very general level and rather aimed at assessing infrastructure projects. However, in this application, individual mobility and the benefit for the young transport user has to be evaluated. Thus, indicators known from research and practice are selected and transferred to an individual level. This includes pollutants and GHG-emissions (ecological impacts), costs (economic impacts), travel time and health benefits (social aspects). Depending on the selected transport mode, quantities are calculated for each indicator. The factors included in the calculation are the trip length [km] and the trip duration [min], which are determined through GPS tracking. The normalization is done by monetization, mainly based on recommendations for a cost-benefit analysis, i.e. different dimensions [g / veh-km, min, €] are converted to costs by using market prices (or "shadow prices"). 
These values are weighted according to their importance in order to ensure the assessment of environmentally friendliness and health promotion of individual mobility of children and adolescents. These parameters form the basis for the games’ scoring system. Points earned due to individual mobility behavior, can be traded in for proceeding on virtual routes of different modes on a map of Austria in the smartphone application, in order to reach virtual locations where the player can win goodies (promotional gifts, shopping vouchers, concert tickets, etc.). As several players try to reach these locations simultaneously, and the number of goodies at each location is limited the game creates motivation for playing and competing among participants. The application was tested in three different schools in the Province of Lower Austria covering pupils aged 13 to 18. A clear knowledge of the environmental impact of the transport system was identified prior to the test phase, but this knowledge hardly influences the choice of transport mode currently. However, a change in behavior had been reported during the gaming. Since this is not due to the transfer of (new) knowledge, it can be assumed that this shift was primarily caused by the game design and the information contained in the game mechanics that were incorporated resulting in a change of behavior. Generally, data collection as well as the overall concept and usage were clear for all users. However, younger people; age 13 years seems to lose interest faster than the other age classes. In particular, the interests concerning the competition were more relevant to higher classes, but at the same time the willingness to cheat increased. Thus, the full technical functionality was mentioned as requirement for further use of the application. 
Particularly, mode detection and tracking of individual trips have to ensure high accuracy and thus a fair scoring, since the desire for fair playing conditions was at the top in all age groups. From a scientific point of view, the application offers the opportunity to learn more about mobility behavior of young people and to use the digital world to raise the awareness and to influence user’s mode choice in a positive way. The presentation will give an overview of the data collection, the scoring system quantifying sustainable mobility behavior and illustrate the game concept based on the automatic recognition of individual trip data. Moreover, results of the before and after attitudinal and mobility surveys will demonstrate current mobility behavior, the perception of the application and the influence on the individual mobility behavior of young people based on current data.}, author = {Roider, Oliver and Judmaier, Peter and Barberi, Alessandro and Michelberger, Frank}, year = {2015}, note = {Projekt: FORSCH31}, keywords = {2015, Center for Sustainable Mobility, Department Medien und Digitale Technologien, Department Technologie, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für Mobilitätsforschung, Publikationstyp Schriftpublikation, best, interdisziplinär, peer-reviewed, ⛔ No DOI found}, } @inproceedings{judmaier_untersuchungsmethoden_2014-2, address = {Berlin}, title = {Untersuchungsmethoden zur {Gendersensibilität} von {Arbeitsplätzen} im {Umfeld} sicherheitskritischer {Systeme}}, booktitle = {Gender {UseT}}, publisher = {Kompetenzzentrum Technik – Diversity – Chancengleichheit e.V.}, author = {Judmaier, Peter and Pohl, Margit and Michelberger, Frank and Bichler, Romana and Erharter, Dorothea and Fränzl, Thomas and Kunz, Angelika}, year = {2014}, note = {Projekt: GenSiSys}, keywords = {Center for Sustainable Mobility, Creative Industries, Department Gesundheit, Department Technologie, Forschungsgruppe Media Computing, 
Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Institut für Mobilitätsforschung, Publikationstyp Schriftpublikation, SP IGW Health Promotion \& Healthy Ageing, Studiengang Physiotherapie, best, peer-reviewed, user centered design, ⛔ No DOI found}, } @incollection{wagner_visual_2017, title = {Visual {Analytics}: {Foundations} and {Experiences} in {Malware} {Analysis}}, isbn = {978-1-4987-7641-7}, abstract = {This chapter starts by providing some background in behavior-based malware analysis. Subsequently, it introduces VA and its main components based on the knowledge generation model for VA (Sacha et al., 2014). Then, it demonstrates the applicability of VA in in this subfield of software security with three projects that illustrate practical experience of VA methods: MalwareVis (Zhuo et al., 2012) supports network forensics and malware analysis by visually assessing TCP and DNS network streams. SEEM (Gove et al., 2014) allows visual comparison of multiple large attribute sets of malware samples, thereby enabling bulk classification. KAMAS (Wagner et al. 2017) is a knowledge-assisted visualization system for behavior-based malware forensics enabled by API calls and system call traces. 
Future directions in visual analytics for malware analysis conclude the chapter.}, booktitle = {Empirical {Research} for {Software} {Security}: {Foundations} and {Experience}}, publisher = {CRC/Taylor and Francis}, author = {Wagner, Markus and Sacha, Dominik and Rind, Alexander and Fischer, Fabian and Luh, Robert and Schrittwieser, Sebastian and Keim, Daniel A and Aigner, Wolfgang}, editor = {Othmane, Lotfi Ben and Jaatun, Martin Gilje and Weippl, Edgar}, year = {2017}, note = {Projekt: KAVA-Time}, keywords = {FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Visual Computing, Visual analytics, Wiss. Beitrag, best, best-lbwagnerm, data, interaction, knowledge generation, malware analysis, model, peer-reviewed, visualization}, pages = {139--171}, } @article{iber_auditory_2020, title = {Auditory augmented process monitoring for cyber physical production systems}, issn = {1617-4909, 1617-4917}, url = {http://link.springer.com/10.1007/s00779-020-01394-3}, doi = {10/ghz24q}, language = {en}, urldate = {2020-03-30}, journal = {Personal and Ubiquitous Computing}, author = {Iber, Michael and Lechner, Patrik and Jandl, Christian and Mader, Manuel and Reichmann, Michael}, month = mar, year = {2020}, note = {Projekt: IML}, keywords = {Auditory Display, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Forschungsgruppe Media Creation, Green OA, Immersive Media (AR, VR, 360°), Institut für Creative Media Technologies, Open Access, Smart Manufacturing, Wiss. 
Beitrag, best, best-lbiber, peer-reviewed}, } @article{wagner_knowledge-assisted_2017, title = {A knowledge-assisted visual malware analysis system: design, validation, and reflection of {KAMAS}}, issn = {0167-4048}, shorttitle = {A knowledge-assisted visual malware analysis system}, url = {http://www.sciencedirect.com/science/article/pii/S0167404817300263}, doi = {10/b5j9}, abstract = {IT-security experts engage in behavior-based malware analysis in order to learn about previously unknown samples of malicious software (malware) or malware families. For this, they need to find and categorize suspicious patterns from large collections of execution traces. Currently available systems do not meet the analysts' needs which are described as: visual access suitable for complex data structures, visual representations appropriate for IT-security experts, provision of workflow-specific interaction techniques, and the ability to externalize knowledge in the form of rules to ease the analysis process and to share with colleagues. To close this gap, we designed and developed KAMAS, a knowledge-assisted visualization system for behavior-based malware analysis. This paper is a design study that describes the design, implementation, and evaluation of the prototype. We report on the validation of KAMAS with expert reviews, a user study with domain experts and focus group meetings with analysts from industry. Additionally, we reflect on the acquired insights of the design study and discuss the advantages and disadvantages of the applied visualization methods. An interesting finding is that the arc-diagram was one of the preferred visualization techniques during the design phase but did not provide the expected benefits for finding patterns. 
In contrast, the seemingly simple looking connection line was described as supportive in finding the link between the rule overview table and the rule detail table which are playing a central role for the analysis in KAMAS.}, number = {67}, urldate = {2017-02-17}, journal = {Computers \& Security}, author = {Wagner, Markus and Rind, Alexander and Thür, Niklas and Aigner, Wolfgang}, year = {2017}, note = {Projekt: KAVA-Time}, keywords = {Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Visual Computing, Visual analytics, Wiss. Beitrag, behavior-based, best, best-lbaigner, best-lbwagnerm, design study, interactive, knowledge generation, malicious software, malware analysis, peer-reviewed, prototype, visualization}, pages = {1--15}, } @article{stitz_thermalplot_2015, title = {{ThermalPlot}: {Visualizing} {Multi}-{Attribute} {Time}-{Series} {Data} {Using} a {Thermal} {Metaphor}}, volume = {22}, issn = {1077-2626}, url = {http://thinkh.github.io/paper-2015-thermalplot/resources/2016_thermalplot_preprint.pdf}, doi = {10/ghppzs}, abstract = {Multi-attribute time-series data plays a vital role in many different domains, such as economics, sensor networks, and biology. An important task when making sense of such data is to provide users with an overview to identify items that show an interesting development over time, including both absolute and relative changes in multiple attributes simultaneously. However, this is not well supported by existing visualization techniques. To address this issue, we present ThermalPlot, a visualization technique that summarizes combinations of multiple attributes over time using an items position, the most salient visual variable. 
More precisely, the x-position in the ThermalPlot is based on a user-defined degree-of-interest (DoI) function that combines multiple attributes over time. The y-position is determined by the relative change in the DoI value (DDoI) within a user-specified time window. Animating this mapping via a moving time window gives rise to circular movements of items over time—as in thermal systems. To help the user to identify important items that match user-defined temporal patterns and to increase the techniques scalability, we adapt the level of detail of the items representation based on the DoI value. Furthermore, we present an interactive exploration environment for multi-attribute time-series data that ties together a carefully chosen set of visualizations, designed to support analysts in interacting with the ThermalPlot technique. We demonstrate the effectiveness of our technique by means of two usage scenarios that address the visual analysis of economic development data and of stock market data.}, journal = {IEEE Transactions on Visualization and Computer Graphics}, author = {Stitz, Holger and Gratzl, Samuel and Aigner, Wolfgang and Streit, Marc}, year = {2015}, note = {Projekt: KAVA-Time Projekt: VisOnFire}, keywords = {Economics, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Market research, Trajectory, Visual Computing, Visualization, Wiss. Beitrag, animation, best, best-lbaigner, data visualization, encoding, focus+context, multi-attribute data, peer-reviewed, semantic zooming, time-dependent data}, pages = {2594--2607}, } @article{lammarsch_mind_2014, title = {Mind the {Time}: {Unleashing} {Temporal} {Aspects} in {Pattern} {Discovery}}, volume = {38}, url = {http://publik.tuwien.ac.at/files/PubDat_220406.pdf}, doi = {10/f3szvj}, abstract = {Temporal Data Mining is a core concept of Knowledge Discovery in Databases handling time-oriented data. 
State-of-the-art methods are capable of preserving the temporal order of events as well as the temporal intervals in between. The temporal characteristics of the events themselves, however, can likely lead to numerous uninteresting patterns found by current approaches. We present a new definition of the temporal characteristics of events and enhance related work for pattern finding by utilizing temporal relations, like meets, starts, or during, instead of just intervals between events. These prerequisites result in a new procedure for Temporal Data Mining that preserves and mines additional time-oriented information. Our procedure is supported by an interactive visual interface for exploring the patterns. Furthermore, we illustrate the efficiency of our procedure presenting a benchmark of the procedure's run-time behavior. A usage scenario shows how the procedure can provide new insights.}, journal = {Computers \& Graphics}, author = {Lammarsch, Tim and Aigner, Wolfgang and Bertone, Alessio and Miksch, Silvia and Rind, Alexander}, editor = {Jorge, Joaquim and Schumann, Heidrun and Pohl, Margit and Schulz, Hans-Jörg}, year = {2014}, note = {{\textless}br /{\textgreater} Projekt: KAVA-Time}, keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, KDD, Pattern Finding, Time-Oriented Data, Visual Computing, Wiss. 
Beitrag, best, data mining, interactive visualization, peer-reviewed, temporal data mining, visual analytics}, pages = {38--50}, } @article{zeppelzauer_interactive_2016, title = {Interactive {3D} {Segmentation} of {Rock}-{Art} by {Enhanced} {Depth} {Maps} and {Gradient} {Preserving} {Regularization}}, volume = {9}, issn = {1556-4673}, url = {https://publik.tuwien.ac.at/files/publik_258520.pdf}, doi = {10/ghpp2n}, number = {4}, journal = {ACM Journal on Computing and Cultural Heritage}, author = {Zeppelzauer, Matthias and Poier, Georg and Seidl, Markus and Reinbacher, Christian and Schulter, Samuel and Breiteneder, Christian and Bischof, Horst}, month = jul, year = {2016}, note = {Article 19 Projekt: PITOTI 3D}, keywords = {Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, peer-reviewed}, pages = {19:1--19:30}, } @article{wagner_kavagait_2018, title = {{KAVAGait}: {Knowledge}-{Assisted} {Visual} {Analytics} for {Clinical} {Gait} {Analysis}}, volume = {25}, url = {https://doi.org/10.1109/TVCG.2017.2785271}, doi = {10/ghppzn}, abstract = {In 2014, more than 10 million people in the US were affected by an ambulatory disability. Thus, gait rehabilitation is a crucial part of health care systems. The quantification of human locomotion enables clinicians to describe and analyze a patient’s gait performance in detail and allows them to base clinical decisions on objective data. These assessments generate a vast amount of complex data which need to be interpreted in a short time period. We conducted a design study in cooperation with gait analysis experts to develop a novel Knowledge-Assisted Visual Analytics solution for clinical Gait analysis (KAVAGait). 
KAVAGait allows the clinician to store and inspect complex data derived during clinical gait analysis. The system incorporates innovative and interactive visual interface concepts, which were developed based on the needs of clinicians. Additionally, an explicit knowledge store (EKS) allows externalization and storage of implicit knowledge from clinicians. It makes this information available for others, supporting the process of data inspection and clinical decision making. We validated our system by conducting expert reviews, a user study, and a case study. Results suggest that KAVAGait is able to support a clinician during clinical practice by visualizing complex gait data and providing knowledge of other clinicians.}, number = {3}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, author = {Wagner, Markus and Slijepcevic, Djordje and Horsak, Brian and Rind, Alexander and Zeppelzauer, Matthias and Aigner, Wolfgang}, year = {2018}, note = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Design Study, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Healthcare, Human Gait Analysis, Human-Computer Interaction, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Visual analytics, Wiss. 
Beitrag, best, best-bhorsak, best-lbaigner, best-lbwagnerm, best-mzeppelzauer, information visualization, knowledge generation, peer-reviewed}, pages = {1528--1542}, } @article{zeppelzauer_study_2016, title = {A {Study} on {Topological} {Descriptors} for the {Analysis} of {3D} {Surface} {Texture}}, abstract = {Methods from computational topology are becoming more and more popular in computer vision and have shown to improve the state-of-the-art in several tasks. In this paper, we investigate the applicability of topological descriptors in the context of 3D surface analysis for the classification of different surface textures. We present a comprehensive study on topological descriptors, investigate their robustness and expressiveness and compare them with state-of-the-art methods. Results show that class-specific information is reflected well in topological descriptors. The investigated descriptors can directly compete with non-topological descriptors and capture orthogonal information. Moreover they improve the state-of-the-art in combination with non-topological descriptors.}, journal = {Journal on Computer and System Sciences}, author = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus}, year = {2016}, note = {Projekt: PITOTI 3D}, keywords = {2016, 3D surface classification, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Publikationstyp Schriftpublikation, SP, Surface texture analysis, Wiss. 
Beitrag, best, best-lbseidl, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis, ⛔ No DOI found}, pages = {60}, } @inproceedings{horsak_explainable_2020, address = {München, Deutschland}, title = {Explainable {Artificial} {Intelligence} ({XAI}) und ihre {Anwendung} auf {Klassifikationsprobleme} in der {Ganganalyse}}, booktitle = {Abstractband des 3. {GAMMA} {Kongress}}, author = {Horsak, Brian and Dumphart, Bernhard and Slijepcevic, Djordje and Zeppelzauer, Matthias}, year = {2020}, note = {Projekt: ReMoCap-Lab Projekt: DHLab Projekt: I3D}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, Eintrag überprüfen, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Green OA, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Vortrag, Wiss. 
Beitrag, best, best-bhorsak, peer-reviewed, ⛔ No DOI found}, } @article{slijepcevic_automatic_2018, title = {Automatic {Classification} of {Functional} {Gait} {Disorders}}, volume = {22}, issn = {2168-2194}, url = {https://arxiv.org/abs/1712.06405}, doi = {10/ghz24w}, number = {5}, urldate = {2017-12-21}, journal = {IEEE Journal of Biomedical and Health Informatics}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Raberger, Anna-Maria and Schwab, Caterine and Schuller, Michael and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. Beitrag, best, best-bhorsak, best-mzeppelzauer, peer-reviewed}, pages = {1653--1661}, } @article{horst_explaining_2020, title = {Explaining automated gender classification of human gait}, volume = {81, supplement 1}, url = {http://www.sciencedirect.com/science/article/pii/S0966636220303568}, doi = {10/ghr9k6}, language = {en}, urldate = {2020-09-14}, journal = {Gait \& Posture}, author = {Horst, F. and Slijepcevic, D. and Zeppelzauer, M. and Raberger, A. M. and Lapuschkin, S. and Samek, W. and Schöllhorn, W. I. and Breiteneder, C. 
and Horsak, B.}, year = {2020}, note = {Projekt: ReMoCap-Lab Projekt: I3D}, keywords = {2020, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Department Gesundheit, Digital Health, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Institutional Access, Machine Learning, Media Computing Group, Poster, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Vortrag, Wiss. Beitrag, best, best-bhorsak, peer-reviewed}, pages = {159--160}, } @inproceedings{iber_pilotstudie_2015, address = {FH St. Pölten}, title = {Pilotstudie zur sonifikationsgestützten {Ganganalyse}}, isbn = {978-3-86488-090-2}, abstract = {Verletzungs- oder krankheitsbedingte Beeinträchtigungen des Ganges stellen die physiotherapeutische Behandlung vor große Herausforderungen. Aktuelle Technologien erlauben heute die Entwicklung preiswerter tragbarer Ganganalysesysteme, die den gewohnten Bewegungsablauf nicht einschränken und auch außerhalb eines Labors verwendet werden können. Über eine diagnostische Anwendung hinaus können sie auch den motorischen Lernprozess in der physiotherapeutischen Behandlung unterstützen. Eine akustische Darstellung des Abrollverhaltens erlaubt PatientInnen mögliche Abweichungen wahrzunehmen und ermöglicht folglich Eigenkontrolle und Eigenständigkeit beim Üben. Auf Grundlage dieser Rahmenbedingungen wurde ein Hardware-Prototyp bestehend aus einem Paar mit Sensoren ausgestatteter Schuhsohlen und einem Mikroprozessor mit BluetoothLE entwickelt, der Bewegungsdaten in Echtzeit an ein handelsübliches mobiles Endgerät schickt. Auf diesem werden die parametrisierten Daten in Echtzeit sonifiziert, d.h. als Klänge synthetisiert, und über Kopfhörer der PatientIn zugespielt. 
Dadurch erhält die PatientIn eine zusätzliche Rückmeldung zu seinem Gangmuster. In einer Pilotstudie wurden Sonifikationsvarianten entwickelt und nach einer Vorauswahl durch PhysiotherapeutInnen durch eine Gruppe gesunder ProbandInnen evaluiert. Darüber hinaus wurde der objektive Einfluss der Sonifikationen auf das Gangmuster anhand von Bewegungsdaten, die mit Druckmessplatten erhobenen wurden, verglichen.}, booktitle = {Forum {Medientechnik} - {Next} {Generation}, {New} {Ideas}}, publisher = {Verlag Werner Hülsbusch, Fachverlag für Medientechnik und -wirtschaft}, author = {Iber, Michael and Horsak, Brian and Bauer, Karin and Kiselka, Anita and Gorgas, Anna-Maria and Dlapka, Ronald and Doppler, Jakob}, year = {2015}, note = {Projekt: CARMA Projekt: DHLab}, keywords = {2015, Biomechanics, Center for Digital Health Innovation, DHLab, Department Gesundheit und Soziales, Department Medien und Digitale Technologien, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Publikationstyp Vortrag, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. Beitrag, best, best-bhorsak, peer-reviewed, project\_carma, project\_sonigait}, pages = {51--68}, } @article{despotovic_prediction_2019, title = {Prediction and analysis of heating energy demand for detached houses by computer vision}, volume = {193}, issn = {0360-5442}, url = {https://www.sciencedirect.com/science/article/pii/S0378778818336430?via%3Dihub}, doi = {10/fsxn}, abstract = {Exterior images of real estate contain a large number of visual clues which allow conclusions about the heating energy demand (HED) of a building. Up to now, HED has been determined by specially trained experts such as architects, civil engineers, etc. either on the basis of consumption data or estimated demand values. 
In this article, we present a novel approach to determine the HED of detached houses. Our suggested approach is based solely on the visual appearance and assumes that exterior images of a building contain a variety of information that allows inferences about the HED of a building. For this, we use the powerful techniques of image analysis and computer vision which are already successfully used in different domains like surveillance, image search, and robotics. The results show that our approach works well and in addition to the HED, the construction period of a building can also be determined. Our algorithm achieves a classification accuracy of 62\% for HED and 57\% for construction age epoch.}, journal = {Energy \& Buildings}, author = {Despotovic, Miroslav and Koch, David and Leiber, Sascha and Döller, Mario and Sakeena, Muntaha and Zeppelzauer, Matthias}, year = {2019}, note = {Projekt: ImmBild Projekt: ImmoAge}, keywords = {Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Wiss. Beitrag, best, peer-reviewed}, pages = {29--35}, } @inproceedings{zeppelzauer_sonicontrol_2018, address = {Seoul, South Korea}, title = {{SoniControl} - {A} {Mobile} {Ultrasonic} {Firewall}}, url = {https://arxiv.org/abs/1807.07617}, doi = {10/gh377f}, abstract = {The exchange of data between mobile devices in the near-ultrasonic frequency band is a new promising technology for near field communication (NFC) but also raises a number of privacy concerns. We present the first ultrasonic firewall that reliably detects ultrasonic communication and provides the user with effective means to prevent hidden data exchange. This demonstration showcases a new media-based communication technology ("data over audio") together with its related privacy concerns. 
It enables users to (i) interactively test out and experience ultrasonic information exchange and (ii) shows how to protect oneself against unwanted tracking.}, urldate = {2018-10-10}, booktitle = {Proceedings of the {ACM} {International} {Conference} on {Multimedia}}, publisher = {ACM Press}, author = {Zeppelzauer, Matthias and Ringot, Alexis and Taurer, Florian}, year = {2018}, note = {arXiv: 1807.07617}, keywords = {Acoustic Cookies, Acoustic Firewall, Acoustic Tracking, Center for Artificial Intelligence, Computer Science - Cryptography and Security, Computer Science - Multimedia, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Ultrasound Data Transmission, Wiss. Beitrag, best, best-aringot, peer-reviewed}, } @article{bernard_vial_2018, title = {{VIAL} – {A} {Unified} {Process} for {Visual}-{Interactive} {Labeling}}, volume = {34}, copyright = {Springer, Berlin, Heidelberg}, issn = {1432-2315}, url = {https://bit.ly/2My1Yrt}, doi = {10/gd5hr3}, abstract = {The assignment of labels to data instances is a fundamental prerequisite for many machine learning tasks. Moreover, labeling is a frequently applied process in visual-interactive analysis approaches and visual analytics. However, the strategies for creating labels usually differ between these two fields. This raises the question whether synergies between the different approaches can be attained. In this paper, we study the process of labeling data instances with the user in the loop, from both the machine learning and visual-interactive perspective. Based on a review of differences and commonalities, we propose the ’Visual-Interactive Labeling‘ (VIAL) process that unifies both approaches. We describe the six major steps of the process and discuss their specific challenges. 
Additionally, we present two heterogeneous usage scenarios from the novel VIAL perspective, one on metric distance learning and one on object detection in videos. Finally, we discuss general challenges to VIAL and point out necessary work for the realization of future VIAL approaches.}, journal = {The Visual Computer}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Sedlmair, Michael and Aigner, Wolfgang}, year = {2018}, note = {Projekt: KAVA-Time Projekt: IntelliGait Projekt: CARMA}, keywords = {Active Learning, Candidate Selection, Center for Artificial Intelligence, Creative Industries, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Interactive Labeling, Labeling Strategies, Machine Learning, Media Computing Group, Visual Interactive Labeling, best, best-mzeppelzauer, information visualization}, pages = {1189--1207}, } @article{bernard_jurgen_taxonomy_2021, title = {A {Taxonomy} of {Property} {Measures} to {Unify} {Active} {Learning} and {Human}-centered {Approaches} to {Data} {Labeling}}, volume = {11}, copyright = {Open Access}, issn = {2160-6455}, url = {https://dl.acm.org/doi/abs/10.1145/3439333}, doi = {10/gnt2wf}, number = {3-4}, journal = {ACM Transactions on Interactive Intelligent Systems (TiiS)}, author = {Bernard, Jürgen and Hutter, Marco and Sedlmair, Michael and Zeppelzauer, Matthias and Munzner, Tamara}, year = {2021}, note = {Projekt: BigDataAnalytics Projekt: I3D Projekt: PlantAI}, keywords = {2020, Center for Artificial Intelligence, Department Medien und Digitale Technologien, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {1--42}, } @inproceedings{blumenstein_bringing_2017, title = {Bringing {Your} {Own} {Device} into {Multi}-device {Ecologies} - {A} {Technical} {Concept}}, url = {http://mc.fhstp.ac.at/sites/default/files/publications/1040_Blumenstein.pdf}, doi = {10/ghppx8}, abstract = {Almost every visitor brings their own mobile device (e.g., smartphone or tablet) to the museum. Although, many museums include interactive exhibits (e.g., multi-touch tables), the visitors’ own devices are rarely used as part of a device ecology. Currently, there is no suitable infrastructure to seamlessly link different devices in museums. Our approach is to integrate the visitor’s own device in a multi-device ecology (MDE) in the museum to enhance the visitor’s exhibition experience. Thus, we present a technical concept to set up such MDEs integrating the well-established TUIO framework for multi-touch interaction on and between devices.}, booktitle = {Proceedings of the 2017 {ACM} {International} {Conference} on {Interactive} {Surfaces} and {Spaces}}, publisher = {ACM}, author = {Blumenstein, Kerstin and Kaltenbrunner, Martin and Seidl, Markus and Breban, Laura and Thür, Niklas and Aigner, Wolfgang}, month = oct, year = {2017}, note = {Projekt: MEETeUX}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Poster, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-kblumenstein, best-lbseidl, peer-reviewed}, pages = {306--311}, } @inproceedings{gschwandtner_timecleanser_2014, title = {{TimeCleanser}: {A} {Visual} {Analytics} {Approach} for {Data} {Cleansing} of {Time}-{Oriented} {Data}}, isbn = {978-1-4503-2769-5}, doi = {10/ghtw5j}, abstract = {{\textless}p{\textgreater}Poor data quality leads to unreliable results of any kind of data processing and has profound economic impact. 
Although there are tools to help users with the task of data cleansing, support for dealing with the specifics of time-oriented data is rather poor. However, the time dimension has very specific characteristics which introduce quality problems, that are different from other kinds of data. We present TimeCleanser, an interactive Visual Analytics system to support the task of data cleansing of time-oriented data. In order to help the user to deal with these special characteristics and quality problems, TimeCleanser combines semi-automatic quality checks, visualizations, and directly editable data tables. The evaluation of the TimeCleanser system within a focus group (two target users, one developer, and two Human Computer Interaction experts) shows that (a) our proposed method is suited to detect hidden quality problems of time-oriented data and (b) that it facilitates the complex task of data cleansing.{\textless}/p{\textgreater}}, booktitle = {14th {International} {Conference} on {Knowledge} {Technologies} and {Data}-driven {Business} (i-{KNOW} 2014)}, publisher = {ACM Press}, author = {Gschwandtner, Theresia and Aigner, Wolfgang and Miksch, Silvia and Gärtner, Johannes and Kriglstein, Simone and Pohl, Margit and Suchy, Nikolaus}, editor = {Lindstaedt, Stefanie and Granitzer, Michael and Sack, Harald}, year = {2014}, note = {Projekt: KAVA-Time}, keywords = {Creative Industries, Department Technologie, Design Study, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Time-Oriented Data, Visual analytics, best, data quality, peer-reviewed, visualization}, pages = {1--8}, } @article{bogl_cycle_2017, title = {Cycle {Plot} {Revisited}: {Multivariate} {Outlier} {Detection} {Using} a {Distance}-{Based} {Abstraction}}, volume = {36}, url = {http://publik.tuwien.ac.at/files/publik_260233.pdf}, doi = {10/gbnsx6}, abstract = {The cycle plot is an established and effective visualization technique for identifying and 
comprehending patterns in periodic time series, like trends and seasonal cycles. It also allows to visually identify and contextualize extreme values and outliers from a different perspective. Unfortunately, it is limited to univariate data. For multivariate time series, patterns that exist across several dimensions are much harder or impossible to explore. We propose a modified cycle plot using a distance-based abstraction (Mahalanobis distance) to reduce multiple dimensions to one overview dimension and retain a representation similar to the original. Utilizing this distance-based cycle plot in an interactive exploration environment, we enhance the Visual Analytics capacity of cycle plots for multivariate outlier detection. To enable interactive exploration and interpretation of outliers, we employ coordinated multiple views that juxtapose a distance-based cycle plot with Cleveland’s original cycle plots of the underlying dimensions. With our approach it is possible to judge the outlyingness regarding the seasonal cycle in multivariate periodic time series.}, journal = {Computer Graphics Forum}, author = {Bögl, Markus and Filzmoser, Peter and Gschwandtner, Theresia and Lammarsch, Tim and Leite, Roger A. and Miksch, Silvia and Rind, Alexander}, year = {2017}, note = {Projekt: KAVA-Time}, keywords = {2017, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Time-Oriented Data, Visual analytics, best, multivariate data, outlier detection, robust statistics, seasonal time series, time series}, pages = {227--238}, } @inproceedings{rind_user_2014, series = {{BELIV} '14}, title = {User {Tasks} for {Evaluation}: {Untangling} the {Terminology} {Throughout} {Visualization} {Design} and {Development}}, isbn = {978-1-4503-3209-5}, url = {http://publik.tuwien.ac.at/files/PubDat_232654.pdf}, doi = {10/f3szvm}, abstract = {User tasks play a pivotal role in evaluation throughout visualization design and development. 
However, the term 'task' is used ambiguously within the visualization community. In this position paper, we critically analyze the relevant literature and systematically compare definitions for 'task' and the usage of related terminology. In doing so, we identify a three-dimensional conceptual space of user tasks in visualization. Using these dimensions, visualization researchers can better formulate their contributions which helps advance visualization as a whole.}, booktitle = {Proceedings of the {Fifth} {Workshop} on {Beyond} {Time} and {Errors}: {Novel} {Evaluation} {Methods} for {Visualization}}, publisher = {ACM}, author = {Rind, Alexander and Aigner, Wolfgang and Wagner, Markus and Miksch, Silvia and Lammarsch, Tim}, editor = {Lam, Heidi and Isenberg, Petra and Isenberg, Tobias and Sedlmair, Michael}, year = {2014}, note = {Projekt: KAVA-Time}, keywords = {2014, Creative Industries, Department Technologie, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Reflections, best, interaction, peer-reviewed, reflections, task taxonomy, taxonomy of tasks, terminology, visualization}, pages = {9--15}, } @article{seidl_automated_2015, title = {Automated classification of petroglyphs}, issn = {2212-0548}, url = {http://www.sciencedirect.com/science/article/pii/S2212054815000090}, doi = {10/gd6csz}, abstract = {Abstract In this paper, we address the problem of automated petroglyph classification in a large real-world dataset. The dataset which contains more than 1000 petroglyphs is based on tracings from the \{UNESCO\} world heritage site Valcamonica, Italy and is expert-classified into two parallel typologies. For automated classifications of petroglyphs we utilise a combination of existing shape descriptors and a recently developed graph-based petroglyph descriptor. We achieve good classification results. 
We evaluate how the results can be incorporated into the daily work of archaeologists. We demonstrate that our tools can clearly enhance the process of manual classification.}, number = {0}, journal = {Digital Applications in Archaeology and Cultural Heritage}, author = {Seidl, Markus and Wieser, Ewald and Alexander, Craig}, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, peer-reviewed, visual computing}, pages = {--}, } @inproceedings{seidl_automated_2012, address = {New York, NY, USA}, series = {{ICVGIP} '12}, title = {Automated petroglyph image segmentation with interactive classifier fusion}, isbn = {978-1-4503-1660-6}, url = {http://doi.acm.org/10.1145/2425333.2425399}, doi = {10/gh372j}, booktitle = {Proceedings of the {Eighth} {Indian} {Conference} on {Computer} {Vision}, {Graphics} and {Image} {Processing}}, publisher = {ACM}, author = {Seidl, Markus and Breiteneder, Christian}, year = {2012}, note = {Projekt: FORSCH08 Projekt: PITOTI 3D}, keywords = {Center for Artificial Intelligence, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Pattern recognition, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, best-lbseidl, experimental study, image features, image segmentation, peer-reviewed, petroglyphs, pixel classification, rock art}, pages = {66:1--66:8}, } @article{slijepcevic_explainable_2023, title = {Explainable {Machine} {Learning} in {Human} {Gait} {Analysis}: {A} {Study} on {Children} {With} {Cerebral} {Palsy}}, volume = {11}, copyright = {CC-BY-NC-ND}, issn = {2169-3536}, shorttitle = {Explainable {Machine} {Learning} in {Human} {Gait} {Analysis}}, url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10164110}, doi = {10.1109/ACCESS.2023.3289986}, abstract = {This work investigates the effectiveness of various machine learning (ML) methods in classifying human gait patterns associated with cerebral palsy (CP) and examines the clinical relevance of the learned features using explainability approaches. We trained different ML models, including convolutional neural networks, self-normalizing neural networks, random forests, and decision trees, and generated explanations for the trained models. For the deep neural networks, Grad-CAM explanations were aggregated on different levels to obtain explanations at the decision, class and model level. We investigate which subsets of 3D gait analysis data are particularly suitable for the classification of CP-related gait patterns. The results demonstrate the superiority of kinematic over ground reaction force data for this classification task and show that traditional ML approaches such as random forests and decision trees achieve better results and focus more on clinically relevant regions compared to deep neural networks. The best configuration, using sagittal knee and ankle angles with a random forest, achieved a classification accuracy of 93.4 \% over all four CP classes (crouch gait, apparent equinus, jump gait, and true equinus). 
Deep neural networks utilized not only clinically relevant features but also additional ones for their predictions, which may provide novel insights into the data and raise new research questions. Overall, the article provides insights into the application of ML in clinical practice and highlights the importance of explainability to promote trust and understanding of ML models.}, journal = {IEEE Access}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Unglaube, Fabian and Kranzl, Andreas and Breiteneder, Christian and Horsak, Brian}, year = {2023}, note = {Conference Name: IEEE Access}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Departement Gesundheit, Department Gesundheit, Department Medien und Digitale Technologien, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, SP CDHSI Motor Rehabilitation, Wiss. Beitrag, best, best-bhorsak, peer-reviewed}, pages = {65906--65923}, } @article{de_jesus_oliveira_auditory_2023, title = {Auditory feedback in tele-rehabilitation based on automated gait classification}, copyright = {CC-BY}, issn = {1617-4917}, url = {https://doi.org/10.1007/s00779-023-01723-2}, doi = {10.1007/s00779-023-01723-2}, abstract = {In this paper, we describe a proof-of-concept for the implementation of a wearable auditory biofeedback system based on a sensor-instrumented insole. Such a system aims to assist everyday users with static and dynamic exercises for gait rehabilitation interventions by providing auditory feedback based on plantar pressure distribution and automated classification of functional gait disorders. 
As ground reaction force (GRF) data are frequently used in clinical practice to quantitatively describe human motion and have been successfully used for the classification of gait patterns into clinically relevant classes, a feed-forward neural network was implemented on the firmware of the insoles to estimate the GRFs using pressure and acceleration data. The estimated GRFs approximated well the GRF measurements obtained from force plates. To distinguish between physiological gait and gait disorders, we trained and evaluated a support vector machine with labeled data from a publicly accessible dataset. The automated gait classification was then sonified for auditory feedback. The potential of the implemented auditory feedback for preventive and supportive applications in physical therapy was finally assessed with both expert and non-expert participants. A focus group revealed experts’ expectations for the proposed system, while a usability study assessed the clarity of the auditory feedback to everyday users. The evaluation shows promising results regarding the usefulness of our system in this application area.}, language = {en}, urldate = {2023-05-16}, journal = {Personal and Ubiquitous Computing}, author = {de Jesus Oliveira, Victor Adriel and Slijepčević, Djordje and Dumphart, Bernhard and Ferstl, Stefan and Reis, Joschua and Raberger, Anna-Maria and Heller, Mario and Horsak, Brian and Iber, Michael}, month = may, year = {2023}, keywords = {Biofeedback, Biomechanics, Center for Digital Health and Social Innovation, Departement Gesundheit, Departement Medien und Digitale Technologien, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Phaidra, SP CDHSI Motor Rehabilitation, Wiss. 
Beitrag, best, best-bhorsak, best-lbiber, peer-reviewed}, } @article{horst_modeling_2023, title = {Modeling biological individuality using machine learning: {A} study on human gait}, volume = {21}, copyright = {CC-BY-NC-ND}, issn = {2001-0370}, shorttitle = {Modeling biological individuality using machine learning}, doi = {10.1016/j.csbj.2023.06.009}, abstract = {Human gait is a complex and unique biological process that can offer valuable insights into an individual's health and well-being. In this work, we leverage a machine learning-based approach to model individual gait signatures and identify factors contributing to inter-individual variability in gait patterns. We provide a comprehensive analysis of gait individuality by (1) demonstrating the uniqueness of gait signatures in a large-scale dataset and (2) highlighting the gait characteristics that are most distinctive to each individual. We utilized the data from three publicly available datasets comprising 5368 bilateral ground reaction force recordings during level overground walking from 671 distinct healthy individuals. Our results show that individuals can be identified with a prediction accuracy of 99.3\% by using the bilateral signals of all three ground reaction force components, with only 10 out of 1342 recordings in our test data being misclassified. This indicates that the combination of bilateral ground reaction force signals with all three components provides a more comprehensive and accurate representation of an individual's gait signature. The highest accuracy was achieved by (linear) Support Vector Machines (99.3\%), followed by Random Forests (98.7\%), Convolutional Neural Networks (95.8\%), and Decision Trees (82.8\%). 
The proposed approach provides a powerful tool to better understand biological individuality and has potential applications in personalized healthcare, clinical diagnosis, and therapeutic interventions.}, language = {eng}, journal = {Computational and Structural Biotechnology Journal}, author = {Horst, Fabian and Slijepcevic, Djordje and Simak, Marvin and Horsak, Brian and Schöllhorn, Wolfgang Immanuel and Zeppelzauer, Matthias}, year = {2023}, pmid = {37416082}, pmcid = {PMC10319823}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Departement Gesundheit, Departement Medien und Digitale Technologien, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Musculoskeletal Simulations, Phaidra, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Wiss. 
Beitrag, best, best-bhorsak, peer-reviewed}, pages = {3414--3423}, } @inproceedings{slijepcevic_ground_2017, address = {Trondheim, Norway}, title = {Ground reaction force measurements for gait classification tasks: {Effects} of different {PCA}-based representations}, volume = {57}, url = {http://www.gaitposture.com/article/S0966-6362(17)30712-9/pdf}, doi = {10.1016/j.gaitpost.2017}, booktitle = {Gait \& {Posture} {Supplement}}, author = {Slijepcevic, Djordje and Horsak, Brian and Schwab, Caterine and Raberger, Anna-Maria and Schüller, Michael and Baca, Arnold and Breiteneder, Christian and Zeppelzauer, Matthias}, year = {2017}, note = {Projekt: IntelliGait Projekt: DHLab}, keywords = {2017, Biofeedback, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Creative Industries, DHLab, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss.
Beitrag, best, best-bhorsak, peer-reviewed, project\_carma, project\_intelligait, ⚠️ Invalid DOI}, pages = {4--5}, } @article{horsak_gaitrec_2020, title = {{GaitRec}, a large-scale ground reaction force dataset of healthy and impaired gait}, volume = {7:143}, copyright = {CC BY}, url = {https://www.nature.com/articles/s41597-020-0481-z}, doi = {10/gh372d}, number = {1}, journal = {Scientific Data}, author = {Horsak, Brian and Slijepcevic, Djordje and Raberger, Anna-Maria and Schwab, Caterine and Worisch, Marianne and Zeppelzauer, Matthias}, year = {2020}, note = {Projekt: I3D Projekt: IntelliGait Projekt: DHLab}, keywords = {2019, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Green OA, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Open Access, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, best-mzeppelzauer, peer-reviewed, submitted}, pages = {1--8}, } @inproceedings{schwab_intelligait_2018, address = {Hamburg, Deutschland}, title = {{IntelliGait}: {Automatische} {Gangmusteranalyse} für die robuste {Erkennung} von {Gangstörungen}}, booktitle = {Tagungsband des 2ten {GAMMA} {Kongress} ({Gesellschaft} für die {Analyse} {Menschlicher} {Motorik} in ihrer klinischen {Anwendung})}, author = {Schwab, Caterine and Slijepcevic, Djordje and Zeppelzauer, Matthias and Raberger, Anna-Maria and Dumphart, Bernhard and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Creative Industries, DHLab, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Pattern recognition, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss.
Beitrag, best, best-bhorsak, peer-reviewed, ⛔ No DOI found}, } @inproceedings{slijepcevic_towards_2018, address = {Prague, Czech Republic}, title = {Towards an optimal combination of input signals and derived representations for gait classification based on ground reaction force measurements.}, volume = {65}, doi = {10/gh38wn}, booktitle = {Gait \& {Posture} {Supplement}}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Schwab, Caterine and Raberger, Anna-Maria and Dumphart, B and Baca, Arnold and Breiteneder, Christian and Horsak, Brian}, year = {2018}, note = {Projekt: IntelliGait Projekt: CARMA Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Classification, DHLab, FH SP Data Analytics \& Visual Computing, Feature Representations, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Gait Recognition, Human Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, PCA, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, SVM, Wiss. 
Beitrag, best, best-bhorsak, pattern recognition, peer-reviewed}, } @inproceedings{slijepcevic_usefullness_2019, address = {Vienna, Austria}, title = {On the usefullness of statistical parameter mapping for feature selection in automated gait classification}, booktitle = {Book of {Abstracts} of the 25th {Conference} of the {European} {Society} of {Biomechanics} ({ESB})}, author = {Slijepcevic, Djordje and Raberger, Anna-Maria and Zeppelzauer, Matthias and Dumphart, Bernhard and Breiteneder, Christian and Horsak, Brian}, year = {2019}, note = {Projekt: IntelliGait Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, DHLab, Digital Health, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Vortrag, Wiss. Beitrag, best, peer-reviewed, ⛔ No DOI found}, pages = {1}, } @article{stoiber_netflower_2019, title = {netflower: {Dynamic} {Network} {Visualization} for {Data} {Journalists}}, volume = {38}, url = {https://phaidra.fhstp.ac.at/download/o:4838}, doi = {10/ghm4jz}, abstract = {Abstract Journalists need visual interfaces that cater to the exploratory nature of their investigative activities. In this paper, we report on a four-year design study with data journalists. The main result is netflower, a visual exploration tool that supports journalists in investigating quantitative flows in dynamic network data for story-finding. The visual metaphor is based on Sankey diagrams and has been extended to make it capable of processing large amounts of input data as well as network change over time. 
We followed a structured, iterative design process including requirement analysis and multiple design and prototyping iterations in close cooperation with journalists. To validate our concept and prototype, a workshop series and two diary studies were conducted with journalists. Our findings indicate that the prototype can be picked up quickly by journalists and valuable insights can be achieved in a few hours. The prototype can be accessed at: http://netflower.fhstp.ac.at/}, journal = {Computer Graphics Forum (EuroVis '19)}, author = {Stoiber, Christina and Rind, Alexander and Grassinger, Florian and Gutounig, Robert and Goldgruber, Eva and Sedlmair, Michael and Emrich, Stefan and Aigner, Wolfgang}, month = jun, year = {2019}, note = {Projekt: VALID Projekt: VisOnFire}, keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Human-Computer Interaction, Institut für Creative Media Technologies, Visual Computing, Vortrag, Wiss. Beitrag, best, best-cniederer, best-cstoiber, best-fgrassinger, best-lbaigner, peer-reviewed}, } @inproceedings{rind_pubviz_2017, title = {{PubViz}: {Lightweight} {Visual} {Presentation} of {Publication} {Data}}, url = {https://phaidra.fhstp.ac.at/download/o:4834}, doi = {10/cwdc}, abstract = {Publications play a central role in presenting the outcome of scientific research but are typically presented as textual lists, whereas related work in visualization of publication focuses on exploration – not presentation. To bridge this gap, we conducted a design study of an interactive visual representation of publication data in a BibTeX file. This paper reports our domain and problem characterization as well as our visualization design decisions in light of our user-centered design process including interviews, two user studies with a paper prototype and a d3.js prototype, and practical application at our group’s website.}, booktitle = {Proc. {Eurographics} {Conf}. 
{Visualization} ({EuroVis}) – {Short} {Paper}}, publisher = {EuroGraphics}, author = {Rind, Alexander and Haberson, Andrea and Blumenstein, Kerstin and Niederer, Christina and Wagner, Markus and Aigner, Wolfgang}, editor = {Kozlíková, Barbora and Schreck, Tobias and Wischgoll, Thomas}, month = jun, year = {2017}, note = {Projekt: VisOnFire Projekt: KAVA-Time Projekt: VALID}, keywords = {Design Study, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, User-Centered Design, Vortrag, Wiss. Beitrag, best, best-arind, bibliography, interactive, peer-reviewed, prototype, publication list, visual presentation, visualization}, pages = {169--173}, } @article{slijepcevic_kanonymity_2021, title = {k‑{Anonymity} in {Practice}: {How} {Generalisation} and {Suppression} {Affect} {Machine} {Learning} {Classifiers}}, volume = {111}, copyright = {Open Access}, issn = {0167-4048}, url = {https://doi.org/10.1016/j.cose.2021.102488}, doi = {10.1016/j.cose.2021.102488}, journal = {Computers \& Security}, author = {Slijepčević, Djordje and Henzl, Maximilian and Klausner, Lukas Daniel and Dam, Tobias and Kieseberg, Peter and Zeppelzauer, Matthias}, month = oct, year = {2021}, keywords = {Center for Artificial Intelligence, FH SP Cyber Security, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für IT Sicherheitsforschung, SP IT Sec Applied Security \& Data Science, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {19}, } @inproceedings{zeppelzauer_novel_2015, address = {Boston, MA, USA}, title = {A {Novel} {Annotation} {Tool} for {Complex} {Petroglyph} {Shapes}}, abstract = {We present a novel semi-automatic annotation tool for the construction of large real-world shape datasets. The tool enables the collaborative semi-automatic segmentation and annotation of shapes. 
Shapes are stored together with their annotations in a database and can be retrieved efficiently to construct custom shape datasets. The resulting datasets should stimulate further research in the domain of shape recognition and matching.}, booktitle = {The {Future} of {Datasets} in {Vision} {Workshop} (in conjunction with {CVPR} 2015)}, author = {Zeppelzauer, Matthias and Wieser, Ewald and Seidl, Markus}, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Vortrag, Wiss. Beitrag, best, peer-reviewed, poster, ⛔ No DOI found}, } @article{niederer_taco_2018, title = {{TACO}: {Visualizing} {Changes} in {Tables} {Over} {Time}}, volume = {24}, doi = {10/ghppzq}, abstract = {Multivariate, tabular data is one of the most common data structures used in many different domains. Over time, tables can undergo changes in both structure and content, which results in multiple versions of the same table. A challenging task when working with such derived tables is to understand what exactly has changed between versions in terms of additions/deletions, reorder, merge/split, and content changes. For textual data, a variety of commonplace "diff" tools exist that support the task of investigating changes between revisions of a text. Although there are some comparison tools which assist users in inspecting differences between multiple table instances, the resulting visualizations are often difficult to interpret or do not scale to large tables with thousands of rows and columns.
To address these challenges, we developed TACO, an interactive comparison tool that visualizes effectively the differences between multiple tables at various levels of detail. With TACO we show (1) the aggregated differences between multiple table versions over time, (2) the aggregated changes between two selected table versions, and (3) detailed changes between the selection. To demonstrate the effectiveness of our approach, we show its application by means of two usage scenarios.}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics (InfoVis ’17)}, author = {Niederer, Christina and Stitz, Holger and Hourieh, Reem and Grassinger, Florian and Aigner, Wolfgang and Streit, Marc}, year = {2018}, note = {Projekt: VisOnFire}, keywords = {Center for Digital Health Innovation, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Wiss. Beitrag, best, best-cniederer, best-cstoiber, best-lbaigner, peer-reviewed}, pages = {677--686}, } @article{pohl_life_2018, title = {Life at risk - an empirical investigation of the design of workplaces in control rooms}, volume = {13}, issn = {1646-3692}, url = {http://www.iadisportal.org/ijcsis/papers/2018130105.pdf}, doi = {10/gh38bq}, abstract = {Control rooms for supervising traffic or emergency situations are increasingly introduced in companies and other governmental organisations. Employees of safety-critical workplaces have to deal with a large amount of problems every day: Quick decision-making, coping with an overload of information and stress are the most important. Designing appropriate workplaces for employees in such organisations is essential. The following paper describes the results of an investigation in two companies with control rooms. Results indicate that the systems deployed in such control rooms support the operators efficiently. Stress is a more important influencing factor. 
We could not find any significant gender effects concerning the design of workplaces in control rooms.}, journal = {IADIS International Journal on Computer Science and Information Systems}, author = {Pohl, Margit and Weissenböck, Elisabeth and Judmaier, Peter and Viertelmayr, Andrea and Rottermanner, Gernot}, year = {2018}, note = {Projekt: GenSiSys}, keywords = {Center for Digital Health Innovation, Forschungsgruppe Media Computing, Human-Computer Interaction, Institut für Creative Media Technologies, Wiss. Beitrag, best, peer-reviewed}, } @inproceedings{zielinski_persistence_2019, address = {Macao, China}, title = {Persistence {Bag}-of-{Words} for {Topological} {Data} {Analysis}}, url = {http://arxiv.org/abs/1802.04852}, doi = {10/ghpp7z}, urldate = {2018-10-10}, booktitle = {Proceedings of the {International} {Joint} {Conference} on {Artificial} {Intelligence} 2019}, author = {Zielinski, Bartosz and Lipinski, Michal and Juda, Mateusz and Zeppelzauer, Matthias and Dlotko, Pawel}, year = {2019}, note = {arXiv: 1802.04852}, keywords = {Artificial Intelligence, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Mathematics - Algebraic Topology, Media Computing Group, Statistics, Vortrag, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {6}, } @article{zeppelzauer_establishing_2015, title = {Establishing the fundamentals for an elephant early warning and monitoring system}, volume = {8:409}, url = {http://www.biomedcentral.com/content/pdf/s13104-015-1370-y.pdf}, doi = {10/gb3pth}, abstract = {The decline of habitat for elephants due to expanding human activity is a serious conservation problem. This has continuously escalated the human–elephant conflict in Africa and Asia. Elephants make extensive use of powerful infrasonic calls (rumbles) that travel distances of up to several kilometers.
This makes elephants well-suited for acoustic monitoring because it enables detecting elephants even if they are out of sight. In sight, their distinct visual appearance makes them a good candidate for visual monitoring. We provide an integrated overview of our interdisciplinary project that established the scientific fundamentals for a future early warning and monitoring system for humans who regularly experience serious conflict with elephants. We first draw the big picture of an early warning and monitoring system, then review the developed solutions for automatic acoustic and visual detection, discuss specific challenges and present open future work necessary to build a robust and reliable early warning and monitoring system that is able to operate in situ. We present a method for the automated detection of elephant rumbles that is robust to the diverse noise sources present in situ. We evaluated the method on an extensive set of audio data recorded under natural field conditions. Results show that the proposed method outperforms existing approaches and accurately detects elephant rumbles. Our visual detection method shows that tracking elephants in wildlife videos (of different sizes and postures) is feasible and particularly robust at near distances. From our project results we draw a number of conclusions that are discussed and summarized. We clearly identified the most critical challenges and necessary improvements of the proposed detection methods and conclude that our findings have the potential to form the basis for a future automated early warning system for elephants. We discuss challenges that need to be solved and summarize open topics in the context of a future early warning and monitoring system. 
We conclude that a long-term evaluation of the presented methods in situ using real-time prototypes is the most important next step to transfer the developed methods into practical implementation.}, journal = {BMC Research Notes}, author = {Zeppelzauer, Matthias and Stöger, A.}, month = sep, year = {2015}, keywords = {2015, Acoustic monitoring, Audio Analysis, Automatic call detection, Call classification, Center for Artificial Intelligence, Classification, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, Elephants, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Loxodonta africana, Machine Learning, Media Computing Group, Noise Reduction, Object Detection, Pattern recognition, Publikationstyp Schriftpublikation, Signal Enhancement, Video Analysis, Visual monitoring, Visual tracking, Vocalizations, Wiss. Beitrag, best, human–elephant conflict, peer-reviewed}, pages = {15}, } @article{wieser_study_2016, title = {A {Study} on {Skeletonization} of {Complex} {Petroglyph} {Shapes}}, issn = {1573-7721}, url = {http://link.springer.com/article/10.1007/s11042-016-3395-1}, doi = {10/ghpp2r}, abstract = {In this paper, we present a study on skeletonization of real-world shape data. The data stem from the cultural heritage domain and represent contact tracings of prehistoric petroglyphs. Automated analysis can support the work of archeologists on the investigation and categorization of petroglyphs. One strategy to describe petroglyph shapes is skeleton-based. The skeletonization of petroglyphs is challenging since their shapes are complex, contain numerous holes and are often incomplete or disconnected. Thus they pose an interesting testbed for skeletonization. We present a large real-world dataset consisting of more than 1100 petroglyph shapes. 
We investigate their properties and requirements for the purpose of skeletonization, and evaluate the applicability of state-of-the-art skeletonization and skeleton pruning algorithms on this type of data. Experiments show that pre-processing of the shapes is crucial to obtain robust skeletons. We propose an adaptive pre-processing method for petroglyph shapes and improve several state-of-the-art skeletonization algorithms to make them suitable for the complex material. Evaluations on our dataset show that 79.8 \% of all shapes can be improved by the proposed pre-processing techniques and are thus better suited for subsequent skeletonization. Furthermore we observe that a thinning of the shapes produces robust skeletons for 83.5 \% of our shapes and outperforms more sophisticated skeletonization techniques.}, journal = {Multimedia Tools and Applications (Springer)}, author = {Wieser, Ewald and Seidl, Markus and Zeppelzauer, Matthias}, year = {2016}, note = {Projekt: PITOTI 3D}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Real-world shape data, Shape pre-processing, Skeletionization, Skeletonization, Wiss. Beitrag, best, peer-reviewed, petroglyphs}, pages = {1--19}, } @article{zeppelzauer_study_2018, title = {A {Study} on {Topological} {Descriptors} for the {Analysis} of {3D} {Surface} {Texture}}, volume = {167}, issn = {1077-3142}, url = {https://arxiv.org/pdf/1710.10662}, doi = {10/ghpp2h}, abstract = {Methods from computational topology are becoming more and more popular in computer vision and have shown to improve the state-of-the-art in several tasks. 
In this paper, we investigate the applicability of topological descriptors in the context of 3D surface analysis for the classification of different surface textures. We present a comprehensive study on topological descriptors, investigate their robustness and expressiveness and compare them with state-of-the-art methods. Results show that class-specific information is reflected well in topological descriptors. The investigated descriptors can directly compete with non-topological descriptors and capture orthogonal information. Moreover they improve the state-of-the-art in combination with non-topological descriptors.}, journal = {Journal on Computer Vision and Image Understanding (CVIU)}, author = {Zeppelzauer, Matthias and Zielinski, Bartosz and Juda, Mateusz and Seidl, Markus}, year = {2018}, note = {Projekt: PITOTI 3D}, keywords = {3D surface classification, Center for Artificial Intelligence, Computer Vision, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Surface texture analysis, Visual Computing, Wiss. Beitrag, best, best-lbseidl, best-mzeppelzauer, peer-reviewed, persistence diagram, persistence image, persistent homology, surface representation, surface topology analysis}, pages = {74 -- 88}, } @article{zeppelzauer_multimodal_2016, title = {Multimodal classification of events in social media}, issn = {0262-8856}, url = {https://arxiv.org/pdf/1601.00599}, doi = {10/ghpp2q}, abstract = {Abstract A large amount of social media hosted on platforms like Flickr and Instagram is related to social events. The task of social event classification refers to the distinction of event and non-event-related contents as well as the classification of event types (e.g. sports events and concerts). In this paper, we provide an extensive study of textual, visual, as well as multimodal representations for social event classification. 
We investigate the strengths and weaknesses of the modalities and study the synergy effects between the modalities. Experimental results obtained with our multimodal representation outperform state-of-the-art methods and provide a new baseline for future research.}, journal = {Image and Vision Computing}, author = {Zeppelzauer, Matthias and Schopfhauser, Daniel}, year = {2016}, keywords = {2016, Center for Artificial Intelligence, Computer Vision, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Multimodal retrieval, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, } @article{bernard_jurgen_comparing_2017, title = {Comparing {Visual}-{Interactive} {Labeling} with {Active} {Learning}: {An} {Experimental} {Study}}, volume = {24}, issn = {1077-2626}, url = {http://eprints.cs.univie.ac.at/5257/1/bernard2017labeling.pdf}, doi = {10/gcqb3r}, number = {1}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, author = {Bernard, Jürgen and Hutter, Marco and Zeppelzauer, Matthias and Fellner, Dieter and Sedlmair, Michael}, year = {2017}, keywords = {2017, Center for Artificial Intelligence, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, } @article{seidl_gradual_2011, title = {Gradual transition detection in historic film material —a systematic study}, volume = {4}, issn = {1556-4673}, url = {http://doi.acm.org/10.1145/2069276.2069279}, doi = {10/fzsqr8}, number = {3}, journal = {J. Comput. Cult.
Herit.}, author = {Seidl, Markus and Zeppelzauer, Matthias and Mitrović, Dalibor and Breiteneder, Christian}, year = {2011}, keywords = {Center for Artificial Intelligence, Computer Vision, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-lbseidl, peer-reviewed}, pages = {10:1--10:18}, } @inproceedings{federico_role_2017, address = {Paolo Federico and Markus Wagner equally contributed to this paper and are both to be regarded as first authors.}, title = {The {Role} of {Explicit} {Knowledge}: {A} {Conceptual} {Model} of {Knowledge}-{Assisted} {Visual} {Analytics}}, url = {https://publik.tuwien.ac.at/files/publik_261674.pdf}, doi = {10/ghppzr}, abstract = {Visual Analytics (VA) aims to combine the strengths of humans and computers for effective data analysis. In this endeavor, humans’ tacit knowledge from prior experience is an important asset that can be leveraged by both human and computer to improve the analytic process. While VA environments are starting to include features to formalize, store, and utilize such knowledge, the mechanisms and degree in which these environments integrate explicit knowledge varies widely. Additionally, this important class of VA environments has never been elaborated on by existing work on VA theory. This paper proposes a conceptual model of Knowledge-assisted VA conceptually grounded on the visualization model by van Wijk. We apply the model to describe various examples of knowledge-assisted VA from the literature and elaborate on three of them in finer detail. Moreover, we illustrate the utilization of the model to compare different design alternatives and to evaluate existing approaches with respect to their use of knowledge. 
Finally, the model can inspire designers to generate novel VA environments using explicit knowledge effectively.}, booktitle = {{IEEE} {Conference} on {Visual} {Analytics} {Science} and {Technology} ({VAST})}, publisher = {IEEE}, author = {Federico, Paolo and Wagner, Markus and Rind, Alexander and Amor-Amorós, Albert and Miksch, Silvia and Aigner, Wolfgang}, year = {2017}, note = {Projekt: KAVA-Time}, keywords = {Center for Digital Health Innovation, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Visual analytics, Vortrag, Wiss. Beitrag, automated analysis, best, best-lbaigner, explicit knowledge, information visualization, peer-reviewed, tacit knowledge, theory and model}, pages = {92--103}, } @article{miksch_matter_2014, title = {A {Matter} of {Time}: {Applying} a {Data}-{Users}-{Tasks} {Design} {Triangle} to {Visual} {Analytics} of {Time}-{Oriented} {Data}}, volume = {38}, url = {http://www.ifs.tuwien.ac.at/~silvia/pub/publications/miksch_cag_design-triangle-2014.pdf}, doi = {10/f3szvk}, abstract = {Increasing amounts of data offer great opportunities to promote technological progress and business success. Visual Analytics (VA) aims at enabling the exploration and the understanding of large and complex data sets by intertwining interactive visualization, data analysis, human-computer interaction, as well as cognitive and perceptual science. We propose a design triangle, which considers three main aspects to ease the design: (1) the characteristics of the data, (2) the users, and (3) the users\’ tasks. Addressing the particular characteristics of time and time-oriented data focus the VA methods, but turns the design space into a more complex and challenging one. We demonstrate the applicability of the design triangle by three use cases tackling the time-oriented aspects explicitly. 
Our design triangle provides a high-level framework, which is simple and very effective for the design process as well as easily applicable for both, researchers and practitioners.}, journal = {Computers \& Graphics}, author = {Miksch, Silvia and Aigner, Wolfgang}, year = {2014}, note = {Projekt: KAVA-Time. Available online 16 November 2013: accepted manuscript (unformatted and unedited PDF): http://authors.elsevier.com/sd/article/S0097849313001817}, keywords = {Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Interactive Visualization, Publikationstyp Schriftpublikation, Time-Oriented Data, Visual Computing, Visual analytics, Wiss. Beitrag, best, best-lbaigner, interaction design, peer-reviewed, temporal data mining, visualization}, pages = {286--290}, } @article{rind_task_2016, title = {Task {Cube}: {A} {Three}-{Dimensional} {Conceptual} {Space} of {User} {Tasks} in {Visualization} {Design} and {Evaluation}}, volume = {15}, url = {https://publik.tuwien.ac.at/files/PubDat_247156.pdf}, doi = {10/f3szvq}, abstract = {User tasks play a pivotal role in visualization design and evaluation. However, the term ‘task’ is used ambiguously within the visualization community. In this article, we critically analyze the relevant literature and systematically compare definitions for ‘task’ and the usage of related terminology. In doing so, we identify a three-dimensional conceptual space of user tasks in visualization, referred to as task cube, and the more precise concepts ‘objective’ and ‘action’ for tasks. 
We illustrate the usage of the task cube’s dimensions in an objective-driven visualization process, in different scenarios of visualization design and evaluation, and for comparing categorizations of abstract tasks. Thus, visualization researchers can better formulate their contributions which helps advance visualization as a whole.}, number = {4}, journal = {Information Visualization}, author = {Rind, Alexander and Aigner, Wolfgang and Wagner, Markus and Miksch, Silvia and Lammarsch, Tim}, year = {2016}, note = {Projekt: KAVA-Time Projekt: VALID}, keywords = {Action, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Visual Computing, Wiss. Beitrag, best, best-arind, best-lbaigner, best-lbwagnerm, design guidelines, interaction, objective, peer-reviewed, task frameworks, task taxonomy, terminology, visualization theory}, pages = {288--300}, } @article{alsallakh_state---art_2015, title = {The {State}-of-the-{Art} of {Set} {Visualization}}, volume = {Early view}, issn = {1467-8659}, url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12722/abstract}, doi = {10/cwc5}, abstract = {Sets comprise a generic data model that has been used in a variety of data analysis problems. Such problems involve analysing and visualizing set relations between multiple sets defined over the same collection of elements. However, visualizing sets is a non-trivial problem due to the large number of possible relations between them. We provide a systematic overview of state-of-the-art techniques for visualizing different kinds of set relations. We classify these techniques into six main categories according to the visual representations they use and the tasks they support. We compare the categories to provide guidance for choosing an appropriate technique for a given problem. 
Finally, we identify challenges in this area that need further research and propose possible directions to address these challenges. Further resources on set visualization are available at http://www.setviz.net.}, language = {en}, urldate = {2016-01-12}, journal = {Computer Graphics Forum}, author = {Alsallakh, Bilal and Micallef, Luana and Aigner, Wolfgang and Hauser, Helwig and Miksch, Silvia and Rodgers, Peter}, year = {2015}, note = {Projekt: KAVA-Time}, keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Visual Computing, Wiss. Beitrag, best, best-lbaigner, peer-reviewed, visualization}, } @inproceedings{dumphart_is_2023, series = {{ESMAC} 2023 {Abstracts}}, title = {Is it time to re-think the appropriateness of autocorrelation for gait event detection? {Preliminary} results of an ongoing study}, volume = {106}, shorttitle = {Is it time to re-think the appropriateness of autocorrelation for gait event detection?}, url = {https://www.sciencedirect.com/science/article/pii/S0966636223009840}, doi = {10.1016/j.gaitpost.2023.07.064}, abstract = {Introduction Recent developments in machine learning (ML)-based gait event detection have demonstrated superior results in terms of robustness and temporal accuracy compared to heuristic approaches [1–3]. “Autocorrelation” (AC) is an implemented heuristic algorithm in the Vicon Nexus application, which identifies events based on a recurring pattern of a certain marker. Clinicians often rely on the use of AC or other heuristic approaches to identify initial contact (IC) and foot off (FO) events. However, no literature exists on the accuracy of AC for event detection. We have recently developed IntellEvent [4], a ML-based event detection algorithm which has superior accuracy compared to current state-of-the-art methods [1,5]. We aim to evaluate its applicability in daily clinical use compared to the AC method. 
Research question How large are the temporal differences of gait events detected by IntellEvent and the AC method? Methods The retrospective dataset for this study comprises 3DGA data. Patients were classified having either malrotation deformities (MD, n=20) or infantile cerebral palsy (ICP, n=18). IntellEvent was used to detect all IC and FO events using the left and right velocity of the heel, ankle, and toe trajectories. For the AC method a threshold of 20N was used to detect all IC and FO events on force plates. Subsequently, AC was used to detect all other events using the least square method of the x-axis trajectory (direction of motion). We validated IntellEvent by comparing its predictions to events solely identified with force plates. Afterwards, we calculated the differences between the remaining events of IntellEvent and the AC method. Results Mean absolute errors (95\% confidence interval) of IntellEvent compared to the ground truth for IC (MD: 2.4ms (2.1-2.8), ICP: 3.7ms (3.2–4.1)) and FO (MD: 7.5ms (6.8–8.1), ICP: 10.5 (9.5–11.4)) events showed a high temporal accuracy for both pathologies (Fig. 1). The comparison between IntellEvent and the AC events shows greater deviations for IC (MD: 10.1ms (9.6–0.6), ICP: 11.5ms (10.9–12.1)) and FO (MD: 9.3ms (8.8–9.7), ICP: 15.4ms (14.6–16.2)). Fig. 1. Temporal errors between IntellEvent vs. force plate data (blue) and IntellEvent vs. Autocorrelation (orange). Dotted grey lines indicate an error of 6.67ms (= 1 frames) and dotted red lines an error of 26.66ms (= 4 frames). Discussion IntellEvent achieves a very high temporal accuracy and robustness when compared to ground truth data. For the IC events, a high deviation between IntellEvent and AC was observed. Therefore, the results suggest that the AC can potentially introduce errors that may affect clinical decision making. 
Our preliminary results indicate that AC events need to be used with care when applied to pathological gait patterns and ML-based methods such as IntellEvent could improve the overall accuracy of gait event detection.}, urldate = {2023-09-18}, booktitle = {Gait \& {Posture}}, author = {Dumphart, Bernhard and Slijepcevic, Djordje and Kranzl, Andreas and Zeppelzauer, Matthias and Horsak, Brian}, month = sep, year = {2023}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, Department Medien und Digitale Technologien, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Vortrag, Wiss. Beitrag, best, best-bhorsak, peer-reviewed}, pages = {S50--S51}, } @inproceedings{slijepcevic_towards_2023, series = {{ESMAC} 2023 {Abstracts}}, title = {Towards personalized gait rehabilitation: {How} robustly can we identify personal gait signatures with machine learning?}, volume = {106}, copyright = {Copyright}, shorttitle = {Towards personalized gait rehabilitation}, url = {https://www.sciencedirect.com/science/article/pii/S0966636223012523}, doi = {10.1016/j.gaitpost.2023.07.232}, abstract = {Introduction Personalizing gait rehabilitation requires a comprehensive understanding of the unique gait characteristics of an individual patient, i.e., personal gait signature. Utilizing machine learning to classify individuals based on their gait can help to identify gait signatures [1]. This work exemplifies how an explainable artificial intelligence method can identify the most important input features that characterize the personal gait signature. 
Research question How robust can gait signatures be identified with machine learning and how sensitive are these signatures with respect to the amount of training data per person? Methods We utilized subsets of the AIST Gait Database 2019 [2], the GaitRec dataset [3], and the Gutenberg Gait Database [4] containing bilateral ground reaction forces (GRFs) during level walking at a self-selected speed. Eight GRF samples from each of 2,092 individuals (1,410/680 male/female, 809/1,283 health control/gait disorder, 1,355/737 shod/barefoot) were used for a gait-based person classification with a (linear) support vector machine (SVM). Two randomly selected samples from each individual served as test data. Gait signatures were identified using relevance scores obtained with layer-wise relevance propagation [5]. To assess the robustness of the identified gait signatures, we compared the relevance scores using Pearson’s correlation coefficient between step-wise reduced training data, from k=6 to k=1 training samples per individual. Results For the baseline setup (k=6), the SVM achieved a test classification accuracy of 99.1\% with 36 out of 4184 test samples being misclassified. The results for the setups with reduced training samples are visualized in Fig. 1. Fig. 1: Overview of the experimental results. Discussion A reduction of training samples per individual causes a decrease in classification accuracy (e.g., by 17.7\% in the case of one training sample per individual). The results show that at least five training samples per individual are necessary to achieve a classification accuracy of approximately 99\% for over 2,000 individuals. A similar effect is observed for gait signatures, which also show a slight degradation in robustness as the number of training samples decreases. In some cases, a model trained with less data per individual learns a different gait signature than a model trained with more data. In the test sample with the lowest correlation (see Fig. 
1E), we observe a significant deviation in relevance for some input features. However, only 114 test samples (2.7\%) are below a moderate correlation of r=0.4 [6], indicating that gait signatures are quite robust, even when using one training sample per individual. This is supported by a strong median correlation of r=0.71 [6] (and the highest correlation of r=0.96) between the gait signatures. As automatically identified gait signatures seem to be robust, this approach has the potential to serve as a basis for tailoring interventions to each patient’s specific needs.}, urldate = {2023-09-21}, booktitle = {Gait \& {Posture}}, author = {Slijepcevic, Djordje and Horst, Fabian and Simak, Marvin and Schöllhorn, Wolfgang Immanuel and Zeppelzauer, Matthias and Horsak, Brian}, month = sep, year = {2023}, keywords = {Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Department Medien und Digitale Technologien, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, SP CDHSI Motor Rehabilitation, Vortrag, Wiss. Beitrag, best, best-bhorsak, peer-reviewed}, pages = {S192--S193}, } @inproceedings{rind_trustworthy_2022, title = {Trustworthy {Visual} {Analytics} in {Clinical} {Gait} {Analysis}: {A} {Case} {Study} for {Patients} with {Cerebral} {Palsy}}, isbn = {978-1-66549-356-7}, url = {https://arxiv.org/abs/2208.05232}, doi = {10.1109/TREX57753.2022.00006}, abstract = {Three-dimensional clinical gait analysis is essential for selecting optimal treatment interventions for patients with cerebral palsy (CP), but generates a large amount of time series data. For the automated analysis of these data, machine learning approaches yield promising results. However, due to their black-box nature, such approaches are often mistrusted by clinicians. 
We propose gaitXplorer, a visual analytics approach for the classification of CP-related gait patterns that integrates Grad-CAM, a well-established explainable artificial intelligence algorithm, for explanations of machine learning classifications. Regions of high relevance for classification are highlighted in the interactive visual interface. The approach is evaluated in a case study with two clinical gait experts. They inspected the explanations for a sample of eight patients using the visual interface and expressed which relevance scores they found trustworthy and which they found suspicious. Overall, the clinicians gave positive feedback on the approach as it allowed them a better understanding of which regions in the data were relevant for the classification.}, booktitle = {Proc. 2022 {IEEE} {Workshop} on {TRust} and {EXpertise} in {Visual} {Analytics} ({TREX})}, publisher = {IEEE}, author = {Rind, Alexander and Slijepcevic, Djordje and Zeppelzauer, Matthias and Unglaube, Fabian and Kranzl, Andreas and Horsak, Brian}, year = {2022}, note = {Projekt: SoniVis Projekt: ReMoCap-Lab Projekt: I3D}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Data Science, Departement Medien und Digitale Technologien, Department Gesundheit, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Human-Computer Interaction, Institut für Creative Media Technologies, Machine Learning, SP CDHSI Motor Rehabilitation, Schriftpublikation, Visual Computing, Visualization, Vortrag, Wiss. 
Beitrag, best, best-arind, peer-reviewed}, pages = {7--15}, } @article{slijepcevic_explaining_2022, title = {Explaining {Machine} {Learning} {Models} for {Clinical} {Gait} {Analysis}}, volume = {3}, copyright = {CC-BY-NC-SA}, issn = {2691-1957}, url = {https://doi.org/10.1145/3474121}, doi = {10.1145/3474121}, number = {2}, journal = {ACM Transactions on Computing for Healthcare}, author = {Slijepcevic, Djordje and Horst, Fabian and Lapuschkin, Sebastian and Horsak, Brian and Raberger, Anna-Maria and Kranzl, Andreas and Samek, Wojciech and Breiteneder, Christian and Schöllhorn, Wolfgang and Zeppelzauer, Matthias}, year = {2022}, note = {Projekt: I3D Projekt: ReMoCapLab Projekt: DHLab}, keywords = {2020, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHLab, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {14:1--14:27}, } @article{slijepcevic_input_2020, title = {Input {Representations} and {Classification} {Strategies} for {Automated} {Human} {Gait} {Analysis}}, volume = {76}, issn = {0966-6362}, doi = {10/ghz24x}, journal = {Gait \& Posture}, author = {Slijepcevic, Djordje and Zeppelzauer, Matthias and Schwab, Caterine and Raberger, Anna-Maria and Breiteneder, Christian and Horsak, Brian}, year = {2020}, note = {Projekt: IntelliGait Projekt: I3D Projekt: ReMoCap-Lab Projekt: DHLab}, keywords = {2020, Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, Eintrag überprüfen, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Green OA, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, Open Access, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Wiss. 
Beitrag, best, best-bhorsak, peer-reviewed}, pages = {198--203}, } @inproceedings{slijepcevic_usefulness_2019, address = {Vienna, Austria}, title = {On the usefulness of statistical parameter mapping for feature selection in automated gait classification}, booktitle = {Book of {Abstracts} of the 25th {Conference} of the {European} {Society} of {Biomechanics} ({ESB})}, author = {Slijepcevic, Djordje and Raberger, Anna-Maria and Zeppelzauer, Matthias and Dumphart, Bernhard and Breiteneder, Christian and Horsak, Brian}, year = {2019}, note = {Projekt: IntelliGait Projekt: DHLab}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Digital Health, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP IGW Clinical \& Healthcare Research, Studiengang Physiotherapie, Vortrag, Wiss. Beitrag, best, best-bhorsak, peer-reviewed, ⛔ No DOI found}, pages = {1}, } @inproceedings{slijepcevic_explaining_age_2022, series = {{ESMAC} 2022 {Abstracts}}, title = {Explaining machine learning models for age classification in human gait analysis}, volume = {97}, url = {https://www.sciencedirect.com/science/article/pii/S0966636222003538}, doi = {10.1016/j.gaitpost.2022.07.153}, language = {en}, urldate = {2022-11-11}, booktitle = {Gait \& {Posture}}, author = {Slijepcevic, D. and Horst, F. and Simak, M. and Lapuschkin, S. and Raberger, A. M. and Samek, W. and Breiteneder, C. and Schöllhorn, W. I. and Zeppelzauer, M. 
and Horsak, B.}, month = sep, year = {2022}, note = {Projekt: I3D}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health and Social Innovation, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, Media Computing Group, SP CDHSI Motor Rehabilitation, SP IGW Clinical \& Healthcare Research, Vortrag, Wiss. Beitrag, best, peer-reviewed}, pages = {S252--S253}, } @inproceedings{krondorfer_deep_2021, series = {{ESMAC} 2021 {Abstracts}}, title = {Deep learning-based similarity retrieval in clinical {3D} gait analysis}, volume = {90}, url = {https://www.sciencedirect.com/science/article/pii/S0966636221003751}, doi = {10.1016/j.gaitpost.2021.09.066}, language = {en}, urldate = {2021-10-15}, booktitle = {Gait \& {Posture}}, author = {Krondorfer, P. and Slijepčević, D. and Unglaube, F. and Kranzl, A. and Breiteneder, C. and Zeppelzauer, M. and Horsak, B.}, month = oct, year = {2021}, note = {Projekt: I3D}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, SP CDHSI Motor Rehabilitation, Vortrag, Wiss. 
Beitrag, best, peer-reviewed}, pages = {127--128}, } @inproceedings{dumphart_automated_2021, series = {{ESMAC} 2021 {Abstracts}}, title = {An automated deep learning-based gait event detection algorithm for various pathologies}, volume = {90}, url = {https://www.sciencedirect.com/science/article/pii/S0966636221003350}, doi = {10.1016/j.gaitpost.2021.09.026}, language = {en}, urldate = {2021-10-15}, booktitle = {Gait \& {Posture}}, author = {Dumphart, B. and Slijepčević, D. and Unglaube, F. and Kranzl, A. and Baca, A. and Zeppelzauer, M. and Horsak, B.}, month = oct, year = {2021}, note = {Projekt: ELSA}, keywords = {Biomechanics, Center for Artificial Intelligence, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Department Gesundheit, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Gait Analysis, Gait Classification, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Machine Learning, SP CDHSI Motor Rehabilitation, Vortrag, Wiss. Beitrag, best, peer-reviewed}, pages = {50--51}, } @article{horsak_overground_2021, title = {Overground {Walking} in a {Fully} {Immersive} {Virtual} {Reality}: {A} {Comprehensive} {Study} on the {Effects} on {Full}-{Body} {Walking} {Biomechanics}}, volume = {9}, copyright = {CC-BY}, issn = {2296-4185}, shorttitle = {Overground {Walking} in a {Fully} {Immersive} {Virtual} {Reality}}, url = {https://www.frontiersin.org/article/10.3389/fbioe.2021.780314}, doi = {10.3389/fbioe.2021.780314}, abstract = {Virtual reality (VR) is an emerging technology offering tremendous opportunities to aid gait rehabilitation. To this date, real walking with users immersed in virtual environments with head-mounted displays (HMDs) is either possible with treadmills or room-scale (overground) VR setups. 
Especially for the latter, there is a growing interest in applications for interactive gait training as they could allow for more self-paced and natural walking. This study investigated if walking in an overground VR environment has relevant effects on 3D gait biomechanics. A convenience sample of 21 healthy individuals underwent standard 3D gait analysis during four randomly assigned walking conditions: the real laboratory (RLab), a virtual laboratory resembling the real world (VRLab), a small version of the VRlab (VRLab−), and a version which is twice as long as the VRlab (VRLab+). To immerse the participants in the virtual environment we used a VR-HMD, which was operated wireless and calibrated in a way that the virtual labs would match the real-world. Walking speed and a single measure of gait kinematic variability (GaitSD) served as primary outcomes next to standard spatio-temporal parameters, their coefficients of variant (CV\%), kinematics, and kinetics. Briefly described, participants demonstrated a slower walking pattern (−0.09 ± 0.06 m/s) and small accompanying kinematic and kinetic changes. Participants also showed a markedly increased gait variability in lower extremity gait kinematics and spatio-temporal parameters. No differences were found between walking in VRLab+ vs. VRLab−. Most of the kinematic and kinetic differences were too small to be regarded as relevant, but increased kinematic variability (+57\%) along with increased percent double support time (+4\%), and increased step width variability (+38\%) indicate gait adaptions toward a more conservative or cautious gait due to instability induced by the VR environment. We suggest considering these effects in the design of VR-based overground training devices. Our study lays the foundation for upcoming developments in the field of VR-assisted gait rehabilitation as it describes how VR in overground walking scenarios impacts our gait pattern. 
This information is of high relevance when one wants to develop purposeful rehabilitation tools.}, urldate = {2021-12-03}, journal = {Frontiers in Bioengineering and Biotechnology}, author = {Horsak, Brian and Simonlehner, Mark and Schöffer, Lucas and Dumphart, Bernhard and Jalaeefar, Arian and Husinsky, Matthias}, year = {2021}, keywords = {Biofeedback, Biomechanics, Center for Digital Health and Social Innovation, Department Gesundheit, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Gait Analysis, Institut für Gesundheitswissenschaften, SP CDHSI Motor Rehabilitation, Virtual Reality, Wiss. Beitrag, best, peer-reviewed}, pages = {1236}, } @inproceedings{iber_mind_2021, title = {Mind the {Steps}: {Towards} {Auditory} {Feedback} in {Tele}-{Rehabilitation} {Based} on {Automated} {Gait} {Classification}}, doi = {10/gnt2tc}, abstract = {We describe a proof-of-concept for the implementation of a mobile auditory biofeedback system based on automated classification of functional gait disorders. The classification is embedded in a sensor-instrumented insole and is based on ground reaction forces (GRFs). GRF data have been successfully used for the classification of gait patterns into clinically relevant classes and are frequently used in clinical practice to quantitatively describe human motion. A feed-forward neural network that was implemented on the firmware of the insole is used to estimate the GRFs using pressure and accelerator data. Compared to GRF measurements obtained from force plates, the estimated GRFs performed highly accurately. To distinguish between physiological gait and gait disorders, we trained and evaluated a support vector machine with labeled data from a publicly accessible database. The automated gait classification was sonified for auditory feedback. 
The high potential of the implemented auditory feedback for preventive and supportive applications in physical therapy, such as supervised therapy settings and tele-rehabilitation, was highlighted by a semi-structured interview with two experts.}, booktitle = {{Proceedings} of the 16th {International} {Audio} {Mostly} {Conference} ({AM}’21)}, publisher = {ACM}, author = {Iber, Michael and Dumphart, Bernhard and Oliveira, Victor A. de. J. and Ferstl, Stefan and Reis, Joschua and Slijepcevic, Djordje and Heller, Mario and Raberger, Anna-Maria and Horsak, Brian}, year = {2021}, note = {Projekt: Sonigait II}, keywords = {Artificial Intelligence, Biofeedback, Biomechanics, CDHI, Digital Health, Forschungsgruppe Media Computing, Gait Analysis, Human-computer interaction, Institut für Creative Media Technologies, Institut für Gesundheitswissenschaften, Motor rehabilitation, Vortrag, Wiss. Beitrag, best, best-lbiber, peer-reviewed}, } @article{roider_recognition_2015, title = {Recognition of sustainable mobility as basis for the transition into a digital gaming concept for young people}, url = {http://www.iatbr2015.org.uk/index.php/iatbr/iatbr2015/paper/view/399}, abstract = {Mobility behaviour is often shaped through social habituation in childhood and adolescence. Persons who are accustomed to use sustainable transport modes will stick to their manner later on in life too. Since children and young adults undergo drastic physical and mental developmental steps in a short period of time, awareness raising campaigns must exactly match with different age groups and their current mobility behavior. Against this background of behavioral development of children, the research project BewusstMobil developed a concept for a smartphone application which automatically collects individual travel behaviour patterns, quantifies current environmental-friendly and health promoting behaviour and acts as platform for a game which young people can play online. 
The basic idea is that children and young people aged 12 to 18 years collect points by their current mobility behavior and chosen transport mode. Depending on how they travel (bus, tram, walking, etc.) and how environmental-friendly or healthy this mode of transport is, they collect more or less points i.e. for example if a person walks or travels by train, he/she gets more points as if traveling by car. Recognition of trip data (start and end points, mileage, time used, transport mode) is activated after the start of the application in the background and as soon as the person moves. Data are assessed by using GPS positioning and movement data such as speed and acceleration. In most cases, indicators for sustainable mobility are available on a very general level and rather aimed at assessing infrastructure projects. However, in this application, individual mobility and the benefit for the young transport user has to be evaluated. Thus, indicators known from research and practice are selected and transferred to an individual level. This includes pollutants and GHG-emissions (ecological impacts), costs (economic impacts), travel time and health benefits (social aspects). Depending on the selected transport mode, quantities are calculated for each indicator. The factors included in the calculation are the trip length [km] and the trip duration [min], which are determined through GPS tracking. The normalization is done by monetization, mainly based on recommendations for a cost-benefit analysis, i.e. different dimensions [g / veh-km, min, €] are converted to costs by using market prices (or "shadow prices"). These values are weighted according to their importance in order to ensure the assessment of environmentally friendliness and health promotion of individual mobility of children and adolescents. These parameters form the basis for the games’ scoring system. 
Points earned due to individual mobility behavior, can be traded in for proceeding on virtual routes of different modes on a map of Austria in the smartphone application, in order to reach virtual locations where the player can win goodies (promotional gifts, shopping vouchers, concert tickets, etc.). As several players try to reach these locations simultaneously, and the number of goodies at each location is limited the game creates motivation for playing and competing among participants. The application was tested in three different schools in the Province of Lower Austria covering pupils aged 13 to 18. A clear knowledge of the environmental impact of the transport system was identified prior to the test phase, but this knowledge hardly influences the choice of transport mode currently. However, a change in behavior had been reported during the gaming. Since this is not due to the transfer of (new) knowledge, it can be assumed that this shift was primarily caused by the game design and the information contained in the game mechanics that were incorporated resulting in a change of behavior. Generally, data collection as well as the overall concept and usage were clear for all users. However, younger people; age 13 years seems to lose interest faster than the other age classes. In particular, the interests concerning the competition were more relevant to higher classes, but at the same time the willingness to cheat increased. Thus, the full technical functionality was mentioned as requirement for further use of the application. Particularly, mode detection and tracking of individual trips have to ensure high accuracy and thus a fair scoring, since the desire for fair playing conditions was at the top in all age groups. From a scientific point of view, the application offers the opportunity to learn more about mobility behavior of young people and to use the digital world to raise the awareness and to influence user’s mode choice in a positive way. 
The presentation will give an overview of the data collection, the scoring system quantifying sustainable mobility behavior and illustrate the game concept based on the automatic recognition of individual trip data. Moreover, results of the before and after attitudinal and mobility surveys will demonstrate current mobility behavior, the perception of the application and the influence on the individual mobility behavior of young people based on current data.}, author = {Roider, Oliver and Judmaier, Peter and Barberi, Alessandro and Michelberger, Frank}, year = {2015}, keywords = {2015, Department Medien und Digitale Technologien, Department Technologie, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Institut für Mobilitätsforschung, Publikationstyp Schriftpublikation, best, interdisziplinär, peer-reviewed, ⛔ No DOI found}, } @inproceedings{seidl_markus_towards_2019, address = {Dublin, Ireland}, title = {Towards {Distinction} of {Rock} {Art} {Pecking} {Styles} with a {Hybrid} {2D}/{3D} {Approach}}, booktitle = {Proceedings of the {International} {Conference} on {Content}-based {Multimedia} {Indexing} ({CBMI})}, author = {Seidl, Markus and Zeppelzauer, Matthias}, year = {2019}, note = {Projekt: PITOTI 3D}, keywords = {Center for Artificial Intelligence, Computer Vision, Digital Heritage, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual Computing, Vortrag, Wiss. Beitrag, best, peer-reviewed, ⛔ No DOI found}, pages = {4}, } @inproceedings{wagner_survey_2015, address = {Cagliari, Italy}, title = {A {Survey} of {Visualization} {Systems} for {Malware} {Analysis}}, url = {http://mc.fhstp.ac.at/supp/EuroVisStar2015}, doi = {10/cwc4}, abstract = {Due to the increasing threat from malicious software (malware), monitoring of vulnerable systems is becoming increasingly important. 
The need to log and analyze activity encompasses networks, individual computers, as well as mobile devices. While there are various automatic approaches and techniques available to detect, identify, or capture malware, the actual analysis of the ever-increasing number of suspicious samples is a time-consuming process for malware analysts. The use of visualization and highly interactive visual analytics systems can help to support this analysis process with respect to investigation, comparison, and summarization of malware samples. Currently, there is no survey available that reviews available visualization systems supporting this important and emerging field. We provide a systematic overview and categorization of malware visualization systems from the perspective of visual analytics. Additionally, we identify and evaluate data providers and commercial tools that produce meaningful input data for the reviewed malware visualization systems. This helps to reveal data types that are currently underrepresented, enabling new research opportunities in the visualization community.}, booktitle = {Eurographics {Conference} on {Visualization} ({EuroVis}) - {STARs}}, publisher = {The Eurographics Association}, author = {Wagner, Markus and Fischer, Fabian and Luh, Robert and Haberson, Andrea and Rind, Alexander and Keim, Daniel A. and Aigner, Wolfgang}, editor = {Borgo, Rita and Ganovelli, Fabio and Viola, Ivan}, year = {2015}, note = {Projekt: TARGET Projekt: KAVA-Time}, keywords = {Creative Industries, FH SP Cyber Security, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Forschungsgruppe Secure Societies, Institut für Creative Media Technologies, Institut für IT Sicherheitsforschung, Josef Ressel Zentrum TARGET, KAVA-Time, Model/Taxonomy, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Time-Oriented Data, Visual Computing, Visual analytics, Vortrag, Wiss. 
Beitrag, best, best-lbaigner, best-lbwagnerm, best-rluh, information visualization, interdisziplinär, malicious software, malware, peer-reviewed, survey, taxonomy, visualization}, pages = {105--125}, } @article{luh_sequin_2018, title = {{SEQUIN}: a grammar inference framework for analyzing malicious system behavior}, url = {http://mc.fhstp.ac.at/sites/default/files/publications/Luh_2018_SEQUIN.pdf}, doi = {10/cwdf}, abstract = {Targeted attacks on IT systems are a rising threat to the confidentiality of sensitive data and the availability of critical systems. The emergence of Advanced Persistent Threats (APTs) made it paramount to fully understand the particulars of such attacks in order to improve or devise effective defense mechanisms. Grammar inference paired with visual analytics (VA) techniques offers a powerful foundation for the automated extraction of behavioral patterns from sequential event traces. To facilitate the interpretation and analysis of APTs, we present SEQUIN, a grammar inference system based on the Sequitur compression algorithm that constructs a context-free grammar (CFG) from string-based input data. In addition to recursive rule extraction, we expanded the procedure through automated assessment routines capable of dealing with multiple input sources and types. This automated assessment enables the accurate identification of interesting frequent or anomalous patterns in sequential corpora of arbitrary quantity and origin. On the formal side, we extended the CFG with attributes that help describe the extracted (malicious) actions. 
Discovery-focused pattern visualization of the output is provided by our dedicated KAMAS VA prototype.}, journal = {Journal of Computer Virology and Hacking Techniques}, author = {Luh, Robert and Schramm, Gregor and Wagner, Markus and Janicke, Helge and Schrittwieser, Sebastian}, year = {2018}, note = {Projekt: TARGET Projekt: KAVA-Time}, keywords = {FH SP Cyber Security, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Forschungsgruppe Secure Societies, Institut für Creative Media Technologies, Institut für IT Sicherheitsforschung, Josef Ressel Zentrum TARGET, Visual analytics, Wiss. Beitrag, attribute grammar, best, best-lbwagner, best-rluh, knowledge generation, malware analysis, peer-reviewed, system behavior}, pages = {1--21}, } @article{zaharieva_cross-platform_2015, title = {Cross-{Platform} {Social} {Event} {Detection}}, volume = {22}, issn = {1070-986X}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7045414&tag=1}, doi = {10/gh3773}, abstract = {It is becoming more and more popular to share personal experiences on platforms such as Flickr and YouTube. Uploaded media is usually described by both technical and user-generated metadata that is commonly used for their access and retrieval. Thus, a crucial aspect in this context is the quality and reliability of provided metadata. The mining of media across sharing platforms bears the additional challenge about potential differences in the maintained metadata. 
In order to provide a baseline for further research, we perform a thorough evaluation of the usefulness of available metadata in the context of social event detection in both single media repository scenario and across different platforms.}, number = {3}, journal = {IEEE Multimedia}, author = {Zaharieva, Maia and Del Fabro, Manfred and Zeppelzauer, Matthias}, month = jan, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {14}, } @inproceedings{bernard_towards_2018, address = {Brno, Czech Republic}, title = {Towards {User}-{Centered} {Active} {Learning} {Algorithms}}, volume = {37}, url = {http://doi.wiley.com/10.1111/cgf.13406}, doi = {10/gdw79h}, language = {en}, urldate = {2018-10-10}, booktitle = {Computer {Graphics} {Forum}}, author = {Bernard, Jürgen and Zeppelzauer, Matthias and Lehmann, Markus and Müller, Martin and Sedlmair, Michael}, year = {2018}, keywords = {Center for Artificial Intelligence, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Visual analytics, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed}, pages = {121--132}, } @article{andrienko_viewing_2018, title = {Viewing {Visual} {Analytics} as {Model} {Building}}, volume = {37}, url = {http://openaccess.city.ac.uk/19078/}, doi = {10/gdv9s7}, abstract = {To complement the currently existing definitions and conceptual frameworks of visual analytics, which focus mainly on activities performed by analysts and types of techniques they use, we attempt to define the expected results of these activities. 
We argue that the main goal of doing visual analytics is to build a mental and/or formal model of a certain piece of reality reflected in data. The purpose of the model may be to understand, to forecast or to control this piece of reality. Based on this model-building perspective, we propose a detailed conceptual framework in which the visual analytics process is considered as a goal-oriented workflow producing a model as a result. We demonstrate how this framework can be used for performing an analytical survey of the visual analytics research field and identifying the directions and areas where further research is needed.}, number = {6}, journal = {Computer Graphics Forum}, author = {Andrienko, Natalia and Lammarsch, Tim and Andrienko, Gennady and Fuchs, Georg and Keim, Daniel A. and Miksch, Silvia and Rind, Alexander}, year = {2018}, note = {Projekt: KAVA-Time}, keywords = {FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Visual analytics, Wiss. Beitrag, analytical process, best, best-arind, knowledge generation, peer-reviewed, theory and model}, pages = {275--299}, } @book{michelberger_gender-_2016, address = {Berlin}, title = {Gender- und {Diversity}-{Kriterien} im sicherheitskritischen {Arbeitsumfeld}}, isbn = {978-3-7418-0996-5}, url = {https://www.epubli.de/shop/buch/51924}, abstract = {Der vorliegende Leitfaden soll Verantwortliche und EntscheidungsträgerInnen in Organisationseinheiten im sicherheitskritischen Bereich bei der Umsetzung von Kriterien im Bereich Gender und Diversity unterstützen. Er liefert konkrete Methoden und Anleitungen und gliedert sich in drei Abschnitte: (1) Guidelines: Auf Grundlage einer Literaturrecherche erstellte zentrale Richtlinien für Gender und Diversity in sicherheitskritischen Kontrollräumen und Leitstellen. (2) Methoden: Detaillierte Beschreibung der angeführten Methoden im Abschnitt „Guidelines“. 
(3) Testleitfäden: Praxiserprobte Leitfäden als Vorlage für die eigene Testdurchführung. Ziel ist es, einen übersichtlichen und verständlichen Einstieg zur Durchführung von Untersuchungen anzubieten, um so Schwachstellen im eigenen Verantwortungsbereich ausfindig machen zu können. Die gefundenen Ergebnisse können in Folge bei Design-, Organisations- und Ausstattungsentscheidungen unterstützen. Der Leitfaden entstand im Zuge des Forschungsprojektes „GenSiSys“. Das Projekt wurde vom BMVIT im Rahmen des Programms Talente gefördert. Dieses Dokument beinhaltet jedoch keine Daten oder Auswertungen aus den im Rahmen des Projektes zur Erprobung der Methoden durchgeführten Testläufe im praktischen Umfeld.}, publisher = {epubli (neopubli GmbH)}, author = {Michelberger, Frank and Judmaier, Peter and Rottermanner, Gernot and Größbacher, Stefanie and Viertelmayer, Andrea and Bichler, Romana and Laser, Birgit and Erharter, Dorothea and Pohl, Margit and Weissenböck, Elisabeth and Münch, Jens and Kraner, Florian-Silvester and Jordan, Werner and Palmetzhofer, Gerd and Fischer, Leonhard and Kriegshaber, Ursula}, year = {2016}, note = {Projekt: GenSiSys}, keywords = {Center for Sustainable Mobility, Forschungsgruppe Media Computing, Gender \& Diversity, Institut für Creative Media Technologies, Safety-critical workplaces, Studiengang Physiotherapie, best, best-lbmichelberger, evaluation}, } @inproceedings{zaharieva_social_2015, address = {Shanghai, China}, title = {Social {Event} {Mining} in {Large} {Photo} {Collections}}, abstract = {A significant part of publicly available photos on the Internet depicts a variety of different social events. In order to organize this steadily growing media content and to make it easily accessible, novel indexing methods are required. Essential research questions in this context concern the efficient detection (clustering), classification, and retrieval of social events in large media collections. 
In this paper we explore two aspects of social events mining. First, the initial clustering of a given photo collection into single events and, second, the retrieval of relevant social events based on user queries. For both aspects we employ commonly available metadata information, such as user, time, GPS data, and user-generated textual descriptions. Performed evaluations in the context of social event detection demonstrate the strong generalization ability of our approach and the potential of contextual data such as time, user, and location. Experiments with social event retrieval clearly indicate the open challenge of mapping between previously detected event clusters and heterogeneous user queries.}, booktitle = {Proceedings of the {International} {Conference} on {Multimedia} {Retrieval}}, publisher = {ACM Press}, author = {Zaharieva, Maia and Zeppelzauer, Matthias and Del Fabro, Manfred and Schopfhauser, Daniel}, month = mar, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Pattern recognition, Publikationstyp Schriftpublikation, Wiss. Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing}, } @inproceedings{agapito_graph-based_2015, series = {Lecture {Notes} in {Computer} {Science}}, title = {Graph-{Based} {Shape} {Similarity} of {Petroglyphs}}, volume = {8925}, isbn = {978-3-319-16177-8}, url = {http://dx.doi.org/10.1007/978-3-319-16178-5_9}, language = {English}, booktitle = {Computer {Vision} - {ECCV} 2014 {Workshops}}, publisher = {Springer International Publishing}, author = {Seidl, Markus and Wieser, Ewald and Zeppelzauer, Matthias and Pinz, Axel and Breiteneder, Christian}, editor = {Agapito, Lourdes and Bronstein, Michael M. 
and Rother, Carsten}, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Graph edit distance, Graph embedding, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Petroglyph similarity, Publikationstyp Schriftpublikation, Shape similarity, Vortrag, Wiss. Beitrag, best, best-lbseidl, graph matching, peer-reviewed, visual computing}, pages = {133--148}, } @inproceedings{zeppelzauer_interactive_2015, address = {Granada, Spain}, title = {Interactive {Segmentation} of {Rock}-{Art} in {High}-{Resolution} {3D} {Reconstructions}}, booktitle = {Conference {Proceedings} of {Digital} {Heritage} 2015 {Full} {Papers}}, author = {Zeppelzauer, Matthias and Poier, Georg and Seidl, Markus and Reinbacher, Christian and Breiteneder, Christian and Bischof, Horst}, month = oct, year = {2015}, note = {Projekt: PITOTI 3D}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Medien und Digitale Technologien, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Schriftpublikation, Vortrag, Wiss. Beitrag, best, peer-reviewed}, } @inproceedings{salvador_cultural_2015, address = {Boston, Massachusetts, United States}, title = {Cultural {Event} {Recognition} with {Visual} {ConvNets} and {Temporal} {Models}}, url = {http://arxiv.org/abs/1504.06567}, abstract = {This paper presents our contribution to the ChaLearn Challenge 2015 on Cultural Event Classification. The challenge in this task is to automatically classify images from 50 different cultural events. 
Our solution is based on the combination of visual features extracted from convolutional neural networks with temporal information using a hierarchical classifier scheme. We extract visual features from the last three fully connected layers of both CaffeNet (pretrained with ImageNet) and our fine tuned version for the ChaLearn challenge. We propose a late fusion strategy that trains a separate low-level SVM on each of the extracted neural codes. The class predictions of the low-level SVMs form the input to a higher level SVM, which gives the final event scores. We achieve our best result by adding a temporal refinement step into our classification scheme, which is applied directly to the output of each low-level SVM. Our approach penalizes high classification scores based on visual features when their time stamp does not match well an event-specific temporal distribution learned from the training and validation data. Our system achieved the second best result in the ChaLearn Challenge 2015 on Cultural Event Classification with a mean average precision of 0.767 on the test set.}, booktitle = {Proceedings of the {CVPR} {Workshop} {ChaLearn} {Looking} at {People} 2015}, publisher = {IEEE}, author = {Salvador, Amaia and Zeppelzauer, Matthias and Manchón-Vizuente, Daniel and Calafell, Andrea and Giró-i-Nieto, Xavier}, month = apr, year = {2015}, keywords = {2015, Center for Artificial Intelligence, Computer Vision, Creative Industries, Department Technologie, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Machine Learning, Media Computing Group, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Wiss. 
Beitrag, best, best-mzeppelzauer, peer-reviewed, visual computing}, } @incollection{rind_visual_2017, address = {Cham}, series = {{TELe}-{Health}}, title = {Visual {Analytics} of {Electronic} {Health} {Records} with a {Focus} on {Time}}, abstract = {Visual Analytics is a field of computer science that deals with methods to perform data analysis using both computer-based methods and human judgment facilitated by direct interaction with visual representations of data. Electronic health record systems that apply Visual Analytics methods have the potential to provide healthcare stakeholders with much-needed cognitive support in exploring and querying records. This chapter presents Visual Analytics projects addressing five particular challenges of electronic health records: (1) The complexity of time-oriented data constitutes a cross-cutting challenge so that all projects need to consider design aspects of time-oriented data in one way or another. (2) As electronic health records encompass patient conditions and treatment, they are inherently heterogeneous data. (3) Scaling from single patients to cohorts requires approaches for relative time, space efficiency, and aggregation. (4) Data quality and uncertainty are common issues that need to be considered in real-world projects. 
(5) A user-centered design process and suitable interaction techniques are another cross-cutting challenge for each and every Visual Analytics project.}, booktitle = {New {Perspectives} in {Medical} {Records}: {Meeting} the {Needs} of {Patients} and {Practitioners}}, publisher = {Springer}, author = {Rind, Alexander and Federico, Paolo and Gschwandtner, Theresia and Aigner, Wolfgang and Doppler, Jakob and Wagner, Markus}, editor = {Rinaldi, Giovanni}, year = {2017}, doi = {10.1007/978-3-319-28661-7_5}, keywords = {Center for Digital Health Innovation, Center for Digital Health and Social Innovation, Digital Health, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Healthcare, Institut für Creative Media Technologies, Publikationstyp Schriftpublikation, Time-Oriented Data, Visual Computing, Visual analytics, best, best-lbwagnerm, data quality, electronic health records, knowledge, medical data}, pages = {65--77}, } @inproceedings{doppler_design_2016, address = {Frankfurt am Main}, title = {Design and {Evaluation} of a {Second} {Screen} {Communication} and {Gaming} {Platform} for {Fostering} {Teleparticipation} of the {Socially} {Isolated} {Elderly}}, volume = {Advanced Technologies and Societal Change}, isbn = {978-3-319-26343-4}, url = {http://www.springer.com/us/book/9783319263434}, booktitle = {Ambient {Assisted} {Living} - 8. 
{AAL}-{Kongress} 2015}, publisher = {Springer International Publishing}, author = {Doppler, Jakob and Rottermanner, Gernot and Sommer, Sabine and Pflegerl, Johannes and Judmaier, Peter}, editor = {Wichert, Reiner and Klausing, Helmut}, year = {2016}, note = {Projekt: BRELOMATE II Projekt: UMBRELLO Projekt: BRELOMATE}, keywords = {2016, Brelomate, Center for Digital Health Innovation, Center for Digital Health and Social Innovation, DHC, Department Medien und Digitale Technologien, Department Soziales, Department Technologie, Digital Healthcare, EN, Forschungsgruppe Digital Technologies, Forschungsgruppe Media Computing, Ilse Arlt Institut, Institut für Creative Media Technologies, Publikationstyp Präsentation, Publikationstyp Schriftpublikation, Publikationstyp Vortrag, Q3/14, best, games, mobile computing, peer-reviewed, project\_brelomate, user centered design}, pages = {3--13}, } @incollection{aigner_visualization_2015, address = {Boca Raton, Florida, USA}, edition = {Second}, title = {Visualization {Techniques} for {Time}-{Oriented} {Data}}, isbn = {978-1-4822-5737-3}, url = {https://www.crcpress.com/product/isbn/9781482257373}, booktitle = {Interactive {Data} {Visualization}: {Foundations}, {Techniques}, and {Applications}}, publisher = {A K Peters/CRC Press}, author = {Aigner, Wolfgang and Miksch, Silvia and Schumann, Heidrun and Tominski, Christian}, editor = {Ward, Matthew O. and Grinstein, Georges and Keim, Daniel}, year = {2015}, note = {eingeladen Projekt: KAVA-Time Projekt: VALID}, keywords = {Center for Digital Health Innovation, Creative Industries, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Time-Oriented Data, Visual Computing, Wiss. Beitrag, best, best-lbaigner, visualization}, pages = {253--284}, } @misc{blumenstein_wie_2015, address = {St. 
Pölten, Austria}, title = {Wie erfolgreich erweisen sich {Multiplattformstrategien} international agierender {MedienproduzentInnen}?}, url = {https://ctv.fhstp.ac.at/konfbeitrag/wie-erfolgreich-erweisen-sich-multiplattformstrategien-international-agierender-medienproduzentinnen/}, urldate = {2015-09-05}, author = {Blumenstein, Kerstin and von Suess, Rosa}, month = may, year = {2015}, keywords = {2015, Department Medien und Digitale Technologien, Department Technologie, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Publikationstyp Präsentation, SP MW Global Media Markets \& Local Practices, Vortrag, best, best lbvonsuess}, } @techreport{von_suess_priticop_2013, address = {St. Pölten}, type = {Studie, {Ergebnisse} einer {Auftragsforschung} für {Terra} {Mater} und {Servus} {TV}}, title = {Priticop 3.0. {Formatentwicklung}: {Wissenschaftsmagazin} im {Fernsehen}}, shorttitle = {Priticop}, author = {von Suess, Rosa and Blumenstein, Kerstin and Doppler, Jakob and Kuntner, Georg and Schneider, Angelika}, year = {2013}, keywords = {2013, Department Technologie, Ergebnisse einer Auftragsforschung, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, SP MW Global Media Markets \& Local Practices, best, best lbvonsuess}, } @incollection{tominski_images_2017, title = {Images of {Time}: {Visual} {Representation} of {Time}-{Oriented} {Data}}, url = {http://mc.fhstp.ac.at/sites/default/files/publications/Tominski17ImagesOfTime.pdf}, booktitle = {Information {Design}: {Research} and {Practice}}, publisher = {Gower/Routledge}, author = {Tominski, Christian and Aigner, Wolfgang and Miksch, Silvia and Schumann, Heidrun}, editor = {Black, A. and Luna, Paul and Lund, O. 
and Walker, S.}, year = {2017}, note = {Projekt: VisOnFire Projekt: KAVA-Time Projekt: VALID}, keywords = {Center for Digital Health Innovation, FH SP Data Analytics \& Visual Computing, Forschungsgruppe Media Computing, Institut für Creative Media Technologies, Visual Computing, Wiss. Beitrag, best, best-lbaigner, peer-reviewed}, pages = {23--42}, }