<script src="https://bibbase.org/show?bib=http://ambientintelligence.aalto.fi/bibtex/LiteraturAll&folding=0&filter=group:ambience&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=http://ambientintelligence.aalto.fi/bibtex/LiteraturAll&folding=0&filter=group:ambience");
print_r($contents);
?>
<iframe src="https://bibbase.org/show?bib=http://ambientintelligence.aalto.fi/bibtex/LiteraturAll&folding=0&filter=group:ambience"></iframe>
For more details see the documentation.
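If you prefer to process the publication list locally rather than embed the rendered BibBase page, the sketch below fetches the raw BibTeX source and keeps only entries tagged with group = {ambience}, mirroring the filter=group:ambience parameter used in the URLs above. This is a minimal, illustrative sketch only; it assumes the LiteraturAll URL serves the plain BibTeX file and that every entry starts with an @ character at the beginning of a line.
<?php
// Illustrative sketch (assumption: the URL below returns the raw BibTeX source).
$bib = file_get_contents("http://ambientintelligence.aalto.fi/bibtex/LiteraturAll");
// Split the file into entries at lines that start with "@".
$entries = preg_split('/^(?=@)/m', $bib, -1, PREG_SPLIT_NO_EMPTY);
foreach ($entries as $entry) {
    // Keep only entries whose group field mentions "ambience".
    if (preg_match('/group\s*=\s*\{[^}]*ambience[^}]*\}/i', $entry)) {
        echo $entry;
    }
}
?>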
@article{ahmed2022, title={Knowledge Sharing in AI Services: A Market-based Approach}, author={Thatha Mohammed and Si-Ahmed Naas and Stephan Sigg and Mario Di Francesco}, journal={IEEE Internet of Things Journal}, year={2022}, publisher={IEEE}, group = {ambience}, project = {redi} }
@inproceedings{Rubio22, title={User Localization Using RF Sensing: A Performance Comparison between LIS and mmWave Radars}, author={Cristian Jesus Vaca Rubio and Dariush Salami and Petar Popovski and Elisabeth De Carvalho and Zheng-Hua Tan and Stephan Sigg}, booktitle={30th European Signal Processing Conference (EUSIPCO)}, year={2022}, group = {ambience}, project = {windmill} }
@article{salami2022tesla, title={Tesla-Rapture: A Lightweight Gesture Recognition System from mmWave Radar Sparse Point Clouds}, author={Salami, Dariush and Hasibi, Ramin and Palipana, Sameera and Popovski, Petar and Michoel, Tom and Sigg, Stephan}, journal={IEEE Transactions on Mobile Computing}, year={2022}, publisher={IEEE}, group = {ambience}, project = {radiosense} }
@inproceedings{Anay2022RIS, title={Energy-Efficient Design for RIS-Assisted UAV Communications in Beyond-5G Networks}, author={Anay Ajit Deshpande and Cristian Jesus Vaca Rubio and Salman Mohebi and Dariush Salami and Elisabeth De Carvalho and Stephan Sigg and Michele Zorzi and Andrea Zanella}, booktitle={IEEE MedComNet}, year={2022}, group = {ambience}, project = {windmill} }
@article{ma2022privacy, title={Privacy-preserving federated learning based on multi-key homomorphic encryption}, author={Ma, Jing and Naas, Si-Ahmed and Sigg, Stephan and Lyu, Xixiang}, journal={International Journal of Intelligent Systems}, year={2022}, publisher={Wiley Online Library}, group = {ambience} }
@inproceedings{Dorszewski2022Ataxia, title={Detection of an Ataxia-type disease from EMG and IMU sensors}, author={Tobias Dorszewski and Weixuan Jiang and Stephan Sigg}, booktitle={The 20th International Conference on Pervasive Computing and Communications (PerCom 2022), adjunct}, year={2022}, group = {ambience}, project = {MIRAS} } %%% 2021 %%%
@InProceedings{Salami_2021_MLSP, author = {Dariush Salami and Stephan Sigg}, booktitle = {IEEE 31st International Workshop on Machine Learning for Signal Processing (MLSP)}, title = {Zero-shot Motion Pattern Recognition from 4D Point Clouds}, year = {2021}, project ={radiosense,windmill}, group={ambience} }
@inproceedings{backstrom2021privacy, title={Privacy in Speech Communication Technology}, author={B{\"a}ckstr{\"o}m, Tom and Zarazaga, Pablo Perez and Das, Sneha and Sigg, Stephan}, booktitle={Fonetiikan p{\"a}iv{\"a}t-Phonetics Symposium}, year={2021}, group={ambience} }
@inproceedings{backstrom2021intuitive, title={Intuitive Privacy from Acoustic Reach: A Case for Networked Voice User-Interfaces}, author={B{\"a}ckstr{\"o}m, Tom and Das, Sneha and Zarazaga, Pablo P{\'e}rez and Fischer, Johannes and Findling, Rainhard Dieter and Sigg, Stephan and Nguyen, Le Ngu}, booktitle={Proc. 2021 ISCA Symposium on Security and Privacy in Speech Communication}, pages={57--61}, year={2021}, group={ambience} }
@article{Mayrhofer_2021_ACMCS, author={Rene Mayrhofer and Stephan Sigg}, journal={ACM Computing Surveys}, title={Adversary Models for Mobile Device Authentication}, year={2021}, abstract={Mobile device authentication has been a highly active research topic for over 10 years, with a vast range of methods proposed and analyzed. In related areas, such as secure channel protocols, remote authentication, or desktop user authentication, strong, systematic, and increasingly formal threat models have been established and are used to qualitatively compare different methods. However, the analysis of mobile device authentication is often based on weak adversary models, suggesting overly optimistic results on their respective security. In this article, we introduce a new classification of adversaries to better analyze and compare mobile device authentication methods. We apply this classification to a systematic literature survey. The survey shows that security is still an afterthought and that most proposed protocols lack a comprehensive security analysis. The proposed classification of adversaries provides a strong and practical adversary model that offers a comparable and transparent classification of security properties in mobile device authentication. }, issue_date = {to appear}, publisher = {ACM}, volume = { }, number = { }, pages = {1-33}, group = {ambience}, doi = {10.1145/3477601} }
@article{Le_2021_TMC, author={Le Ngu Nguyen and Stephan Sigg and Jari Lietzen and Rainhard Dieter Findling and Kalle Ruttik}, journal={IEEE Transactions on Mobile Computing}, title={Camouflage learning: Feature value obscuring ambient intelligence for constrained devices}, year={2021}, abstract={Ambient intelligence demands collaboration schemes for distributed constrained devices which are not only highly energy efficient in distributed sensing, processing and communication, but which also respect data privacy. Traditional algorithms for distributed processing suffer in Ambient intelligence domains either from limited data privacy, or from their excessive processing demands for constrained distributed devices. In this paper, we present Camouflage learning, a distributed machine learning scheme that obscures the trained model via probabilistic collaboration using physical-layer computation offloading and demonstrate the feasibility of the approach on backscatter communication prototypes and in comparison with Federated learning. We show that Camouflage learning is more energy efficient than traditional schemes and that it requires less communication overhead while reducing the computation load through physical-layer computation offloading. The scheme is synchronization-agnostic and thus appropriate for sharply constrained, synchronization-incapable devices. We demonstrate model training and inference on four distinct datasets and investigate the performance of the scheme with respect to communication range, impact of challenging communication environments, power consumption, and the backscatter hardware prototype. }, issue_date = {July 2021}, publisher = {IEEE}, volume = { }, number = { }, pages = {1-17}, group = {ambience}, project = {abacus} }
@inproceedings{Beck2020BCGECG, title={BCG and ECG-based secure communication for medical devices in Body Area Networks}, author={Nils Beck and Si Zuo and Stephan Sigg}, booktitle={The 19th International Conference on Pervasive Computing and Communications (PerCom 2021), adjunct}, year={2021}, abstract={An increasing number of medical devices, such as pacemakers or insulin pumps, are able to communicate in wireless Body Area Networks (BANs). While this facilitates interaction between users and medical devices, something that was previously more complicated or - in the case of implanted devices - often impossible, it also raises security and privacy questions. We exploit the wide availability of ballistocardiographs (BCG) and electrocardiographs (ECG) in consumer wearables and propose MEDISCOM, an ad-hoc, implicit and secure communication protocol for medical devices in local BANs. Deriving common secret keys from a body’s BCG or ECG signal, MEDISCOM ensures confidentiality and integrity of sensitive medical data and also continuously authenticates devices, requiring no explicit user interaction and maintaining a low computational overhead. We consider relevant attack vectors and show how MEDISCOM is resilient towards them. Furthermore, we validate the security of the secret keys that our protocol derives on BCG and ECG data from 29 subjects. }, group = {ambience}, project = {abacus} }
@inproceedings{Sigg2020Camouflage, title={Camouflage Learning}, author={Stephan Sigg and Le Ngu Nguyen and Jing Ma}, booktitle={The 19th International Conference on Pervasive Computing and Communications (PerCom 2021), adjunct}, year={2021}, abstract={Federated learning has been proposed as a concept for distributed machine learning which enforces privacy by avoiding sharing private data with a coordinator or distributed nodes. Instead of gathering datasets to a central server for model training in traditional machine learning, in federated learning, model updates are computed locally at distributed devices and merged at a coordinator. However, information on local data might be leaked through the model updates. We propose Camouflage learning, a distributed machine learning scheme that distributes both the data and the model. Neither the distributed devices nor the coordinator is at any point in time in possession of the complete model. Furthermore, data and model are obfuscated during distributed model inference and distributed model training. Camouflage learning can be implemented with various Machine learning schemes. }, group = {ambience}, project = {radiosense, abacus} }
@inproceedings{Manila2020BatteryLess, title={Towards battery-less RF sensing}, author={Manila Kodali and Le Ngu Nguyen and Stephan Sigg}, booktitle={The 19th International Conference on Pervasive Computing and Communications (PerCom 2021), WiP}, year={2021}, abstract={Recent work has demonstrated the use of the radio interface as a sensing modality for gestures, activities and situational perception. The field generally moves towards larger bandwidths, multiple antennas, and higher, mmWave frequency domains, which allow for the recognition of minute movements. We envision another set of applications for RF sensing: battery-less autonomous sensing devices. In this work, we investigate transceiver-less passive RF-sensors which are excited by the fluctuation of the received power over the wireless channel. In particular, we demonstrate the use of battery-less RF-sensing for applications of on-body gesture recognition integrated into smart garment, as well as the integration of such sensing capabilities into smart surfaces. }, group = {ambience}, project = {radiosense,abacus} }
@article{Sameera_2021_IMWUT, author={Sameera Palipana and Dariush Salami and Luis Leiva and Stephan Sigg}, journal={Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)}, title={Pantomime: Mid-Air Gesture Recognition with Sparse Millimeter-Wave Radar Point Clouds}, year={2021}, abstract={We introduce Pantomime, a novel mid-air gesture recognition system exploiting spatio-temporal properties of millimeter-wave radio frequency (RF) signals. Pantomime is positioned in a unique region of the RF landscape: mid-resolution mid-range high-frequency sensing, which makes it ideal for motion gesture interaction. We configure a commercial frequency-modulated continuous-wave radar device to promote spatial information over temporal resolution by means of sparse 3D point clouds, and contribute a deep learning architecture that directly consumes the point cloud, enabling real-time performance with low computational demands. Pantomime achieves 95\% accuracy and 99\% AUC in a challenging set of 21 gestures articulated by 45 participants in two indoor environments, outperforming four state-of-the-art 3D point cloud recognizers. We also analyze the effect of environment, articulation speed, angle, and distance to the sensor. We conclude that Pantomime is resilient to various input conditions and that it may enable novel applications in industrial, vehicular, and smart home scenarios. }, issue_date = {March 2021}, publisher = {ACM New York, NY, USA}, volume = {5}, number = {1}, pages = {1-27}, group = {ambience}, project = {radiosense,windmill} }
@inproceedings{hitz2020Emotion, title={Sharing geotagged pictures for an Emotion-based Recommender System}, author={Andreas Hitz and Si-Ahmed Naas and Stephan Sigg}, booktitle={The 19th International Conference on Pervasive Computing and Communications (PerCom 2021), adjunct}, year={2021}, abstract={Recommender systems are prominently used for movie or app recommendation or in e-commerce by considering profiles, past preferences and increasingly also further personalized measures. We designed and implemented an emotion-based recommender system for city visitors that takes into account user emotion and user location for the recommendation process. We conducted a comparative study between the emotion-based recommender system and recommender systems based on traditional measures. Our evaluation study involved 28 participants and the experiments showed that the emotion-based recommender system increased the average rating of the recommendation by almost 19\%. We conclude that the use of emotion can significantly improve the results and especially their level of personalization. }, group = {ambience} } %%% 2020 %%%
@inproceedings{naas2020global, title={A Global Brain fuelled by Local intelligence: Optimizing Mobile Services and Networks with AI}, author={Naas, Si-Ahmed and Mohammed, Thaha and Sigg, Stephan}, booktitle={2020 16th International Conference on Mobility, Sensing and Networking (MSN)}, pages={23--32}, year={2020}, organization={IEEE}, group={ambience} }
@PhDThesis{LeThesis2020, author = "Le Ngu Nguyen", title = "Security from Implicit Information", school = "Aalto University", year = "2020", month = "September", isbn = "978-952-64-0013-6", url_Paper ={https://aaltodoc.aalto.fi/handle/123456789/46392}, abstract = {We present novel security mechanisms using implicit information extracted from physiological, behavioural, and ambient data. These mechanisms are implemented with reference to device-to-user and inter-device relationships, including: user authentication with transient image-based passwords, device-to-device secure connection initialization based on vocal commands, collaborative inference over the communication channel, and continuous on-body device pairing. Authentication methods based on passwords require users to explicitly set their passwords and change them regularly. We introduce a method to generate always-fresh authentication challenges from videos collected by wearable cameras. We implement two password formats that expect users to arrange or select images according to their chronological information. Radio waves are mainly used for data transmission. We implement function computation over the wireless signals to perform collaborative inference. We encode information into burst sequences in such a way that arithmetic functions can be computed using the interference. Hence, data is hidden inside the wireless signals and implicitly aggregated. Our algorithms allow us to train and deploy a classifier efficiently with the support of minimal backscatter devices. To initialize a connection between a personal device (e.g. smart-phone) and shared appliances (e.g. smart-screens), users are required to explicitly ask for connection information including device identities and PIN codes. We propose to leverage natural vocal commands to select shared appliance types and generate secure communication keys from the audio implicitly. We perform experiments to verify that device proximity defined by audio fingerprints can restrict the range of device-to-device communication. PIN codes in device pairing must be manually entered or verified by users. This is inconvenient in scenarios when pairing is performed frequently or devices have limited user interfaces. Our methods generate secure pairing keys for on-body devices continuously from sensor data. Our mechanisms automatically disconnect the devices when they leave the user's body. To cover all human activities, we leverage gait in human ambulatory actions and heartbeat in resting postures.}, group = {ambience}, project = {abacus}}
@PhDThesis{MuneebaThesis2020, author = "Muneeba Raja", title = "Toward Complex 3D Movement Detection to Analyze Human Behavior via Radio-Frequency Signals", school = "Aalto University", year = "2020", month = "September", isbn = "978-952-60-3988-6", url_Paper ={https://aaltodoc.aalto.fi/handle/123456789/46311}, abstract = {A driver's attention, parallel actions, and emotions directly influence driving behavior. Any secondary task, be it cognitive, visual, or manual, that diverts driver focus from the primary task of driving is a source of distraction. Longer response time, inability to scan the road, and missing visual cues can all lead to car crashes with serious consequences. Current research focuses on detecting distraction by means of vehicle-mounted video cameras or wearable sensors for tracking eye movements and head rotation. Facial expressions, speech, and physiological signals are also among the widely used indicators for detecting distraction. These approaches are accurate, fast, and reliable but come with a high installation cost, requirements related to lighting conditions, privacy intrusions, and energy consumption. Over the past decade, the use of radio signals has been investigated as a possible solution for the aforementioned limitations of today's technologies. Changes in radio-signal patterns caused by movements of the human body can be analyzed and thereby used in detecting humans' gestures and activities. Human behavior and emotions, in particular, are less explored in this regard and are addressed mostly with reference to physiological signals. The thesis exploited multiple wireless technologies (1.8~GHz, WiFi, and millimeter wave) and combinations thereof to detect complex 3D movements of a driver in a car. Upper-body movements are vital indicators of a driver's behavior in a car, and the information from these movements could be used to generate appropriate feedback, such as warnings or provision of directives for actions that would avoid jeopardizing safety. Existing wireless-system-based solutions focus primarily on either large or small movements, or they address well-defined activities. They do not consider discriminating large movements from small ones, let alone their directions, within a single system. These limitations underscore the requirement to address complex natural-behavior situations precisely such as that in a car, which demands not only isolating particular movements but also classifying and predicting them. The research to reach the attendant goals exploited physical properties of RF signals, several hardware-software combinations, and building of algorithms to process and detect body movements -- from the simple to the complex. Additionally, distinctive feature sets were addressed for machine-learning techniques to find patterns in data and predict states accordingly. The systems were evaluated by performing extensive real-world studies.}, group = {ambience}, project = {radiosense}}
@inproceedings{Ahmed2020gaze, title={Functional Gaze Prediction in Egocentric Video}, author={Si Ahmed Naas and Xiaolan Jiang and Stephan Sigg and Yusheng Ji}, booktitle={18th International Conference on Advances in Mobile Computing and Multimedia (MoMM2020)}, year={2020}, group = {ambience} }
@article{Pablo_2020_Acoustic, author={Pablo Pérez Zarazaga and Tom B\"ackstr\"om and Stephan Sigg}, journal={IEEE Access}, title={Acoustic Fingerprints for Access Management in Ad-Hoc Sensor Networks}, year={2020}, abstract={Voice user interfaces can offer intuitive interaction with our devices, but the usability and audio quality could be further improved if multiple devices could collaborate to provide a distributed voice user interface. To ensure that users' voices are not shared with unauthorized devices, it is however necessary to design an access management system that adapts to the users' needs. Prior work has demonstrated that a combination of audio fingerprinting and fuzzy cryptography yields a robust pairing of devices without sharing the information that they record. However, the robustness of these systems is partially based on the extensive duration of the recordings that are required to obtain the fingerprint. This paper analyzes methods for robust generation of acoustic fingerprints in short periods of time to enable the responsive pairing of devices according to changes in the acoustic scenery and can be integrated into other typical speech processing tools.}, group = {ambience}, doi = {10.1109/ACCESS.2020.3022618} }
@inproceedings{hytonen2020BCG, title={Analysing Ballistocardiography for Pervasive Healthcare}, author={Roni Hytönen and Alison Tshala and Jan Schreier and Melissa Holopainen and Aada Forsman and Minna Oksanen and Rainhard Findling and Le Ngu Nguyen and Nico Jähne-Raden and Stephan Sigg}, booktitle={16th International Conference on Mobility, Sensing and Networking (MSN 2020) }, year={2020}, group = {ambience} }
@inproceedings{salami2020MQTT, title={A FAIR Extension for the MQTT Protocol}, author={Dariush Salami and Olga Streibel and Stephan Sigg}, booktitle={16th International Conference on Mobility, Sensing and Networking (MSN 2020) }, year={2020}, group = {ambience}, project={abacus} }
@inproceedings{naas2020RealTime, title={Real-time Emotion Recognition for Sales}, author={Si Ahmed Naas and Stephan Sigg}, booktitle={16th International Conference on Mobility, Sensing and Networking (MSN 2020) }, year={2020}, group = {ambience} }
@article{Sanaz_2020_IoT, author={Sanaz Kianoush and Stefano Savazzi and Manuel Beschi and Stephan Sigg and Vittorio Rampa}, journal={IEEE Internet of Things Journal}, title={A Multisensory Edge-Cloud Platform for Opportunistic Sensing in Cobot Environments}, year={2020}, doi = {10.1109/JIOT.2020.3011809}, project={radiosense}, group = {ambience} }
@article{Chiang_2020_Viewpoint, author={Xiaolan Jiang and Si Ahmed Naas and Yi-Han Chiang and Stephan Sigg and Yusheng Ji}, journal={IEEE Access}, title={SVP: Sinusoidal Viewport Prediction for 360-Degree Video Streaming}, year={2020}, abstract={The rapid growth of user expectations and network technologies has proliferated the service needs of 360-degree video streaming. In the light of the unprecedented bitrates required to deliver entire 360-degree videos, tile-based streaming, which associates viewport and non-viewport tiles with different qualities, has emerged as a promising way to facilitate 360-degree video streaming in practice. Existing work on viewport prediction primarily targets prediction accuracy, which potentially gives rise to excessive computational overhead and latency. In this paper, we propose a sinusoidal viewport prediction (SVP) system for 360-degree video streaming to overcome the aforementioned issues. In particular, the SVP system leverages 1) sinusoidal values of rotation angles to predict orientation, 2) the relationship between prediction errors, prediction time window and head movement velocities to improve the prediction accuracy, and 3) the normalized viewing probabilities of tiles to further improve adaptive bitrate (ABR) streaming performance. To evaluate the performance of the SVP system, we conduct extensive simulations based on real-world datasets. Simulation results demonstrate that the SVP system outperforms state-of-the-art schemes under various buffer thresholds and bandwidth settings in terms of viewport prediction accuracy and video quality, revealing its applicability to both live and video-on-demand streaming in practical scenarios.}, group = {ambience}, volume = {8}, doi = {10.1109/ACCESS.2020.3022062} }
@InProceedings{Nico_2020_BCG, author = {Nico Jähne-Raden and Marie Cathrine Wolf and Stephan Sigg and Ulf Kulau}, booktitle = {Studies in Health Technology and Informatics}, title = {Development of a Presentation Interface for Seismo- and Ballistocardiographic Data}, year = {2020}, project ={ballisto}, group={ambience} }
@InProceedings{Nico_2020_HeartFailure, author = {Nico Jähne-Raden and Udo Bavendiek and Henrike Gutschleg and Ulf Kulau and Stephan Sigg and Marie Cathrine Wolf and Tanja Zeppernick and Michael Marschollek}, booktitle = {Studies in Health Technology and Informatics}, title = {A structured measurement of highly synchronous real-time ballistocardiography signal data of heart failure patients}, year = {2020}, project ={ballisto}, group={ambience} }
@InProceedings{Salami_2020_MLSP, author = {Dariush Salami and Sameera Palipana and Manila Kodali and Stephan Sigg}, booktitle = {IEEE 30th International Workshop on Machine Learning for Signal Processing (MLSP)}, title = {Motion Pattern Recognition in 4D Point Clouds}, year = {2020}, project ={radiosense,windmill}, group={ambience} }
@article{Muneeba_2020_3D, author={ Muneeba Raja and Zahra Vali and Sameera Palipana and David G. Michelson and Stephan Sigg }, journal={IEEE Access}, title={3D head motion detection using millimeter-wave Doppler radar}, year={2020}, doi = {10.1109/ACCESS.2020.2973957}, project={radiosense}, group = {ambience} }
@InProceedings{Sigg_2020_ProvableConsent, author = {Stephan Sigg and Le Ngu Nguyen and Pablo Perez Zarazaga and Tom Backstrom}, booktitle = {18th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom) adjunct}, title = {Provable Consent for Voice User Interfaces}, year = {2020}, abstract = {The proliferation of acoustic human-computer interaction raises privacy concerns since it allows Voice User Interfaces (VUI) to overhear human speech and to analyze and share content of overheard conversation in cloud datacenters and with third parties. This process is non-transparent regarding when and which audio is recorded, the reach of the speech recording, the information extracted from a recording and the purpose for which it is used. To return control over the use of audio content to the individual who generated it, we promote intuitive privacy for VUIs, featuring a lightweight consent mechanism as well as means of secure verification (proof of consent) for any recorded piece of audio. In particular, through audio fingerprinting and fuzzy cryptography, we establish a trust zone, whose area is implicitly controlled by voice loudness with respect to environmental noise (Signal-to-Noise Ratio (SNR)). Secure keys are exchanged to verify consent on the use of an audio sequence via digital signatures. We performed experiments with different levels of human voice, corresponding to various trust situations (e.g. whispering and group discussion). A second scenario was investigated in which a VUI outside of the trust zone could not obtain the shared secret key.}, note = {PerCom 2020 BEST WIP PAPER}, group = {ambience} }
@InProceedings{Sameera_2020_Beamsteering, author = {Sameera Palipana and Nicolas Malm and Stephan Sigg}, booktitle = {18th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom) }, title = {Beamsteering for training-free Recognition of Multiple Humans Performing Distinct Activities}, year = {2020}, doi = {10.1109/PerCom45495.2020.9127374}, abstract = {Recognition of the context of humans plays an important role in pervasive applications such as intrusion detection, human density estimation for heating, ventilation and air-conditioning in smart buildings, as well as safety guarantee for workers during human-robot interaction. Radio vision is able to provide these sensing capabilities with low privacy intrusion. A common challenge, though, for current radio sensing solutions is to distinguish simultaneous movement from multiple subjects. We present an approach that exploits multi-antenna installations, for instance, found in upcoming 5G instrumentations, to detect and extract activities from spatially scattered human targets in an ad-hoc manner in arbitrary environments and without prior training of the multi-subject detection. We perform receiver-side beamforming and beam-steering over different azimuth angles to detect human presence in those regions separately. We characterize the resultant fluctuations in the spatial streams due to human influence using a case study and make the traces publicly available. We demonstrate the potential of this approach through two applications: 1) By feeding the similarities of the resulting spatial streams into a clustering algorithm, we count the humans in a given area without prior training (up to 6 people in a 22.4m$^2$ area with an accuracy that significantly exceeds the related work). 2) We further demonstrate that simultaneously conducted activities and gestures can be extracted from the spatial streams through blind source separation.}, project = {radiosense}, group = {ambience} }
@article{Lin_2020_Contactless, author={Nan Lin and Yangjie Cao and Fuchao Wang and Xinxin Lu and Bo Zhang and Zhi Liu and Stephan Sigg}, journal={IEEE Internet of Things Journal}, title={Contactless Body Movement Recognition during Sleep via WiFi Signals}, year={2020}, doi = {10.1109/JIOT.2019.2960823}, group = {ambience}, project = {radiosense} }
@inproceedings{jahne2019high, title={High-Resolution Synchronous Digital Ballistocardiography Setup}, author={Nico J{\"a}hne-Raden and Ulf Kulau and Henrike G{\"u}tschleg and Thiemo Clausen and Tobias Jura and Stephan Sigg and Lars Wolf}, booktitle={2019 Computing in Cardiology (CinC)}, year={2019}, group = {ambience}, organization={IEEE} }
@article{Qiyue_2019_DCGAN, author={Qiyue Li and Heng Qu and Zhi Liu and Nana Zhou and Wei Sun and Stephan Sigg and Jie Li}, journal={IEEE Transactions on Emerging Topics in Computational Intelligence}, title={AF-DCGAN: Amplitude Feature Deep Convolutional GAN for Fingerprint Construction in Indoor Localization Systems}, year={2019}, abstract = {With widely deployed WiFi networks and the uniqueness feature (fingerprint) of wireless channel information, fingerprinting based WiFi positioning is currently the mainstream indoor positioning method, in which fingerprint database construction is crucial. However, for accuracy, this approach requires enough data to be sampled at many reference points, which consumes excessive effort and time. In this paper, we collect Channel State Information (CSI) data at reference points by the method of device-free localization, then we convert collected CSI data into amplitude feature maps and extend the fingerprint database using the proposed Amplitude-Feature Deep Convolutional Generative Adversarial Network (AF-DCGAN) model. The use of AF-DCGAN accelerates convergence during the training phase, and substantially increases the diversity of the CSI amplitude feature map. The extended fingerprint database both reduces the human effort involved in fingerprint database construction and improves the accuracy of an indoor localization system, as demonstrated in the experiments. }, doi = {10.1109/TETCI.2019.2948058}, project = {radiosense}, group = {ambience} }
@article{Sameera_2019_access, author={Sameera Palipana and Stephan Sigg}, journal={IEEE Access}, title={Extracting Human Context through Receiver-end Beamforming}, year={2019}, abstract = {Device-free passive sensing of human targets using wireless signals has acquired much attention in the recent past because of its importance in many applications including security, heating, ventilation and air conditioning (HVAC), activity recognition, and elderly care. In this paper, we use receiver-side beamforming to isolate the array response of a human target when the line of sight array response is several magnitudes stronger than the human response. The solution is implemented in a 5G testbed using a software-defined radio (SDR) platform. As beamforming with SDRs faces the challenge to train the beamformer to different azimuth angles, we present an algorithm to generate the steering vectors for all azimuth angles from a few training directions amidst imprecise prior information on the training steering vectors. We extract the direction of arrival (DoA) from the array response of the human target, and conducting experiments in a semi-anechoic chamber, we detect the DoAs of up to four stationary human targets and track the DoA of up to two walking persons simultaneously. }, project = {radiosense}, group = {ambience} }
@InProceedings{Findling_19_HidemyGaze, author = {Rainhard Dieter Findling and Tahmid Quddus and Stephan Sigg}, booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia}, title = {Hide my Gaze with {EOG}! {T}owards Closed-Eye Gaze Gesture Passwords that Resist Observation-Attacks with Electrooculography in Smart Glasses}, year = {2019}, abstract = {Smart glasses allow for gaze gesture passwords as a hands-free form of mobile authentication. However, pupil movements for password input are easily observed by attackers, who thereby can derive the password. In this paper we investigate closed-eye gaze gesture passwords with EOG sensors in smart glasses. We propose an approach to detect and recognize closed-eye gaze gestures, together with a 7 and 9 character gaze gesture alphabet. Our evaluation indicates good gaze gesture detection rates. However, recognition is challenging specifically for vertical eye movements with 71.2\%-86.5\% accuracy and better results for opened than closed eyes. We further find that closed-eye gaze gesture passwords are difficult to attack from observations with 0% success rate in our evaluation, while attacks on open eye passwords succeed with 61\%. This indicates that closed-eye gaze gesture passwords protect the authentication secret significantly better than their open eye counterparts.}, url_Paper = {http://ambientintelligence.aalto.fi/paper/findling_closed_eye_eog.pdf}, project = {hidemygaze}, group = {ambience} }
@InProceedings{Fristroem_19_FreeFormGaze, author = {Eira Fristr\"om and Elias Lius and Niki Ulmanen and Paavo Hietala and Pauliina K\"arkk\"ainen and Tommi M\"akinen and Stephan Sigg and Rainhard Dieter Findling}, booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia}, title = {Free-Form Gaze Passwords from Cameras Embedded in Smart Glasses}, year = {2019}, abstract = {Contemporary personal mobile devices support a variety of authentication approaches, featuring different levels of security and usability. With cameras embedded in smart glasses, seamless, hands-free mobile authentication based on gaze is possible. Gaze authentication relies on knowledge as a secret, and gaze passwords are composed from a series of gaze points or gaze gestures. This paper investigates the concept of free-form mobile gaze passwords. Instead of relying on gaze gestures or points, free-form gaze gestures exploit the trajectory of the gaze over time. We collect and investigate a set of 29 different free-form gaze passwords from 19 subjects. In addition, the practical security of the approach is investigated in a study with 6 attackers observing eye movements during password input to subsequently perform spoofing. Our investigation indicates that most free-form gaze passwords can be expressed as a set of common geometrical shapes. Further, our free-form gaze authentication yields a true positive rate of 81\% and a false positive rate with other gaze passwords of 12\%, while targeted observation and spoofing is successful in 17.5\% of all cases. Our usability study reveals that further work on the usability of gaze input is required as subjects reported that they felt uncomfortable creating and performing free-form passwords.}, url_Paper = {http://ambientintelligence.aalto.fi/paper/momm19_freeformgaze.pdf}, project = {hidemygaze}, group = {ambience} }
@InProceedings{Ebner_19_TennisStrokeClassification, author = {Christopher J. Ebner and Rainhard Dieter Findling}, booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia}, title = {Tennis Stroke Classification: Comparing Wrist and Racket as IMU Sensor Position}, year = {2019}, abstract = {Automatic tennis stroke recognition can help tennis players improve their training experience. Previous work has used sensors positions on both wrist and tennis racket, of which different physiological aspects bring different sensing capabilities. However, no comparison of the performance of both positions has been done yet. In this paper we comparatively assess wrist and racket sensor positions for tennis stroke detection and classification. We investigate detection and classification rates with 8 well-known stroke types and visualize their differences in 3D acceleration and angular velocity. Our stroke detection utilizes a peak detection with thresholding and windowing on the derivative of sensed acceleration, while for our stroke recognition we evaluate different feature sets and classification models. Despite the different physiological aspects of wrist and racket as sensor position, for a controlled environment results indicate similar performance in both stroke detection (98.5\%-99.5\%) and user-dependent and independent classification (89\%-99\%).}, url_Paper = {http://ambientintelligence.aalto.fi/paper/Tennis_Stroke_Recognition.pdf}, group = {ambience}}
@InProceedings{Hintze_19_CORMORANTImplementingRisk, author = {Daniel Hintze and Matthias F\"uller and Sebastian Scholz and Rainhard Dieter Findling and Muhammad Muaaz and Philipp Kapfer and Wilhelm N\"ussler and Ren\'e Mayrhofer}, booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia}, title = {CORMORANT: On Implementing Risk-Aware Multi-Modal Biometric Cross-Device Authentication For Android}, year = {2019}, abstract = {This paper presents the design and open source implementation of CORMORANT, an Android authentication framework able to increase usability and security of mobile authentication. It uses transparent behavioral and physiological biometrics like gait, face, voice, and keystroke dynamics to continuously evaluate the user’s identity without explicit interaction. Using signals like location, time of day, and nearby devices to assess the risk of unauthorized access, the required level of confidence in the user’s identity is dynamically adjusted. Authentication results are shared securely, end-to-end encrypted using the Signal messaging protocol, with trusted devices to facilitate cross-device authentication for co-located devices, detected using Bluetooth low energy beacons. CORMORANT is able to reduce the authentication overhead by up to 97\% compared to conventional knowledge-based authentication whilst increasing security at the same time. We share our perspective on some of the successes and shortcomings we encountered implementing and evaluating CORMORANT in the hope of informing others working on similar projects.}, url_Paper = {http://ambientintelligence.aalto.fi/paper/Hintze_19_CORMORANTImplementingRisk_cameraReady.pdf}, group = {ambience}}
@InProceedings{Pirklbauer_19_PredictingCategoryFire, author = {Kevin Pirklbauer and Rainhard Dieter Findling}, booktitle = {Emerging Research Projects and Show Cases Symposium ({SHOW} 2019)}, title = {Predicting the Category of Fire Department Operations}, year = {2019}, abstract = {Voluntary fire departments have limited human and material resources. Machine learning aided prediction of fire department operation details can benefit their resource planning and distribution. While there is previous work on predicting certain aspects of operations within a given operation category, operation categories themselves have not been predicted yet. In this paper we propose an approach to fire department operation category prediction based on location, time, and weather information, and compare the performance of multiple machine learning models with cross validation. To evaluate our approach, we use two years of fire department data from Upper Austria, featuring 16,827 individual operations, and predict its three major operation categories. Preliminary results indicate a prediction accuracy of 61\%. While this performance is already noticeably better than uninformed prediction (34\% accuracy), we intend to further reduce the prediction error utilizing more sophisticated features and models.}, url_Paper = {http://ambientintelligence.aalto.fi/paper/momm2019_fire_department_operation_prediction.pdf}, group = {ambience} }
@InProceedings{Palipana_2019_buildsys, author={Sameera Palipana and Stephan Sigg}, booktitle={Conference on Systems for Energy-Efficient Buildings, Cities and Transportation (Buildsys) (adjunct)}, title={Receiver-Side Beamforming to Isolate Channel Perturbations from a Human Target in a Device-Free Setting}, year={2019}, abstract={We present an approach to isolate the angular response of a human on a receiver-side beamformer when the line of sight is several magnitudes stronger than the human response. The solution is implemented in a 5G testbed using a software-defined radio (SDR) platform. Beamforming with SDRs faces the challenge to train the beamformer to different azimuth angles. We present an algorithm to generate the steering vectors from a few training points amidst imprecise prior information. In particular, we assign azimuth angles to steering vectors converted from phase rotations of signals transmitted from reference directions. Furthermore, we detect a human and estimate the direction from strong signal perturbations towards that angle. Experiments for a person performing in-place activities in a semi-anechoic chamber show a detection accuracy of 100\% and a maximum median direction of arrival error of 40 degrees.}, project = {radiosense}, group = {ambience} }
@InProceedings{Sigg_2019_miel, author={Stephan Sigg and Sameera Palipana and Stefano Savazzi and Sanaz Kianoush}, booktitle={International Conference on Business Process Management (adjunct)}, title={Capturing human-machine interaction events from radio sensors in Industry 4.0 environments}, year={2019}, abstract={In manufacturing environments, human workers interact with increasingly autonomous machinery. To ensure workspace safety and production efficiency during human-robot cooperation, continuous and accurate tracking and perception of workers activities is required. The RadioSense project intends to move forward the state-of-the-art in advanced sensing and perception for next generation manufacturing workspace. In this paper, we describe our ongoing efforts towards multi-subject recognition cases with multiple persons conducting several simultaneous activities. Perturbations induced by moving bodies/objects on the electro-magnetic wavefield can be processed for environmental perception. In particular, we will adopt next generation (5G) high-frequency technologies as well as distributed massive MIMO systems. }, project = {radiosense}, group = {ambience}}
@inproceedings{Backstrom_2019_MyData, author = {Tom Backstrom and Sneha Das and Pablo Perez Zarazaga and Stephan Sigg and Rainhard Findling and Michael Laakasuo}, title = {With whom are you talking? Privacy in Speech Interfaces}, booktitle = {Proceedings of the 4th annual conference of the MyData Global network ({MyData} 2019)}, year = {2019}, address = {Helsinki, Finland}, month = sep, abstract = {Speech is about interaction. It is more than just passing messages – the listener nods and finishes the sentence for you. Interaction is so essentially a part of normal speech, that non-interactive speech has its own name: it is a monologue. It's not normal. Normal speech is about interaction. Privacy is a very natural part of such spoken interactions. We intuitively lower our voices to a whisper when we want to tell a secret. We thus change the way we speak depending on the level of privacy. In a public speech, we would not reveal intimate secrets. We thus change the content of our speech depending on the level of privacy. Furthermore, in a cafeteria, we would match our speaking volume to the background noise. We therefore change our speech in an interaction with the surroundings. Overall, we change both the manner of speaking and its content, in an interaction with our environment. Our research team is interested in the question of how such notions of privacy should be taken into account in the design of speech interfaces, such as Alexa/Amazon, Siri/Apple, Google and Mycroft. We believe that in the design of good user-interfaces, you should strive for technology which is intuitive to use. If your speech assistant handles privacy in a similar way as a natural person does, then most likely it would feel natural to the user. A key concept for us is modelling the users’ experience of privacy. Technology should understand our feelings towards privacy, how we experience it and act accordingly. From the myData-perspective, this means that all (speech) data is about interactions, between two or more parties. Ownership of such data is then also shared among the participating parties. There is no singular owner of data, but access and management of data must always happen in mutual agreement. In fact, the same applies to many other media as well. It is obvious that chatting on WhatsApp is a shared experience. Interesting (=good) photographs are those which entail a story; "This is when we went to the beach with Sophie." The myData concept should be adapted to take into account such frequently appearing real-life data. In our view, data becomes more interesting when it is about an interaction. In other words, since interaction is so central to our understanding of the world, it should then also be reflected in our data representations. To include the most significant data, we should turn our attention from myData to focus on ourData. Here, the importance of data is then dependent on, and even defined by, with whom are you talking?}, group = {ambience}}
@InProceedings{Perez_2019_eusipco, author={Pablo Perez Zarazaga and Tom Backstrom and Stephan Sigg}, title={Robust and Responsive Acoustic Pairing of Devices Using Decorrelating Time-Frequency Modelling}, booktitle={27th European Signal Processing Conference (EUSIPCO)}, year={2019}, abstract = {Voice user interfaces have increased in popularity, as they enable natural interaction with different applications using one’s voice. To improve their usability and audio quality, several devices could interact to provide a unified voice user interface. However, with devices cooperating and sharing voice-related information, user privacy may be at risk. Therefore, access management rules that preserve user privacy are important. State-of-the-art methods for acoustic pairing of devices provide fingerprinting based on the time-frequency representation of the acoustic signal and error-correction. We propose to use such acoustic fingerprinting to authorise devices which are acoustically close. We aim to obtain fingerprints of ambient audio adapted to the requirements of voice user interfaces. Our experiments show that the responsiveness and robustness is improved by combining overlapping windows and decorrelating transforms.}, url_Paper = {http://ambientintelligence.aalto.fi/paper/perezEusipco2019.pdf}, group = {ambience}}
@article{Le_2019_multimedia, author={Le Ngu Nguyen and Stephan Sigg}, journal={IEEE Multimedia Communications -- Frontiers, SI on Social and Mobile Connected Smart Objects}, title={Learning a Classification Model over Vertically-Partitioned Healthcare Data}, year={2019}, project = {abacus}, group = {ambience}}
@inproceedings{corbalan2019chorus, title={Chorus: UWB concurrent transmissions for GPS-like passive localization of countless targets}, author={Corbal{\'a}n, Pablo and Picco, Gian Pietro and Palipana, Sameera}, booktitle={Proceedings of the 18th International Conference on Information Processing in Sensor Networks}, pages={133--144}, year={2019}, organization={ACM}, group = {ambience}}
@article{Savazzi_2019_transformative, author={Stefano Savazzi and Stephan Sigg and Federico Vicentini and Sanaz Kianoush and Rainhard Findling}, journal={IEEE Computer, SI on Transformative Computing and Communication}, title={On the use of stray wireless signals for sensing: a look beyond 5G for the next generation industry}, year={2019}, number = {7}, pages = {25-36}, volume = {52}, doi = {10.1109/MC.2019.2913626}, abstract = {Transformative techniques to capture and process wireless stray radiation originated from different radio sources are gaining increasing attention. They can be applied to human sensing, behavior recognition, localization and mapping. The omnipresent radio-frequency (RF) stray radiation of wireless devices (WiFi, Cellular or any Personal/Body Area Network) encodes a 3D view of all objects traversed by its propagation. A trained machine learning model is then applied to features extracted in real-time from radio signals to isolate body-induced footprints or environmental alterations. The technology can augment and transform existing radio-devices into ubiquitously distributed sensors that simultaneously act as wireless transmitters and receivers (e.g. fast time-multiplexed). Thereby, 5G-empowered tiny device networks transform into a dense web of RF-imaging links that extract a view of an environment, for instance, to monitor manufacturing processes in next generation industrial set-ups (Industry 4.0, I4.0). This article highlights emerging transformative computing tools for radio sensing, promotes key technology enablers in 5G communication and reports deployment experiences.}, project = {radiosense}, group = {ambience}}
@article{Hintze_2019_Ubicomp, author = {Daniel Hintze and Matthias F\"uller and Sebastian Scholz and Rainhard Findling and Muhammad Muaaz and Philipp Kapfer and Eckhard Koch and Ren\'{e} Mayrhofer}, journal = {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)}, title = {CORMORANT: Ubiquitous Risk-Aware Multi-Modal Biometric Authentication Across Mobile Devices}, year = {2019}, month = sep, abstract = {People own and carry an increasing number of ubiquitous mobile devices, such as smartphones, tablets, and notebooks. Being small and mobile, those devices have a high propensity to become lost or stolen. Since mobile devices provide access to their owners’ digital lives, strong authentication is vital to protect sensitive information and services against unauthorized access. However, at least one in three devices is unprotected, with the inconvenience of traditional authentication being the paramount reason. We present the concept of CORMORANT, an approach to significantly reduce the manual burden of mobile user verification through risk-aware, multi-modal biometric, cross-device authentication. Transparent behavioral and physiological biometrics like gait, voice, face, and keystroke dynamics are used to continuously evaluate the user’s identity without explicit interaction. The required level of confidence in the user’s identity is dynamically adjusted based on the risk of unauthorized access derived from signals like location, time of day and nearby devices. Authentication results are shared securely with trusted devices to facilitate cross-device authentication for co-located devices. Conducting a large-scale agent-based simulation of 4 000 users based on more than 720 000 days of real-world device usage traces and 6.7 million simulated robberies and thefts sourced from police reports, we found the proposed approach is able to reduce the frequency of password entries required on smartphones by 97.82\% whilst simultaneously reducing the risk of unauthorized access in the event of a crime by 97.72\%, compared to conventional knowledge-based authentication. }, doi={10.1145/2800835.2800906}, group = {ambience}}
@InProceedings{Rainhard_2019_iwann, author={Rainhard Dieter Findling and Le Ngu Nguyen and Stephan Sigg}, title={Closed-Eye Gaze Gestures: Detection and Recognition of Closed-Eye Movements with Cameras in Smart Glasses}, booktitle={International Work-Conference on Artificial Neural Networks}, year={2019}, doi = {10.1007/978-3-030-20521-8_27}, abstract ={Gaze gestures bear potential for user input with mobile devices, especially smart glasses, due to being always available and hands-free. So far, gaze gesture recognition approaches have utilized open-eye movements only and disregarded closed-eye movements. This paper is a first investigation of the feasibility of detecting and recognizing closed-eye gaze gestures from close-up optical sources, e.g. eye-facing cameras embedded in smart glasses. We propose four different closed-eye gaze gesture protocols, which extend the alphabet of existing open-eye gaze gesture approaches. We further propose a methodology for detecting and extracting the corresponding closed-eye movements with full optical flow, time series processing, and machine learning. In the evaluation of the four protocols we find closed-eye gaze gestures to be detected 82.8%-91.6% of the time, and extracted gestures to be recognized correctly with an accuracy of 92.9%-99.2%.}, url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Findling_19_ClosedEyeGaze.pdf}, project = {hidemygaze}, group = {ambience}}
@InProceedings{Ferran_2019_iwann, author={Kacper Skawinski and Ferran Montraveta Roca and Rainhard Dieter Findling and Stephan Sigg}, title={Workout Type Recognition and Repetition Counting with CNNs from 3D Acceleration Sensed on the Chest}, booktitle={International Work-Conference on Artificial Neural Networks}, year={2019}, doi = {10.1007/978-3-030-20521-8_29}, volume = {11506}, series = {LNCS}, pages = {347--359}, month = jun, abstract = {Sports and workout activities have become important parts of modern life. Nowadays, many people track characteristics about their sport activities with their mobile devices, which feature inertial measurement unit (IMU) sensors. In this paper we present a methodology to detect and recognize workout, as well as to count repetitions done in a recognized type of workout, from a single 3D accelerometer worn at the chest. We consider four different types of workout (pushups, situps, squats and jumping jacks). Our technical approach to workout type recognition and repetition counting is based on machine learning with a convolutional neural network. Our evaluation utilizes data of 10 subjects, which wear a Movesense sensors on their chest during their workout. We thereby find that workouts are recognized correctly on average 89.9% of the time, and the workout repetition counting yields an average detection accuracy of 97.9% over all types of workout.}, url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Skawinski_19_WorkoutTypeRecognition.pdf}, group = {ambience}}
@article{Raja_2019_antenna, author={Muneeba Raja and Aidan Hughes and Yixuan Xu and Parham Zarei and David G. Michelson and Stephan Sigg}, journal={IEEE Antennas and Wireless Propagation Letters}, title={Wireless Multi-frequency Feature Set to Simplify Human 3D Pose Estimation}, year={2019}, volume={18}, number={5}, pages={876-880}, doi = {10.1109/LAWP.2019.2904580}, abstract = {We present a multifrequency feature set to detect driver's three-dimensional (3-D) head and torso movements from fluctuations in the radio frequency channel due to body movements. Current features used for movement detection are based on the time-of-flight, received signal strength, and channel state information and come with the limitations of coarse tracking, sensitivity toward multipath effects, and handling corrupted phase data, respectively. There is no standalone feature set that accurately detects small and large movements and determines the direction in 3-D space. We resolve this problem by using two radio signals at widely separated frequencies in a monostatic configuration. By combining information about displacement, velocity, and direction of movements derived from the Doppler effect at each frequency, we expand the number of existing features. We separate pitch, roll, and yaw movements of head from torso and arm. The extracted feature set is used to train a K-Nearest Neighbor classification algorithm, which could provide behavioral awareness to cars while being less invasive as compared to camera-based systems. The training results on data from four participants reveal that the classification accuracy is 77.4\% at 1.8 GHz, it is 87.4\% at 30 GHz, and the multifrequency feature set improves the accuracy to 92\%.}, project = {radiosense}, group = {ambience}}
@InProceedings{Le_2019_hotsalsa, author={Le Ngu Nguyen and Stephan Sigg}, title={Learning with Vertically Partitioned Data, Binary Feedback and Random Parameter Update}, booktitle={Workshop on Hot Topics in Social and Mobile Connected Smart Objects, in conjunction with IEEE International Conference on Computer Communications (INFOCOM)}, year={2019}, project = {abacus}, group = {ambience}}
@article{Sigg_2019_tweb, author={Stephan Sigg and Ella Peltonen and Eemil Lagerspetz and Petteri Nurmi and Sasu Tarkoma}, journal={ACM Transactions on the Web}, title={Exploiting usage to predict instantaneous app popularity: Trend filters and retention rates}, year={2019}, abstract = {Popularity of mobile apps is traditionally measured by metrics such as the number of downloads, installations, or user ratings. A problem with these measures is that they reflect usage only indirectly. Indeed, retention rates, i.e., the number of days users continue to interact with an installed app, have been suggested to predict successful app lifecycles. We conduct the first independent and large-scale study of retention rates and usage trends on a dataset of app-usage data from a community of 339,842 users and more than 213,667 apps. Our analysis shows that, on average, applications lose 65% of their users in the first week, while very popular applications (top 100) lose only 35%. It also reveals, however, that many applications have more complex usage behaviour patterns due to seasonality, marketing, or other factors. To capture such effects, we develop a novel app-usage trend measure which provides instantaneous information about the popularity of an application. Analysis of our data using this trend filter shows that roughly 40% of all apps never gain more than a handful of users (Marginal apps). Less than 0.1% of the remaining 60% are constantly popular (Dominant apps), 1% have a quick drain of usage after an initial steep rise (Expired apps), and 6% continuously rise in popularity (Hot apps). From these, we can distinguish, for instance, trendsetters from copycat apps. We conclude by demonstrating that usage behaviour trend information can be used to develop better mobile app recommendations.}, doi = {10.1145/3199677}, group = {ambience}}
@article{Schuermann_2019_tmc, author={Arne Bruesch and Le Ngu Nguyen and Dominik Schuermann and Stephan Sigg and Lars Wolf}, journal={IEEE Transactions on Mobile Computing}, title={Security Properties of Gait for Mobile Device Pairing}, year={2019}, abstract = {Gait has been proposed as a feature for mobile device pairing across arbitrary positions on the human body. Results indicate that the correlation in gait-based features across different body locations is sufficient to establish secure device pairing. However, the population size of the studies is limited and powerful attackers with e.g. capability of video recording are not considered. We present a concise discussion of security properties of gait-based pairing schemes including quantization, classification and analysis of attack surfaces, of statistical properties of generated sequences, an entropy analysis, as well as possible threats and security weaknesses. For one of the schemes considered, we present modifications to fix an identified security flaw. As a general limitation of gait-based authentication or pairing systems, we further demonstrate that an adversary with video support can create key sequences that are sufficiently close to on-body generated acceleration sequences to breach gait-based security mechanisms.}, doi = {10.1109/TMC.2019.2897933}, project = {bandana}, group = {ambience}}
@article{Haukipuro_2019_IEICE, author={Eeva-Sofia Haukipuro and Ville Kolehmainen and Janne Myllarinen and Sebastian Remander and Janne T. Salo and Tuomas Takko and Le Ngu Nguyen and Stephan Sigg and Rainhard Findling}, journal={IEICE Transactions, Special issue on Sensing, Wireless Networking, Data Collection, Analysis and Processing Technologies for Ambient Intelligence with Internet of Things}, title={Mobile Brainwaves: On the Interchangeability of Simple Authentication Tasks with Low-Cost, Single-Electrode EEG Devices}, year={2019}, url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Haukipuro_19_MobileBrainwavesInterchangeability.pdf}, abstract = {Electroencephalography (EEG) for biometric authentication has received some attention in recent years. In this paper, we explore the effect of three simple EEG related authentication tasks, namely resting, thinking about a picture, and moving a single finger, on mobile, low-cost, single electrode based EEG authentication. We present details of our authentication pipeline, including extracting features from the frequency power spectrum and MFCC, and training a multilayer perceptron classifier for authentication. For our evaluation we record an EEG dataset of 27 test subjects. We use a baseline, task-agnostic, and task-specific evaluation setup to investigate if different tasks can be used in place of each other for authentication. We further evaluate if tasks themselves can be told apart from each other. Evaluation results suggest that tasks differ, hence to some extent are distinguishable, as well as that our authentication approach can work in a task-specific as well as in a task-agnostic manner.}, doi = {10.1587/transcom.2018SEP0016}, group = {ambience} } %%% 2018 %%%
@article{Kianoush_2018_IoTJ, author={Sanaz Kianoush and Muneeba Raja and Stefano Savazzi and Stephan Sigg}, journal={IEEE Internet of Things Journal}, title={A cloud-IoT platform for passive radio sensing: challenges and application case studies}, year={2018}, doi={10.1109/JIOT.2018.2834530}, group = {ambience}}
@article{palipana2018falldefi, title={FallDeFi: Ubiquitous fall detection using commodity Wi-Fi devices}, author={Palipana, Sameera and Rojas, David and Agrawal, Piyush and Pesch, Dirk}, journal={Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies}, volume={1}, number={4}, pages={155}, year={2018}, publisher={ACM}, group = {ambience} }
@article{Findling_2018_TMC, author = {Rainhard Findling and Michael H\"olzl and Ren\'e Mayrhofer}, title = {Mobile Match-on-Card Authentication Using Offline-Simplified Models with Gait and Face Biometrics}, journal = {IEEE Transactions on Mobile Computing (TMC)}, year = {2018}, volume = {14}, number = {11}, pages = {2578-2590}, url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Findling_18_MobileMatchon.pdf}, doi = {https://doi.org/10.1109/TMC.2018.2812883}, abstract = {Biometrics have become important for mobile authentication, e.g. to unlock devices before using them. One way to protect biometric information stored on mobile devices from disclosure is using embedded smart cards (SCs) with biometric match-on-card (MOC) approaches. However, computational restrictions of SCs also limit biometric matching procedures. We present a mobile MOC approach that uses offline training to obtain authentication models with a simplistic internal representation in the final trained state, wherefore we adapt features and model representation to enable their usage on SCs. The pre-trained model can be shipped with SCs on mobile devices without requiring retraining to enroll users. We apply our approach to acceleration based mobile gait authentication as well as face authentication and compare authentication accuracy and computation time of 16 and 32 bit Java Card SCs. Using 16 instead of 32 bit SCs has little impact on authentication performance and is faster due to less data transfer and computations on the SC. Results indicate 11.4% and 2.4-5.4% EER for gait respectively face authentication, with transmission and computation durations on SCs in the range of 2s respectively 1s. To the best of our knowledge this work represents the first practical approach towards acceleration based gait MOC authentication.}, group = {ambience}}
@article{Schuermann_2018_pmc, title = "Moves like Jagger: Exploiting variations in instantaneous gait for spontaneous device pairing", journal = "Pervasive and Mobile Computing", year = "2018", volume = "47", month = "July", doi = "10.1016/j.pmcj.2018.03.006", url_Paper = {https://authors.elsevier.com/c/1WufU5bwSmo0nk}, author = "Dominik Schürmann and Arne Brüsch and Ngu Nguyen and Stephan Sigg and Lars Wolf", abstract = "Seamless device pairing conditioned on the context of use fosters novel application domains and ease of use. Examples are automatic device pairings with objects interacted with, such as instrumented shopping baskets, electronic tourist guides (e.g. tablets), fitness trackers or other fitness equipment. We propose a cryptographically secure spontaneous authentication scheme, BANDANA, that exploits correlation in acceleration sequences from devices worn or carried together by the same person to extract always-fresh secure secrets. On two real world datasets with 15 and 482 subjects, BANDANA generated fingerprints achieved intra- (50%) and inter-body (>75%) similarity sufficient for secure key generation via fuzzy cryptography. Using BCH codes, best results are achieved with 48 bit fingerprints from 12 gait cycles generating 16 bit long keys. Statistical bias of the generated fingerprints has been evaluated as well as vulnerabilities towards relevant attack scenarios.", project = {bandana}, group = {ambience} }
@article{Shi_2017_tvt, author={Shuyu Shi and Stephan Sigg and Lin Chen and Yusheng Ji}, journal={IEEE Transactions on Vehicular Technology}, title={Accurate Location Tracking from CSI-based Passive Device-free Probabilistic Fingerprinting}, year={2018}, doi={10.1109/TVT.2018.2810307}, url_Paper={http://dx.doi.org/10.1109/TVT.2018.2810307}, group = {ambience}}
@InProceedings{Raja_2018_icdcs, author={Muneeba Raja and Viviane Ghaderi and Stephan Sigg}, title={WiBot! In-Vehicle Behaviour and Gesture Recognition Using Wireless Network Edge}, booktitle={38th IEEE International Conference on Distributed Computing Systems (ICDCS 2018)}, year={2018}, group = {ambience} }
@InProceedings{Raja_2018_vtc, author={Muneeba Raja and Viviane Ghaderi and Stephan Sigg}, title={Detecting Driver's Distracted Behaviour from Wi-Fi}, booktitle={Vehicular Technology Conference (vtc 2018-spring)}, year={2018}, group = {ambience}}
@InProceedings{Savazzi_2018_wfiot, author={Stefano Savazzi and Stephan Sigg and Monica Nicoli and Sanaz Kianoush and Franck Le Gall and Hamza Baqa and David Remon}, title={A cloud-IoT Model for Reconfigurable Radio Sensing: The Radio.Sense Platform}, booktitle={2018 IEEE 4th World Forum on Internet of Things (WF-IoT)}, year={2018}, month={February}, group = {ambience}}
@INPROCEEDINGS{Kazuya_2018_PerCom, author={Kazuya Ohara and Takuya Maekawa and Stephan Sigg and Moustafa Youssef}, booktitle={2018 IEEE International Conference on Pervasive Computing and Communication (WiP)}, title={Preliminary Investigation of Position Independent Gesture Recognition Using Wi-Fi CSI}, year={2018}, month={March}, group = {ambience}}
@INPROCEEDINGS{Le_2018_PerCom, author={Le Ngu Nguyen and Stephan Sigg and Ulf Kulau}, booktitle={2018 IEEE International Conference on Pervasive Computing and Communication (WiP)}, title={Representation Learning for Sensor-based Device Pairing}, year={2018}, month={March}, group = {ambience}}
@INPROCEEDINGS{Le_2018_PerComDemo, author={Le Ngu Nguyen and Caglar Yuce Kaya and Arne Bruesch and Dominik Schuermann and Stephan Sigg and Lars Wolf}, booktitle={2018 IEEE International Conference on Pervasive Computing and Communication (Demo)}, title={Demo of BANDANA -- Body Area Network Device-to-device Authentication Using Natural gAit}, year={2018}, month={March}, project = {bandana}, group = {ambience}}
@INPROCEEDINGS{Niklas_2018_PerComWS, author={Niklas Strengell and Stephan Sigg}, booktitle={2018 IEEE International Conference on Pervasive Computing and Communication (adjunct)}, title={Local emotions -- using social media to understand human-environment interaction in cities}, year={2018}, month={March}, group = {ambience}}
@INPROCEEDINGS{Le_2018_PerComWS, author={Le Ngu Nguyen and Stephan Sigg}, booktitle={2018 IEEE International Conference on Pervasive Computing and Communication (adjunct)}, title={Secure Context-based Pairing for Unprecedented Devices}, year={2018}, month={March}, group = {ambience}}
@INPROCEEDINGS{Le_2018_PerComWS2, author={Le Ngu Nguyen and Stephan Sigg}, booktitle={2018 IEEE International Conference on Pervasive Computing and Communication (adjunct)}, title={User Authentication based on Personal Image Experiences}, year={2018}, month={March}, group = {ambience}}
@article{hintze2017large, title={A Large-Scale, Long-Term Analysis of Mobile Device Usage Characteristics}, author={Hintze, Daniel and Hintze, Philipp and Findling, Rainhard D and Mayrhofer, Ren{\'e}}, journal={Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies}, volume={1}, number={2}, pages={13}, year={2017}, publisher={ACM}, abstract = {Today, mobile devices like smartphones and tablets have become an indispensable part of people’s lives, posing many new questions e.g., in terms of interaction methods, but also security. In this paper, we conduct a large scale, long term analysis of mobile device usage characteristics like session length, interaction frequency, and daily usage in locked and unlocked state with respect to location context and diurnal pattern. Based on detailed logs from 29,279 mobile phones and tablets representing a total of 5,811 years of usage time, we identify and analyze 52.2 million usage sessions with some participants providing data for more than four years. Our results show that context has a highly significant effect on both frequency and extent of mobile device usage, with mobile phones being used twice as much at home compared to in the office. Interestingly, devices are unlocked for only 46% of the interactions. We found that with an average of 60 interactions per day, smartphones are used almost thrice as often as tablet devices (23), while usage sessions on tablets are three times longer, hence are used almost for an equal amount of time throughout the day. We conclude that usage session characteristics differ considerably between tablets and smartphones. These results inform future approaches to mobile interaction as well as security.}, doi = {10.1145/3090078}, url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Hintze_17_LargeScaleLong.pdf}, group = {ambience}}
@MISC{Sigg_2017_habil, type = {habilitation}, author = "Stephan Sigg", title = "Some aspects of physical prototyping in Pervasive Computing", school = "TU Braunschweig", year = "2017", month = "April", note = "TU Braunschweig, habilitation thesis", url_Paper = "../paper/Habil_180105_v7_STS.pdf", group = {ambience} }
@ARTICLE{Muneeba_2017_Geospatial, author={Muneeba Raja and Anja Exler and Samuli Hemminki and Shin'ichi Konomi and Stephan Sigg and Sozo Inoue}, journal={Springer GeoInformatica}, title={Towards pervasive geospatial affect perception}, year={2017}, abstract = {Due to the enormous penetration of connected computing devices with diverse sensing and localization capabilities, a good fraction of an individual’s activities, locations, and social connections can be sensed and spatially pinpointed. We see significant potential to advance the field of personal activity sensing and tracking beyond its current state of simple activities, at the same time linking activities geospatially. We investigate the detection of sentiment from environmental, on-body and smartphone sensors and propose an affect map as an interface to accumulate and interpret data about emotion and mood from a diverse set of sensing sources. In this paper, we first survey existing work on affect sensing and geospatial systems, before presenting a taxonomy of large-scale affect sensing. We discuss model relationships among human emotions and geo-spaces using networks, apply clustering algorithms to the networks and visualize clusters on a map considering space, time and mobility. For the recognition of emotion and mood, we report from two studies exploiting environmental and on-body sensors. Thereafter, we propose a framework for large-scale affect sensing and discuss challenges and open issues for future work.}, doi={10.1007/s10707-017-0294-1}, url={http://dx.doi.org/10.1007/s10707-017-0294-1}, keywords={Geospatial mapping, Emotion recognition, Device-free sensing, Activity recognition}, group = {ambience}}
@InBook{savazzi2017, title = {Radar for In-Door Monitoring: Detection, Localization, and Assessment}, chapter = {Wireless Sensing for Device-Free Recognition of Human Motion}, publisher = {Taylor and Francis Group}, year = {2017}, author = {Stefano Savazzi and Stephan Sigg and Monica Nicoli and Vittorio Rampa and Sanaz Kianoush and Umberto Spagnolini}, group = {ambience}}
@article{sigggi, title={GI-Dagstuhl Seminar 16353 on Aware Machine to Machine Communication}, author={Sigg, Stephan and Arumaithurai, Mayutan and Wang, Xiaoyan}, journal={Informatik-Spektrum}, year={2017}, volume = {40}, url_Paper = {https://link.springer.com/content/pdf/10.1007%2Fs00287-017-1065-y.pdf}, group = {ambience}}
@InProceedings{Singh_2017_dacsc, author={Isha Singh and Stephan Sigg}, title={Smart City Environmental perception from ambient cellular signals}, booktitle={International Workshop on Distributed Autonomous Computing in Smart City}, year={2017}, month={August}, group = {ambience}}
@InProceedings{Bahareh_2017_vtc, author={Bahareh Gholampooryazdi and Isha Singh and Stephan Sigg}, title={5G Ubiquitous Sensing: Passive Environmental Perception in Cellular Systems}, booktitle={Vehicular Technology Conference (vtc 2017-fall)}, year={2017}, month={September}, group = {ambience}}
@INPROCEEDINGS{Schuermann_2017_PerCom, author={Dominik Schuermann and Arne Bruesch and Stephan Sigg and Lars Wolf}, booktitle={2017 IEEE International Conference on Pervasive Computing and Communication}, title={BANDANA – Body Area Network Device-to-device Authentication using Natural gAit}, year={2017}, abstract={Secure spontaneous authentication between devices worn at arbitrary location on the same body is a challenging, yet unsolved problem. We propose BANDANA, the first-ever implicit secure device-to-device authentication scheme for devices worn on the same body. Our approach leverages instantaneous variation in acceleration patterns from gait sequences to extract always-fresh secure secrets. It enables secure spontaneous pairing of devices worn on the same body or interacted with. The method is robust against noise in sensor readings and active attackers. We demonstrate the robustness of BANDANA on two gait datasets and discuss the discriminability of intra- and inter-body cases, robustness to statistical bias, as well as possible attack scenarios.}, month={March}, project = {bandana}, group = {ambience}}
@INPROCEEDINGS{LeWiP_2017_PerCom, author={Le Ngu Nguyen and Stephan Sigg}, booktitle={2017 IEEE International Conference on Pervasive Computing and Communication (WiP)}, title={PassFrame: Generating Image-based Passwords from Egocentric Videos}, year={2017}, abstract={Wearable cameras have been widely used not only in sport and public safety but also as life-logging gadgets. They record diverse visual information that is meaningful to the users. In this paper, we analyse first-person-view videos to develop a personalized user authentication mechanism. Our proposed algorithm generates provisional passwords which benefit a variety of purposes such as normally unlocking a mobile device or fallback authentication. First, representative frames are extracted from the egocentric videos. Then, they are split into distinguishable segments before a clustering procedure is applied to discard repetitive scenes. The whole process aims to retain memorable images to form the authentication challenges. We integrate eye tracking data to select informative sequences of video frames and suggest another alternative method if an eye-facing camera is not available. To evaluate our system, we perform the experiments in different settings including object-interaction activities and traveling contexts. Even though our mechanism produces variable passwords, the log-in effort is comparable with approaches based on static challenges.}, month={March}, project={passframe}, group = {ambience}}
@INPROCEEDINGS{LeDemo_2017_PerCom, author={Le Ngu Nguyen and Stephan Sigg}, booktitle={2017 IEEE International Conference on Pervasive Computing and Communication (demo)}, title={Demo of PassFrame: Generating Image-based Passwords from Egocentric Videos}, year={2017}, abstract={We demonstrate a personalized user authentication mechanism based on first-person-view videos. Our proposed algorithm forms temporary image-based authentication challenges which benefit a variety of purposes such as unlocking a mobile device or fallback authentication. First, representative frames are extracted from the egocentric videos. Then, they are split into distinguishable segments before repetitive scenes are discarded through a clustering procedure. We integrate eye tracking data to select informative sequences of video frames and suggest an alternative method based on image quality. For evaluation, we perform experiments in different settings including object-interaction activities and traveling contexts. We assessed the authentication scheme in the presence of an informed attacker and observed that the entry time is significantly higher than that of the legitimate user.}, month={March}, project={passframe}, group = {ambience}}
@INPROCEEDINGS{Muneeba_2017_ETFA, author={Muneeba Raja and Stephan Sigg}, booktitle={22nd IEEE International Conference on Emerging Technologies And Factory Automation (ETFA'17)}, title={RFexpress! – Exploiting the wireless network edge for RF-based emotion sensing}, year={2017}, group = {ambience}}
@INPROCEEDINGS{MuneebaWiP_2017_PerCom, author={Muneeba Raja and Stephan Sigg}, booktitle={2017 IEEE International Conference on Pervasive Computing and Communication (WiP)}, title={RFexpress! - RF Emotion Recognition in the Wild}, year={2017}, abstract={We present RFexpress!, the first-ever system to recognize emotion from body movements and gestures via Device-Free Activity Recognition (DFAR). We focus on the distinction between neutral and agitated states in realistic environments. In particular, the system is able to detect risky driving behaviour in a vehicular setting as well as spotting angry conversations in an indoor environment. In case studies with 8 and 5 subjects the system could achieve recognition accuracies of 82.9% and 64%. We study the effectiveness of DFAR emotion and activity recognition systems in real environments such as cafes, malls, outdoor and office spaces. We measure radio characteristics in these environments at different days and times and analyse the impact of variations in the Signal to Noise Ratio (SNR) on the accuracy of DFAR emotion and activity recognition. In a case study with 5 subjects, we then find critical SNR values under which activity and emotion recognition results are no longer reliable.}, month={March}, group = {ambience}}
@INPROCEEDINGS{BaharehWiP_2017_PerCom, author={Bahareh Gholampooryazdi and Stephan Sigg}, booktitle={2017 IEEE International Conference on Pervasive Computing and Communication (WiP)}, title={Walking Speed Recognition from 5G Prototype System}, year={2017}, abstract={We investigate the recognition of walking speed by a prototypical 5G system exploiting 52 OFDM carriers over 12.48 MHz bandwidth at 3.45 GHz. We consider the impact of the number of channels exploited and compare the recognition performance with the accuracy achieved by acceleration-based sensing. Our results achieved in an experimental setting with five subjects suggest that accurate recognition of activities and environmental situations can be a reliable implicit service of future 5G installations.}, month={March}, group = {ambience}}
@article{kefer2017evaluating, title={Evaluating the Placement of Arm-Worn Devices for Recognizing Variations of Dynamic Hand Gestures.}, author={Kefer, Kathrin and Holzmann, Clemens and Findling, Rainhard Dieter}, journal={J. Mobile Multimedia}, volume={12}, number={3\&4}, pages={225--242}, year={2017}, group = {ambience}}
@inproceedings{fernandez2017mobile, title={Mobile Wrist Vein Authentication Using SIFT Features}, author={Fern{\'a}ndez, Pol and Findling, Rainhard Dieter}, booktitle={EUROCAST}, pages={140}, year={2017}, group = {ambience} } %%% 2016 %%%
@InProceedings{Wu_2016_globecom, author={Bowen Yang and Chao Wu and Stephan Sigg and Yaoxue Zhang}, title={CoCo (Context vs. Content): Behavior-Inspired Social Media Recommendation for Mobile Apps}, booktitle={2016 IEEE Global Communications Conference (Globecom)}, year={2016}, month={December}, abstract={Exponential growth of media generated in online social networks demands effective recommendation to improve the efficiency of media access especially for mobile users. In particular, content, objective quality or general popularity are less decisive for the prediction of user-click behavior than friendship-conditioned patterns. Existing recommender systems however, rarely consider user behavior in real-life. By means of a large-scale data-driven analysis over real-life mobile Twitter traces from 15,144 users over a period of one year, we reveal the importance of social closeness related behavior features. This paper proposes CoCo, the first-ever behavior-inspired mobile social media recommender system to improve the media access experience. CoCo exploits representative behavior features via a latent bias based machine learning approach. Our comprehensive evaluation through trace-driven emulations on the Android app exposes a superior accuracy of 72.3%, with a small additional daily energy consumption of 1.3% and a monthly data overhead of 9.1MB.}, group = {ambience}}
@inproceedings{findling2016mobile, title={Mobile gait match-on-card authentication from acceleration data with offline-simplified models}, author={Findling, Rainhard Dieter and H{\"o}lzl, Michael and Mayrhofer, Ren{\'e}}, booktitle={Proceedings of the 14th International Conference on Advances in Mobile Computing and Multi Media}, pages={250--260}, year={2016}, organization={ACM}, group = {ambience}}
@ARTICLE{Savazzi_2016_AAL, author={S. Savazzi and S. Sigg and M. Nicoli and V. Rampa and S. Kianoush and U. Spagnolini}, journal={IEEE Signal Processing Magazine}, title={Device-Free Radio Vision for Assisted Living: Leveraging wireless channel quality information for human sensing}, year={2016}, volume={33}, number={2}, pages={45-58}, abstract = {Wireless propagation is conventionally considered as the enabling tool for transporting information in digital communications. However, recent research has shown that the perturbations of the same electromagnetic (EM) fields that are adopted for data transmission can be used as a powerful sensing tool for device-free radio vision. Applications range from human body motion detection and localization to passive gesture recognition. In line with the current evolution of mobile phone sensing [1], radio terminals are not only ubiquitous communication interfaces, but they also incorporate novel or augmented sensing potential, capable of acquiring an accurate human-scale understanding of space and motion. This article shows how radio-frequency (RF) signals can be employed to provide a device-free environmental vision and investigates the detection and tracking capabilities for potential benefits in daily life.}, keywords={assisted living,object detection,EM fields,device-free environmental vision,device-free radio vision,human body motion detection,human sensing wireless propagation,passive gesture recognition}, doi={10.1109/MSP.2015.2496324}, ISSN={1053-5888}, month={March}, url_Paper={http://dx.doi.org/10.1109/MSP.2015.2496324}, group = {ambience}}
@InProceedings{Shi_2016_vtc, author={Shuyu Shi and Stephan Sigg and Yusheng Ji}, title={Probabilistic Fingerprinting Based Passive Device-free Localization from Channel State Information}, booktitle={Vehicular Technology Conference (vtc 2016-spring)}, year={2016}, month={May}, abstract={Given the ubiquitous distribution of electronic devices equipped with a radio frequency (RF) interface, researchers have shown great interest in analyzing signal fluctuation on this interface for environmental perception. A popular example is the enabling of indoor localization with RF signals. As an alternative to active device-based positioning, device-free passive (DfP) indoor localization has the advantage that the sensed individuals are not required to carry RF sensors. We propose a probabilistic fingerprinting-based technique for DfP indoor localization. Our system adopts CSI readings derived from off-the-shelf WiFi 802.11n wireless cards which can provide fine-grained subchannel measurements in the context of MIMO-OFDM PHY layer parameters. This complex channel information enables accurate localization of non-equipped individuals. Our scheme further boosts the localization efficiency by using principal component analysis (PCA) to identify the most relevant feature vectors. The experimental results demonstrate that our system can achieve an accuracy of over 92% and an error distance smaller than 0.5 m. We also investigate the effect of other parameters on the performance of our system, including packet transmission rate, the number of links, as well as the number of principal components.}, group = {ambience}}
@INPROCEEDINGS{Raja_2016_CoSDEO, author={Muneeba Raja and Stephan Sigg}, booktitle={2016 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)}, title={Applicability of RF-based methods for emotion recognition: A survey}, year={2016}, pages={1-6}, abstract={Human emotion recognition has attracted a lot of research in recent years. However, conventional methods for sensing human emotions are either expensive or privacy intrusive. In this paper, we explore a connection between emotion recognition and RF-based activity recognition that can lead to a novel ubiquitous emotion sensing technology. We discuss the latest literature from both domains, highlight the potential of body movements for accurate emotion detection and focus on how emotion recognition could be done using inexpensive, less privacy intrusive, device-free RF sensing methods. Applications include environment and crowd behaviour tracking in real time, assisted living, health monitoring, or also domestic appliance control. As a result of this survey, we propose RF-based device free recognition for emotion detection based on body movements. However, it requires overcoming challenges, such as accuracy, to outperform classical methods.}, keywords={Emotion recognition;Feature extraction;Mood;Sensors;Speech;Speech recognition;Tracking}, doi={10.1109/PERCOMW.2016.7457119}, month={March}, group = {ambience}}
@inproceedings{palipana2016channel, title={Channel state information based human presence detection using non-linear techniques}, author={Palipana, Sameera and Agrawal, Piyush and Pesch, Dirk}, booktitle={Proceedings of the 3rd ACM International Conference on Systems for Energy-Efficient Built Environments}, pages={177--186}, year={2016}, organization={ACM}, group = {ambience} }
@article{Liu_2016_Empty, title={Empty Scene and Non-empty Scene Detection from Wi-Sun Acceleration Sensor}, author={Liu, Zhi and Sigg, Stephan and Tsuda, Toshitaka and Watanabe, Hiroshi}, year={2016}, abstract = {Wireless sensor networks (WSNs) are widely deployed to help detect disasters such as landslides in countries like Japan due to their efficiency and low cost. However, these systems suffer from various problems, such as different kinds of noise (e.g., random visits from animals, which lead to unnecessary false alarms). Orthogonally, context-aware activity recognition has drawn increasing attention recently, where different environmental or martial sensors are adopted for activity recognition in various application scenarios. This paper studies a disaster detection system utilizing Wi-Sun acceleration sensors together with the Wi-Sun signal fluctuation and considers the detection of human beings in the scene of interest. Hence, the false alarms introduced by animals walking in could be detected using a similar method. During the detection process, not only the wireless signal but also the acceleration values collected by the sensors are adopted for a better detection result. Feature values are then calculated for event detection based on the data samples. K nearest neighbors (KNN) is used to classify the events in the scene of interest: empty and non-empty. The detection results are promising, as shown in the result section, and the proposed method is applicable to a real landslide setting to avoid animal-induced false alarms.}, url_Paper = {http://www.ams.giti.waseda.ac.jp/pdf-files/2016_IEICE_bs_03_027.pdf}, group = {ambience} } %%% 2015 %%%
@inproceedings{Nguyen:2015:BAR:2829875.2829930, author = {Le Ngu Nguyen and Rodr\'{\i}guez-Mart\'{\i}n, Daniel and Catal\`{a}, Andreu and P{\'e}rez-L\'{o}pez, Carlos and Sam\`{a}, Albert and Cavallaro, Andrea}, title = {Basketball Activity Recognition Using Wearable Inertial Measurement Units}, booktitle = {Proceedings of the XVI International Conference on Human Computer Interaction}, series = {Interacci{\'o}n '15}, year = {2015}, isbn = {978-1-4503-3463-1}, articleno = {60}, numpages = {6}, doi = {10.1145/2829875.2829930}, group = {ambience}}
@inproceedings{hintze2015confidence, title={Confidence and risk estimation plugins for multi-modal authentication on mobile devices using cormorant}, author={Hintze, Daniel and Muaaz, Muhammad and Findling, Rainhard D and Scholz, Sebastian and Koch, Eckhard and Mayrhofer, Ren{\'e}}, booktitle={Proceedings of the 13th International Conference on Advances in Mobile Computing and Multimedia}, pages={384--388}, year={2015}, organization={ACM}, group = {ambience}}
@inproceedings{hintze2015cormorant, title={Cormorant: towards continuous risk-aware multi-modal cross-device authentication}, author={Hintze, Daniel and Findling, Rainhard D and Muaaz, Muhammad and Koch, Eckhard and Mayrhofer, Ren{\'e}}, booktitle={Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers}, pages={169--172}, year={2015}, organization={ACM}, group = {ambience}}
@article{findling2015key, title={Key Code Recognition: Case Study of Automatically Deriving the Code of a Physical Key from Mobile Device Camera Images for the EVVA A key profile}, author={Findling, Rainhard Dieter}, year={2015}, group = {ambience}}
@inproceedings{findling2015towards, title={Towards device-to-user authentication: Protecting against phishing hardware by ensuring mobile device authenticity using vibration patterns}, author={Findling, Rainhard Dieter and Mayrhofer, Rene}, booktitle={Proceedings of the 14th International Conference on Mobile and Ubiquitous Multimedia}, pages={131--135}, year={2015}, organization={ACM}, group = {ambience}}
@article{DBLP:journals/corr/Nguyen15b, author = {Le Ngu Nguyen}, title = {Feature Learning for Interaction Activity Recognition in {RGBD} Videos}, journal = {CoRR}, volume = {abs/1508.02246}, year = {2015}, url = {http://arxiv.org/abs/1508.02246}, timestamp = {Tue, 01 Sep 2015 14:42:40 +0200}, biburl = {http://dblp.uni-trier.de/rec/bib/journals/corr/Nguyen15b}, bibsource = {dblp computer science bibliography, http://dblp.org}, group = {ambience}}
@inproceedings{Sigg_2015_ubicomp, author = {Sigg, Stephan}, title = {Contact-free Sensing for Collective Activity Recognition}, booktitle = {Adjunct Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers}, series = {UbiComp/ISWC'15 Adjunct}, year = {2015}, isbn = {978-1-4503-3575-1}, location = {Osaka, Japan}, pages = {885--886}, numpages = {2}, doi = {10.1145/2800835.2809504}, acmid = {2809504}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {contact-free sensing, device-free RF, sentiment sensing, smart city}, abstract={We are surrounded by a multitude of communicating sensing devices. Furnished with wearables to serve our very needs, we traverse sensor-rich environments of smart cities. Through new open standards and novel protocols this loose collection of devices increasingly evolves into a superorganism of wearables and environmental devices. All these devices share a single ubiquitous sensor type: the RF-interface. Ubiquitously available signals from e.g. FM-radio, WiFi or UMTS can be exploited as sensor for presence, location, crowd-size, activity or gestures. On-site training of these systems will soon be replaced by offline-raytracing techniques and recognition accuracies will further increase with the Channel State Information (CSI) on recent OFDM receivers. In contrast to RSSI, CSI features channel response information as a PHY layer power feature, revealing amplitudes and phases of each subcarrier. Exemplary, enabled by this superorganism of wireless devices, we envision the advance of sentiment sensing, smart city architectures as well as autonomous intelligent spaces. }, group = {ambience} }
@article{Sigg_2015_sensing, author = {Stephan Sigg and Kai Kunze and Xiaoming Fu}, title = {Recent Advances and Challenges in Ubiquitous Sensing}, journal = {CoRR}, volume = {abs/1503.04973}, year = {2015}, url = {http://arxiv.org/abs/1503.04973}, timestamp = {Thu, 09 Apr 2015 11:33:20 +0200}, biburl = {http://dblp.uni-trier.de/rec/bib/journals/corr/SiggKF15}, bibsource = {dblp computer science bibliography, http://dblp.org}, abstract= {Ubiquitous sensing is tightly coupled with activity recognition. This survey reviews recent advances in Ubiquitous sensing and looks ahead on promising future directions. In particular, Ubiquitous sensing crosses new barriers giving us new ways to interact with the environment or to inspect our psyche. Through sensing paradigms that parasitically utilise stimuli from the noise of environmental, third-party pre-installed systems, sensing leaves the boundaries of the personal domain. Compared to previous environmental sensing approaches, these new systems mitigate high installation and placement cost by providing a robustness towards process noise. On the other hand, sensing focuses inward and attempts to capture mental activities such as cognitive load, fatigue or emotion through advances in, for instance, eye-gaze sensing systems or interpretation of body gesture or pose. This survey summarises these developments and discusses current research questions and promising future directions. }, group = {ambience}}
@INPROCEEDINGS{Rauterberg_2015_demo, author={C. Rauterberg and S. Sigg and X. Fu}, booktitle={Pervasive Computing and Communication Workshops (PerCom Workshops), 2015 IEEE International Conference on}, title={Demo abstract: Use the force, Luke: Implementation of RF-based gesture interaction on an android phone}, year={2015}, pages={190-192}, abstract={Various approaches exist to detect gestures and movements via smartphones. Most of them, however, require that the smartphone is carried on-body. The absence of reliable ad-hoc on-line gesture detection from environmental sources inspired this project for on-line hand gesture detection on a smartphone using only WiFi RSSI. We highlight our line of work and explain problems at hand to provide information for possible future work. We will furthermore introduce wifiJedi, a smartphone application, that is able to detect movement in front of the smartphone by reading the WiFi RSSI and use this information to control a slideshow.}, keywords={RSSI;smart phones;user interfaces;wireless LAN;WiFi RSSI;environmental sources;on-line hand gesture detection;reliable ad-hoc on-line gesture detection;smartphones;Accuracy;Gesture recognition;IEEE 802.11 Standards;Receivers;Smart phones;Training}, doi={10.1109/PERCOMW.2015.7134018}, month={March}, group = {ambience}}
@article{Rauterberg_2015_force, title={Simply Use the Force}, author={Rauterberg, Christoph and Velten, Matthias and Sigg, Stephan and Fu, Xiaoming}, journal={IEEE/KuVS NetSys}, year={2015}, url_Paper = {https://www.netsys2015.com/wp-content/uploads/NetSys2015_Demo_Rauterberg.pdf}, group = {ambience} } %%% 2014 %%%
@Inbook{Quach2014, author="Quan Quach and Le Ngu Nguyen and Tien Dinh", editor="Huynh, Nam Van and Denoeux, Thierry and Tran, Hung Dang and Le, Cuong Anh and Pham, Bao Son", title="Secure Authentication for Mobile Devices Based on Acoustic Background Fingerprint", bookTitle="Knowledge and Systems Engineering: Proceedings of the Fifth International Conference KSE 2013, Volume 1", year="2014", publisher="Springer International Publishing", address="Cham", pages="375--387", isbn="978-3-319-02741-8", doi="10.1007/978-3-319-02741-8_32", url="http://dx.doi.org/10.1007/978-3-319-02741-8_32", group = {ambience}}
@inproceedings{mayrhofer2014optimal, title={Optimal derotation of shared acceleration time series by determining relative spatial alignment}, author={Mayrhofer, Rene and Hlavacs, Helmut and Findling, Rainhard Dieter}, booktitle={Proceedings of the 16th International Conference on Information Integration and Web-based Applications \& Services}, pages={71--78}, year={2014}, organization={ACM}, group = {ambience}}
@inproceedings{hintze2014diversity, title={Diversity in locked and unlocked mobile device usage}, author={Hintze, Daniel and Findling, Rainhard D and Muaaz, Muhammad and Scholz, Sebastian and Mayrhofer, Ren{\'e}}, booktitle={Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing: Adjunct Publication}, pages={379--384}, year={2014}, organization={ACM}, group = {ambience} }
@inproceedings{He_2014_crowdsensing, author={Jiyin He and Kai Kunze and Christoph Lofi and Sanjay K. Madria and Stephan Sigg}, title={Towards Mobile Sensor-Aware Crowdsourcing: Architecture, Opportunities and Challenges}, booktitle={UnCrowd 2014: DASFAA Workshop on Uncertain and Crowdsourced Data}, year={2014}, abstract={The recent success of general purpose crowdsourcing platforms like Amazon Mechanical Turk paved the way for a plethora of crowd-enabled applications and workflows. However, the variety of tasks which can be approached via such crowdsourcing platforms is limited by constraints of the web-based interface. In this paper, we propose mobile user interface clients. Switching to mobile clients has the potential to radically change the way crowdsourcing is performed, and allows for a new breed of crowdsourcing tasks. Here, especially the ability to tap into the wealth of precision sensors embedded in modern mobile hardware is a game changer. In this paper, we will discuss opportunities and challenges resulting from such a platform, and discuss a reference architecture}, doi={http://dx.doi.org/10.1007/978-3-662-43984-5_31}, group = {ambience}}
@article{mayrhofer2014determining, title={Determining Relative Spatial Alignment based on Shared Acceleration Time Series}, author={Mayrhofer, Rene and Hlavacs, Helmut and Findling, Rainhard Dieter}, year={2014}, group = {ambience}}
@inproceedings{hintze2014mobile, title={Mobile device usage characteristics: The effect of context and form factor on locked and unlocked usage}, author={Hintze, Daniel and Findling, Rainhard D and Scholz, Sebastian and Mayrhofer, Ren{\'e}}, booktitle={Proceedings of the 12th international conference on advances in mobile computing and multimedia}, pages={105--114}, year={2014}, organization={ACM}, group = {ambience}}
@inproceedings{findling2014shakeunlock, title={Shakeunlock: Securely unlock mobile devices by shaking them together}, author={Findling, Rainhard Dieter and Muaaz, Muhammad and Hintze, Daniel and Mayrhofer, Ren{\'e}}, booktitle={Proceedings of the 12th International Conference on Advances in Mobile Computing and Multimedia}, pages={165--174}, year={2014}, organization={ACM}, group = {ambience} }
@inproceedings{Sigg_2014_telepathic, author={Stephan Sigg and Ulf Blanke and Gerhard Troester}, title="The Telepathic Phone: Frictionless Activity Recognition from WiFi-RSSI", booktitle = {IEEE International Conference on Pervasive Computing and Communications (PerCom)}, series = {PerCom '14}, year={2014}, doi={http://dx.doi.org/10.1109/PerCom.2014.6813955}, abstract={We investigate the use of WiFi Received Signal Strength Information (RSSI) at a mobile phone for the recognition of situations, activities and gestures. In particular, we propose a device-free and passive activity recognition system that does not require any device carried by the user and uses ambient signals. We discuss challenges and lessons learned for the design of such a system on a mobile phone and propose appropriate features to extract activity characteristics from RSSI. We demonstrate the feasibility of recognising activities, gestures and environmental situations from RSSI obtained by a mobile phone. The case studies were conducted over a period of about two months in which about 12 hours of continuous RSSI data was sampled, in two countries and with 11 participants in total. Results demonstrate the potential to utilise RSSI for the extension of the environmental perception of a mobile device as well as for the interaction with touch-free gestures. The system achieves an accuracy of 0.51 while distinguishing as many as 11 gestures and can reach 0.72 on average for four more disparate ones.}, url_Slides={http://www.stephansigg.de/stephan/slides/Talk_PerCom_1403_Sigg-small.pdf}, group = {ambience}}
@article{Sigg_2014_teach, author={Stephan Sigg and Shuyu Shi and Yusheng Ji}, title={Teach your WiFi-Device: Recognise Gestures and Simultaneous Activities from Time-Domain RF-Features}, journal={International Journal on Ambient Computing and Intelligence (IJACI)}, volume={6}, number={1}, year={2014}, doi={http://dx.doi.org/10.4018/ijaci.2014010102}, abstract={The authors consider two untackled problems in RF-based activity recognition: the distinction of simultaneously conducted activities of individuals and the recognition of gestures from purely time-domain-based features. Recognition is based on a single antenna system. This is important for the application in end-user devices which are usually single-antenna systems and have seldom access to more sophisticated, e.g. frequency-based features. In case studies with software defined radio nodes utilised in an active, device-free activity recognition DFAR system, the authors observe a good recognition accuracy for the detection of multiple simultaneously conducted activities with two and more receive devices. Four gestures and two baseline situations are distinguished with good accuracy in a second case study.}, group = {ambience}}
@article{Shi_2014_attention, author={Shuyu Shi and Stephan Sigg and Wei Zhao and Yusheng Ji}, title= {Monitoring of Attention from Ambient FM-radio Signals}, month={Jan-March}, journal={IEEE Pervasive Computing, Special Issue - Managing Attention in Pervasive Environments}, year={2014}, doi={http://dx.doi.org/10.1109/MPRV.2014.13}, abstract={The authors investigate the classification of FM radio signal fluctuation for monitoring the attention of individuals moving toward a static object. In particular, they distinguish between an empty and populated corridor and, for the latter, determine whether the individuals are moving or standing still and whether they're directing their attention toward a particular poster. This information can provide some hint whether a person is paying attention to a specific poster as well as the location of the particular poster. This article is part of a special issue on managing attention.}, group = {ambience}}
@Article{Sigg_2014_beamforming, title = {A fast binary feedback-based distributed adaptive carrier synchronisation for transmission among clusters of disconnected IoT nodes in smart spaces}, journal = {Ad Hoc Netw.}, author = {Stephan Sigg}, year = {2014}, doi = {http://dx.doi.org/10.1016/j.adhoc.2013.12.006}, abstract = {We propose a transmission scheme among groups of disconnected IoT devices in a smart space. In particular, we propose the use of a local random search implementation to speed up the synchronisation of carriers for distributed adaptive transmit beamforming. We achieve a sharp bound on the asymptotic carrier synchronisation time which is significantly lower than for previously proposed carrier synchronisation processes. Also, we consider the impact of environmental conditions in smart spaces on this synchronisation process in simulations and a case study.}, keywords = {Smart spaces, Transmission scheme, Collaborative, Sensor networks, Carrier synchronisation}, group = {ambience} }
@inproceedings{Sigg_2014_social, author = {Sigg, Stephan and Fu, Xiaoming}, title = {Social Opportunistic Sensing and Social Centric Networking: Enabling Technology for Smart Cities}, booktitle = {Proceedings of the 2014 ACM International Workshop on Wireless and Mobile Technologies for Smart Cities}, series = {WiMobCity '14}, year = {2014}, isbn = {978-1-4503-3036-7}, location = {Philadelphia, Pennsylvania, USA}, pages = {83--90}, numpages = {8}, doi = {http://dx.doi.org/10.1145/2633661.2633674}, address = {New York, NY, USA}, keywords = {context centric networking, crowdsourcing, environmental sensing, participatory sensing, smart cities}, abstract={In recent years, with tremendous advances in areas like mobile devices, algorithms for distributed systems, communication technology or protocols, all basic technological pieces to realise a Smart City are at hand. Missing, however, is a mechanism that bridges these pieces to ease the creation of Smart Cities at a larger scale. In this visionary paper, we discuss challenges of Smart Cities and propose enabling technologies to bridge the above mentioned pieces for their actual realisation. In particular, we introduce the concepts of Social Opportunistic Sensing (SOS) and Social Centric Networking (SCN). While the former is an enabling technology to interconnect all parties in a Smart City, the latter has the potential to enhance offline social networks in Internet of Things (IoT) enhanced Smart Cities by connecting individuals based on their automatically updated profile via context-based routing.}, group = {ambience}}
@inproceedings{palipana2014scalable, title={Scalable and Self-sustained Algorithms for Femto-Cell Interference Mitigation}, author={Palipana, Sameera and Zaki, Yasir and Toseef, Umar and Chen, Jay and Goerg, Carmelita}, booktitle={International Conference on Mobile Networks and Management}, pages={3--17}, year={2014}, organization={Springer}, group = {ambience}} %%% 2013 %%%
@ARTICLE{Sigg_2014_RFsensing, author={S. Sigg and M. Scholz and S. Shi and Y. Ji and M. Beigl}, journal={IEEE Transactions on Mobile Computing}, title={RF-Sensing of Activities from Non-Cooperative Subjects in Device-Free Recognition Systems Using Ambient and Local Signals}, year={2014}, volume={13}, number={4}, pages={907-920}, abstract={We consider the detection of activities from non-cooperating individuals with features obtained on the radio frequency channel. Since environmental changes impact the transmission channel between devices, the detection of this alteration can be used to classify environmental situations. We identify relevant features to detect activities of non-actively transmitting subjects. In particular, we distinguish with high accuracy an empty environment or a walking, lying, crawling or standing person, in case-studies of an active, device-free activity recognition system with software defined radios. We distinguish between two cases in which the transmitter is either under the control of the system or ambient. For activity detection the application of one-stage and two-stage classifiers is considered. Apart from the discrimination of the above activities, we can show that a detected activity can also be localized simultaneously within an area of less than 1 meter radius.}, keywords={radio transmitters;signal detection;software radio;RF-sensing;activity detection;ambient signals;device-free recognition systems;local signals;noncooperative subjects;one-stage classifier;radio frequency channel;radio transmitter;software defined radio;two-stage classifier;Accuracy;Feature extraction;Monitoring;Radio transmitters;Sensors;Transceivers;Wireless communication;Pervasive computing;and processing;location-dependent and sensitive;signal analysis;signal processing;synthesis}, doi={http://dx.doi.org/10.1109/TMC.2013.28}, ISSN={1536-1233}, month={April}, group = {ambience}}
@inproceedings{Prediction_2013_David, title={2nd workshop on recent advances in behavior prediction and pro-active pervasive computing}, author={David, Klaus and Klein, Bernd Niklas and Lau, Sian Lun and Sigg, Stephan and Ziebart, Brian}, booktitle={Proceedings of the 2013 ACM conference on Pervasive and ubiquitous computing adjunct publication}, pages={435--440}, year={2013}, organization={ACM}, group = {ambience}}
@inproceedings{findling2013range, title={Range face segmentation: Face detection and segmentation for authentication in mobile device range images}, author={Findling, Rainhard D and Wenny, Fabian and Holzmann, Clemens and Mayrhofer, Ren{\'e}}, booktitle={Proceedings of International Conference on Advances in Mobile Computing \& Multimedia}, pages={260}, year={2013}, organization={ACM}, group = {ambience}}
@article{dieter2013towards, title={Towards pan shot face unlock: Using biometric face information from different perspectives to unlock mobile devices}, author={Dieter Findling, Rainhard and Mayrhofer, Rene}, journal={International Journal of Pervasive Computing and Communications}, volume={9}, number={3}, pages={190--208}, year={2013}, publisher={Emerald Group Publishing Limited}, group = {ambience}}
@inproceedings{findling2013towards, title={Towards secure personal device unlock using stereo camera pan shots}, author={Findling, Rainhard D and Mayrhofer, Rene}, booktitle={International Conference on Computer Aided Systems Theory}, pages={417--425}, year={2013}, organization={Springer}, group = {ambience} }
@inproceedings{Pervasive_Sigg_2014, author = {Stephan Sigg and Mario Hock and Markus Scholz and Gerhard Troester and Lars Wolf and Yusheng Ji and Michael Beigl}, title = {Passive, device-free recognition on your mobile phone: tools, features and a case study}, booktitle = {Proceedings of the 10th International Conference on Mobile and Ubiquitous Systems: Computing, Networking and Services}, year = {2013}, group = {ambience}}
@inproceedings{DeviceFreeRecognition_Shi_2013c, author="Shuyu Shi and Stephan Sigg and Yusheng Ji", title={A comparison of two approaches to activity recognition of individuals in an indoor environment}, booktitle={2013 IEICE General Conference}, year={2013}, group = {ambience}}
@InProceedings{ Pervasive_Sigg_2013, author = "Stephan Sigg and Shuyu Shi and Felix Buesching and Yusheng Ji and Lars Wolf", title = "Leveraging RF-channel fluctuation for activity recognition", booktitle = "Proceedings of the 11th International Conference on Advances in Mobile Computing and Multimedia (MoMM2013)", year = "2013", group = {ambience} }
@inproceedings{udugama2013analytical, title={Analytical characterisation of multi-path content delivery in content centric networks}, author={Udugama, Asanga and Palipana, Sameera and Goerg, Carmelita}, booktitle={2013 Conference on Future Internet Communications (CFIC)}, pages={1--7}, year={2013}, organization={IEEE}, group = {ambience} }
@article{Pervasive_Scholz_2013b, author={Markus Scholz and Dawud Gordon and Leonardo Ramirez and Stephan Sigg and Tobias Dyrks and Michael Beigl}, title={A Concept for Support of Firefighter Frontline Communication}, journal={Future Internet}, volume={5}, number={2}, pages={113-127}, year={2013}, group = {ambience}}
@Article{ Cryptography_Sigg_2012, author = "Dominik Schuermann and Stephan Sigg", title = "Secure communication based on ambient audio", journal = "IEEE Transactions on Mobile Computing", year = "2013", volume = "12", number = "2", doi = "10.1109/TMC.2011.271", group = {ambience}} %%% 2012 %%%
@ARTICLE{4027, author={Sigg, Stephan and Gordon, Dawud and Zengen, Georg von and Beigl, Michael and Haseloff, Sandra and David, Klaus}, journal={IEEE Transactions on Mobile Computing}, title={Investigation of Context Prediction Accuracy for Different Context Abstraction Levels}, year={2012}, month={june}, volume={11}, number={6}, pages={1047 -1059}, group = {ambience}}
@INPROCEEDINGS{FunctionComputation_Sigg_2012, author={Stephan Sigg and Predrag Jakimovski and Michael Beigl}, booktitle={3rd International Conference on the Internet of Things (IOT)}, title={Calculation of functions on the RF-channel for IoT}, year={2012}, pages={107-113}, group = {ambience}}