@conference {1492, title = {How Routing Strategies Impact Urban Emissions}, booktitle = {Proceedings of the 30th International Conference on Advances in Geographic Information Systems}, year = {2022}, publisher = {Association for Computing Machinery}, organization = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {Navigation apps use routing algorithms to suggest the best path to reach a user{\textquoteright}s desired destination. Although undoubtedly useful, navigation apps{\textquoteright} impact on the urban environment (e.g., CO2 emissions and pollution) is still largely unclear. In this work, we design a simulation framework to assess the impact of routing algorithms on carbon dioxide emissions within an urban environment. Using APIs from TomTom and OpenStreetMap, we find that settings in which either all vehicles or none of them follow a navigation app{\textquoteright}s suggestion lead to the worst impact in terms of CO2 emissions. In contrast, when just a portion (around half) of vehicles follow these suggestions, and some degree of randomness is added to the remaining vehicles{\textquoteright} paths, we observe a reduction in the overall CO2 emissions over the road network. Our work is a first step towards designing next-generation routing principles that may increase urban well-being while satisfying individual needs.}, isbn = {9781450395298}, doi = {10.1145/3557915.3560977}, url = {https://doi.org/10.1145/3557915.3560977}, author = {Cornacchia, Giuliano and B{\"o}hm, Matteo and Giovanni Mauro and Mirco Nanni and Dino Pedreschi and Luca Pappalardo} } @article {1489, title = {Methods and tools for causal discovery and causal inference}, journal = {WIREs Data Mining Knowl. Discov.}, volume = {12}, number = {2}, year = {2022}, abstract = {Causality is a complex concept, which roots its developments across several fields, such as statistics, economics, epidemiology, computer science, and philosophy. 
In recent years, the study of causal relationships has become a crucial part of the Artificial Intelligence community, as causality can be a key tool for overcoming some limitations of correlation-based Machine Learning systems. Causality research can generally be divided into two main branches, that is, causal discovery and causal inference. The former focuses on obtaining causal knowledge directly from observational data. The latter aims to estimate the impact deriving from a change of a certain variable over an outcome of interest. This article aims at covering several methodologies that have been developed for both tasks. This survey does not only focus on theoretical aspects. But also provides a practical toolkit for interested researchers and practitioners, including software, datasets, and running examples.}, author = {Ana Rita Nogueira and Andrea Pugnana and Salvatore Ruggieri and Dino Pedreschi and Jo{\~a}o Gama} } @proceedings {1498, title = {Semantic Enrichment of XAI Explanations for Healthcare}, year = {2022}, abstract = {Explaining black-box models decisions is crucial to increase doctors{\textquoteright} trust in AI-based clinical decision support systems. However, current eXplainable Artificial Intelligence (XAI) techniques usually provide explanations that are not easily understandable by experts outside of AI. Furthermore, most of the them produce explanations that consider only the input features of the algorithm. However, broader information about the clinical context of a patient is usually available even if not processed by the AI-based clinical decision support system for its decision. Enriching the explanations with relevant clinical information concerning the health status of a patient would increase the ability of human experts to assess the reliability of the AI decision. Therefore, in this paper we present a methodology that aims to enable clinical reasoning by semantically enriching AI explanations. 
Starting from a medical AI explanation based only on the input features provided to the algorithm, our methodology leverages medical ontologies and NLP embedding techniques to link relevant information present in the patient{\textquoteright}s clinical notes to the original explanation. We validate our methodology with two experiments involving a human expert. Our results highlight promising performance in correctly identifying relevant information about the diseases of the patients, in particular about the associated morphology. This suggests that the presented methodology could be a first step toward developing a natural language explanation of AI decision support systems.}, author = {Corbucci, Luca and Anna Monreale and Cecilia Panigutti and Michela Natilli and Smiraglio, Simona and Dino Pedreschi} } @article {1476, title = {Benchmarking and Survey of Explanation Methods for Black Box Models}, journal = {CoRR}, volume = {abs/2102.13076}, year = {2021}, url = {https://arxiv.org/abs/2102.13076}, author = {Francesco Bodria and Fosca Giannotti and Riccardo Guidotti and Francesca Naretto and Dino Pedreschi and S Rinzivillo} } @article {1400, title = {Give more data, awareness and control to individual citizens, and they will help COVID-19 containment}, year = {2021}, month = {2021/02/02}, abstract = {The rapid dynamics of COVID-19 calls for quick and effective tracking of virus transmission chains and early detection of outbreaks, especially in the {\textquotedblleft}phase 2{\textquotedblright} of the pandemic, when lockdown and other restriction measures are progressively withdrawn, in order to avoid or minimize contagion resurgence. For this purpose, contact-tracing apps are being proposed for large scale adoption by many countries. 
A centralized approach, where data sensed by the app are all sent to a nation-wide server, raises concerns about citizens{\textquoteright} privacy and needlessly strong digital surveillance, thus alerting us to the need to minimize personal data collection and avoiding location tracking. We advocate the conceptual advantage of a decentralized approach, where both contact and location data are collected exclusively in individual citizens{\textquoteright} {\textquotedblleft}personal data stores{\textquotedblright}, to be shared separately and selectively (e.g., with a backend system, but possibly also with other citizens), voluntarily, only when the citizen has tested positive for COVID-19, and with a privacy preserving level of granularity. This approach better protects the personal sphere of citizens and affords multiple benefits: it allows for detailed information gathering for infected people in a privacy-preserving fashion; and, in turn this enables both contact tracing, and, the early detection of outbreak hotspots on more finely-granulated geographic scale. The decentralized approach is also scalable to large populations, in that only the data of positive patients need be handled at a central level. Our recommendation is two-fold. First to extend existing decentralized architectures with a light touch, in order to manage the collection of location data locally on the device, and allow the user to share spatio-temporal aggregates{\textemdash}if and when they want and for specific aims{\textemdash}with health authorities, for instance. 
Second, we favour a longer-term pursuit of realizing a Personal Data Store vision, giving users the opportunity to contribute to collective good in the measure they want, enhancing self-awareness, and cultivating collective efforts for rebuilding society.}, isbn = {1572-8439}, doi = {https://doi.org/10.1007/s10676-020-09572-w}, url = {https://link.springer.com/article/10.1007/s10676-020-09572-w}, author = {Mirco Nanni and Andrienko, Gennady and Barabasi, Albert-Laszlo and Boldrini, Chiara and Bonchi, Francesco and Cattuto, Ciro and Chiaromonte, Francesca and Comand{\'e}, Giovanni and Conti, Marco and Cot{\'e}, Mark and Dignum, Frank and Dignum, Virginia and Domingo-Ferrer, Josep and Ferragina, Paolo and Fosca Giannotti and Riccardo Guidotti and Helbing, Dirk and Kaski, Kimmo and Kert{\'e}sz, J{\'a}nos and Lehmann, Sune and Lepri, Bruno and Lukowicz, Paul and Matwin, Stan and Jim{\'e}nez, David Meg{\'\i}as and Anna Monreale and Morik, Katharina and Oliver, Nuria and Passarella, Andrea and Passerini, Andrea and Dino Pedreschi and Pentland, Alex and Pianesi, Fabio and Francesca Pratesi and S Rinzivillo and Salvatore Ruggieri and Siebes, Arno and Torra, Vicenc and Roberto Trasarti and Hoven, Jeroen van den and Vespignani, Alessandro} } @article {1401, title = {GLocalX - From Local to Global Explanations of Black Box AI Models}, volume = {294}, year = {2021}, month = {2021/05/01/}, pages = {103457}, abstract = {Artificial Intelligence (AI) has come to prominence as one of the major components of our society, with applications in most aspects of our lives. In this field, complex and highly nonlinear machine learning models such as ensemble models, deep neural networks, and Support Vector Machines have consistently shown remarkable accuracy in solving complex tasks. Although accurate, AI models often are {\textquotedblleft}black boxes{\textquotedblright} which we are not able to understand. 
Relying on these models has a multifaceted impact and raises significant concerns about their transparency. Applications in sensitive and critical domains are a strong motivational factor in trying to understand the behavior of black boxes. We propose to address this issue by providing an interpretable layer on top of black box models by aggregating {\textquotedblleft}local{\textquotedblright} explanations. We present GLocalX, a {\textquotedblleft}local-first{\textquotedblright} model agnostic explanation method. Starting from local explanations expressed in form of local decision rules, GLocalX iteratively generalizes them into global explanations by hierarchically aggregating them. Our goal is to learn accurate yet simple interpretable models to emulate the given black box, and, if possible, replace it entirely. We validate GLocalX in a set of experiments in standard and constrained settings with limited or no access to either data or local explanations. Experiments show that GLocalX is able to accurately emulate several models with simple and small models, reaching state-of-the-art performance against natively global solutions. Our findings show how it is often possible to achieve a high level of both accuracy and comprehensibility of classification models, even in complex domains with high-dimensional data, without necessarily trading one property for the other. 
This is a key requirement for a trustworthy AI, necessary for adoption in high-stakes decision making applications.}, isbn = {0004-3702}, doi = {https://doi.org/10.1016/j.artint.2021.103457}, url = {https://www.sciencedirect.com/science/article/pii/S0004370221000084}, author = {Mattia Setzu and Riccardo Guidotti and Anna Monreale and Franco Turini and Dino Pedreschi and Fosca Giannotti} } @article {1435, title = {Introduction to the special issue on social mining and big data ecosystem for open, responsible data science}, year = {2021}, month = {2021/03/05}, isbn = {2364-4168}, doi = {https://link.springer.com/article/10.1007/s41060-021-00253-5}, url = {https://doi.org/10.1007/s41060-021-00253-5}, author = {Luca Pappalardo and Grossi, Valerio and Dino Pedreschi} } @conference {1408, title = {Analysis and Visualization of Performance Indicators in University Admission Tests}, booktitle = {Formal Methods. FM 2019 International Workshops}, year = {2020}, month = {2020//}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Cham}, abstract = {This paper presents an analytical platform for evaluation of the performance and anomaly detection of tests for admission to public universities in Italy. Each test is personalized for each student and is composed of a series of questions, classified on different domains (e.g. maths, science, logic, etc.). Since each test is unique for composition, it is crucial to guarantee a similar level of difficulty for all the tests in a session. For this reason, to each question, it is assigned a level of difficulty from a domain expert. Thus, the general difficultness of a test depends on the correct classification of each item. We propose two approaches to detect outliers. A visualization-based approach using dynamic filter and responsive visual widgets. A data mining approach to evaluate the performance of the different questions for five years. 
We used clustering to group the questions according to a set of performance indicators to provide labeling of the data-driven level of difficulty. The measured level is compared with the a priori assigned by experts. The misclassifications are then highlighted to the expert, who will be able to refine the question or the classification. Sequential pattern mining is used to check if biases are present in the composition of the tests and their performance. This analysis is meant to exclude overlaps or direct dependencies among questions. Analyzing co-occurrences we are able to state that the composition of each test is fair and uniform for all the students, even on several sessions. The analytical results are presented to the expert through a visual web application that loads the analytical data and indicators and composes an interactive dashboard. The user may explore the patterns and models extracted by filtering and changing thresholds and analytical parameters.}, isbn = {978-3-030-54994-7}, doi = {https://doi.org/10.1007/978-3-030-54994-7_14}, url = {https://link.springer.com/chapter/10.1007/978-3-030-54994-7_14}, author = {Michela Natilli and Daniele Fadda and S Rinzivillo and Dino Pedreschi and Licari, Federica}, editor = {Sekerinski, Emil and Moreira, Nelma and Oliveira, Jos{\'e} N. 
and Ratiu, Daniel and Riccardo Guidotti and Farrell, Marie and Luckcuck, Matt and Marmsoler, Diego and Campos, Jos{\'e} and Astarte, Troy and Gonnord, Laure and Cerone, Antonio and Couto, Luis and Dongol, Brijesh and Kutrib, Martin and Monteiro, Pedro and Delmas, David} } @article {1336, title = {Artificial Intelligence (AI): new developments and innovations applied to e-commerce}, year = {2020}, month = {05/2020}, institution = {European Parliament{\textquoteright}s committee on the Internal Market and Consumer Protection}, abstract = {This in-depth analysis discusses the opportunities and challenges brought by the recent and the foreseeable developments of Artificial Intelligence into online platforms and marketplaces. The paper advocates the importance to support tustworthy, explainable AI (in order to fight discrimination and manipulation, and empower citizens), and societal-aware AI (in order to fight polarisation, monopolistic concentration and excessive inequality, and pursue diversity and openness). This document was provided by the Policy Department for Economic, Scientific and Quality of Life Policies at the request of the committee on the Internal Market and Consumer Protection (IMCO).}, url = {https://www.europarl.europa.eu/thinktank/en/document.html?reference=IPOL_IDA(2020)648791}, author = {Dino Pedreschi and Ioanna Miliou} } @article {1344, title = {Authenticated Outlier Mining for Outsourced Databases}, journal = {IEEE Transactions on Dependable and Secure Computing}, volume = {17}, year = {2020}, month = {Jan-03-2020}, pages = {222 - 235}, abstract = {The Data-Mining-as-a-Service (DMaS) paradigm is becoming the focus of research, as it allows the data owner (client) who lacks expertise and/or computational resources to outsource their data and mining needs to a third-party service provider (server). 
Outsourcing, however, raises some issues about result integrity: how could the client verify the mining results returned by the server are both sound and complete? In this paper, we focus on outlier mining, an important mining task. Previous verification techniques use an authenticated data structure (ADS) for correctness authentication, which may incur much space and communication cost. In this paper, we propose a novel solution that returns a probabilistic result integrity guarantee with much cheaper verification cost. The key idea is to insert a set of artificial records (ARs) into the dataset, from which it constructs a set of artificial outliers (AOs) and artificial non-outliers (ANOs). The AOs and ANOs are used by the client to detect any incomplete and/or incorrect mining results with a probabilistic guarantee. The main challenge that we address is how to construct ARs so that they do not change the (non-)outlierness of original records, while guaranteeing that the client can identify ANOs and AOs without executing mining. Furthermore, we build a strategic game and show that a Nash equilibrium exists only when the server returns correct outliers. 
Our implementation and experiments demonstrate that our verification solution is efficient and lightweight.}, issn = {1545-5971}, doi = {10.1109/TDSC.885810.1109/TDSC.2017.2754493}, url = {https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=8858https://ieeexplore.ieee.org/document/8048342/http://xplorestaging.ieee.org/ielx7/8858/9034462/08048342.pdf?arnumber=8048342https://ieeexplore.ieee.org/ielam/8858/9034462/8048342-aam.pdf}, author = {Dong, Boxiang and Wang, Hui and Anna Monreale and Dino Pedreschi and Fosca Giannotti and Guo, Wenge} } @conference {1406, title = {Black Box Explanation by Learning Image Exemplars in the Latent Feature Space}, booktitle = {Machine Learning and Knowledge Discovery in Databases}, year = {2020}, month = {2020//}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Cham}, abstract = {We present an approach to explain the decisions of black box models for image classification. While using the black box to label images, our explanation method exploits the latent feature space learned through an adversarial autoencoder. The proposed method first generates exemplar images in the latent feature space and learns a decision tree classifier. Then, it selects and decodes exemplars respecting local decision rules. Finally, it visualizes them in a manner that shows to the user how the exemplars can be modified to either stay within their class, or to become counter-factuals by {\textquotedblleft}morphing{\textquotedblright} into another class. Since we focus on black box decision systems for image classification, the explanation obtained from the exemplars also provides a saliency map highlighting the areas of the image that contribute to its classification, and areas of the image that push it into another class. We present the results of an experimental evaluation on three datasets and two black box models. 
Besides providing the most useful and interpretable explanations, we show that the proposed method outperforms existing explainers in terms of fidelity, relevance, coherence, and stability.}, isbn = {978-3-030-46150-8}, doi = {https://doi.org/10.1007/978-3-030-46150-8_12}, url = {https://link.springer.com/chapter/10.1007/978-3-030-46150-8_12}, author = {Riccardo Guidotti and Anna Monreale and Matwin, Stan and Dino Pedreschi}, editor = {Brefeld, Ulf and Fromont, Elisa and Hotho, Andreas and Knobbe, Arno and Maathuis, Marloes and Robardet, C{\'e}line} } @article {1423, title = {Causal inference for social discrimination reasoning}, volume = {54}, year = {2020}, month = {2020/04/01}, pages = {425 - 437}, abstract = {The discovery of discriminatory bias in human or automated decision making is a task of increasing importance and difficulty, exacerbated by the pervasive use of machine learning and data mining. Currently, discrimination discovery largely relies upon correlation analysis of decisions records, disregarding the impact of confounding biases. We present a method for causal discrimination discovery based on propensity score analysis, a statistical tool for filtering out the effect of confounding variables. We introduce causal measures of discrimination which quantify the effect of group membership on the decisions, and highlight causal discrimination/favoritism patterns by learning regression trees over the novel measures. We validate our approach on two real world datasets. 
Our proposed framework for causal discrimination has the potential to enhance the transparency of machine learning with tools for detecting discriminatory bias both in the training data and in the learning algorithms.}, isbn = {1573-7675}, doi = {https://doi.org/10.1007/s10844-019-00580-x}, url = {https://link.springer.com/article/10.1007/s10844-019-00580-x}, author = {Qureshi, Bilal and Kamiran, Faisal and Karim, Asim and Salvatore Ruggieri and Dino Pedreschi} } @conference {1285, title = {Doctor XAI: an ontology-based approach to black-box sequential data classification explanations}, booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, year = {2020}, abstract = {Several recent advancements in Machine Learning involve black-box models: algorithms that do not provide human-understandable explanations in support of their decisions. This limitation hampers the fairness, accountability and transparency of these models; the field of eXplainable Artificial Intelligence (XAI) tries to solve this problem providing human-understandable explanations for black-box models. However, healthcare datasets (and the related learning tasks) often present peculiar features, such as sequential data, multi-label predictions, and links to structured background knowledge. In this paper, we introduce Doctor XAI, a model-agnostic explainability technique able to deal with multi-labeled, sequential, ontology-linked data. We focus on explaining Doctor AI, a multilabel classifier which takes as input the clinical history of a patient in order to predict the next visit. 
Furthermore, we show how exploiting the temporal dimension in the data and the domain knowledge encoded in the medical ontology improves the quality of the mined explanations.}, doi = {10.1145/3351095.3372855}, url = {https://dl.acm.org/doi/pdf/10.1145/3351095.3372855?download=true}, author = {Cecilia Panigutti and Perotti, Alan and Dino Pedreschi} } @article {1403, title = {Error Estimation of Ultra-Short Heart Rate Variability Parameters: Effect of Missing Data Caused by Motion Artifacts}, journal = {Sensors}, volume = {20}, number = {24}, year = {2020}, pages = {7122}, abstract = {Application of ultra\–short Heart Rate Variability (HRV) is desirable in order to increase the applicability of HRV features to wrist-worn wearable devices equipped with heart rate sensors that are nowadays becoming more and more popular in people\’s daily life. This study is focused in particular on the the two most used HRV parameters, i.e., the standard deviation of inter-beat intervals (SDNN) and the root Mean Squared error of successive inter-beat intervals differences (rMSSD). The huge problem of extracting these HRV parameters from wrist-worn devices is that their data are affected by the motion artifacts. For this reason, estimating the error caused by this huge quantity of missing values is fundamental to obtain reliable HRV parameters from these devices. To this aim, we simulate missing values induced by motion artifacts (from 0 to 70\%) in an ultra-short time window (i.e., from 4 min to 30 s) by the random walk Gilbert burst model in 22 young healthy subjects. In addition, 30 s and 2 min ultra-short time windows are required to estimate rMSSD and SDNN, respectively. Moreover, due to the fact that ultra-short time window does not permit assessing very low frequencies, and the SDNN is highly affected by these frequencies, the bias for estimating SDNN continues to increase as the time window length decreases. 
On the contrary, a small error is detected in rMSSD up to 30 s due to the fact that it is highly affected by high frequencies which are possible to be evaluated even if the time window length decreases. Finally, the missing values have a small effect on rMSSD and SDNN estimation. As a matter of fact, the HRV parameter errors increase slightly as the percentage of missing values increase.}, issn = {1424-8220}, doi = {10.3390/s20247122}, url = {https://www.mdpi.com/1424-8220/20/24/7122}, author = {Alessio Rossi and Dino Pedreschi and Clifton, David A. and Morelli, Davide} } @article {1405, title = {An ethico-legal framework for social data science}, year = {2020}, month = {2020/03/31}, abstract = {This paper presents a framework for research infrastructures enabling ethically sensitive and legally compliant data science in Europe. Our goal is to describe how to design and implement an open platform for big data social science, including, in particular, personal data. To this end, we discuss a number of infrastructural, organizational and methodological principles to be developed for a concrete implementation. These include not only systematically tools and methodologies that effectively enable both the empirical evaluation of the privacy risk and data transformations by using privacy-preserving approaches, but also the development of training materials (a massive open online course) and organizational instruments based on legal and ethical principles. 
This paper provides, by way of example, the implementation that was adopted within the context of the SoBigData Research Infrastructure.}, isbn = {2364-4168}, doi = {https://doi.org/10.1007/s41060-020-00211-7}, url = {https://link.springer.com/article/10.1007/s41060-020-00211-7}, author = {Forg{\'o}, Nikolaus and H{\"a}nold, Stefanie and van~den Hoven, Jeroen and Kr{\"u}gel, Tina and Lishchuk, Iryna and Mahieu, Ren{\'e} and Anna Monreale and Dino Pedreschi and Francesca Pratesi and van Putten, David} } @article {1404, title = {Human migration: the big data perspective}, journal = {International Journal of Data Science and Analytics}, year = {2020}, month = {2020/03/23}, pages = {1{\textendash}20}, abstract = {How can big data help to understand the migration phenomenon? In this paper, we try to answer this question through an analysis of various phases of migration, comparing traditional and novel data sources and models at each phase. We concentrate on three phases of migration, at each phase describing the state of the art and recent developments and ideas. The first phase includes the journey, and we study migration flows and stocks, providing examples where big data can have an impact. The second phase discusses the stay, i.e. migrant integration in the destination country. We explore various data sets and models that can be used to quantify and understand migrant integration, with the final aim of providing the basis for the construction of a novel multi-level integration index. 
The last phase is related to the effects of migration on the source countries and the return of migrants.}, isbn = {2364-4168}, doi = {https://doi.org/10.1007/s41060-020-00213-5}, url = {https://link.springer.com/article/10.1007\%2Fs41060-020-00213-5}, author = {Alina Sirbu and Andrienko, Gennady and Andrienko, Natalia and Boldrini, Chiara and Conti, Marco and Fosca Giannotti and Riccardo Guidotti and Bertoli, Simone and Jisu Kim and Muntean, Cristina Ioana and Luca Pappalardo and Passarella, Andrea and Dino Pedreschi and Pollacci, Laura and Francesca Pratesi and Sharma, Rajesh} } @booklet {1425, title = {Mobile phone data analytics against the COVID-19 epidemics in Italy: flow diversity and local job markets during the national lockdown}, year = {2020}, abstract = {Understanding collective mobility patterns is crucial to plan the restart of production and economic activities, which are currently put in stand-by to fight the diffusion of the epidemics. In this report, we use mobile phone data to infer the movements of people between Italian provinces and municipalities, and we analyze the incoming, outcoming and internal mobility flows before and during the national lockdown (March 9th, 2020) and after the closure of non-necessary productive and economic activities (March 23th, 2020). The population flow across provinces and municipalities enable for the modelling of a risk index tailored for the mobility of each municipality or province. Such an index would be a useful indicator to drive counter-measures in reaction to a sudden reactivation of the epidemics. Mobile phone data, even when aggregated to preserve the privacy of individuals, are a useful data source to track the evolution in time of human mobility, hence allowing for monitoring the effectiveness of control measures such as physical distancing. We address the following analytical questions: How does the mobility structure of a territory change? 
Do incoming and outcoming flows become more predictable during the lockdown, and what are the differences between weekdays and weekends? Can we detect proper local job markets based on human mobility flows, to eventually shape the borders of a local outbreak?}, doi = {https://dx.doi.org/10.32079/ISTI-TR-2020/005}, url = {https://arxiv.org/abs/2004.11278}, author = {Pietro Bonato and Paolo Cintia and Francesco Fabbri and Daniele Fadda and Fosca Giannotti and Pier Luigi Lopalco and Sara Mazzilli and Mirco Nanni and Luca Pappalardo and Dino Pedreschi and Francesco Penone and S Rinzivillo and Giulio Rossetti and Marcello Savarese and Lara Tavoschi} } @article {1339, title = {The relationship between human mobility and viral transmissibility during the COVID-19 epidemics in Italy}, journal = {arXiv preprint arXiv:2006.03141}, year = {2020}, abstract = {We describe in this report our studies to understand the relationship between human mobility and the spreading of COVID-19, as an aid to manage the restart of the social and economic activities after the lockdown and monitor the epidemics in the coming weeks and months. We compare the evolution (from January to May 2020) of the daily mobility flows in Italy, measured by means of nation-wide mobile phone data, and the evolution of transmissibility, measured by the net reproduction number, i.e., the mean number of secondary infections generated by one primary infector in the presence of control interventions and human behavioural adaptations. We find a striking relationship between the negative variation of mobility flows and the net reproduction number, in all Italian regions, between March 11th and March 18th, when the country entered the lockdown. This observation allows us to quantify the time needed to "switch off" the country mobility (one week) and the time required to bring the net reproduction number below 1 (one week). 
A reasonably simple regression model provides evidence that the net reproduction number is correlated with a region{\textquoteright}s incoming, outgoing and internal mobility. We also find a strong relationship between the number of days above the epidemic threshold before the mobility flows reduce significantly as an effect of lockdowns, and the total number of confirmed SARS-CoV-2 infections per 100k inhabitants, thus indirectly showing the effectiveness of the lockdown and the other non-pharmaceutical interventions in the containment of the contagion. Our study demonstrates the value of "big" mobility data to the monitoring of key epidemic indicators to inform choices as the epidemics unfolds in the coming months.}, url = {https://arxiv.org/abs/2006.03141}, author = {Paolo Cintia and Daniele Fadda and Fosca Giannotti and Luca Pappalardo and Giulio Rossetti and Dino Pedreschi and S Rinzivillo and Bonato, Pietro and Fabbri, Francesco and Penone, Francesco and Savarese, Marcello and Checchi, Daniele and Chiaromonte, Francesca and Vineis , Paolo and Guzzetta, Giorgio and Riccardo, Flavia and Marziano, Valentina and Poletti, Piero and Trentini, Filippo and Bella, Antonio and Andrianou, Xanthi and Del Manso, Martina and Fabiani, Massimo and Bellino, Stefania and Boros, Stefano and Mateo Urdiales, Alberto and Vescio, Maria Fenicia and Brusaferro, Silvio and Rezza, Giovanni and Pezzotti, Patrizio and Ajelli, Marco and Merler, Stefano} } @article {1260, title = {The AI black box Explanation Problem}, journal = {ERCIM NEWS}, number = {116}, year = {2019}, pages = {12{\textendash}13}, author = {Riccardo Guidotti and Anna Monreale and Dino Pedreschi} } @article {1216, title = {Algorithmic bias amplifies opinion fragmentation and polarization: A bounded confidence model}, journal = {PloS one}, volume = {14}, number = {3}, year = {2019}, pages = {e0213246}, abstract = {The flow of information reaching us via the online media platforms is optimized not by the information 
content or relevance but by popularity and proximity to the target. This is typically performed in order to maximise platform usage. As a side effect, this introduces an algorithmic bias that is believed to enhance fragmentation and polarization of the societal debate. To study this phenomenon, we modify the well-known continuous opinion dynamics model of bounded confidence in order to account for the algorithmic bias and investigate its consequences. In the simplest version of the original model the pairs of discussion participants are chosen at random and their opinions get closer to each other if they are within a fixed tolerance level. We modify the selection rule of the discussion partners: there is an enhanced probability to choose individuals whose opinions are already close to each other, thus mimicking the behavior of online media which suggest interaction with similar peers. As a result we observe: a) an increased tendency towards opinion fragmentation, which emerges also in conditions where the original model would predict consensus, b) increased polarisation of opinions and c) a dramatic slowing down of the speed at which the convergence at the asymptotic state is reached, which makes the system highly unstable. Fragmentation and polarization are augmented by a fragmented initial population.}, doi = {10.1371/journal.pone.0213246}, url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0213246}, author = {Alina Sirbu and Dino Pedreschi and Fosca Giannotti and Kert{\'e}sz, J{\'a}nos} } @article {1296, title = {Causal inference for social discrimination reasoning}, journal = {Journal of Intelligent Information Systems}, year = {2019}, pages = {1{\textendash}13}, abstract = {The discovery of discriminatory bias in human or automated decision making is a task of increasing importance and difficulty, exacerbated by the pervasive use of machine learning and data mining. 
Currently, discrimination discovery largely relies upon correlation analysis of decisions records, disregarding the impact of confounding biases. We present a method for causal discrimination discovery based on propensity score analysis, a statistical tool for filtering out the effect of confounding variables. We introduce causal measures of discrimination which quantify the effect of group membership on the decisions, and highlight causal discrimination/favoritism patterns by learning regression trees over the novel measures. We validate our approach on two real world datasets. Our proposed framework for causal discrimination has the potential to enhance the transparency of machine learning with tools for detecting discriminatory bias both in the training data and in the learning algorithms.}, doi = {10.1007/s10844-019-00580-x}, url = {https://link.springer.com/article/10.1007/s10844-019-00580-x}, author = {Qureshi, Bilal and Kamiran, Faisal and Karim, Asim and Salvatore Ruggieri and Dino Pedreschi} } @conference {1262, title = {Explaining multi-label black-box classifiers for health applications}, booktitle = {International Workshop on Health Intelligence}, year = {2019}, publisher = {Springer}, organization = {Springer}, abstract = {Today the state-of-the-art performance in classification is achieved by the so-called {\textquotedblleft}black boxes{\textquotedblright}, i.e. decision-making systems whose internal logic is obscure. Such models could revolutionize the health-care system, however their deployment in real-world diagnosis decision support systems is subject to several risks and limitations due to the lack of transparency. The typical classification problem in health-care requires a multi-label approach since the possible labels are not mutually exclusive, e.g. diagnoses. We propose MARLENA, a model-agnostic method which explains multi-label black box decisions. MARLENA explains an individual decision in three steps. 
First, it generates a synthetic neighborhood around the instance to be explained using a strategy suitable for multi-label decisions. It then learns a decision tree on such neighborhood and finally derives from it a decision rule that explains the black box decision. Our experiments show that MARLENA performs well in terms of mimicking the black box behavior while gaining at the same time a notable amount of interpretability through compact decision rules, i.e. rules with limited length.}, doi = {10.1007/978-3-030-24409-5_9}, url = {https://link.springer.com/chapter/10.1007/978-3-030-24409-5_9}, author = {Cecilia Panigutti and Riccardo Guidotti and Anna Monreale and Dino Pedreschi} } @article {1283, title = {Factual and Counterfactual Explanations for Black Box Decision Making}, journal = {IEEE Intelligent Systems}, year = {2019}, abstract = {The rise of sophisticated machine learning models has brought accurate but obscure decision systems, which hide their logic, thus undermining transparency, trust, and the adoption of artificial intelligence (AI) in socially sensitive and safety-critical contexts. We introduce a local rule-based explanation method, providing faithful explanations of the decision made by a black box classifier on a specific instance. The proposed method first learns an interpretable, local classifier on a synthetic neighborhood of the instance under investigation, generated by a genetic algorithm. Then, it derives from the interpretable classifier an explanation consisting of a decision rule, explaining the factual reasons of the decision, and a set of counterfactuals, suggesting the changes in the instance features that would lead to a different outcome. 
Experimental results show that the proposed method outperforms existing approaches in terms of the quality of the explanations and of the accuracy in mimicking the black box.}, doi = {10.1109/MIS.2019.2957223}, url = {https://ieeexplore.ieee.org/abstract/document/8920138}, author = {Riccardo Guidotti and Anna Monreale and Fosca Giannotti and Dino Pedreschi and Salvatore Ruggieri and Franco Turini} } @conference {1215, title = {Meaningful explanations of Black Box AI decision systems}, booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence}, year = {2019}, abstract = {Black box AI systems for automated decision making, often based on machine learning over (big) data, map a user{\textquoteright}s features into a class or a score without exposing the reasons why. This is problematic not only for lack of transparency, but also for possible biases inherited by the algorithms from human prejudices and collection artifacts hidden in the training data, which may lead to unfair or wrong decisions. We focus on the urgent open challenge of how to construct meaningful explanations of opaque AI/ML systems, introducing the local-toglobal framework for black box explanation, articulated along three lines: (i) the language for expressing explanations in terms of logic rules, with statistical and causal interpretation; (ii) the inference of local explanations for revealing the decision rationale for a specific case, by auditing the black box in the vicinity of the target instance; (iii), the bottom-up generalization of many local explanations into simple global ones, with algorithms that optimize for quality and comprehensibility. 
We argue that the local-first approach opens the door to a wide variety of alternative solutions along different dimensions: a variety of data sources (relational, text, images, etc.), a variety of learning problems (multi-label classification, regression, scoring, ranking), a variety of languages for expressing meaningful explanations, a variety of means to audit a black box.}, doi = {10.1609/aaai.v33i01.33019780}, url = {https://aaai.org/ojs/index.php/AAAI/article/view/5050}, author = {Dino Pedreschi and Fosca Giannotti and Riccardo Guidotti and Anna Monreale and Salvatore Ruggieri and Franco Turini} } @article {1294, title = {PlayeRank: data-driven performance evaluation and player ranking in soccer via a machine learning approach}, journal = {ACM Transactions on Intelligent Systems and Technology (TIST)}, volume = {10}, number = {5}, year = {2019}, pages = {1{\textendash}27}, abstract = {The problem of evaluating the performance of soccer players is attracting the interest of many companies and the scientific community, thanks to the availability of massive data capturing all the events generated during a match (e.g., tackles, passes, shots, etc.). Unfortunately, there is no consolidated and widely accepted metric for measuring performance quality in all of its facets. In this article, we design and implement PlayeRank, a data-driven framework that offers a principled multi-dimensional and role-aware evaluation of the performance of soccer players. We build our framework by deploying a massive dataset of soccer-logs and consisting of millions of match events pertaining to four seasons of 18 prominent soccer competitions. By comparing PlayeRank to known algorithms for performance evaluation in soccer, and by exploiting a dataset of players{\textquoteright} evaluations made by professional soccer scouts, we show that PlayeRank significantly outperforms the competitors. 
We also explore the ratings produced by PlayeRank and discover interesting patterns about the nature of excellent performances and what distinguishes the top players from the others. At the end, we explore some applications of PlayeRank{\textemdash}i.e. searching players and player versatility{\textemdash}showing its flexibility and efficiency, which makes it worth to be used in the design of a scalable platform for soccer analytics.}, doi = {10.1145/3343172}, url = {https://dl.acm.org/doi/abs/10.1145/3343172}, author = {Luca Pappalardo and Paolo Cintia and Ferragina, Paolo and Massucco, Emanuele and Dino Pedreschi and Fosca Giannotti} } @article {1266, title = {A public data set of spatio-temporal match events in soccer competitions}, journal = {Scientific data}, volume = {6}, number = {1}, year = {2019}, pages = {1{\textendash}15}, abstract = {Soccer analytics is attracting increasing interest in academia and industry, thanks to the availability of sensing technologies that provide high-fidelity data streams for every match. Unfortunately, these detailed data are owned by specialized companies and hence are rarely publicly available for scientific research. To fill this gap, this paper describes the largest open collection of soccer-logs ever released, containing all the spatio-temporal events (passes, shots, fouls, etc.) that occurred during each match for an entire season of seven prominent soccer competitions. Each match event contains information about its position, time, outcome, player and characteristics. 
The nature of team sports like soccer, halfway between the abstraction of a game and the reality of complex social systems, combined with the unique size and composition of this dataset, provide an ideal ground for tackling a wide range of data science problems, including the measurement and evaluation of performance, both at individual and at collective level, and the determinants of success and failure.}, doi = {10.1038/s41597-019-0247-7}, url = {https://www.nature.com/articles/s41597-019-0247-7}, author = {Luca Pappalardo and Paolo Cintia and Alessio Rossi and Massucco, Emanuele and Ferragina, Paolo and Dino Pedreschi and Fosca Giannotti} } @article {1217, title = {Public opinion and Algorithmic bias}, journal = {ERCIM News}, number = {116}, year = {2019}, url = {https://ercim-news.ercim.eu/en116/special/public-opinion-and-algorithmic-bias}, author = {Alina Sirbu and Fosca Giannotti and Dino Pedreschi and Kert{\'e}sz, J{\'a}nos} } @article {1272, title = {A Visual Analytics Platform to Measure Performance on University Entrance Tests (Discussion Paper)}, year = {2019}, author = {Boncoraglio, Daniele and Deri, Francesca and Distefano, Francesco and Daniele Fadda and Filippi, Giorgio and Forte, Giuseppe and Licari, Federica and Michela Natilli and Dino Pedreschi and S Rinzivillo} } @article {1179, title = {Active and passive diffusion processes in complex networks}, journal = {Applied network science}, volume = {3}, number = {1}, year = {2018}, pages = {42}, abstract = {Ideas, information, viruses: all of them, with their mechanisms, spread over the complex social tissues described by our interpersonal relations. Usually, to simulate and understand the unfolding of such complex phenomena are used general mathematical models; these models act agnostically from the object of which they simulate the diffusion, thus considering spreading of virus, ideas and innovations alike. 
Indeed, such degree of abstraction makes it easier to define a standard set of tools that can be applied to heterogeneous contexts; however, it can also lead to biased, incorrect, simulation outcomes. In this work we introduce the concepts of active and passive diffusion to discriminate the degree in which individuals choice affect the overall spreading of content over a social graph. Moving from the analysis of a well-known passive diffusion schema, the Threshold model (that can be used to model peer-pressure related processes), we introduce two novel approaches whose aim is to provide active and mixed schemas applicable in the context of innovations/ideas diffusion simulation. Our analysis, performed both in synthetic and real-world data, underline that the adoption of exclusively passive/active models leads to conflicting results, thus highlighting the need of mixed approaches to capture the real complexity of the simulated system better.}, doi = {https://doi.org/10.1007/s41109-018-0100-5}, url = {https://link.springer.com/article/10.1007/s41109-018-0100-5}, author = {Letizia Milli and Giulio Rossetti and Dino Pedreschi and Fosca Giannotti} } @conference {1021, title = {Diffusive Phenomena in Dynamic Networks: a data-driven study}, booktitle = {International Conference on Complex Networks CompleNet}, year = {2018}, publisher = {Springer}, organization = {Springer}, address = {Boston March 5-8 2018}, abstract = {Everyday, ideas, information as well as viruses spread over complex social tissues described by our interpersonal relations. So far, the network contexts upon which diffusive phenomena unfold have usually considered static, composed by a fixed set of nodes and edges. Recent studies describe social networks as rapidly changing topologies. 
In this work {\textendash} following a data-driven approach {\textendash} we compare the behaviors of classical spreading models when used to analyze a given social network whose topological dynamics are observed at different temporal-granularities. Our goal is to shed some light on the impacts that the adoption of a static topology has on spreading simulations as well as to provide an alternative formulation of two classical diffusion models.}, doi = {10.1007/978-3-319-73198-8_13}, url = {https://link.springer.com/chapter/10.1007/978-3-319-73198-8_13}, author = {Letizia Milli and Giulio Rossetti and Dino Pedreschi and Fosca Giannotti} } @conference {1046, title = {Discovering Mobility Functional Areas: A Mobility Data Analysis Approach}, booktitle = {International Workshop on Complex Networks}, year = {2018}, publisher = {Springer}, organization = {Springer}, abstract = {How do we measure the borders of urban areas and therefore decide which are the functional units of the territory? Nowadays, we typically do that just looking at census data, while in this work we aim to identify functional areas for mobility in a completely data-driven way. Our solution makes use of human mobility data (vehicle trajectories) and consists in an agglomerative process which gradually groups together those municipalities that maximize internal vehicular traffic while minimizing external one. The approach is tested against a dataset of trips involving individuals of an Italian Region, obtaining a new territorial division which allows us to identify mobility attractors. Leveraging such partitioning and external knowledge, we show that our method outperforms the state-of-the-art algorithms. 
Indeed, the outcome of our approach is of great value to public administrations for creating synergies within the aggregations of the territories obtained.}, doi = {10.1007/978-3-319-73198-8_27}, url = {https://link.springer.com/chapter/10.1007/978-3-319-73198-8_27}, author = {Lorenzo Gabrielli and Daniele Fadda and Giulio Rossetti and Mirco Nanni and Piccinini, Leonardo and Dino Pedreschi and Fosca Giannotti and Patrizia Lattarulo} } @article {1130, title = {Discovering temporal regularities in retail customers{\textquoteright} shopping behavior}, journal = {EPJ Data Science}, volume = {7}, number = {1}, year = {2018}, month = {01/2018}, pages = {6}, abstract = {In this paper we investigate the regularities characterizing the temporal purchasing behavior of the customers of a retail market chain. Most of the literature studying purchasing behavior focuses on what customers buy while giving few importance to the temporal dimension. As a consequence, the state of the art does not allow capturing which are the temporal purchasing patterns of each customers. These patterns should describe the customer{\textquoteright}s temporal habits highlighting when she typically makes a purchase in correlation with information about the amount of expenditure, number of purchased items and other similar aggregates. This knowledge could be exploited for different scopes: set temporal discounts for making the purchases of customers more regular with respect the time, set personalized discounts in the day and time window preferred by the customer, provide recommendations for shopping time schedule, etc. To this aim, we introduce a framework for extracting from personal retail data a temporal purchasing profile able to summarize whether and when a customer makes her distinctive purchases. The individual profile describes a set of regular and characterizing shopping behavioral patterns, and the sequences in which these patterns take place. 
We show how to compare different customers by providing a collective perspective to their individual profiles, and how to group the customers with respect to these comparable profiles. By analyzing real datasets containing millions of shopping sessions we found that there is a limited number of patterns summarizing the temporal purchasing behavior of all the customers, and that they are sequentially followed in a finite number of ways. Moreover, we recognized regular customers characterized by a small number of temporal purchasing behaviors, and changing customers characterized by various types of temporal purchasing behaviors. Finally, we discuss on how the profiles can be exploited both by customers to enable personalized services, and by the retail market chain for providing tailored discounts based on temporal purchasing regularity.}, doi = {10.1140/epjds/s13688-018-0133-0}, url = {https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-018-0133-0}, author = {Riccardo Guidotti and Lorenzo Gabrielli and Anna Monreale and Dino Pedreschi and Fosca Giannotti} } @conference {1039, title = {The Fractal Dimension of Music: Geography, Popularity and Sentiment Analysis}, booktitle = {International Conference on Smart Objects and Technologies for Social Good}, year = {2018}, publisher = {Springer}, organization = {Springer}, abstract = {Nowadays there is a growing standardization of musical contents. Our finding comes out from a cross-service multi-level dataset analysis where we study how geography affects the music production. The investigation presented in this paper highlights the existence of a {\textquotedblleft}fractal{\textquotedblright} musical structure that relates the technical characteristics of the music produced at regional, national and world level. Moreover, a similar structure emerges also when we analyze the musicians{\textquoteright} popularity and the polarity of their songs defined as the mood that they are able to convey. 
Furthermore, the clusters identified are markedly distinct one from another with respect to popularity and sentiment.}, doi = {10.1007/978-3-319-76111-4_19}, url = {https://link.springer.com/chapter/10.1007/978-3-319-76111-4_19}, author = {Pollacci, Laura and Riccardo Guidotti and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi} } @article {1194, title = {Gastroesophageal reflux symptoms among Italian university students: epidemiology and dietary correlates using automatically recorded transactions}, journal = {BMC gastroenterology}, volume = {18}, number = {1}, year = {2018}, pages = {116}, abstract = {Background: Gastroesophageal reflux disease (GERD) is one of the most common gastrointestinal disorders worldwide, with relevant impact on the quality of life and health care costs.The aim of our study is to assess the prevalence of GERD based on self-reported symptoms among university students in central Italy. The secondary aim is to evaluate lifestyle correlates, particularly eating habits, in GERD students using automatically recorded transactions through cashiers at university canteen. Methods: A web-survey was created and launched through an app, ad-hoc developed for an interactive exchange of information with students, including anthropometric data and lifestyle habits. Moreover, the web-survey allowed users a self-diagnosis of GERD through a simple questionnaire. As regard eating habits, detailed collection of meals consumed, including number and type of dishes, were automatically recorded through cashiers at the university canteen equipped with an automatic registration system. Results: We collected 3012 questionnaires. A total of 792 students (26.2\% of the respondents) reported typical GERD symptoms occurring at least weekly. Female sex was more prevalent than male sex. 
In the set of students with GERD, the percentage of smokers was higher, and our results showed that when BMI tends to higher values the percentage of students with GERD tends to increase. When evaluating correlates with diet, we found, among all users, a lower frequency of legumes choice in GERD students and, among frequent users, a lower frequency of choice of pasta and rice in GERD students. Discussion: The results of our study are in line with the values reported in the literature. Nowadays, GERD is a common problem in our communities, and can potentially lead to serious medical complications; the economic burden involved in the diagnostic and therapeutic management of the disease has a relevant impact on healthcare costs. Conclusions: To our knowledge, this is the first study evaluating the prevalence of typical GERD{\textendash}related symptoms in a young population of University students in Italy. Considering the young age of enrolled subjects, our prevalence rate, relatively high compared to the usual estimates, could represent a further negative factor for the future economic sustainability of the healthcare system. Keywords: Gastroesophageal reflux disease, GERD, Heartburn, Regurgitation, Diet, Prevalence, University students}, doi = {10.1186/s12876-018-0832-9}, url = {https://bmcgastroenterol.biomedcentral.com/articles/10.1186/s12876-018-0832-9}, author = {Martinucci, Irene and Michela Natilli and Lorenzoni, Valentina and Luca Pappalardo and Anna Monreale and Turchetti, Giuseppe and Dino Pedreschi and Marchi, Santino and Barale, Roberto and de Bortoli, Nicola} } @conference {1292, title = {Helping your docker images to spread based on explainable models}, booktitle = {Joint European Conference on Machine Learning and Knowledge Discovery in Databases}, year = {2018}, publisher = {Springer}, organization = {Springer}, abstract = {Docker is on the rise in today{\textquoteright}s enterprise IT. 
It permits shipping applications inside portable containers, which run from so-called Docker images. Docker images are distributed in public registries, which also monitor their popularity. The popularity of an image impacts on its actual usage, and hence on the potential revenues for its developers. In this paper, we present a solution based on interpretable decision tree and regression trees for estimating the popularity of a given Docker image, and for understanding how to improve an image to increase its popularity. The results presented in this work can provide valuable insights to Docker developers, helping them in spreading their images. Code related to this paper is available at: https://github.com/di-unipi-socc/DockerImageMiner.}, doi = {10.1007/978-3-030-10997-4_13}, url = {https://link.springer.com/chapter/10.1007/978-3-030-10997-4_13}, author = {Riccardo Guidotti and Soldani, Jacopo and Neri, Davide and Brogi, Antonio and Dino Pedreschi} } @inbook {1422, title = {How Data Mining and Machine Learning Evolved from Relational Data Base to Data Science}, booktitle = {A Comprehensive Guide Through the Italian Database Research Over the Last 25 Years}, year = {2018}, pages = {287 - 306}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Cham}, abstract = {During the last 35 years, data management principles such as physical and logical independence, declarative querying and cost-based optimization have led to profound pervasiveness of relational databases in any kind of organization. More importantly, these technical advances have enabled the first round of business intelligence applications and laid the foundation for managing and analyzing Big Data today.}, isbn = {978-3-319-61893-7}, doi = {https://doi.org/10.1007/978-3-319-61893-7_17}, url = {https://link.springer.com/chapter/10.1007\%2F978-3-319-61893-7_17}, author = {Amato, G. and Candela, L. and Castelli, D. and Esuli, A. and Falchi, F. 
and Gennaro, C. and Fosca Giannotti and Anna Monreale and Mirco Nanni and Pagano, P. and Luca Pappalardo and Dino Pedreschi and Francesca Pratesi and Rabitti, F. and S Rinzivillo and Giulio Rossetti and Salvatore Ruggieri and Sebastiani, F. and Tesconi, M.}, editor = {Flesca, Sergio and Greco, Sergio and Masciari, Elio and Sacc{\`a}, Domenico} } @article {1133, title = {The italian music superdiversity}, journal = {Multimedia Tools and Applications}, year = {2018}, pages = {1{\textendash}23}, abstract = {Globalization can lead to a growing standardization of musical contents. Using a cross-service multi-level dataset we investigate the actual Italian music scene. The investigation highlights the musical Italian superdiversity both individually analyzing the geographical and lexical dimensions and combining them. Using different kinds of features over the geographical dimension leads to two similar, comparable and coherent results, confirming the strong and essential correlation between melodies and lyrics. The profiles identified are markedly distinct one from another with respect to sentiment, lexicon, and melodic features. 
Through a novel application of a sentiment spreading algorithm and songs{\textquoteright} melodic features, we are able to highlight discriminant characteristics that violate the standard regional political boundaries, reconfiguring them following the actual musical communicative practices.}, doi = {10.1007/s11042-018-6511-6}, url = {https://link.springer.com/article/10.1007/s11042-018-6511-6}, author = {Pollacci, Laura and Riccardo Guidotti and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi} } @article {1131, title = {Local Rule-Based Explanations of Black Box Decision Systems}, year = {2018}, author = {Riccardo Guidotti and Anna Monreale and Salvatore Ruggieri and Dino Pedreschi and Franco Turini and Fosca Giannotti} } @article {1047, title = {NDlib: a python library to model and analyze diffusion processes over complex networks}, journal = {International Journal of Data Science and Analytics}, volume = {5}, number = {1}, year = {2018}, pages = {61{\textendash}79}, abstract = {Nowadays the analysis of dynamics of and on networks represents a hot topic in the social network analysis playground. To support students, teachers, developers and researchers, in this work we introduce a novel framework, namely NDlib, an environment designed to describe diffusion simulations. NDlib is designed to be a multi-level ecosystem that can be fruitfully used by different user segments. 
For this reason, upon NDlib, we designed a simulation server that allows remote execution of experiments as well as an online visualization tool that abstracts its programmatic interface and makes available the simulation platform to non-technicians.}, doi = {10.1007/s41060-017-0086-6}, url = {https://link.springer.com/article/10.1007/s41060-017-0086-6}, author = {Giulio Rossetti and Letizia Milli and S Rinzivillo and Alina Sirbu and Dino Pedreschi and Fosca Giannotti} } @article {1132, title = {Open the Black Box Data-Driven Explanation of Black Box Decision Systems}, year = {2018}, author = {Dino Pedreschi and Fosca Giannotti and Riccardo Guidotti and Anna Monreale and Luca Pappalardo and Salvatore Ruggieri and Franco Turini} } @article {1134, title = {Personalized Market Basket Prediction with Temporal Annotated Recurring Sequences}, journal = {IEEE Transactions on Knowledge and Data Engineering}, year = {2018}, abstract = {Nowadays, a hot challenge for supermarket chains is to offer personalized services to their customers. Market basket prediction, i.e., supplying the customer a shopping list for the next purchase according to her current needs, is one of these services. Current approaches are not capable of capturing at the same time the different factors influencing the customer{\textquoteright}s decision process: co-occurrence, sequentuality, periodicity and recurrency of the purchased items. To this aim, we define a pattern Temporal Annotated Recurring Sequence (TARS) able to capture simultaneously and adaptively all these factors. We define the method to extract TARS and develop a predictor for next basket named TBP (TARS Based Predictor) that, on top of TARS, is able to understand the level of the customer{\textquoteright}s stocks and recommend the set of most necessary items. By adopting the TBP the supermarket chains could crop tailored suggestions for each individual customer which in turn could effectively speed up their shopping sessions. 
A deep experimentation shows that TARS are able to explain the customer purchase behavior, and that TBP outperforms the state-of-the-art competitors.}, doi = {10.1109/TKDE.2018.2872587}, url = {https://ieeexplore.ieee.org/abstract/document/8477157}, author = {Riccardo Guidotti and Giulio Rossetti and Luca Pappalardo and Fosca Giannotti and Dino Pedreschi} } @article {1138, title = {PRUDEnce: a system for assessing privacy risk vs utility in data sharing ecosystems}, journal = {Transactions on Data Privacy}, volume = {11}, number = {2}, year = {2018}, month = {08/2018}, abstract = {Data describing human activities are an important source of knowledge useful for understanding individual and collective behavior and for developing a wide range of user services. Unfortunately, this kind of data is sensitive, because people{\textquoteright}s whereabouts may allow re-identification of individuals in a de-identified database. Therefore, Data Providers, before sharing those data, must apply any sort of anonymization to lower the privacy risks, but they must be aware and capable of controlling also the data quality, since these two factors are often a trade-off. In this paper we propose PRUDEnce (Privacy Risk versus Utility in Data sharing Ecosystems), a system enabling a privacy-aware ecosystem for sharing personal data. It is based on a methodology for assessing both the empirical (not theoretical) privacy risk associated to users represented in the data, and the data quality guaranteed only with users not at risk. Our proposal is able to support the Data Provider in the exploration of a repertoire of possible data transformations with the aim of selecting one specific transformation that yields an adequate trade-off between data quality and privacy risk. 
We study the practical effectiveness of our proposal over three data formats underlying many services, defined on real mobility data, i.e., presence data, trajectory data and road segment data.}, url = {http://www.tdp.cat/issues16/tdp.a284a17.pdf}, author = {Francesca Pratesi and Anna Monreale and Roberto Trasarti and Fosca Giannotti and Dino Pedreschi and Yanagihara, Tadashi} } @article {1261, title = {A survey of methods for explaining black box models}, journal = {ACM computing surveys (CSUR)}, volume = {51}, number = {5}, year = {2018}, pages = {93}, abstract = {In recent years, many accurate decision support systems have been constructed as black boxes, that is as systems that hide their internal logic to the user. This lack of explanation constitutes both a practical and an ethical issue. The literature reports many approaches aimed at overcoming this crucial weakness, sometimes at the cost of sacrificing accuracy for interpretability. The applications in which black box decision systems can be used are various, and each approach is typically developed to provide a solution for a specific problem and, as a consequence, it explicitly or implicitly delineates its own definition of interpretability and explanation. The aim of this article is to provide a classification of the main problems addressed in the literature with respect to the notion of explanation and the type of black box system. Given a problem definition, a black box type, and a desired explanation, this survey should help the researcher to find the proposals more useful for his own work. 
The proposed classification of approaches to open black box models should also be useful for putting the many research open questions in perspective.}, doi = {10.1145/3236009}, url = {https://dl.acm.org/doi/abs/10.1145/3236009}, author = {Riccardo Guidotti and Anna Monreale and Salvatore Ruggieri and Franco Turini and Fosca Giannotti and Dino Pedreschi} } @article {1051, title = {Authenticated Outlier Mining for Outsourced Databases}, journal = {IEEE Transactions on Dependable and Secure Computing}, year = {2017}, abstract = {The Data-Mining-as-a-Service (DMaS) paradigm is becoming the focus of research, as it allows the data owner (client) who lacks expertise and/or computational resources to outsource their data and mining needs to a third-party service provider (server). Outsourcing, however, raises some issues about result integrity: how could the client verify the mining results returned by the server are both sound and complete? In this paper, we focus on outlier mining, an important mining task. Previous verification techniques use an authenticated data structure (ADS) for correctness authentication, which may incur much space and communication cost. In this paper, we propose a novel solution that returns a probabilistic result integrity guarantee with much cheaper verification cost. The key idea is to insert a set of artificial records (ARs) into the dataset, from which it constructs a set of artificial outliers (AOs) and artificial non-outliers (ANOs). The AOs and ANOs are used by the client to detect any incomplete and/or incorrect mining results with a probabilistic guarantee. The main challenge that we address is how to construct ARs so that they do not change the (non-)outlierness of original records, while guaranteeing that the client can identify ANOs and AOs without executing mining. Furthermore, we build a strategic game and show that a Nash equilibrium exists only when the server returns correct outliers. 
Our implementation and experiments demonstrate that our verification solution is efficient and lightweight.}, doi = {10.1109/TDSC.2017.2754493}, url = {https://ieeexplore.ieee.org/document/8048342/}, author = {Dong, Boxiang and Hui Wendy Wang and Anna Monreale and Dino Pedreschi and Fosca Giannotti and W Guo} } @conference {953, title = {Clustering Individual Transactional Data for Masses of Users}, booktitle = {Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining}, year = {2017}, publisher = {ACM}, organization = {ACM}, abstract = {Mining a large number of datasets recording human activities for making sense of individual data is the key enabler of a new wave of personalized knowledge-based services. In this paper we focus on the problem of clustering individual transactional data for a large mass of users. Transactional data is a very pervasive kind of information that is collected by several services, often involving huge pools of users. We propose txmeans, a parameter-free clustering algorithm able to efficiently partitioning transactional data in a completely automatic way. Txmeans is designed for the case where clustering must be applied on a massive number of different datasets, for instance when a large set of users need to be analyzed individually and each of them has generated a long history of transactions. A deep experimentation on both real and synthetic datasets shows the practical effectiveness of txmeans for the mass clustering of different personal datasets, and suggests that txmeans outperforms existing methods in terms of quality and efficiency. 
Finally, we present a personal cart assistant application based on txmeans}, doi = {10.1145/3097983.3098034}, author = {Riccardo Guidotti and Anna Monreale and Mirco Nanni and Fosca Giannotti and Dino Pedreschi} } @unpublished {1512, title = {Data Science a Game-changer for Science and Innovation}, year = {2017}, month = {03/2017}, publisher = {G7 Academy}, abstract = {Digital technology is ubiquitous and very much part of public and private organizations and of individuals{\textquoteright} lives. People and things are becoming increasingly interconnected. Smartphones, smart buildings, smart factories, smart cities, autonomous vehicles and other smart environments and devices are filled with digital sensors, all of them creating an abundance of data. Governance and health care collect, generate and use data in an unprecedented quantity. New high- throughput scientific instruments and methods, like telescopes, satellites, accelerators, supercomputers, sensor networks and gene sequencing methods as well as large scale simulations generate massive amounts of data. Often referred to as data deluge, or Big Data, massive datasets revolutionize the way research is carried out, resulting in the emergence of a new, fourth paradigm of science based on data-intensive computing and data driven discovery4. Accordingly, the path to the solution of the problem of sustainable development will lead through Big Data, as maintaining the whole complexity of our modern society, including communication and traffic services, manufacturing, trade and commerce, financial services, health security, science, education and policy making requires this novel approach. The new availability of huge amounts of data, along with advanced tools of exploratory data analysis, data mining/machine learning, and data visualization, and scalable infrastructures, has produced a spectacular change in the scientific method: all this is Data Science. 
This paper describes the main issues around Data Science as it will play out in the coming years in science and society. It focuses on the scientific, technical and ethical challenges (A), on its role for disruptive innovation for science, industry, policy and people (B), on its scientific, technological and educational challenges (C) and finally, on the quantitative expectations of its economic impact (D). In our work we could count on many reports and studies on the subject, particularly on the BDVA and ERCIM reports.}, author = {Fabio Beltram and Fosca Giannotti and Dino Pedreschi} } @article {1018, title = {Forecasting success via early adoptions analysis: A data-driven study}, journal = {PloS one}, volume = {12}, number = {12}, year = {2017}, pages = {e0189096}, abstract = {Innovations are continuously launched over markets, such as new products over the retail market or new artists over the music scene. Some innovations become a success; others don{\textquoteright}t. Forecasting which innovations will succeed at the beginning of their lifecycle is hard. In this paper, we provide a data-driven, large-scale account of the existence of a special niche among early adopters, individuals that consistently tend to adopt successful innovations before they reach success: we will call them Hit-Savvy. Hit-Savvy can be discovered in very different markets and retain over time their ability to anticipate the success of innovations. As our second contribution, we devise a predictive analytical process, exploiting Hit-Savvy as signals, which achieves high accuracy in the early-stage prediction of successful innovations, far beyond the reach of state-of-the-art time series forecasting models. 
Indeed, our findings and predictive model can be fruitfully used to support marketing strategies and product placement.}, author = {Giulio Rossetti and Letizia Milli and Fosca Giannotti and Dino Pedreschi} } @conference {1129, title = {The Fractal Dimension of Music: Geography, Popularity and Sentiment Analysis}, booktitle = {International Conference on Smart Objects and Technologies for Social Good}, year = {2017}, publisher = {Springer, Cham}, organization = {Springer, Cham}, abstract = {Nowadays there is a growing standardization of musical contents. Our finding comes out from a cross-service multi-level dataset analysis where we study how geography affects the music production. The investigation presented in this paper highlights the existence of a {\textquotedblleft}fractal{\textquotedblright} musical structure that relates the technical characteristics of the music produced at regional, national and world level. Moreover, a similar structure emerges also when we analyze the musicians{\textquoteright} popularity and the polarity of their songs defined as the mood that they are able to convey. Furthermore, the clusters identified are markedly distinct one from another with respect to popularity and sentiment. }, doi = {10.1007/978-3-319-76111-4_19}, url = {https://link.springer.com/chapter/10.1007/978-3-319-76111-4_19}, author = {Pollacci, Laura and Riccardo Guidotti and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi} } @article {959, title = {ICON Loop Carpooling Show Case}, journal = {Data Mining and Constraint Programming: Foundations of a Cross-Disciplinary Approach}, volume = {10101}, year = {2017}, pages = {310}, abstract = {In this chapter we describe a proactive carpooling service that combines induction and optimization mechanisms to maximize the impact of carpooling within a community. The approach autonomously infers the mobility demand of the users through the analysis of their mobility traces (i.e. 
Data Mining of GPS trajectories) and builds the network of all possible ride sharing opportunities among the users. Then, the maximal set of carpooling matches that satisfy some standard requirements (maximal capacity of vehicles, etc.) is computed through Constraint Programming models, and the resulting matches are proactively proposed to the users. Finally, in order to maximize the expected impact of the service, the probability that each carpooling match is accepted by the users involved is inferred through Machine Learning mechanisms and put in the CP model. The whole process is reiterated at regular intervals, thus forming an instance of the general ICON loop.}, url = {https://link.springer.com/content/pdf/10.1007/978-3-319-50137-6.pdf$\#$page=314}, author = {Mirco Nanni and Lars Kotthoff and Riccardo Guidotti and Barry O{\textquoteright}Sullivan and Dino Pedreschi} } @article {955, title = {The Inductive Constraint Programming Loop}, journal = {IEEE Intelligent Systems}, year = {2017}, abstract = {Constraint programming is used for a variety of real-world optimization problems, such as planning, scheduling and resource allocation problems. At the same time, one continuously gathers vast amounts of data about these problems. Current constraint programming software does not exploit such data to update schedules, resources and plans. We propose a new framework, which we call the inductive constraint programming loop. In this approach data is gathered and analyzed systematically in order to dynamically revise and adapt constraints and optimization criteria. 
Inductive Constraint Programming aims at bridging the gap between the areas of data mining and machine learning on the one hand, and constraint programming on the other.}, doi = {10.1109/MIS.2017.265115706}, author = {Bessiere, Christian and De Raedt, Luc and Tias Guns and Lars Kotthoff and Mirco Nanni and Siegfried Nijssen and Barry O{\textquoteright}Sullivan and Paparrizou, Anastasia and Dino Pedreschi and Simonis, Helmut} } @article {958, title = {The Inductive Constraint Programming Loop}, journal = {Data Mining and Constraint Programming: Foundations of a Cross-Disciplinary Approach}, volume = {10101}, year = {2017}, pages = {303}, abstract = {Constraint programming is used for a variety of real-world optimization problems, such as planning, scheduling and resource allocation problems. At the same time, one continuously gathers vast amounts of data about these problems. Current constraint programming software does not exploit such data to update schedules, resources and plans. We propose a new framework, that we call the Inductive Constraint Programming (ICON) loop. In this approach data is gathered and analyzed systematically in order to dynamically revise and adapt constraints and optimization criteria. 
Inductive Constraint Programming aims at bridging the gap between the areas of data mining and machine learning on the one hand, and constraint programming on the other hand.}, url = {https://link.springer.com/content/pdf/10.1007/978-3-319-50137-6.pdf$\#$page=307}, author = {Mirco Nanni and Siegfried Nijssen and Barry O{\textquoteright}Sullivan and Paparrizou, Anastasia and Dino Pedreschi and Simonis, Helmut} } @conference {1019, title = {Information diffusion in complex networks: The active/passive conundrum}, booktitle = {International Workshop on Complex Networks and their Applications}, year = {2017}, publisher = {Springer}, organization = {Springer}, abstract = {Ideas, information, viruses: all of them, with their mechanisms, can spread over the complex social tissues described by our interpersonal relations. Classical spreading models can abstract agnostically from the object of which they simulate the diffusion, thus considering spreading of virus, ideas and innovations alike. Indeed, such simplification makes it easier to define a standard set of tools that can be applied to heterogeneous contexts; however, it can also lead to biased, partial, simulation outcomes. In this work we discuss the concepts of active and passive diffusion: moving from analysis of a well-known passive model, the Threshold one, we introduce two novel approaches whose aim is to provide active and mixed schemas applicable in the context of innovations/ideas diffusion simulation. 
Our data-driven analysis shows how, in such context, the adoption of exclusively passive/active models leads to conflicting results, thus highlighting the need of mixed approaches.}, doi = {10.1007/978-3-319-72150-7_25}, url = {https://link.springer.com/chapter/10.1007/978-3-319-72150-7_25}, author = {Letizia Milli and Giulio Rossetti and Dino Pedreschi and Fosca Giannotti} } @conference {1024, title = {Market Basket Prediction using User-Centric Temporal Annotated Recurring Sequences}, booktitle = {2017 IEEE International Conference on Data Mining (ICDM)}, year = {2017}, publisher = {IEEE}, organization = {IEEE}, abstract = {Nowadays, a hot challenge for supermarket chains is to offer personalized services to their customers. Market basket prediction, i.e., supplying the customer a shopping list for the next purchase according to her current needs, is one of these services. Current approaches are not capable of capturing at the same time the different factors influencing the customer{\textquoteright}s decision process: co-occurrence, sequentuality, periodicity and recurrency of the purchased items. To this aim, we define a pattern named Temporal Annotated Recurring Sequence (TARS). We define the method to extract TARS and develop a predictor for next basket named TBP (TARS Based Predictor) that, on top of TARS, is able to understand the level of the customer{\textquoteright}s stocks and recommend the set of most necessary items. 
A deep experimentation shows that TARS can explain the customers{\textquoteright} purchase behavior, and that TBP outperforms the state-of-the-art competitors.}, author = {Riccardo Guidotti and Giulio Rossetti and Luca Pappalardo and Fosca Giannotti and Dino Pedreschi} } @article {1020, title = {NDlib: a python library to model and analyze diffusion processes over complex networks}, journal = {International Journal of Data Science and Analytics}, year = {2017}, pages = {1{\textendash}19}, abstract = {Nowadays the analysis of dynamics of and on networks represents a hot topic in the social network analysis playground.To support students, teachers, developers and researchers, in this work we introduce a novel framework, namely NDlib, an environment designed to describe diffusion simulations. NDlib is designed to be a multi-level ecosystem that can be fruitfully used by different user segments. For this reason, upon NDlib, we designed a simulation server that allows remote execution of experiments as well as an online visualization tool that abstracts its programmatic interface and makes available the simulation platform to non-technicians.}, author = {Giulio Rossetti and Letizia Milli and S Rinzivillo and Alina Sirbu and Dino Pedreschi and Fosca Giannotti} } @conference {1022, title = {NDlib: Studying Network Diffusion Dynamics}, booktitle = {IEEE International Conference on Data Science and Advanced Analytics, DSA}, year = {2017}, address = {Tokyo}, abstract = {Nowadays the analysis of diffusive phenomena occurring on top of complex networks represents a hot topic in the Social Network Analysis playground. In order to support students, teachers, developers and researchers in this work we introduce a novel simulation framework, ND LIB . ND LIB is designed to be a multi-level ecosystem that can be fruitfully used by different user segments. 
Upon the diffusion library, we designed a simulation server that allows remote execution of experiments and an online visualization tool that abstracts the programmatic interface and makes available the simulation platform to non-technicians.}, doi = {10.1109/DSAA.2017.6}, url = {https://ieeexplore.ieee.org/abstract/document/8259774}, author = {Giulio Rossetti and Letizia Milli and S Rinzivillo and Alina Sirbu and Dino Pedreschi and Fosca Giannotti} } @article {956, title = {Never drive alone: Boosting carpooling with network analysis}, journal = {Information Systems}, volume = {64}, year = {2017}, pages = {237{\textendash}257}, abstract = {Carpooling, i.e., the act where two or more travelers share the same car for a common trip, is one of the possibilities brought forward to reduce traffic and its externalities, but experience shows that it is difficult to boost the adoption of carpooling to significant levels. In our study, we analyze the potential impact of carpooling as a collective phenomenon emerging from people{\textquoteright}s mobility, by network analytics. Based on big mobility data from travelers in a given territory, we construct the network of potential carpooling, where nodes correspond to the users and links to possible shared trips, and analyze the structural and topological properties of this network, such as network communities and node ranking, to the purpose of highlighting the subpopulations with higher chances to create a carpooling community, and the propensity of users to be either drivers or passengers in a shared car. Our study is anchored to reality thanks to a large mobility dataset, consisting of the complete one-month-long GPS trajectories of approx. 10\% circulating cars in Tuscany. 
We also analyze the aggregated outcome of carpooling by means of empirical simulations, showing how an assignment policy exploiting the network analytic concepts of communities and node rankings minimizes the number of single occupancy vehicles observed after carpooling.}, doi = {10.1016/j.is.2016.03.006}, author = {Riccardo Guidotti and Mirco Nanni and S Rinzivillo and Dino Pedreschi and Fosca Giannotti} } @article {957, title = {Next Basket Prediction using Recurring Sequential Patterns}, journal = {arXiv preprint arXiv:1702.07158}, year = {2017}, abstract = {Nowadays, a hot challenge for supermarket chains is to offer personalized services for their customers. Next basket prediction, i.e., supplying the customer a shopping list for the next purchase according to her current needs, is one of these services. Current approaches are not capable to capture at the same time the different factors influencing the customer{\textquoteright}s decision process: co-occurrency, sequentuality, periodicity and recurrency of the purchased items. To this aim, we define a pattern Temporal Annotated Recurring Sequence (TARS) able to capture simultaneously and adaptively all these factors. We define the method to extract TARS and develop a predictor for next basket named TBP (TARS Based Predictor) that, on top of TARS, is able to to understand the level of the customer{\textquoteright}s stocks and recommend the set of most necessary items. By adopting the TBP the supermarket chains could crop tailored suggestions for each individual customer which in turn could effectively speed up their shopping sessions. 
A deep experimentation shows that TARS are able to explain the customer purchase behavior, and that TBP outperforms the state-of-the-art competitors.}, url = {https://arxiv.org/abs/1702.07158}, author = {Riccardo Guidotti and Giulio Rossetti and Luca Pappalardo and Fosca Giannotti and Dino Pedreschi} } @article {1025, title = {Node-centric Community Discovery: From static to dynamic social network analysis}, journal = {Online Social Networks and Media}, volume = {3}, year = {2017}, pages = {32{\textendash}48}, abstract = {Nowadays, online social networks represent privileged playgrounds that enable researchers to study, characterize and understand complex human behaviors. Social Network Analysis, commonly known as SNA, is the multidisciplinary field of research under which researchers of different backgrounds perform their studies: one of the hottest topics in such diversified context is indeed Community Discovery. Clustering individuals, whose relations are described by a networked structure, into homogeneous communities is a complex task required by several analytical processes. Moreover, due to the user-centric and dynamic nature of online social services, during the last decades, particular emphasis was dedicated to the definition of node-centric, overlapping and evolutive Community Discovery methodologies. In this paper we provide a comprehensive and concise review of the main results, both algorithmic and analytical, we obtained in this field. 
Moreover, to better underline the rationale behind our research activity on Community Discovery, in this work we provide a synthetic review of the relevant literature, discussing not only methodological results but also analytical ones.}, doi = {10.1016/j.osnem.2017.10.003}, url = {https://www.sciencedirect.com/science/article/abs/pii/S2468696417301052}, author = {Giulio Rossetti and Dino Pedreschi and Fosca Giannotti} } @conference {1049, title = {Privacy Preserving Multidimensional Profiling}, booktitle = {International Conference on Smart Objects and Technologies for Social Good}, year = {2017}, publisher = {Springer}, organization = {Springer}, abstract = {Recently, big data has become central in the analysis of human behavior and the development of innovative services. In particular, a new class of services is emerging, taking advantage of different sources of data, in order to consider the multiple aspects of human beings. Unfortunately, these data can lead to re-identification problems and other privacy leaks, as diffusely reported in both scientific literature and media. The risk is even more pressing if multiple sources of data are linked together since a potential adversary could know information related to each dataset. For this reason, it is necessary to evaluate accurately and mitigate the individual privacy risk before releasing personal data. 
In this paper, we propose a methodology for the first task, i.e., assessing privacy risk, in a multidimensional scenario, defining some possible privacy attacks and simulating them using real-world datasets.}, doi = {10.1007/978-3-319-76111-4_15}, url = {https://link.springer.com/chapter/10.1007/978-3-319-76111-4_15}, author = {Francesca Pratesi and Anna Monreale and Fosca Giannotti and Dino Pedreschi} } @conference {1050, title = {Sentiment Spreading: An Epidemic Model for Lexicon-Based Sentiment Analysis on Twitter}, booktitle = {Conference of the Italian Association for Artificial Intelligence}, year = {2017}, publisher = {Springer}, organization = {Springer}, abstract = {While sentiment analysis has received significant attention in the last years, problems still exist when tools need to be applied to microblogging content. This because, typically, the text to be analysed consists of very short messages lacking in structure and semantic context. At the same time, the amount of text produced by online platforms is enormous. So, one needs simple, fast and effective methods in order to be able to efficiently study sentiment in these data. Lexicon-based methods, which use a predefined dictionary of terms tagged with sentiment valences to evaluate sentiment in longer sentences, can be a valid approach. Here we present a method based on epidemic spreading to automatically extend the dictionary used in lexicon-based sentiment analysis, starting from a reduced dictionary and large amounts of Twitter data. The resulting dictionary is shown to contain valences that correlate well with human-annotated sentiment, and to produce tweet sentiment classifications comparable to the original dictionary, with the advantage of being able to tag more tweets than the original. 
The method is easily extensible to various languages and applicable to large amounts of data.}, doi = {10.1007/978-3-319-70169-1_9}, url = {https://link.springer.com/chapter/10.1007/978-3-319-70169-1_9}, author = {Pollacci, Laura and Alina Sirbu and Fosca Giannotti and Dino Pedreschi and Claudio Lucchese and Muntean, Cristina Ioana} } @conference {1031, title = {There{\textquoteright}s A Path For Everyone: A Data-Driven Personal Model Reproducing Mobility Agendas}, booktitle = {4th IEEE International Conference on Data Science and Advanced Analytics (DSAA 2017)}, year = {2017}, publisher = {IEEE}, organization = {IEEE}, address = {Tokyo}, author = {Riccardo Guidotti and Roberto Trasarti and Mirco Nanni and Fosca Giannotti and Dino Pedreschi} } @article {954, title = {Tiles: an online algorithm for community discovery in dynamic social networks}, journal = {Machine Learning}, volume = {106}, number = {8}, year = {2017}, pages = {1213{\textendash}1241}, abstract = {Community discovery has emerged during the last decade as one of the most challenging problems in social network analysis. Many algorithms have been proposed to find communities on static networks, i.e. networks which do not change in time. However, social networks are dynamic realities (e.g. call graphs, online social networks): in such scenarios static community discovery fails to identify a partition of the graph that is semantically consistent with the temporal information expressed by the data. In this work we propose Tiles, an algorithm that extracts overlapping communities and tracks their evolution in time following an online iterative procedure. Our algorithm operates following a domino effect strategy, dynamically recomputing nodes community memberships whenever a new interaction takes place. 
We compare Tiles with state-of-the-art community detection algorithms on both synthetic and real world networks having annotated community structure: our experiments show that the proposed approach is able to guarantee lower execution times and better correspondence with the ground truth communities than its competitors. Moreover, we illustrate the specifics of the proposed approach by discussing the properties of the communities it is able to identify.}, doi = {10.1007/s10994-016-5582-8}, url = {https://link.springer.com/article/10.1007/s10994-016-5582-8}, author = {Giulio Rossetti and Luca Pappalardo and Dino Pedreschi and Fosca Giannotti} } @booklet {962, title = {Advances in Network Science: 12th International Conference and School, NetSci-X 2016, Wroclaw, Poland, January 11-13, 2016, Proceedings}, year = {2016}, abstract = {This book constitutes the refereed proceedings of the 12th International Conference and School of Network Science, NetSci-X 2016, held in Wroclaw, Poland, in January 2016. The 12 full and 6 short papers were carefully reviewed and selected from 59 submissions. The papers deal with the study of network models in domains ranging from biology and physics to computer science, from financial markets to cultural integration, and from social media to infectious diseases.}, doi = {10.1007/978-3-319-28361-6}, author = {Wierzbicki, Adam and Brandes, Ulrik and Schweitzer, Frank and Dino Pedreschi} } @article {961, title = {An analytical framework to nowcast well-being using mobile phone data}, journal = {International Journal of Data Science and Analytics}, volume = {2}, number = {1-2}, year = {2016}, pages = {75{\textendash}92}, abstract = {An intriguing open question is whether measurements derived from Big Data recording human activities can yield high-fidelity proxies of socio-economic development and well-being. 
Can we monitor and predict the socio-economic development of a territory just by observing the behavior of its inhabitants through the lens of Big Data? In this paper, we design a data-driven analytical framework that uses mobility measures and social measures extracted from mobile phone data to estimate indicators for socio-economic development and well-being. We discover that the diversity of mobility, defined in terms of entropy of the individual users{\textquoteright} trajectories, exhibits (i) significant correlation with two different socio-economic indicators and (ii) the highest importance in predictive models built to predict the socio-economic indicators. Our analytical framework opens an interesting perspective to study human behavior through the lens of Big Data by means of new statistical indicators that quantify and possibly {\textquotedblleft}nowcast{\textquotedblright} the well-being and the socio-economic development of a territory.}, doi = {10.1007/s41060-016-0013-2}, author = {Luca Pappalardo and Maarten Vanhoof and Lorenzo Gabrielli and Zbigniew Smoreda and Dino Pedreschi and Fosca Giannotti} } @conference {882, title = {Audio Ergo Sum}, booktitle = {Federation of International Conferences on Software Technologies: Applications and Foundations}, year = {2016}, publisher = {Springer}, organization = {Springer}, abstract = {Nobody can state {\textquotedblleft}Rock is my favorite genre{\textquotedblright} or {\textquotedblleft}David Bowie is my favorite artist{\textquotedblright}. We defined a Personal Listening Data Model able to capture musical preferences through indicators and patterns, and we discovered that we are all characterized by a limited set of musical preferences, but not by a unique predilection. The empowered capacity of mobile devices and their growing adoption in our everyday life is generating an enormous increment in the production of personal data such as calls, positioning, online purchases and even music listening. 
Musical listening is a type of data that has started receiving more attention from the scientific community as consequence of the increasing availability of rich and punctual online data sources. Starting from the listening of 30k Last.Fm users, we show how the employment of the Personal Listening Data Models can provide higher levels of self-awareness. In addition, the proposed model will enable the development of a wide range of analysis and musical services both at personal and at collective level.}, doi = {10.1007/978-3-319-50230-4_5}, author = {Riccardo Guidotti and Giulio Rossetti and Dino Pedreschi} } @article {852, title = {Big Data Research in Italy: A Perspective}, journal = {Engineering}, volume = {2}, number = {2}, year = {2016}, month = {06/2016}, pages = {163}, abstract = {The aim of this article is to synthetically describe the research projects that a selection of Italian universities is undertaking in the context of big data. Far from being exhaustive, this article has the objective of offering a sample of distinct applications that address the issue of managing huge amounts of data in Italy, collected in relation to diverse domains.}, issn = {print: 2095-8099 / online: 2096-0026}, doi = {10.1016/J.ENG.2016.02.011}, url = {http://engineering.org.cn/EN/abstract/article_12288.shtml}, author = {Sonia Bergamaschi and Emanuele Carlini and Michelangelo Ceci and Barbara Furletti and Fosca Giannotti and Donato Malerba and Mario Mezzanzanica and Anna Monreale and Gabriella Pasi and Dino Pedreschi and Raffaele Perego and Salvatore Ruggieri} } @booklet {963, title = {Data Mining and Constraint Programming - Foundations of a Cross-Disciplinary Approach.}, year = {2016}, abstract = {A successful integration of constraint programming and data mining has the potential to lead to a new ICT paradigm with far reaching implications. It could change the face of data mining and machine learning, as well as constraint programming technology. 
It would not only allow one to use data mining techniques in constraint programming to identify and update constraints and optimization criteria, but also to employ constraints and criteria in data mining and machine learning in order to discover models compatible with prior knowledge. This book reports on some key results obtained on this integrated and cross- disciplinary approach within the European FP7 FET Open project no. 284715 on {\textquotedblleft}Inductive Constraint Programming{\textquotedblright} and a number of associated workshops and Dagstuhl seminars. The book is structured in five parts: background; learning to model; learning to solve; constraint programming for data mining; and showcases. }, doi = {10.1007/978-3-319-50137-6}, author = {Bessiere, Christian and De Raedt, Luc and Lars Kotthoff and Siegfried Nijssen and Barry O{\textquoteright}Sullivan and Dino Pedreschi} } @inbook {965, title = {Data Mining and Constraints: An Overview}, booktitle = {Data Mining and Constraint Programming}, year = {2016}, pages = {25{\textendash}48}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {This paper provides an overview of the current state-of-the-art on using constraints in knowledge discovery and data mining. The use of constraints requires mechanisms for defining and evaluating them during the knowledge extraction process. We give a structured account of three main groups of constraints based on the specific context in which they are defined and used. 
The aim is to provide a complete view on constraints as a building block of data mining methods.}, doi = {10.1007/978-3-319-50137-6_2}, author = {Valerio Grossi and Dino Pedreschi and Franco Turini} } @article {875, title = {Driving Profiles Computation and Monitoring for Car Insurance CRM}, journal = {ACM Transactions on Intelligent Systems and Technology (TIST)}, volume = {8}, number = {1}, year = {2016}, pages = {14:1{\textendash}14:26}, abstract = {Customer segmentation is one of the most traditional and valued tasks in customer relationship management (CRM). In this article, we explore the problem in the context of the car insurance industry, where the mobility behavior of customers plays a key role: Different mobility needs, driving habits, and skills imply also different requirements (level of coverage provided by the insurance) and risks (of accidents). In the present work, we describe a methodology to extract several indicators describing the driving profile of customers, and we provide a clustering-oriented instantiation of the segmentation problem based on such indicators. Then, we consider the availability of a continuous flow of fresh mobility data sent by the circulating vehicles, aiming at keeping our segments constantly up to date. We tackle a major scalability issue that emerges in this context when the number of customers is large-namely, the communication bottleneck-by proposing and implementing a sophisticated distributed monitoring solution that reduces communications between vehicles and company servers to the essential. We validate the framework on a large database of real mobility data coming from GPS devices on private cars. 
Finally, we analyze the privacy risks that the proposed approach might involve for the users, providing and evaluating a countermeasure based on data perturbation.}, doi = {10.1145/2912148}, url = {http://doi.acm.org/10.1145/2912148}, author = {Mirco Nanni and Roberto Trasarti and Anna Monreale and Valerio Grossi and Dino Pedreschi} } @inbook {817, title = {Going Beyond GDP to Nowcast Well-Being Using Retail Market Data}, booktitle = {Advances in Network Science}, year = {2016}, pages = {29{\textendash}42}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {One of the most used measures of the economic health of a nation is the Gross Domestic Product (GDP): the market value of all officially recognized final goods and services produced within a country in a given period of time. GDP, prosperity and well-being of the citizens of a country have been shown to be highly correlated. However, GDP is an imperfect measure in many respects. GDP usually takes a lot of time to be estimated and arguably the well-being of the people is not quantifiable simply by the market value of the products available to them. In this paper we use a quantification of the average sophistication of satisfied needs of a population as an alternative to GDP. We show that this quantification can be calculated more easily than GDP and it is a very promising predictor of the GDP value, anticipating its estimation by six months. The measure is arguably a more multifaceted evaluation of the well-being of the population, as it tells us more about how people are satisfying their needs. 
Our study is based on a large dataset of retail micro transactions happening across the Italian territory.}, doi = {10.1007/978-3-319-28361-6_3}, author = {Riccardo Guidotti and Michele Coscia and Dino Pedreschi and Diego Pennacchioli} } @article {960, title = {Homophilic network decomposition: a community-centric analysis of online social services}, journal = {Social Network Analysis and Mining}, volume = {6}, number = {1}, year = {2016}, pages = {103}, abstract = {In this paper we formulate the homophilic network decomposition problem: Is it possible to identify a network partition whose structure is able to characterize the degree of homophily of its nodes? The aim of our work is to understand the relations between the homophily of individuals and the topological features expressed by specific network substructures. We apply several community detection algorithms on three large-scale online social networks{\textemdash}Skype, LastFM and Google+{\textemdash}and advocate the need of identifying the right algorithm for each specific network in order to extract a homophilic network decomposition. Our results show clear relations between the topological features of communities and the degree of homophily of their nodes in three online social scenarios: product engagement in the Skype network, number of listened songs on LastFM and homogeneous level of education among users of Google+.}, doi = {10.1007/s1327}, author = {Giulio Rossetti and Luca Pappalardo and Riivo Kikas and Dino Pedreschi and Fosca Giannotti and Marlon Dumas} } @article {866, title = {A supervised approach for intra-/inter-community interaction prediction in dynamic social networks}, journal = {Social Network Analysis and Mining}, volume = {6}, number = {1}, year = {2016}, month = {09/2016}, pages = {86}, abstract = {Due to the growing availability of Internet services in the last decade, the interactions between people became more and more easy to establish. 
For example, we can have an intercontinental job interview, or we can send real-time multimedia content to any friend of us just owning a smartphone. All this kind of human activities generates digital footprints, that describe a complex, rapidly evolving, network structures. In such dynamic scenario, one of the most challenging tasks involves the prediction of future interactions between couples of actors (i.e., users in online social networks, researchers in collaboration networks). In this paper, we approach such problem by leveraging networks dynamics: to this extent, we propose a supervised learning approach which exploits features computed by time-aware forecasts of topological measures calculated between node pairs. Moreover, since real social networks are generally composed by weakly connected modules, we instantiate the interaction prediction problem in two disjoint applicative scenarios: intra-community and inter-community link prediction. Experimental results on real time-stamped networks show how our approach is able to reach high accuracy. Furthermore, we analyze the performances of our methodology when varying the typologies of features, community discovery algorithms and forecast methods.}, issn = {1869-5469}, doi = {10.1007/s13278-016-0397-y}, url = {http://dx.doi.org/10.1007/s13278-016-0397-y}, author = {Giulio Rossetti and Riccardo Guidotti and Ioanna Miliou and Dino Pedreschi and Fosca Giannotti} } @inbook {964, title = {Understanding human mobility with big data}, booktitle = {Solving Large Scale Learning Tasks. 
Challenges and Algorithms}, year = {2016}, pages = {208{\textendash}220}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {The paper illustrates basic methods of mobility data mining, designed to extract from the big mobility data the patterns of collective movement behavior, i.e., discover the subgroups of travelers characterized by a common purpose, profiles of individual movement activity, i.e., characterize the routine mobility of each traveler. We illustrate a number of concrete case studies where mobility data mining is put at work to create powerful analytical services for policy makers, businesses, public administrations, and individual citizens.}, doi = {10.1007/978-3-319-41706-6_10}, author = {Fosca Giannotti and Lorenzo Gabrielli and Dino Pedreschi and S Rinzivillo} } @article {867, title = {Unveiling mobility complexity through complex network analysis}, journal = {Social Network Analysis and Mining}, volume = {6}, number = {1}, year = {2016}, pages = {59}, abstract = {The availability of massive digital traces of individuals is offering a series of novel insights on the understanding of patterns characterizing human mobility. Many studies try to semantically enrich mobility data with annotations about human activities. However, these approaches either focus on places with high frequencies (e.g., home and work), or relay on background knowledge (e.g., public available points of interest). In this paper, we depart from the concept of frequency and we focus on a high level representation of mobility using network analytics. The visits of each driver to each systematic destination are modeled as links in a bipartite network where a set of nodes represents drivers and the other set represents places. We extract such network from two real datasets of human mobility based, respectively, on GPS and GSM data. 
We introduce the concept of mobility complexity of drivers and places as a ranking analysis over the nodes of these networks. In addition, by means of community discovery analysis, we differentiate subgroups of drivers and places according both to their homogeneity and to their mobility complexity.}, doi = {10.1007/s13278-016-0369-2}, author = {Riccardo Guidotti and Anna Monreale and S Rinzivillo and Dino Pedreschi and Fosca Giannotti} } @conference {763, title = {Behavioral Entropy and Profitability in Retail}, booktitle = {IEEE International Conference on Data Science and Advanced Analytics (IEEE DSAA{\textquoteright}2015)}, year = {2015}, publisher = {IEEE}, organization = {IEEE}, address = {Paris}, abstract = {Human behavior is predictable in principle: people are systematic in their everyday choices. This predictability can be used to plan events and infrastructure, both for the public good and for private gains. In this paper we investigate the largely unexplored relationship between the systematic behavior of a customer and its profitability for a retail company. We estimate a customer{\textquoteright}s behavioral entropy over two dimensions: the basket entropy is the variety of what customers buy, and the spatio-temporal entropy is the spatial and temporal variety of their shopping sessions. To estimate the basket and the spatiotemporal entropy we use data mining and information theoretic techniques. We find that predictable systematic customers are more profitable for a supermarket: their average per capita expenditures are higher than non systematic customers and they visit the shops more often. However, this higher individual profitability is masked by its overall level. The highly systematic customers are a minority of the customer set. As a consequence, the total amount of revenues they generate is small. We suggest that favoring a systematic behavior in their customers might be a good strategy for supermarkets to increase revenue. 
These results are based on data coming from a large Italian supermarket chain, including more than 50 thousand customers visiting 23 shops to purchase more than 80 thousand distinct products.}, author = {Riccardo Guidotti and Michele Coscia and Dino Pedreschi and Diego Pennacchioli} } @conference {756, title = {City users{\textquoteright} classification with mobile phone data}, booktitle = {IEEE Big Data}, year = {2015}, month = {11/2015}, address = {Santa Clara (CA) - USA}, abstract = {Nowadays mobile phone data are an actual proxy for studying the users{\textquoteright} social life and urban dynamics. In this paper we present the Sociometer, an analytical framework aimed at classifying mobile phone users into behavioral categories by means of their call habits. The analytical process starts from spatio-temporal profiles, learns the different behaviors, and returns annotated profiles. After the description of the methodology and its evaluation, we present an application of the Sociometer for studying city users of one small and one big city, evaluating the impact of big events in these cities.}, author = {Lorenzo Gabrielli and Barbara Furletti and Roberto Trasarti and Fosca Giannotti and Dino Pedreschi} } @conference {878, title = {Clustering Formulation Using Constraint Optimization}, booktitle = {Software Engineering and Formal Methods - {SEFM} 2015 Collocated Workshops: ATSE, HOFM, MoKMaSD, and VERY*SCART, York, UK, September 7-8, 2015, Revised Selected Papers}, year = {2015}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {The problem of clustering a set of data is a textbook machine learning problem, but at the same time, at heart, a typical optimization problem. Given an objective function, such as minimizing the intra-cluster distances or maximizing the inter-cluster distances, the task is to find an assignment of data points to clusters that achieves this objective. 
In this paper, we present a constraint programming model for a centroid based clustering and one for a density based clustering. In particular, as a key contribution, we show how the expressivity introduced by the formulation of the problem by constraint programming makes the standard problem easy to be extended with other constraints that permit to generate interesting variants of the problem. We show this important aspect in two different ways: first, we show how the formulation of the density-based clustering by constraint programming makes it very similar to the label propagation problem and then, we propose a variant of the standard label propagation approach.}, doi = {10.1007/978-3-662-49224-6_9}, url = {http://dx.doi.org/10.1007/978-3-662-49224-6_9}, author = {Valerio Grossi and Anna Monreale and Mirco Nanni and Dino Pedreschi and Franco Turini} } @conference {819, title = {Community-centric analysis of user engagement in Skype social network}, booktitle = {International conference on Advances in Social Network Analysis and Mining}, year = {2015}, publisher = {IEEE}, organization = {IEEE}, address = {Paris, France}, isbn = {978-1-4503-3854-7}, doi = {10.1145/2808797.2809384}, url = {http://dl.acm.org/citation.cfm?doid=2808797.2809384}, author = {Giulio Rossetti and Luca Pappalardo and Riivo Kikas and Dino Pedreschi and Fosca Giannotti and Marlon Dumas} } @article {759, title = {Discrimination- and privacy-aware patterns}, journal = {Data Min. Knowl. Discov.}, volume = {29}, number = {6}, year = {2015}, pages = {1733{\textendash}1782}, abstract = {Data mining is gaining societal momentum due to the ever increasing availability of large amounts of human data, easily collected by a variety of sensing technologies. 
We are therefore faced with unprecedented opportunities and risks: a deeper understanding of human behavior and how our society works is darkened by a greater chance of privacy intrusion and unfair discrimination based on the extracted patterns and profiles. Consider the case when a set of patterns extracted from the personal data of a population of individual persons is released for a subsequent use into a decision making process, such as, e.g., granting or denying credit. First, the set of patterns may reveal sensitive information about individual persons in the training population and, second, decision rules based on such patterns may lead to unfair discrimination, depending on what is represented in the training cases. Although methods independently addressing privacy or discrimination in data mining have been proposed in the literature, in this context we argue that privacy and discrimination risks should be tackled together, and we present a methodology for doing so while publishing frequent pattern mining results. We describe a set of pattern sanitization methods, one for each discrimination measure used in the legal literature, to achieve a fair publishing of frequent patterns in combination with two possible privacy transformations: one based on k-anonymity and one based on differential privacy. Our proposed pattern sanitization methods based on k-anonymity yield both privacy- and discrimination-protected patterns, while introducing reasonable (controlled) pattern distortion. Moreover, they obtain a better trade-off between protection and data quality than the sanitization methods based on differential privacy. Finally, the effectiveness of our proposals is assessed by extensive experiments. 
}, doi = {10.1007/s10618-014-0393-7}, url = {http://dx.doi.org/10.1007/s10618-014-0393-7}, author = {Sara Hajian and Josep Domingo-Ferrer and Anna Monreale and Dino Pedreschi and Fosca Giannotti} } @proceedings {770, title = {The harsh rule of the goals: data-driven performance indicators for football teams}, year = {2015}, abstract = {{\textemdash}Sports analytics in general, and football (soccer in USA) analytics in particular, have evolved in recent years in an amazing way, thanks to automated or semi-automated sensing technologies that provide high-fidelity data streams extracted from every game. In this paper we propose a data-driven approach and show that there is a large potential to boost the understanding of football team performance. From observational data of football games we extract a set of pass-based performance indicators and summarize them in the H indicator. We observe a strong correlation among the proposed indicator and the success of a team, and therefore perform a simulation on the four major European championships (78 teams, almost 1500 games). The outcome of each game in the championship was replaced by a synthetic outcome (win, loss or draw) based on the performance indicators computed for each team. We found that the final rankings in the simulated championships are very close to the actual rankings in the real championships, and show that teams with high ranking error show extreme values of a defense/attack efficiency measure, the Pezzali score. 
Our results are surprising given the simplicity of the proposed indicators, suggesting that a complex systems{\textquoteright} view on football data has the potential of revealing hidden patterns and behavior of superior quality.}, url = {https://www.researchgate.net/profile/Luca_Pappalardo/publication/281318318_The_harsh_rule_of_the_goals_data-driven_performance_indicators_for_football_teams/links/561668e308ae37cfe4090a5d.pdf}, author = {Paolo Cintia and Luca Pappalardo and Dino Pedreschi and Fosca Giannotti and Marco Malvaldi} } @conference {816, title = {Interaction Prediction in Dynamic Networks exploiting Community Discovery}, booktitle = {International conference on Advances in Social Network Analysis and Mining, ASONAM 2015}, year = {2015}, publisher = {IEEE}, organization = {IEEE}, address = {Paris, France}, abstract = {Due to the growing availability of online social services, interactions between people became more and more easy to establish and track. Online social human activities generate digital footprints, that describe complex, rapidly evolving, dynamic networks. In such scenario one of the most challenging task to address involves the prediction of future interactions between couples of actors. In this study, we want to leverage networks dynamics and community structure to predict which are the future interactions more likely to appear. To this extent, we propose a supervised learning approach which exploit features computed by time-aware forecasts of topological measures calculated between pair of nodes belonging to the same community. 
Our experiments on real dynamic networks show that the designed analytical process is able to achieve interesting results.}, isbn = {978-1-4503-3854-7}, doi = {10.1145/2808797.2809401}, url = {http://dl.acm.org/citation.cfm?doid=2808797.2809401}, author = {Giulio Rossetti and Riccardo Guidotti and Diego Pennacchioli and Dino Pedreschi and Fosca Giannotti} } @conference {820, title = {Quantification in Social Networks}, booktitle = {International Conference on Data Science and Advanced Analytics (IEEE DSAA{\textquoteright}2015)}, year = {2015}, publisher = {IEEE}, organization = {IEEE}, address = {Paris, France}, abstract = {In many real-world applications there is a need to monitor the distribution of a population across different classes, and to track changes in this distribution over time. As an example, an important task is to monitor the percentage of unemployed adults in a given region. When the membership of an individual in a class cannot be established deterministically, a typical solution is the classification task. However, in the above applications the final goal is not determining which class the individuals belong to, but estimating the prevalence of each class in the unlabeled data. This task is called quantification. Most of the work in the literature addressed the quantification problem considering data presented in conventional attribute format. Since the ever-growing availability of web and social media we have a flourish of network data representing a new important source of information and by using quantification network techniques we could quantify collective behavior, i.e., the number of users that are involved in certain type of activities, preferences, or behaviors. In this paper we exploit the homophily effect observed in many social networks in order to construct a quantifier for networked data. 
Our experiments show the effectiveness of the proposed approaches and the comparison with the existing state-of-the-art quantification methods shows that they are more accurate. }, doi = {10.1109/DSAA.2015.7344845}, url = {http://www.giuliorossetti.net/about/wp-content/uploads/2015/12/main_DSAA.pdf}, author = {Letizia Milli and Anna Monreale and Giulio Rossetti and Dino Pedreschi and Fosca Giannotti and Fabrizio Sebastiani} } @article {723, title = {Returners and explorers dichotomy in human mobility}, journal = {Nat Commun}, volume = {6}, year = {2015}, month = {09}, abstract = {The availability of massive digital traces of human whereabouts has offered a series of novel insights on the quantitative patterns characterizing human mobility. In particular, numerous recent studies have lead to an unexpected consensus: the considerable variability in the characteristic travelled distance of individuals coexists with a high degree of predictability of their future locations. Here we shed light on this surprising coexistence by systematically investigating the impact of recurrent mobility on the characteristic distance travelled by individuals. Using both mobile phone and GPS data, we discover the existence of two distinct classes of individuals: returners and explorers. As existing models of human mobility cannot explain the existence of these two classes, we develop more realistic models able to capture the empirical findings. 
Finally, we show that returners and explorers play a distinct quantifiable role in spreading phenomena and that a correlation exists between their mobility patterns and social interactions.}, url = {http://dx.doi.org/10.1038/ncomms9166}, author = {Luca Pappalardo and Filippo Simini and S Rinzivillo and Dino Pedreschi and Fosca Giannotti and Barabasi, Albert-Laszlo} } @article {990, title = {A risk model for privacy in trajectory data}, journal = {Journal of Trust Management}, volume = {2}, number = {1}, year = {2015}, pages = {9}, abstract = {Time sequence data relating to users, such as medical histories and mobility data, are good candidates for data mining, but often contain highly sensitive information. Different methods in privacy-preserving data publishing are utilised to release such private data so that individual records in the released data cannot be re-linked to specific users with a high degree of certainty. These methods provide theoretical worst-case privacy risks as measures of the privacy protection that they offer. However, often with many real-world data the worst-case scenario is too pessimistic and does not provide a realistic view of the privacy risks: the real probability of re-identification is often much lower than the theoretical worst-case risk. In this paper, we propose a novel empirical risk model for privacy which, in relation to the cost of privacy attacks, demonstrates better the practical risks associated with a privacy preserving data release. 
We show detailed evaluation of the proposed risk model by using k-anonymised real-world mobility data and then, we show how the empirical evaluation of the privacy risk has a different trend in synthetic data describing random movements.}, doi = {10.1186/s40493-015-0020-6}, author = {Anirban Basu and Anna Monreale and Roberto Trasarti and Juan Camilo Corena and Fosca Giannotti and Dino Pedreschi and Shinsaku Kiyomoto and Yutaka Miyake and Tadashi Yanagihara} } @article {724, title = {Small Area Model-Based Estimators Using Big Data Sources}, journal = {Journal of Official Statistics}, volume = {31}, number = {2}, year = {2015}, pages = {263{\textendash}281}, author = {Stefano Marchetti and Caterina Giusti and Monica Pratesi and Nicola Salvati and Fosca Giannotti and Dino Pedreschi and S Rinzivillo and Luca Pappalardo and Lorenzo Gabrielli} } @article {564, title = {Anonymity preserving sequential pattern mining}, journal = {Artif. Intell. Law}, volume = {22}, number = {2}, year = {2014}, pages = {141{\textendash}173}, abstract = {The increasing availability of personal data of a sequential nature, such as time-stamped transaction or location data, enables increasingly sophisticated sequential pattern mining techniques. However, privacy is at risk if it is possible to reconstruct the identity of individuals from sequential data. Therefore, it is important to develop privacy-preserving techniques that support publishing of really anonymous data, without altering the analysis results significantly. In this paper we propose to apply the Privacy-by-design paradigm for designing a technological framework to counter the threats of undesirable, unlawful effects of privacy violation on sequence data, without obstructing the knowledge discovery opportunities of data mining technologies. 
First, we introduce a k-anonymity framework for sequence data, by defining the sequence linking attack model and its associated countermeasure, a k-anonymity notion for sequence datasets, which provides a formal protection against the attack. Second, we instantiate this framework and provide a specific method for constructing the k-anonymous version of a sequence dataset, which preserves the results of sequential pattern mining, together with several basic statistics and other analytical properties of the original data, including the clustering structure. A comprehensive experimental study on realistic datasets of process-logs, web-logs and GPS tracks is carried out, which empirically shows how, in our proposed method, the protection of privacy meets analytical utility.}, doi = {10.1007/s10506-014-9154-6}, url = {http://dx.doi.org/10.1007/s10506-014-9154-6}, author = {Anna Monreale and Dino Pedreschi and Ruggero G. Pensa and Fabio Pinelli} } @conference {574, title = {Big data analytics for smart mobility: a case study}, booktitle = {EDBT/ICDT 2014 Workshops - Mining Urban Data (MUD)}, year = {2014}, month = {03/2014}, address = {Athens, Greece}, url = {http://ceur-ws.org/Vol-1133/paper-57.pdf}, author = {Barbara Furletti and Roberto Trasarti and Lorenzo Gabrielli and Mirco Nanni and Dino Pedreschi} } @conference {637, title = {CF-inspired Privacy-Preserving Prediction of Next Location in the Cloud}, booktitle = {Cloud Computing Technology and Science (CloudCom), 2014 IEEE 6th International Conference on}, year = {2014}, publisher = {IEEE}, organization = {IEEE}, abstract = {Mobility data gathered from location sensors such as Global Positioning System (GPS) enabled phones and vehicles is valuable for spatio-temporal data mining for various location-based services (LBS). Such data is often considered sensitive and there exist many a mechanism for privacy preserving analyses of the data. 
Through various anonymisation mechanisms, it can be ensured with a high probability that a particular individual cannot be identified when mobility data is outsourced to third parties for analysis. However, challenges remain with the privacy of the queries on outsourced analysis results, especially when the queries are sent directly to third parties by end-users. Drawing inspiration from our earlier work in privacy preserving collaborative filtering (CF) and next location prediction, in this exploratory work, we propose a novel representation of trajectory data in the CF domain and experiment with a privacy preserving Slope One CF predictor. We present evaluations for the accuracy and the computational performance of our proposal using anonymised data gathered from real traffic data in the Italian cities of Pisa and Milan. One use-case is a third-party location-prediction-as-a-service deployed on a public cloud, which can respond to privacy-preserving queries while enabling data owners to build a rich predictor on the cloud. }, doi = {10.1109/CloudCom.2014.114}, url = {http://dx.doi.org/10.1109/CloudCom.2014.114}, author = {Anirban Basu and Juan Camilo Corena and Anna Monreale and Dino Pedreschi and Fosca Giannotti and Shinsaku Kiyomoto and Vaidya, Jaideep and Yutaka Miyake} } @conference {566, title = {Fair pattern discovery}, booktitle = {Symposium on Applied Computing, {SAC} 2014, Gyeongju, Republic of Korea - March 24 - 28, 2014}, year = {2014}, pages = {113{\textendash}120}, abstract = {Data mining is gaining societal momentum due to the ever increasing availability of large amounts of human data, easily collected by a variety of sensing technologies. We are assisting to unprecedented opportunities of understanding human and society behavior that unfortunately is darkened by several risks for human rights: one of this is the unfair discrimination based on the extracted patterns and profiles. 
Consider the case when a set of patterns extracted from the personal data of a population of individual persons is released for subsequent use in a decision making process, such as, e.g., granting or denying credit. Decision rules based on such patterns may lead to unfair discrimination, depending on what is represented in the training cases. In this context, we address the discrimination risks resulting from publishing frequent patterns. We present a set of pattern sanitization methods, one for each discrimination measure used in the legal literature, for fair (discrimination-protected) publishing of frequent pattern mining results. Our proposed pattern sanitization methods yield discrimination-protected patterns, while introducing reasonable (controlled) pattern distortion. Finally, the effectiveness of our proposals is assessed by extensive experiments.}, doi = {10.1145/2554850.2555043}, url = {http://doi.acm.org/10.1145/2554850.2555043}, author = {Sara Hajian and Anna Monreale and Dino Pedreschi and Josep Domingo-Ferrer and Fosca Giannotti} } @conference {727, title = {Mining efficient training patterns of non-professional cyclists}, booktitle = {22nd Italian Symposium on Advanced Database Systems, {SEBD} 2014, Sorrento Coast, Italy, June 16-18, 2014.}, year = {2014}, author = {Paolo Cintia and Luca Pappalardo and Dino Pedreschi} } @conference {827, title = {Overlap versus partition: marketing classification and customer profiling in complex networks of products}, booktitle = {Data engineering workshops (ICDEW), 2014 IEEE 30th international conference on}, year = {2014}, publisher = {IEEE}, organization = {IEEE}, abstract = {In recent years we witnessed the explosion in the availability of data regarding human and customer behavior in the market. This data richness era has fostered the development of useful applications in understanding how markets and the minds of the customers work. 
In this paper we focus on the analysis of complex networks based on customer behavior. Complex network analysis has provided a new and wide toolbox for the classic data mining task of clustering. With community discovery, i.e. the detection of functional modules in complex networks, we are now able to group together customers and products using a variety of different criteria. The aim of this paper is to explore this new analytic degree of freedom. We are interested in providing a case study uncovering the meaning of different community discovery algorithms on a network of products connected together because co-purchased by the same customers. We focus our interest in the different interpretation of a partition approach, where each product belongs to a single community, against an overlapping approach, where each product can belong to multiple communities. We found that the former is useful to improve the marketing classification of products, while the latter is able to create a collection of different customer profiles.}, doi = {10.1109/ICDEW.2014.6818312}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6818312}, author = {Diego Pennacchioli and Michele Coscia and Dino Pedreschi} } @conference {623, title = {The patterns of musical influence on the Last.Fm social network}, booktitle = {22nd Italian Symposium on Advanced Database Systems, {SEBD} 2014, Sorrento Coast, Italy, June 16-18, 2014.}, year = {2014}, author = {Diego Pennacchioli and Giulio Rossetti and Luca Pappalardo and Dino Pedreschi and Fosca Giannotti and Michele Coscia} } @conference {565, title = {A Privacy Risk Model for Trajectory Data}, booktitle = {Trust Management {VIII} - 8th {IFIP} {WG} 11.11 International Conference, {IFIPTM} 2014, Singapore, July 7-10, 2014. 
Proceedings}, year = {2014}, pages = {125{\textendash}140}, abstract = {Time sequence data relating to users, such as medical histories and mobility data, are good candidates for data mining, but often contain highly sensitive information. Different methods in privacy-preserving data publishing are utilised to release such private data so that individual records in the released data cannot be re-linked to specific users with a high degree of certainty. These methods provide theoretical worst-case privacy risks as measures of the privacy protection that they offer. However, often with many real-world data the worst-case scenario is too pessimistic and does not provide a realistic view of the privacy risks: the real probability of re-identification is often much lower than the theoretical worst-case risk. In this paper we propose a novel empirical risk model for privacy which, in relation to the cost of privacy attacks, demonstrates better the practical risks associated with a privacy preserving data release. We show detailed evaluation of the proposed risk model by using k-anonymised real-world mobility data.}, doi = {10.1007/978-3-662-43813-8_9}, url = {http://dx.doi.org/10.1007/978-3-662-43813-8_9}, author = {Anirban Basu and Anna Monreale and Juan Camilo Corena and Fosca Giannotti and Dino Pedreschi and Shinsaku Kiyomoto and Yutaka Miyake and Tadashi Yanagihara and Roberto Trasarti} } @article {EPJ14, title = {Privacy-by-Design in Big Data Analytics and Social Mining}, journal = {EPJ Data Science}, volume = {10}, year = {2014}, note = {2014:10}, abstract = {Privacy is ever-growing concern in our society and is becoming a fundamental aspect to take into account when one wants to use, publish and analyze data involving human personal sensitive information. 
Unfortunately, it is increasingly hard to transform the data in a way that it protects sensitive information: we live in the era of big data characterized by unprecedented opportunities to sense, store and analyze social data describing human activities in great detail and resolution. As a result, privacy preservation simply cannot be accomplished by de-identification alone. In this paper, we propose the privacy-by-design paradigm to develop technological frameworks for countering the threats of undesirable, unlawful effects of privacy violation, without obstructing the knowledge discovery opportunities of social mining and big data analytical technologies. Our main idea is to inscribe privacy protection into the knowledge discovery technology by design, so that the analysis incorporates the relevant privacy requirements from the start.}, doi = {10.1140/epjds/s13688-014-0010-4}, author = {Anna Monreale and S Rinzivillo and Francesca Pratesi and Fosca Giannotti and Dino Pedreschi} } @conference {725, title = {The purpose of motion: Learning activities from Individual Mobility Networks}, booktitle = {International Conference on Data Science and Advanced Analytics, {DSAA} 2014, Shanghai, China, October 30 - November 1, 2014}, year = {2014}, doi = {10.1109/DSAA.2014.7058090}, url = {http://dx.doi.org/10.1109/DSAA.2014.7058090}, author = {S Rinzivillo and Lorenzo Gabrielli and Mirco Nanni and Luca Pappalardo and Dino Pedreschi and Fosca Giannotti} } @article {828, title = {The retail market as a complex system}, journal = {EPJ Data Science}, volume = {3}, number = {1}, year = {2014}, pages = {1{\textendash}27}, abstract = {Aim of this paper is to introduce the complex system perspective into retail market analysis. Currently, to understand the retail market means to search for local patterns at the micro level, involving the segmentation, separation and profiling of diverse groups of consumers. In other contexts, however, markets are modelled as complex systems. 
Such strategy is able to uncover emerging regularities and patterns that make markets more predictable, e.g. enabling to predict how much a country{\textquoteright}s GDP will grow. Rather than isolate actors in homogeneous groups, this strategy requires to consider the system as a whole, as the emerging pattern can be detected only as a result of the interaction between its self-organizing parts. This assumption holds also in the retail market: each customer can be seen as an independent unit maximizing its own utility function. As a consequence, the global behaviour of the retail market naturally emerges, enabling a novel description of its properties, complementary to the local pattern approach. Such task demands for a data-driven empirical framework. In this paper, we analyse a unique transaction database, recording the micro-purchases of a million customers observed for several years in the stores of a national supermarket chain. We show the emergence of the fundamental pattern of this complex system, connecting the products{\textquoteright} volumes of sales with the customers{\textquoteright} volumes of purchases. This pattern has a number of applications. We provide three of them. 
By enabling us to evaluate the sophistication of needs that a customer has and a product satisfies, this pattern has been applied to the task of uncovering the hierarchy of needs of the customers, providing a hint about what is the next product a customer could be interested in buying and predicting in which shop she is likely to go to buy it.}, doi = {10.1140/epjds/s13688-014-0033-x}, url = {http://link.springer.com/article/10.1140/epjds/s13688-014-0033-x}, author = {Diego Pennacchioli and Michele Coscia and S Rinzivillo and Fosca Giannotti and Dino Pedreschi} } @inbook {636, title = {Retrieving Points of Interest from Human Systematic Movements}, booktitle = {Software Engineering and Formal Methods}, year = {2014}, pages = {294{\textendash}308}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {Human mobility analysis is emerging as a more and more fundamental task to deeply understand human behavior. In the last decade these kind of studies have become feasible thanks to the massive increase in availability of mobility data. A crucial point, for many mobility applications and analysis, is to extract interesting locations for people. In this paper, we propose a novel methodology to retrieve efficiently significant places of interest from movement data. Using car drivers{\textquoteright} systematic movements we mine everyday interesting locations, that is, places around which people life gravitates. 
The outcomes show the empirical evidence that these places capture nearly the whole mobility even though generated only from systematic movements abstractions.}, doi = {10.1007/978-3-319-15201-1_19}, author = {Riccardo Guidotti and Anna Monreale and S Rinzivillo and Dino Pedreschi and Fosca Giannotti} } @article {622, title = {Uncovering Hierarchical and Overlapping Communities with a Local-First Approach}, journal = {{TKDD}}, volume = {9}, number = {1}, year = {2014}, pages = {6}, abstract = {Community discovery in complex networks is the task of organizing a network{\textquoteright}s structure by grouping together nodes related to each other. Traditional approaches are based on the assumption that there is a global-level organization in the network. However, in many scenarios, each node is the bearer of complex information and cannot be classified in disjoint clusters. The top-down global view of the partition approach is not designed for this. Here, we represent this complex information as multiple latent labels, and we postulate that edges in the networks are created among nodes carrying similar labels. The latent labels are the communities a node belongs to and we discover them with a simple local-first approach to community discovery. This is achieved by democratically letting each node vote for the communities it sees surrounding it in its limited view of the global system, its ego neighborhood, using a label propagation algorithm, assuming that each node is aware of the label it shares with each of its connections. The local communities are merged hierarchically, unveiling the modular organization of the network at the global level and identifying overlapping groups and groups of groups. We tested this intuition against the state-of-the-art overlapping community discovery and found that our new method advances in the chosen scenarios in the quality of the obtained communities. 
We perform a test on benchmark and on real-world networks, evaluating the quality of the community coverage by using the extracted communities to predict the metadata attached to the nodes, which we consider external information about the latent labels. We also provide an explanation about why real-world networks contain overlapping communities and how our logic is able to capture them. Finally, we show how our method is deterministic, is incremental, and has a limited time complexity, so that it can be used on real-world scale networks.}, doi = {10.1145/2629511}, url = {http://doi.acm.org/10.1145/2629511}, author = {Michele Coscia and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi} } @conference {573, title = {Use of mobile phone data to estimate mobility flows. Measuring urban population and inter-city mobility using big data in an integrated approach}, booktitle = {47th SIS Scientific Meeting of the Italian Statistica Society}, year = {2014}, month = {06/2014}, address = {Cagliari }, abstract = {The Big Data, originating from the digital breadcrumbs of human activi- ties, sensed as a by-product of the technologies that we use for our daily activities, let us to observe the individual and collective behavior of people at an unprecedented detail. Many dimensions of our social life have big data {\textquotedblleft}proxies{\textquotedblright}, as the mobile calls data for mobility. In this paper we investigate to what extent such {\textquotedblright}big data{\textquotedblright}, in integration with administrative ones, could be a support in producing reliable and timely estimates of inter-city mobility. The study has been jointly developed by Is- tat, CNR, University of Pisa in the range of interest of the {\textquotedblleft}Commssione di studio avente il compito di orientare le scelte dellIstat sul tema dei Big Data {\textquotedblright}. 
In an on- going project at ISTAT, called {\textquotedblleft}Persons and Places{\textquotedblright} {\textendash} based on an integration of administrative data sources, it has been produced a first release of Origin Destina- tion matrix {\textendash} at municipality level {\textendash} assuming that the places of residence and that of work (or study) be the terminal points of usual individual mobility for work or study. The coincidence between the city of residence and that of work (or study) {\textendash} is considered as a proxy of the absence of intercity mobility for a person (we define him a static resident). The opposite case is considered as a proxy of presence of mo- bility (the person is a dynamic resident: commuter or embedded). As administrative data do not contain information on frequency of the mobility, the idea is to specify an estimate method, using calling data as support, to define for each municipality the stock of standing residents, embedded city users and daily city users (commuters)}, isbn = {978-88-8467-874-4}, url = {http://www.sis2014.it/proceedings/allpapers/3026.pdf}, author = {Barbara Furletti and Lorenzo Gabrielli and Fosca Giannotti and Letizia Milli and Mirco Nanni and Dino Pedreschi} } @conference {731, title = {Comparing General Mobility and Mobility by Car}, booktitle = {Computational Intelligence and 11th Brazilian Congress on Computational Intelligence (BRICS-CCI CBIC), 2013 BRICS Congress on}, year = {2013}, month = {Sept}, doi = {10.1109/BRICS-CCI-CBIC.2013.116}, author = {Luca Pappalardo and Filippo Simini and S Rinzivillo and Dino Pedreschi and Fosca Giannotti} } @inbook {634, title = {The discovery of discrimination}, booktitle = {Discrimination and privacy in the information society}, year = {2013}, pages = {91{\textendash}108}, publisher = {Springer}, organization = {Springer}, author = {Dino Pedreschi and Salvatore Ruggieri and Franco Turini} } @conference {729, title = {"Engine Matters": {A} First Large Scale Data 
Driven Study on Cyclists{\textquoteright} Performance}, booktitle = {13th {IEEE} International Conference on Data Mining Workshops, {ICDM} Workshops, TX, USA, December 7-10, 2013}, year = {2013}, doi = {10.1109/ICDMW.2013.41}, url = {http://dx.doi.org/10.1109/ICDMW.2013.41}, author = {Paolo Cintia and Luca Pappalardo and Dino Pedreschi} } @article {567, title = {Evolving networks: Eras and turning points}, journal = {Intell. Data Anal.}, volume = {17}, number = {1}, year = {2013}, pages = {27{\textendash}48}, abstract = {Within the large body of research in complex network analysis, an important topic is the temporal evolution of networks. Existing approaches aim at analyzing the evolution on the global and the local scale, extracting properties of either the entire network or local patterns. In this paper, we focus on detecting clusters of temporal snapshots of a network, to be interpreted as eras of evolution. To this aim, we introduce a novel hierarchical clustering methodology, based on a dissimilarity measure (derived from the Jaccard coefficient) between two temporal snapshots of the network, able to detect the turning points at the beginning of the eras. We devise a framework to discover and browse the eras, either in top-down or a bottom-up fashion, supporting the exploration of the evolution at any level of temporal resolution. We show how our approach applies to real networks and null models, by detecting eras in an evolving co-authorship graph extracted from a bibliographic dataset, a collaboration graph extracted from a cinema database, and a network extracted from a database of terrorist attacks; we illustrate how the discovered temporal clustering highlights the crucial moments when the networks witnessed profound changes in their structure. 
Our approach is finally boosted by introducing a meaningful labeling of the obtained clusters, such as the characterizing topics of each discovered era, thus adding a semantic dimension to our analysis.}, doi = {10.3233/IDA-120566}, url = {http://dx.doi.org/10.3233/IDA-120566}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @proceedings {529, title = {Explaining the PRoduct Range Effect in Purchase Data}, year = {2013}, author = {Diego Pennacchioli and Michele Coscia and S Rinzivillo and Dino Pedreschi and Fosca Giannotti} } @conference {504, title = {Measuring tie strength in multidimensional networks}, booktitle = {SEDB 2013}, year = {2013}, month = {2013}, author = {Giulio Rossetti and Luca Pappalardo and Dino Pedreschi} } @conference {615, title = {Privacy-Aware Distributed Mobility Data Analytics}, booktitle = {SEBD}, year = {2013}, address = {Roccella Jonica}, abstract = {We propose an approach to preserve privacy in an analytical processing within a distributed setting, and tackle the problem of obtaining aggregated information about vehicle traffic in a city from movement data collected by individual vehicles and shipped to a central server. Movement data are sensitive because they may describe typical movement behaviors and therefore be used for re-identification of individuals in a database. We provide a privacy-preserving framework for movement data aggregation based on trajectory generalization in a distributed environment. The proposed solution, based on the differential privacy model and on sketching techniques for efficient data compression, provides a formal data protection safeguard. Using real-life data, we demonstrate the effectiveness of our approach also in terms of data utility preserved by the data transformation. 
}, author = {Francesca Pratesi and Anna Monreale and Hui Wendy Wang and S Rinzivillo and Dino Pedreschi and Gennady Andrienko and Natalia Andrienko} } @inbook {571, title = {Privacy-Preserving Distributed Movement Data Aggregation}, booktitle = {Geographic Information Science at the Heart of Europe}, series = {Lecture Notes in Geoinformation and Cartography}, year = {2013}, pages = {225-245}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {We propose a novel approach to privacy-preserving analytical processing within a distributed setting, and tackle the problem of obtaining aggregated information about vehicle traffic in a city from movement data collected by individual vehicles and shipped to a central server. Movement data are sensitive because people{\textquoteright}s whereabouts have the potential to reveal intimate personal traits, such as religious or sexual preferences, and may allow re-identification of individuals in a database. We provide a privacy-preserving framework for movement data aggregation based on trajectory generalization in a distributed environment. The proposed solution, based on the differential privacy model and on sketching techniques for efficient data compression, provides a formal data protection safeguard. 
Using real-life data, we demonstrate the effectiveness of our approach also in terms of data utility preserved by the data transformation.}, isbn = {978-3-319-00614-7}, doi = {10.1007/978-3-319-00615-4_13}, url = {http://dx.doi.org/10.1007/978-3-319-00615-4_13}, author = {Anna Monreale and Hui Wendy Wang and Francesca Pratesi and S Rinzivillo and Dino Pedreschi and Gennady Andrienko and Natalia Andrienko}, editor = {Vandenbroucke, Danny and Bucher, B{\'e}n{\'e}dicte and Crompvoets, Joep} } @article {478, title = {Privacy-Preserving Mining of Association Rules From Outsourced Transaction Databases}, journal = { IEEE Systems Journal}, year = {2013}, abstract = {Spurred by developments such as cloud computing, there has been considerable recent interest in the paradigm of data mining-as-a-service. A company (data owner) lacking in expertise or computational resources can outsource its mining needs to a third party service provider (server). However, both the items and the association rules of the outsourced database are considered private property of the corporation (data owner). To protect corporate privacy, the data owner transforms its data and ships it to the server, sends mining queries to the server, and recovers the true patterns from the extracted patterns received from the server. In this paper, we study the problem of outsourcing the association rule mining task within a corporate privacy-preserving framework. We propose an attack model based on background knowledge and devise a scheme for privacy preserving outsourced mining. Our scheme ensures that each transformed item is indistinguishable with respect to the attacker{\textquoteright}s background knowledge, from at least k-1 other transformed items. Our comprehensive experiments on a very large and real transaction database demonstrate that our techniques are effective, scalable, and protect privacy.}, doi = {10.1109/JSYST.2012.2221854}, author = {Fosca Giannotti and L.V.S. 
Lakshmanan and Anna Monreale and Dino Pedreschi and Hui Wendy Wang} } @conference {563, title = {Quantification Trees}, booktitle = {2013 IEEE 13th International Conference on Data Mining, Dallas, TX, USA, December 7-10, 2013}, year = {2013}, pages = {528{\textendash}536}, abstract = {In many applications there is a need to monitor how a population is distributed across different classes, and to track the changes in this distribution that derive from varying circumstances, an example such application is monitoring the percentage (or "prevalence") of unemployed people in a given region, or in a given age range, or at different time periods. When the membership of an individual in a class cannot be established deterministically, this monitoring activity requires classification. However, in the above applications the final goal is not determining which class each individual belongs to, but simply estimating the prevalence of each class in the unlabeled data. This task is called quantification. In a supervised learning framework we may estimate the distribution across the classes in a test set from a training set of labeled individuals. However, this may be sub optimal, since the distribution in the test set may be substantially different from that in the training set (a phenomenon called distribution drift). So far, quantification has mostly been addressed by learning a classifier optimized for individual classification and later adjusting the distribution it computes to compensate for its tendency to either under-or over-estimate the prevalence of the class. In this paper we propose instead to use a type of decision trees (quantification trees) optimized not for individual classification, but directly for quantification. 
Our experiments show that quantification trees are more accurate than existing state-of-the-art quantification methods, while retaining at the same time the simplicity and understandability of the decision tree framework.}, doi = {10.1109/ICDM.2013.122}, url = {http://dx.doi.org/10.1109/ICDM.2013.122}, author = {Letizia Milli and Anna Monreale and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi and Fabrizio Sebastiani} } @article {527, title = {Spatial and Temporal Evaluation of Network-based Analysis of Human Mobility}, journal = {Social Network Analysis and Mining}, year = {2013}, note = {To appear}, author = {Michele Coscia and S Rinzivillo and Fosca Giannotti and Dino Pedreschi} } @conference {541, title = {A Study on Parameter Estimation for a Mining Flock Algorithm}, booktitle = {Mining Complex Patterns Workshop, ECML PKDD 2013}, year = {2013}, author = {Rebecca Ong and Mirco Nanni and Chiara Renso and Monica Wachowicz and Dino Pedreschi} } @conference {624, title = {The Three Dimensions of Social Prominence}, booktitle = {Social Informatics - 5th International Conference, SocInfo 2013, Kyoto, Japan, November 25-27, 2013, Proceedings}, year = {2013}, doi = {10.1007/978-3-319-03260-3_28}, url = {http://dx.doi.org/10.1007/978-3-319-03260-3_28}, author = {Diego Pennacchioli and Giulio Rossetti and Luca Pappalardo and Dino Pedreschi and Fosca Giannotti and Michele Coscia} } @article {682, title = {Towards mega-modeling: a walk through data analysis experiences}, journal = {{SIGMOD} Record}, volume = {42}, number = {3}, year = {2013}, pages = {19{\textendash}27}, doi = {10.1145/2536669.2536673}, url = {http://doi.acm.org/10.1145/2536669.2536673}, author = {Stefano Ceri and Themis Palpanas and Emanuele Della Valle and Dino Pedreschi and Johann-Christoph Freytag and Roberto Trasarti} } @article {732, title = {Understanding the patterns of car travel}, journal = {The European Physical Journal Special Topics}, volume = {215}, number = {1}, year = {2013}, 
pages = {61{\textendash}73}, abstract = {Are the patterns of car travel different from those of general human mobility? Based on a unique dataset consisting of the GPS trajectories of 10 million travels accomplished by 150,000 cars in Italy, we investigate how known mobility models apply to car travels, and illustrate novel analytical findings. We also assess to what extent the sample in our dataset is representative of the overall car mobility, and discover how to build an extremely accurate model that, given our GPS data, estimates the real traffic values as measured by road sensors.}, doi = {10.1140/epjst/e2013-01715-5}, url = {http://dx.doi.org/10.1140/epjst/e2013-01715-5}, author = {Luca Pappalardo and S Rinzivillo and Qu, Zehui and Dino Pedreschi and Fosca Giannotti} } @conference {687, title = {An Agent-Based Model to Evaluate Carpooling at Large Manufacturing Plants}, booktitle = {Proceedings of the 3rd International Conference on Ambient Systems, Networks and Technologies ({ANT} 2012), the 9th International Conference on Mobile Web Information Systems (MobiWIS-2012), Niagara Falls, Ontario, Canada, August 27-29, 2012}, year = {2012}, doi = {10.1016/j.procs.2012.08.001}, url = {http://dx.doi.org/10.1016/j.procs.2012.08.001}, author = {Tom Bellemans and Sebastian Bothe and Sungjin Cho and Fosca Giannotti and Davy Janssens and Luk Knapen and Christine K{\"o}rner and Michael May and Mirco Nanni and Dino Pedreschi and Hendrik Stange and Roberto Trasarti and Ansar-Ul-Haque Yasar and Geert Wets} } @conference {479, title = {AUDIO: An Integrity Auditing Framework of Outlier-Mining-as-a-Service Systems}, booktitle = {Machine Learning and Knowledge Discovery in Databases, European Conference, ECML PKDD 2012}, year = {2012}, abstract = {Spurred by developments such as cloud computing, there has been considerable recent interest in the data-mining-as-a-service paradigm. 
Users lacking in expertise or computational resources can outsource their data and mining needs to a third-party service provider (server). Outsourcing, however, raises issues about result integrity: how can the data owner verify that the mining results returned by the server are correct? In this paper, we present AUDIO, an integrity auditing framework for the specific task of distance-based outlier mining outsourcing. It provides efficient and practical verification approaches to check both completeness and correctness of the mining results. The key idea of our approach is to insert a small amount of artificial tuples into the outsourced data; the artificial tuples will produce artificial outliers and non-outliers that do not exist in the original dataset. The server{\textquoteright}s answer is verified by analyzing the presence of artificial outliers/non-outliers, obtaining a probabilistic guarantee of correctness and completeness of the mining result. Our empirical results show the effectiveness and efficiency of our method.}, doi = {10.1007/978-3-642-33486-3_1}, author = {R. Liu and Hui Wendy Wang and Anna Monreale and Dino Pedreschi and Fosca Giannotti and W. Guo} } @article {462, title = {Data Science for Simulating the Era of Electric Vehicles}, journal = {KI - K{\"u}nstliche Intelligenz}, year = {2012}, doi = {10.1007/s13218-012-0183-6}, author = {Davy Janssens and Fosca Giannotti and Mirco Nanni and Dino Pedreschi and S Rinzivillo} } @conference {625, title = {DEMON: a local-first discovery method for overlapping communities}, booktitle = {The 18th {ACM} {SIGKDD} International Conference on Knowledge Discovery and Data Mining, {KDD} {\textquoteright}12, Beijing, China, August 12-16, 2012}, year = {2012}, doi = {10.1145/2339530.2339630}, url = {http://doi.acm.org/10.1145/2339530.2339630}, author = {Michele Coscia and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi} } @conference {507, title = {DEMON: a Local-First Discovery Method for Overlapping 
Communities}, booktitle = {KDD 2012}, year = {2012}, month = {2012}, author = {Michele Coscia and Giulio Rossetti and Fosca Giannotti and Dino Pedreschi} } @article {455, title = {Discovering the Geographical Borders of Human Mobility}, journal = {KI - K{\"u}nstliche Intelligenz}, year = {2012}, chapter = {1}, abstract = {The availability of massive network and mobility data from diverse domains has fostered the analysis of human behavior and interactions. Broad, extensive, and multidisciplinary research has been devoted to the extraction of non-trivial knowledge from this novel form of data. We propose a general method to determine the influence of social and mobility behavior over a specific geographical area in order to evaluate to what extent the current administrative borders represent the real basin of human movement. We build a network representation of human movement starting with vehicle GPS tracks and extract relevant clusters, which are then mapped back onto the territory, finding a good match with the existing administrative borders. The novelty of our approach is the focus on a detailed spatial resolution, we map emerging borders in terms of individual municipalities, rather than macro regional or national areas. We present a series of experiments to illustrate and evaluate the effectiveness of our approach.}, issn = {0933-1875}, doi = {10.1007/s13218-012-0181-8}, url = {https://link.springer.com/article/10.1007\%2Fs13218-012-0181-8}, author = {S Rinzivillo and Simone Mainardi and Fabio Pezzoni and Michele Coscia and Fosca Giannotti and Dino Pedreschi} } @conference {626, title = {"How Well Do We Know Each Other?" 
Detecting Tie Strength in Multidimensional Social Networks}, booktitle = {International Conference on Advances in Social Networks Analysis and Mining, {ASONAM} 2012, Istanbul, Turkey, 26-29 August 2012}, year = {2012}, doi = {10.1109/ASONAM.2012.180}, url = {http://doi.ieeecomputersociety.org/10.1109/ASONAM.2012.180}, author = {Luca Pappalardo and Giulio Rossetti and Dino Pedreschi} } @conference {569, title = {Injecting Discrimination and Privacy Awareness Into Pattern Discovery}, booktitle = {12th {IEEE} International Conference on Data Mining Workshops, {ICDM} Workshops, Brussels, Belgium, December 10, 2012}, year = {2012}, pages = {360{\textendash}369}, abstract = {Data mining is gaining societal momentum due to the ever increasing availability of large amounts of human data, easily collected by a variety of sensing technologies. Data mining comes with unprecedented opportunities and risks: a deeper understanding of human behavior and how our society works is darkened by a greater chance of privacy intrusion and unfair discrimination based on the extracted patterns and profiles. Although methods independently addressing privacy or discrimination in data mining have been proposed in the literature, in this context we argue that privacy and discrimination risks should be tackled together, and we present a methodology for doing so while publishing frequent pattern mining results. We describe a combined pattern sanitization framework that yields both privacy and discrimination-protected patterns, while introducing reasonable (controlled) pattern distortion.}, doi = {10.1109/ICDMW.2012.51}, url = {http://dx.doi.org/10.1109/ICDMW.2012.51}, author = {Sara Hajian and Anna Monreale and Dino Pedreschi and Josep Domingo-Ferrer and Fosca Giannotti} } @conference {685, title = {Mega-modeling for Big Data Analytics}, booktitle = {Conceptual Modeling - 31st International Conference {ER} 2012, Florence, Italy, October 15-18, 2012. 
Proceedings}, year = {2012}, doi = {10.1007/978-3-642-34002-4_1}, url = {http://dx.doi.org/10.1007/978-3-642-34002-4_1}, author = {Stefano Ceri and Emanuele Della Valle and Dino Pedreschi and Roberto Trasarti} } @article {482, title = {Multidimensional networks: foundations of structural analysis}, journal = {World Wide Web}, volume = {15}, year = {2012}, month = oct, abstract = {Complex networks have been receiving increasing attention by the scientific community, thanks also to the increasing availability of real-world network data. So far, network analysis has focused on the characterization and measurement of local and global properties of graphs, such as diameter, degree distribution, centrality, and so on. In the last years, the multidimensional nature of many real world networks has been pointed out, i.e. many networks containing multiple connections between any pair of nodes have been analyzed. Despite the importance of analyzing this kind of networks was recognized by previous works, a complete framework for multidimensional network analysis is still missing. Such a framework would enable the analysts to study different phenomena, that can be either the generalization to the multidimensional setting of what happens in monodimensional networks, or a new class of phenomena induced by the additional degree of complexity that multidimensionality provides in real networks. The aim of this paper is then to give the basis for multidimensional network analysis: we present a solid repertoire of basic concepts and analytical measures, which take into account the general structure of multidimensional networks. We tested our framework on different real world multidimensional networks, showing the validity and the meaningfulness of the measures introduced, that are able to extract important and non-random information about complex phenomena in such networks. 
}, doi = {10.1007/s11280-012-0190-4}, url = {http://www.springerlink.com/content/f774289854430410/abstract/}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @proceedings {461, title = {Optimal Spatial Resolution for the Analysis of Human Mobility}, year = {2012}, address = {Instanbul, Turkey}, author = {Michele Coscia and S Rinzivillo and Dino Pedreschi and Fosca Giannotti} } @article {sam2011, title = {A classification for community discovery methods in complex networks}, journal = {Statistical Analysis and Data Mining}, volume = {4}, number = {5}, year = {2011}, pages = {512-546}, author = {Michele Coscia and Fosca Giannotti and Dino Pedreschi} } @article {MonrealeTPRB11, title = {C-safety: a framework for the anonymization of semantic trajectories}, journal = {Transactions on Data Privacy}, volume = {4}, number = {2}, year = {2011}, pages = {73-101}, abstract = {The increasing abundance of data about the trajectories of personal movement is opening new opportunities for analyzing and mining human mobility. However, new risks emerge since it opens new ways of intruding into personal privacy. Representing the personal movements as sequences of places visited by a person during her/his movements - semantic trajectory - poses great privacy threats. In this paper we propose a privacy model defining the attack model of semantic trajectory linking and a privacy notion, called c-safety based on a generalization of visited places based on a taxonomy. This method provides an upper bound to the probability of inferring that a given person, observed in a sequence of non-sensitive places, has also visited any sensitive location. Coherently with the privacy model, we propose an algorithm for transforming any dataset of semantic trajectories into a c-safe one. 
We report a study on two real-life GPS trajectory datasets to show how our algorithm preserves interesting quality/utility measures of the original trajectories, when mining semantic trajectories sequential pattern mining results. We also empirically measure how the probability that the attacker{\textquoteright}s inference succeeds is much lower than the theoretical upper bound established.}, url = {http://dl.acm.org/citation.cfm?id=2019319\&CFID=803961971\&CFTOKEN=35994039}, author = {Anna Monreale and Roberto Trasarti and Dino Pedreschi and Chiara Renso and Vania Bogorny} } @conference {asonam12011, title = {Foundations of Multidimensional Network Analysis}, booktitle = {ASONAM}, year = {2011}, pages = {485-489}, abstract = {Complex networks have been receiving increasing attention by the scientific community, thanks also to the increasing availability of real-world network data. In the last years, the multidimensional nature of many real world networks has been pointed out, i.e. many networks containing multiple connections between any pair of nodes have been analyzed. Despite the importance of analyzing this kind of networks was recognized by previous works, a complete framework for multidimensional network analysis is still missing. Such a framework would enable the analysts to study different phenomena, that can be either the generalization to the multidimensional setting of what happens inmonodimensional network, or a new class of phenomena induced by the additional degree of complexity that multidimensionality provides in real networks. The aim of this paper is then to give the basis for multidimensional network analysis: we develop a solid repertoire of basic concepts and analytical measures, which takes into account the general structure of multidimensional networks. 
We tested our framework on a real world multidimensional network, showing the validity and the meaningfulness of the measures introduced, that are able to extract important, nonrandom, information about complex phenomena.}, doi = {10.1109/ASONAM.2011.103}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @conference {481, title = {Privacy-preserving data mining from outsourced databases.}, booktitle = { the 3rd International Conference on Computers, Privacy, and Data Protection: An element of choice }, year = {2011}, month = {2011}, abstract = {Spurred by developments such as cloud computing, there has been considerable recent interest in the paradigm of data mining-as-service: a company (data owner) lacking in expertise or computational resources can outsource its mining needs to a third party service provider (server). However, both the outsourced database and the knowledge extract from it by data mining are considered private property of the data owner. To protect corporate privacy, the data owner transforms its data and ships it to the server, sends mining queries to the server, and recovers the true patterns from the extracted patterns received from the server. In this paper, we study the problem of outsourcing a data mining task within a corporate privacy-preserving framework. We propose a scheme for privacy-preserving outsourced mining which offers a formal protection against information disclosure, and show that the data owner can recover the correct data mining results efficiently.}, doi = {10.1007/978-94-007-0641-5_19}, author = {Fosca Giannotti and L.V.S. Lakshmanan and Anna Monreale and Dino Pedreschi and Hui Wendy Wang} } @article {jocs2011, title = {The pursuit of hubbiness: Analysis of hubs in large multidimensional networks}, journal = {J. Comput. Science}, volume = {2}, number = {3}, year = {2011}, pages = {223-237}, abstract = {Hubs are highly connected nodes within a network. 
In complex network analysis, hubs have been widely studied, and are at the basis of many tasks, such as web search and epidemic outbreak detection. In reality, networks are often multidimensional, i.e., there can exist multiple connections between any pair of nodes. In this setting, the concept of hub depends on the multiple dimensions of the network, whose interplay becomes crucial for the connectedness of a node. In this paper, we characterize multidimensional hubs. We consider the multidimensional generalization of the degree and introduce a new class of measures, that we call Dimension Relevance, aimed at analyzing the importance of different dimensions for the hubbiness of a node. We assess the meaningfulness of our measures by comparing them on real networks and null models, then we study the interplay among dimensions and their effect on node connectivity. Our findings show that: (i) multidimensional hubs do exist and their characterization yields interesting insights and (ii) it is possible to detect the most influential dimensions that cause the different hub behaviors. 
We demonstrate the usefulness of multidimensional analysis in three real world domains: detection of ambiguous query terms in a word{\textendash}word query log network, outlier detection in a social network, and temporal analysis of behaviors in a co-authorship network.}, doi = {10.1016/j.jocs.2011.05.009}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @article {TrasartiGNPR11, title = {A Query Language for Mobility Data Mining}, journal = {IJDWM}, volume = {7}, number = {1}, year = {2011}, pages = {24--45}, author = {Roberto Trasarti and Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Chiara Renso} } @article {vlbdjMatlas, title = {Unveiling the complexity of human mobility by querying and mining massive trajectory data}, journal = {VLDB J.}, volume = {20}, number = {5}, year = {2011}, pages = {695--719}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Fabio Pinelli and Chiara Renso and S Rinzivillo and Roberto Trasarti} } @conference {NanniTRGP10, title = {Advanced knowledge discovery on movement data with the GeoPKDD system}, booktitle = {EDBT}, year = {2010}, pages = {693--696}, author = {Mirco Nanni and Roberto Trasarti and Chiara Renso and Fosca Giannotti and Dino Pedreschi} } @conference {pakdd2010, title = {As Time Goes by: Discovering Eras in Evolving Social Networks}, booktitle = {PAKDD (1)}, year = {2010}, pages = {81--90}, abstract = {Within the large body of research in complex network analysis, an important topic is the temporal evolution of networks. Existing approaches aim at analyzing the evolution on the global and the local scale, extracting properties of either the entire network or local patterns. 
In this paper, we focus instead on detecting clusters of temporal snapshots of a network, to be interpreted as eras of evolution. To this aim, we introduce a novel hierarchical clustering methodology, based on a dissimilarity measure (derived from the Jaccard coefficient) between two temporal snapshots of the network. We devise a framework to discover and browse the eras, either in top-down or a bottom-up fashion, supporting the exploration of the evolution at any level of temporal resolution. We show how our approach applies to real networks, by detecting eras in an evolving co-authorship graph extracted from a bibliographic dataset; we illustrate how the discovered temporal clustering highlights the crucial moments when the network had profound changes in its structure. Our approach is finally boosted by introducing a meaningful labeling of the obtained clusters, such as the characterizing topics of each discovered era, thus adding a semantic dimension to our analysis.}, doi = {10.1007/978-3-642-13657-3_11}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @conference {sebd10, title = {Discovering Eras in Evolving Social Networks (Extended Abstract)}, booktitle = {SEBD}, year = {2010}, pages = {78-85}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @conference {TrasartiRPNM10, title = {Exploring Real Mobility Data with M-Atlas}, booktitle = {ECML/PKDD (3)}, year = {2010}, pages = {624-627}, abstract = {Research on moving-object data analysis has been recently fostered by the widespread diffusion of new techniques and systems for monitoring, collecting and storing location aware data, generated by a wealth of technological infrastructures, such as GPS positioning and wireless networks. 
These have made available massive repositories of spatio-temporal data recording human mobile activities, that call for suitable analytical methods, capable of enabling the development of innovative, location-aware applications.}, doi = {10.1007/978-3-642-15939-8_48}, author = {Roberto Trasarti and S Rinzivillo and Fabio Pinelli and Mirco Nanni and Anna Monreale and Chiara Renso and Dino Pedreschi and Fosca Giannotti} } @proceedings {337, title = {A Generalisation-based Approach to Anonymising Movement Data}, year = {2010}, abstract = {The possibility to collect, store, disseminate, and analyze data about movements of people raises very serious privacy concerns, given the sensitivity of the information about personal positions. In particular, sensitive information about individuals can be uncovered with the use of data mining and visual analytics methods. In this paper we present a method for the generalization of trajectory data that can be adopted as the first step of a process to obtain k-anonymity in spatio-temporal datasets. We ran a preliminary set of experiments on a real-world trajectory dataset, demonstrating that this method of generalization of trajectories preserves the clustering analysis results. 
}, issn = {978-989-20-1953-6}, url = {http://agile2010.dsi.uminho.pt/pen/ShortPapers_PDF\%5C122_DOC.pdf}, author = {Gennady Andrienko and Natalia Andrienko and Fosca Giannotti and Anna Monreale and Dino Pedreschi and S Rinzivillo} } @conference {GiannottiNPPR10, title = {Mobility data mining: discovering movement patterns from trajectory data}, booktitle = {Computational Transportation Science}, year = {2010}, pages = {7-10}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Fabio Pinelli and Chiara Renso and S Rinzivillo and Roberto Trasarti} } @article {572, title = {Movement Data Anonymity through Generalization}, journal = {Transactions on Data Privacy}, volume = {3}, number = {2}, year = {2010}, pages = {91{\textendash}121}, abstract = {Wireless networks and mobile devices, such as mobile phones and GPS receivers, sense and track the movements of people and vehicles, producing society-wide mobility databases. This is a challenging scenario for data analysis and mining. On the one hand, exciting opportunities arise out of discovering new knowledge about human mobile behavior, and thus fuel intelligent info-mobility applications. On other hand, new privacy concerns arise when mobility data are published. The risk is particularly high for GPS trajectories, which represent movement of a very high precision and spatio-temporal resolution: the de-identification of such trajectories (i.e., forgetting the ID of their associated owners) is only a weak protection, as generally it is possible to re-identify a person by observing her routine movements. In this paper we propose a method for achieving true anonymity in a dataset of published trajectories, by defining a transformation of the original GPS trajectories based on spatial generalization and k-anonymity. The proposed method offers a formal data protection safeguard, quantified as a theoretical upper bound to the probability of re-identification. 
We conduct a thorough study on a real-life GPS trajectory dataset, and provide strong empirical evidence that the proposed anonymity techniques achieve the conflicting goals of data utility and data privacy. In practice, the achieved anonymity protection is much stronger than the theoretical worst case, while the quality of the cluster analysis on the trajectory data is preserved.}, url = {http://www.tdp.cat/issues/abs.a045a10.php}, author = {Anna Monreale and Gennady Andrienko and Natalia Andrienko and Fosca Giannotti and Dino Pedreschi and S Rinzivillo and Stefan Wrobel} } @conference {MonrealeTRPB10, title = {Preserving privacy in semantic-rich trajectories of human mobility}, booktitle = {SPRINGL}, year = {2010}, pages = {47-54}, abstract = {The increasing abundance of data about the trajectories of personal movement is opening up new opportunities for analyzing and mining human mobility, but new risks emerge since it opens new ways of intruding into personal privacy. Representing the personal movements as sequences of places visited by a person during her/his movements - semantic trajectory - poses even greater privacy threats w.r.t. raw geometric location data. In this paper we propose a privacy model defining the attack model of semantic trajectory linking, together with a privacy notion, called c-safety. This method provides an upper bound to the probability of inferring that a given person, observed in a sequence of nonsensitive places, has also stopped in any sensitive location. Coherently with the privacy model, we propose an algorithm for transforming any dataset of semantic trajectories into a c-safe one. 
We report a study on a real-life GPS trajectory dataset to show how our algorithm preserves interesting quality/utility measures of the original trajectories, such as sequential pattern mining results.}, doi = {10.1145/1868470.1868481}, author = {Anna Monreale and Roberto Trasarti and Chiara Renso and Dino Pedreschi and Vania Bogorny} } @conference {Berlm3sn2010, title = {Towards Discovery of Eras in Social Networks}, booktitle = {M3SN 2010 Workshop, in conjunction with ICDE2010}, year = {2010}, abstract = {In the last decades, much research has been devoted in topics related to Social Network Analysis. One important direction in this area is to analyze the temporal evolution of a network. So far, previous approaches analyzed this setting at both the global and the local level. In this paper, we focus on finding a way to detect temporal eras in an evolving network. We pose the basis for a general framework that aims at helping the analyst in browsing the temporal clusters both in a top-down and bottom-up way, exploring the network at any level of temporal details. We show the effectiveness of our approach on real data, by applying our proposed methodology to a co-authorship network extracted from a bibliographic dataset. Our first results are encouraging, and open the way for the definition and implementation of a general framework for discovering eras in evolving social networks.}, doi = {10.1109/ICDEW.2010.5452713}, author = {Michele Berlingerio and Michele Coscia and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @proceedings {242, title = {Anonymous Sequences from Trajectory Data}, year = {2009}, edition = {17}, address = {Camogli, Italy}, author = {Ruggero G. 
Pensa and Anna Monreale and Fabio Pinelli and Dino Pedreschi} } @conference {GiannottiPT09, title = {Geographic privacy-aware knowledge discovery and delivery}, booktitle = {EDBT}, year = {2009}, pages = {1157-1158}, author = {Fosca Giannotti and Dino Pedreschi and Yannis Theodoridis} } @conference {fet2009, title = {GeoPKDD {\textendash} Geographic Privacy-aware Knowledge Discovery}, booktitle = {The European Future Technologies Conference (FET 2009)}, year = {2009}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Chiara Renso and S Rinzivillo and Roberto Trasarti} } @conference {PedreschiRT09, title = {Integrating induction and deduction for finding evidence of discrimination}, booktitle = {ICAIL}, year = {2009}, pages = {157-166}, author = {Dino Pedreschi and Salvatore Ruggieri and Franco Turini} } @conference {sdmPedreschiRT09, title = {Measuring Discrimination in Socially-Sensitive Decision Records}, booktitle = {SDM}, year = {2009}, pages = {581-592}, author = {Dino Pedreschi and Salvatore Ruggieri and Franco Turini} } @conference {GiannottiNPRT09, title = {Mining Mobility Behavior from Trajectory Data}, booktitle = {CSE (4)}, year = {2009}, pages = {948-951}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Chiara Renso and Roberto Trasarti} } @conference {991, title = {Movement data anonymity through generalization}, booktitle = {Proceedings of the 2nd SIGSPATIAL ACM GIS 2009 International Workshop on Security and Privacy in GIS and LBS}, year = {2009}, publisher = {ACM}, organization = {ACM}, abstract = {In recent years, spatio-temporal and moving objects databases have gained considerable interest, due to the diffusion of mobile devices (e.g., mobile phones, RFID devices and GPS devices) and of new applications, where the discovery of consumable, concise, and applicable knowledge is the key step. 
Clearly, in these applications privacy is a concern, since models extracted from this kind of data can reveal the behavior of group of individuals, thus compromising their privacy. Movement data present a new challenge for the privacy-preserving data mining community because of their spatial and temporal characteristics. In this position paper we briefly present an approach for the generalization of movement data that can be adopted for obtaining k-anonymity in spatio-temporal datasets; specifically, it can be used to realize a framework for publishing of spatio-temporal data while preserving privacy. We ran a preliminary set of experiments on a real-world trajectory dataset, demonstrating that this method of generalization of trajectories preserves the clustering analysis results.}, doi = {10.1145/1667502.1667510}, author = {Gennady Andrienko and Natalia Andrienko and Fosca Giannotti and Anna Monreale and Dino Pedreschi} } @conference {DBLP:conf/gis/Gi, title = {Trajectory pattern analysis for urban traffic}, booktitle = {Second International Workshop on Computational Transportation Science}, year = {2009}, month = {11/2009}, pages = {43-47}, publisher = {ACM}, organization = {ACM}, address = {SEATTLE, USA}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Fabio Pinelli} } @conference {AndrienkoARNP09, title = {A Visual Analytics Toolkit for Cluster-Based Classification of Mobility Data}, booktitle = {SSTD}, year = {2009}, pages = {432-435}, author = {Gennady Andrienko and Natalia Andrienko and S Rinzivillo and Mirco Nanni and Dino Pedreschi} } @conference {ClusterVAST, title = {Visual Cluster Analysis of Large Collections of Trajectories}, booktitle = {IEEE Visual Analytics Science and Tecnology (VAST 2009)}, year = {2009}, publisher = {IEEE Computer Society Press}, organization = {IEEE Computer Society Press}, author = {Gennady Andrienko and Natalia Andrienko and S Rinzivillo and Mirco Nanni and Dino Pedreschi and Fosca Giannotti} } @article 
{DBLP:journals/vldb/AtzoriBGP08, title = {Anonymity preserving pattern discovery}, journal = {VLDB J.}, volume = {17}, number = {4}, year = {2008}, pages = {703-727}, author = {Maurizio Atzori and Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} } @conference {PedreschiRT08, title = {Discrimination-aware data mining}, booktitle = {KDD}, year = {2008}, pages = {560-568}, author = {Dino Pedreschi and Salvatore Ruggieri and Franco Turini} } @inbook {GiannottiP08, title = {Mobility, Data Mining and Privacy: A Vision of Convergence}, booktitle = {Mobility, Data Mining and Privacy}, year = {2008}, pages = {1-11}, author = {Fosca Giannotti and Dino Pedreschi} } @book {2008mdmp, title = {Mobility, Data Mining and Privacy - Geographic Knowledge Discovery}, series = {Mobility, Data Mining and Privacy}, year = {2008}, publisher = {Springer}, organization = {Springer}, isbn = {978-3-540-75176-2}, author = {Fosca Giannotti and Dino Pedreschi}, editor = {Fosca Giannotti and Dino Pedreschi} } @conference {GiannottiPT08, title = {Mobility, Data Mining and Privacy the Experience of the GeoPKDD Project}, booktitle = {PinKDD}, year = {2008}, pages = {25-32}, author = {Fosca Giannotti and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/esoric, title = {Pattern-Preserving k-Anonymization of Sequences and its Application to Mobility Data Mining}, booktitle = {PiLBA}, year = {2008}, abstract = {Sequential pattern mining is a major research field in knowledge discovery and data mining. Thanks to the increasing availability of transaction data, it is now possible to provide new and improved services based on users{\textquoteright} and customers{\textquoteright} behavior. However, this puts the citizen{\textquoteright}s privacy at risk. Thus, it is important to develop new privacy-preserving data mining techniques that do not alter the analysis results significantly. 
In this paper we propose a new approach for anonymizing sequential data by hiding infrequent, and thus potentially sensible, subsequences. Our approach guarantees that the disclosed data are k-anonymous and preserve the quality of extracted patterns. An application to a real-world moving object database is presented, which shows the effectiveness of our approach also in complex contexts.}, url = {https://air.unimi.it/retrieve/handle/2434/52786/106397/ProceedingsPiLBA08.pdf$\#$page=44}, author = {Ruggero G. Pensa and Anna Monreale and Fabio Pinelli and Dino Pedreschi} } @inbook {PedreschiBTVAMMS, title = {Privacy Protection: Regulations and Technologies, Opportunities and Threats}, booktitle = {Mobility, Data Mining and Privacy}, year = {2008}, pages = {101-119}, author = {Dino Pedreschi and Francesco Bonchi and Franco Turini and Vassilios S. Verykios and Maurizio Atzori and Bradley Malin and Bart Moelans and Y{\"u}cel Saygin} } @inbook {NanniKKMP08, title = {Spatiotemporal Data Mining}, booktitle = {Mobility, Data Mining and Privacy}, year = {2008}, pages = {267-296}, author = {Mirco Nanni and Bart Kuijpers and Christine K{\"o}rner and Michael May and Dino Pedreschi} } @article {IV2008, title = {Visually driven analysis of movement data by progressive clustering}, journal = {Information Visualization}, volume = {7}, number = {3-4}, year = {2008}, pages = {225-239}, publisher = {Palgrave Macmillan Ltd}, author = {S Rinzivillo and Dino Pedreschi and Mirco Nanni and Fosca Giannotti and Natalia Andrienko and Gennady Andrienko} } @conference {DBLP:conf/mdm/AtzoriBGPA07, title = {Privacy-Aware Knowledge Discovery from Location Data}, booktitle = {MDM}, year = {2007}, pages = {283-287}, author = {Maurizio Atzori and Francesco Bonchi and Fosca Giannotti and Dino Pedreschi and Osman Abul} } @conference {DBLP:conf/kdd/GiannottiNPP07, title = {Trajectory pattern mining}, booktitle = {KDD}, year = {2007}, pages = {330-339}, author = {Fosca Giannotti and Mirco Nanni and Fabio 
Pinelli and Dino Pedreschi} } @conference {DBLP:conf/sdm/GiannottiNP06, title = {Efficient Mining of Temporally Annotated Sequences}, booktitle = {SDM}, year = {2006}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi} } @conference {DBLP:conf/sac/GiannottiNPP06, title = {Mining sequences with temporal annotations}, booktitle = {SAC}, year = {2006}, pages = {593-597}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and Fabio Pinelli} } @article {DBLP:journals/jiis/NanniP06, title = {Time-focused clustering of trajectories of moving objects}, journal = {J. Intell. Inf. Syst.}, volume = {27}, number = {3}, year = {2006}, pages = {267-289}, author = {Mirco Nanni and Dino Pedreschi} } @conference {DBLP:conf/sac/AtzoriBGP06, title = {Towards low-perturbation anonymity preserving pattern discovery}, booktitle = {SAC}, year = {2006}, pages = {588-592}, author = {Maurizio Atzori and Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} } @article {DBLP:journals/csse/AtzoriBGP05, title = {Anonymity and data mining}, journal = {Comput. Syst. Sci. Eng.}, volume = {20}, number = {5}, year = {2005}, author = {Maurizio Atzori and Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} } @conference {DBLP:conf/icdm/AtzoriBGP05, title = {Blocking Anonymity Threats Raised by Frequent Itemset Mining}, booktitle = {ICDM}, year = {2005}, pages = {561-564}, author = {Maurizio Atzori and Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} } @article {DBLP:journals/kais/BonchiGMP05, title = {Efficient breadth-first mining of frequent pattern with monotone constraints}, journal = {Knowl. Inf. 
Syst.}, volume = {8}, number = {2}, year = {2005}, pages = {131-153}, author = {Francesco Bonchi and Fosca Giannotti and Alessio Mazzanti and Dino Pedreschi} } @article {DBLP:journals/expert/BonchiGMP05, title = {Exante: A Preprocessing Method for Frequent-Pattern Mining}, journal = {IEEE Intelligent Systems}, volume = {20}, number = {3}, year = {2005}, pages = {25-31}, author = {Francesco Bonchi and Fosca Giannotti and Alessio Mazzanti and Dino Pedreschi} } @article {DBLP:journals/amai/PedreschiR04, title = {Bounded Nondeterminism of Logic Programs}, journal = {Ann. Math. Artif. Intell.}, volume = {42}, number = {4}, year = {2004}, pages = {313-343}, author = {Dino Pedreschi and Salvatore Ruggieri} } @conference {DBLP:conf/lopstr/PedreschiRS04, title = {Characterisations of Termination in Logic Programming}, booktitle = {Program Development in Computational Logic}, year = {2004}, pages = {376-431}, author = {Dino Pedreschi and Salvatore Ruggieri and Jan-Georg Smaus} } @conference {DBLP:conf/dmkd/BacarellaGNP04, title = {Discovery of ads web hosts through traffic data analysis}, booktitle = {DMKD}, year = {2004}, pages = {76-81}, author = {V. Bacarella and Fosca Giannotti and Mirco Nanni and Dino Pedreschi} } @conference {DBLP:conf/sebd/BoschiGP04, title = {Frequent Pattern Queries for Flexible Knowledge Discovery}, booktitle = {SEBD}, year = {2004}, pages = {250-261}, author = {Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} } @conference {DBLP:conf/iticse/AlfonsiSPS04, title = {IT4PS: information technology for problem solving}, booktitle = {ITiCSE}, year = {2004}, pages = {241}, author = {C. 
Alfonsi and Nello Scarabottolo and Dino Pedreschi and Maria Simi} } @proceedings {DBLP:conf/pkdd/2004, title = {Knowledge Discovery in Databases: PKDD 2004, 8th European Conference on Principles and Practice of Knowledge Discovery in Databases, Pisa, Italy, September 20-24, 2004, Proceedings}, volume = {3202}, year = {2004}, publisher = {Springer}, isbn = {3-540-23108-0}, author = {Jean-Fran{\c c}ois Boulicaut and Floriana Esposito and Fosca Giannotti and Dino Pedreschi} } @proceedings {DBLP:conf/ecml/2004, title = {Machine Learning: ECML 2004, 15th European Conference on Machine Learning, Pisa, Italy, September 20-24, 2004, Proceedings}, volume = {3201}, year = {2004}, publisher = {Springer}, isbn = {3-540-23105-6}, author = {Jean-Fran{\c c}ois Boulicaut and Floriana Esposito and Fosca Giannotti and Dino Pedreschi} } @conference {DBLP:conf/cinq/BonchiGP04, title = {A Relational Query Primitive for Constraint-Based Pattern Mining}, booktitle = {Constraint-Based Mining and Inductive Databases}, year = {2004}, pages = {14-37}, author = {Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} } @conference {DBLP:conf/pkdd/BonchiGMP03, title = {Adaptive Constraint Pushing in Frequent Pattern Mining}, booktitle = {PKDD}, year = {2003}, pages = {47-58}, author = {Francesco Bonchi and Fosca Giannotti and Alessio Mazzanti and Dino Pedreschi} } @conference {DBLP:conf/icdm/BonchiGMP03, title = {ExAMiner: Optimized Level-wise Frequent Pattern Mining with Monotone Constraint}, booktitle = {ICDM}, year = {2003}, pages = {11-18}, author = {Francesco Bonchi and Fosca Giannotti and Alessio Mazzanti and Dino Pedreschi} } @conference {DBLP:conf/pkdd/BonchiGMP03a, title = {ExAnte: Anticipated Data Reduction in Constrained Pattern Mining}, booktitle = {PKDD}, year = {2003}, pages = {59-70}, author = {Francesco Bonchi and Fosca Giannotti and Alessio Mazzanti and Dino Pedreschi} } @article {DBLP:journals/scp/PedreschiR03, title = {On logic programs that always succeed}, journal = {Sci. 
Comput. Program.}, volume = {48}, number = {2-3}, year = {2003}, pages = {163-196}, author = {Dino Pedreschi and Salvatore Ruggieri} }
@conference {DBLP:conf/sebd/BonchiGMP03, title = {Pre-processing for Constrained Pattern Mining}, booktitle = {SEBD}, year = {2003}, pages = {519-530}, author = {Francesco Bonchi and Fosca Giannotti and Alessio Mazzanti and Dino Pedreschi} }
@conference {DBLP:conf/sebd/GiannottiNPS03, title = {WebCat: Automatic Categorization of Web Search Results}, booktitle = {SEBD}, year = {2003}, pages = {507-518}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi and F. Samaritani} }
@article {DBLP:journals/tplp/PedreschiRS02, title = {Classes of terminating logic programs}, journal = {TPLP}, volume = {2}, number = {3}, year = {2002}, pages = {369-418}, author = {Dino Pedreschi and Salvatore Ruggieri and Jan-Georg Smaus} }
@conference {DBLP:conf/birthday/MascellaniP02, title = {The Declarative Side of Magic}, booktitle = {Computational Logic: Logic Programming and Beyond}, year = {2002}, pages = {83-108}, author = {Paolo Mascellani and Dino Pedreschi} }
@conference {DBLP:conf/birthday/MancarellaPR02, title = {Negation as Failure through Abduction: Reasoning about Termination}, booktitle = {Computational Logic: Logic Programming and Beyond}, year = {2002}, pages = {240-272}, author = {Paolo Mancarella and Dino Pedreschi and Salvatore Ruggieri} }
@article {DBLP:journals/corr/cs-LO-0106050, title = {Classes of Terminating Logic Programs}, journal = {CoRR}, volume = {cs.LO/0106}, year = {2001}, author = {Dino Pedreschi and Salvatore Ruggieri and Jan-Georg Smaus} }
@conference {DBLP:conf/itcc/BonchiGMRNPR01, title = {Data Mining for Intelligent Web Caching}, booktitle = {ITCC}, year = {2001}, pages = {599-603}, author = {Francesco Bonchi and Fosca Giannotti and Giuseppe Manco and Chiara Renso and Mirco Nanni and Dino Pedreschi and Salvatore Ruggieri} }
@article {DBLP:journals/tkde/GiannottiMNP01, title = {Nondeterministic, Nonmonotonic Logic Databases}, journal = {IEEE Trans. Knowl. Data Eng.}, volume = {13}, number = {5}, year = {2001}, pages = {813-823}, author = {Fosca Giannotti and Giuseppe Manco and Mirco Nanni and Dino Pedreschi} }
@article {DBLP:journals/jcss/GiannottiPZ01, title = {Semantics and Expressive Power of Nondeterministic Constructs in Deductive Databases}, journal = {J. Comput. Syst. Sci.}, volume = {62}, number = {1}, year = {2001}, pages = {15-42}, author = {Fosca Giannotti and Dino Pedreschi and Carlo Zaniolo} }
@article {DBLP:journals/dke/BonchiGGMNPRR01, title = {Web log data warehousing and mining for intelligent web caching}, journal = {Data Knowl. Eng.}, volume = {39}, number = {2}, year = {2001}, pages = {165-189}, author = {Francesco Bonchi and Fosca Giannotti and Cristian Gozzi and Giuseppe Manco and Mirco Nanni and Dino Pedreschi and Chiara Renso and Salvatore Ruggieri} }
@article {BGGMNPRR01, title = {Web Log Data Warehousing and Mining for Intelligent Web Caching}, journal = {Data and Knowledge Engineering}, year = {2001}, note = {39:165-189, November 2001.}, author = {Francesco Bonchi and Fosca Giannotti and Cristian Gozzi and Giuseppe Manco and Mirco Nanni and Dino Pedreschi and Chiara Renso and Salvatore Ruggieri} }
@article {DBLP:journals/amai/FayzullinNPS00, title = {Foundations of distributed interaction systems}, journal = {Ann. Math. Artif. Intell.}, volume = {28}, number = {1-4}, year = {2000}, pages = {127-168}, author = {Marat Fayzullin and Mirco Nanni and Dino Pedreschi and V. S. Subrahmanian} }
@conference {DBLP:conf/ejc/GiannottiNP00, title = {Logic-Based Knowledge Discovery in Databases}, booktitle = {EJC}, year = {2000}, pages = {279-283}, author = {Fosca Giannotti and Mirco Nanni and Dino Pedreschi} }
@conference {DBLP:conf/cl/BonchiGP00, title = {On Verification in Logic Database Languages}, booktitle = {Computational Logic}, year = {2000}, pages = {957-971}, author = {Francesco Bonchi and Fosca Giannotti and Dino Pedreschi} }
@conference {DBLP:conf/iclp/PedreschiR99, title = {Bounded Nondeterminism of Logic Programs}, booktitle = {ICLP}, year = {1999}, pages = {350-364}, author = {Dino Pedreschi and Salvatore Ruggieri} }
@conference {DBLP:conf/kdd/BonchiGMP99, title = {A Classification-Based Methodology for Planning Audit Strategies in Fraud Detection}, booktitle = {KDD}, year = {1999}, pages = {175-184}, author = {Francesco Bonchi and Fosca Giannotti and Gianni Mainetto and Dino Pedreschi} }
@conference {DBLP:conf/dmkd/GiannottiMPT99, title = {Experiences with a Logic-based knowledge discovery Support Environment}, booktitle = {1999 ACM SIGMOD Workshop on Research Issues in Data Mining and
Knowledge Discovery}, year = {1999}, author = {Fosca Giannotti and Giuseppe Manco and Dino Pedreschi and Franco Turini} }
@conference {DBLP:conf/aiia/GiannottiMPT99, title = {Experiences with a Logic-Based Knowledge Discovery Support Environment}, booktitle = {AI*IA}, year = {1999}, pages = {202-213}, author = {Fosca Giannotti and Giuseppe Manco and Dino Pedreschi and Franco Turini} }
@conference {DBLP:conf/sebd/GiannottiMNPT99, title = {Integration of Deduction and Induction for Mining Supermarket Sales Data}, booktitle = {SEBD}, year = {1999}, pages = {117-131}, author = {Fosca Giannotti and Giuseppe Manco and Mirco Nanni and Dino Pedreschi and Franco Turini} }
@article {DBLP:journals/tcs/PedreschiR99, title = {On Logic Programs That Do Not Fail}, journal = {Electr. Notes Theor. Comput. Sci.}, volume = {30}, number = {1}, year = {1999}, author = {Dino Pedreschi and Salvatore Ruggieri} }
@conference {DBLP:conf/sebd/BonchiGMP99, title = {Una Metodologia Basata sulla Classificazione per la Pianificazione degli Accertamenti nel Rilevamento di Frodi}, booktitle = {SEBD}, year = {1999}, pages = {69-84}, author = {Francesco Bonchi and Fosca Giannotti and Gianni Mainetto and Dino Pedreschi} }
@conference {DBLP:conf/dawak/BonchiGMP99, title = {Using Data Mining Techniques in Fiscal Fraud Detection}, booktitle = {DaWaK}, year = {1999}, pages = {369-376}, author = {Francesco Bonchi and Fosca Giannotti and Gianni Mainetto and Dino Pedreschi} }
@article {DBLP:journals/jlp/PedreschiR99, title = {Verification of Logic Programs}, journal = {J. Log. Program.}, volume = {39}, number = {1-3}, year = {1999}, pages = {125-176}, author = {Dino Pedreschi and Salvatore Ruggieri} }
@article {DBLP:journals/jlp/GiannottiP98, title = {Datalog with Non-Deterministic Choice Computes NDB-PTIME}, journal = {J. Log. Program.}, volume = {35}, number = {1}, year = {1998}, pages = {79-101}, author = {Fosca Giannotti and Dino Pedreschi} }
@conference {DBLP:conf/csl/GiannottiMNP98, title = {On the Effective Semantics of Nondeterministic, Nonmonotonic, Temporal Logic Databases}, booktitle = {CSL}, year = {1998}, pages = {58-72}, author = {Fosca Giannotti and Giuseppe Manco and Mirco Nanni and Dino Pedreschi} }
@conference {DBLP:conf/fqas/GiannottiMNP98, title = {Query Answering in Nondeterministic, Nonmonotonic Logic Databases}, booktitle = {FQAS}, year = {1998}, pages = {175-187}, author = {Fosca Giannotti and Giuseppe Manco and Mirco Nanni and Dino Pedreschi} }
@article {DBLP:journals/ipl/PedreschiR98, title = {Weakest Preconditions for Pure Prolog Programs}, journal = {Inf. Process. Lett.}, volume = {67}, number = {3}, year = {1998}, pages = {145-150}, author = {Dino Pedreschi and Salvatore Ruggieri} }
@conference {DBLP:conf/dood/GiannottiMNP97, title = {Datalog++: A Basis for Active Object-Oriented Databases}, booktitle = {DOOD}, year = {1997}, pages = {283-301}, author = {Fosca Giannotti and Giuseppe Manco and Mirco Nanni and Dino Pedreschi} }
@conference {DBLP:conf/sebd/GiannottiMNP97, title = {Datalog++: a Basis for Active Object-Oriented Databases}, booktitle = {SEBD}, year = {1997}, pages = {325-340}, author = {Fosca Giannotti and Giuseppe Manco and Mirco Nanni and Dino Pedreschi} }
@conference {DBLP:conf/agp/GiannottiMP97, title = {A Deductive Data Model for Representing and Querying Semistructured Data}, booktitle = {APPIA-GULP-PRODE}, year = {1997}, pages = {129-140}, author = {Fosca Giannotti and Giuseppe Manco and Dino Pedreschi} }
@article {DBLP:journals/amai/PedreschiS97, title = {Non-determinism in Deductive Databases - Preface}, journal = {Ann. Math. Artif. Intell.}, volume = {19}, number = {1-2}, year = {1997}, pages = {1-2}, author = {Dino Pedreschi and V. S.
Subrahmanian} } @article {DBLP:journals/logcom/PedreschiR97, title = {Verification of Meta-Interpreters}, journal = {J. Log. Comput.}, volume = {7}, number = {2}, year = {1997}, pages = {267-303}, author = {Dino Pedreschi and Salvatore Ruggieri} } @article {DBLP:journals/jlp/AptGP96, title = {A Closer Look at Declarative Interpretations}, journal = {J. Log. Program.}, volume = {28}, number = {2}, year = {1996}, pages = {147-180}, author = {Krzysztof R. Apt and Maurizio Gabbrielli and Dino Pedreschi} } @proceedings {DBLP:conf/lid/1996, title = {Logic in Databases, International Workshop LID{\textquoteright}96, San Miniato, Italy, July 1-2, 1996, Proceedings}, volume = {1154}, year = {1996}, publisher = {Springer}, isbn = {3-540-61814-7}, author = {Dino Pedreschi and Carlo Zaniolo} } @conference {DBLP:conf/agp/PedreschiR95, title = {A Case Study in Logic Program Verification: the Vanilla Metainterpreter}, booktitle = {GULP-PRODE}, year = {1995}, pages = {643-654}, author = {Dino Pedreschi and Salvatore Ruggieri} } @conference {DBLP:conf/sebd/CarboniGFP95, title = {Declarative Reconstruction of Updates in Logic Databases: A Compilative Approach}, booktitle = {SEBD}, year = {1995}, pages = {3-13}, author = {Marilisa E. Carboni and Fosca Giannotti and V. Foddai and Dino Pedreschi} } @conference {DBLP:conf/agp/CarboniFGP95, title = {Declarative Reconstruction of Updates in Logic Databases: a Compilative Approach}, booktitle = {GULP-PRODE}, year = {1995}, pages = {169-182}, author = {Marilisa E. Carboni and V. 
Foddai and Fosca Giannotti and Dino Pedreschi} }
@conference {DBLP:conf/deductive/CorciuloGPZ94, title = {Expressive Power of Non-Deterministic Operators for Logic-based Languages}, booktitle = {Workshop on Deductive Databases and Logic Programming}, year = {1994}, pages = {27-40}, author = {Luca Corciulo and Fosca Giannotti and Dino Pedreschi and Carlo Zaniolo} }
@article {BCMMPRT94, title = {Implementations of Program Composition Operations}, year = {1994}, note = {Programming Language Implementation and Logic Programming, Lecture Notes in Computer Science, volume 844.}, pages = {292{\textendash}307}, author = {Antonio Brogi and A. Chiarelli and Paolo Mancarella and V. Mazzotta and Dino Pedreschi and Chiara Renso and Franco Turini} }
@conference {DBLP:conf/plilp/BrogiCMMPRT94, title = {Implementations of Program Composition Operations}, booktitle = {PLILP}, year = {1994}, pages = {292-307}, author = {Antonio Brogi and A. Chiarelli and Paolo Mancarella and V. Mazzotta and Dino Pedreschi and Chiara Renso and Franco Turini} }
@article {DBLP:journals/toplas/BrogiMPT94, title = {Modular Logic Programming}, journal = {ACM Trans. Program. Lang.
Syst.}, volume = {16}, number = {4}, year = {1994}, pages = {1361-1398}, author = {Antonio Brogi and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/iclp/Pedreschi94, title = {A Proof Method for Runtime Properties of Prolog Programs}, booktitle = {ICLP}, year = {1994}, pages = {584-598}, author = {Dino Pedreschi} } @conference {DBLP:conf/agp/MascellaniP94, title = {Proving termination of Prolog programs}, booktitle = {GULP-PRODE (1)}, year = {1994}, pages = {46-61}, author = {Paolo Mascellani and Dino Pedreschi} } @conference {DBLP:conf/dood/CorciuloGP93, title = {Datalog with Non-Deterministic Choice Computes NDB-PTIME}, booktitle = {DOOD}, year = {1993}, pages = {49-66}, author = {Luca Corciulo and Fosca Giannotti and Dino Pedreschi} } @article {DBLP:journals/iandc/AptP93, title = {Reasoning about Termination of Pure Prolog Programs}, journal = {Inf. Comput.}, volume = {106}, number = {1}, year = {1993}, pages = {109-157}, author = {Krzysztof R. Apt and Dino Pedreschi} } @conference {DBLP:conf/meta/BrogiMPT92, title = {Meta for Modularising Logic Programming}, booktitle = {META}, year = {1992}, pages = {105-119}, author = {Antonio Brogi and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @book {DBLP:books/mit/pfenning92/BertolinoMPT92, title = {The Type System of LML}, series = {Types in Logic Programming}, year = {1992}, pages = {313-332}, author = {Bruno Bertolino and Luigi Meo and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/dood/GiannottiPSZ91, title = {Non-Determinism in Deductive Databases}, booktitle = {DOOD}, year = {1991}, pages = {129-146}, author = {Fosca Giannotti and Dino Pedreschi and Domenico Sacc{\`a} and Carlo Zaniolo} } @conference {DBLP:conf/tacs/AptP91, title = {Proving Termination of General Prolog Programs}, booktitle = {TACS}, year = {1991}, pages = {265-289}, author = {Krzysztof R. 
Apt and Dino Pedreschi} } @conference {DBLP:conf/iclp/BrogiMPT91, title = {Theory Construction in Computational Logic}, booktitle = {ICLP Workshop on Construction of Logic Programs}, year = {1991}, pages = {241-250}, author = {Antonio Brogi and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/slp/MancarellaPRT90, title = {Algebraic Properties of a Class of Logic Programs}, booktitle = {NACLP}, year = {1990}, pages = {23-39}, author = {Paolo Mancarella and Dino Pedreschi and Marina Rondinelli and Marco Tagliatti} } @conference {DBLP:conf/lpnmr/GiannottiP90, title = {Declarative Semantics for Pruning Operators in Logic Programming}, booktitle = {LPNMR}, year = {1990}, pages = {27-37}, author = {Fosca Giannotti and Dino Pedreschi} } @conference {DBLP:conf/plilp/BrogiMPT90, title = {Logic Programming within a Functional Framework}, booktitle = {PLILP}, year = {1990}, pages = {372-386}, author = {Antonio Brogi and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @article {DBLP:journals/jlp/BarbutiMPT90, title = {A Transformational Approach to Negation in Logic Programming}, journal = {J. Log. Program.}, volume = {8}, number = {3}, year = {1990}, pages = {201-228}, author = {Roberto Barbuti and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/ecai/BrogiMPT90, title = {Universal Quantification by Case Analysis}, booktitle = {ECAI}, year = {1990}, pages = {111-116}, author = {Antonio Brogi and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/iclp/MancarellaP88, title = {An Algebra of Logic Programs}, booktitle = {ICLP/SLP}, year = {1988}, pages = {1006-1023}, author = {Paolo Mancarella and Dino Pedreschi} } @article {DBLP:journals/jlp/MancarellaMP88, title = {Complete Logic Programs with Domain-Closure Axiom}, journal = {J. Log. 
Program.}, volume = {5}, number = {3}, year = {1988}, pages = {263-276}, author = {Paolo Mancarella and Simone Martini and Dino Pedreschi} } @conference {DBLP:conf/fgcs/BertolinoMMNPT88, title = {A Progress Report on the LML Project}, booktitle = {FGCS}, year = {1988}, pages = {675-684}, author = {Bruno Bertolino and Paolo Mancarella and Luigi Meo and Luca Nini and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/tapsoft/BarbutiMPT87, title = {Intensional Negation of Logic Programs: Examples and Implementation Techniques}, booktitle = {TAPSOFT, Vol.2}, year = {1987}, pages = {96-110}, author = {Roberto Barbuti and Paolo Mancarella and Dino Pedreschi and Franco Turini} } @article {DBLP:journals/scp/GiannottiMPT87, title = {Symbolic Evaluation with Structural Recursive Symbolic Constants}, journal = {Sci. Comput. Program.}, volume = {9}, number = {2}, year = {1987}, pages = {161-177}, author = {Fosca Giannotti and Attilio Matteucci and Dino Pedreschi and Franco Turini} } @article {DBLP:journals/tse/AmbriolaGPT85, title = {Symbolic Semantics and Program Reduction}, journal = {IEEE Trans. Software Eng.}, volume = {11}, number = {8}, year = {1985}, pages = {784-794}, author = {Vincenzo Ambriola and Fosca Giannotti and Dino Pedreschi and Franco Turini} } @conference {DBLP:conf/pos/AlbanoGOP85, title = {The Type System of Galileo}, booktitle = {Data Types and Persistence (Appin), Informal Proceedings}, year = {1985}, pages = {175-195}, author = {Antonio Albano and Fosca Giannotti and Renzo Orsini and Dino Pedreschi} } @conference {DBLP:conf/db-workshops/AlbanoGOP85, title = {The Type System of Galileo}, booktitle = {Data Types and Persistence (Appin)}, year = {1985}, pages = {101-119}, author = {Antonio Albano and Fosca Giannotti and Renzo Orsini and Dino Pedreschi} }