@inproceedings{GaoZhangChenetal., author = {Gao, Yunlong and Zhang, Yisong and Chen, Baihua and Xiong, Yuhui}, title = {Local Neighborhood Reliability Weighted Support Vector Machine}, series = {2020 2nd International Conference on Industrial Artificial Intelligence (IAI)}, booktitle = {2020 2nd International Conference on Industrial Artificial Intelligence (IAI)}, publisher = {IEEE}, isbn = {978-1-7281-8216-2}, doi = {10.1109/IAI50351.2020.9262215}, pages = {1 -- 6}, abstract = {The support vector machine (SVM) is a classification model that learns the decision surface maximizing the margin in the feature space. Such a decision surface has good classification ability for new, unseen samples. In real-world applications, however, the data set usually contains noise and outliers, which affect the learning of the decision surface: the maximum margin cannot be obtained and the generalization ability of the SVM is reduced. In this paper, we introduce an adjacency factor for each input point to characterize its local neighborhood relationships. Weighting each sample point by the adjacency factor lets different sample points make different contributions to the learning of the decision surface, so this weighting filters out the influence of noise and outliers on the decision surface. We call the resulting method the local neighborhood reliability weighted support vector machine (LN-SVM).}, language = {en} } @misc{GaoZhangPanetal., author = {Gao, Yunlong and Zhang, Yisong and Pan, Jinyan and Luo, Sizhe and Yang, Chengyu}, title = {Discriminant analysis based on reliability of local neighborhood}, series = {Expert Systems with Applications}, volume = {175}, journal = {Expert Systems with Applications}, issn = {1873-6793}, doi = {10.1016/j.eswa.2021.114790}, abstract = {To obtain a compact and effective low-dimensional representation, most existing discriminant manifold learning methods integrate manifold learning into discriminant analysis (DA) to extract the intrinsic structure of the data. These methods learn two kinds of adjacency graphs, an intrinsic graph and a penalty graph, to characterize the similarity between intraclass samples and the pseudo similarity between interclass samples. However, they treat every sample equally, which results in the following defects: (1) These methods cannot accurately characterize the marginal region among different classes through penalty graphs alone. (2) They cannot identify noisy samples and outliers, which reduces the robustness of these methods. To address these problems, we introduce an adaptive adjacency factor to perform a discriminative reliability analysis for each sample. By integrating the adjacency factor into discriminant manifold learning, we propose a novel DA method, namely discriminant analysis based on reliability of local neighborhood (DA-RoLN). This paper makes three main contributions: (1) By introducing the adjacency factor, sample points can be divided into three parts: intraclass samples, marginal samples, and outliers. Therefore, DA-RoLN emphasizes the effect of valid samples and filters out the influence of outliers. (2) We adaptively calculate the adjacency factor in the low-dimensional space; thus, the margin between different classes in the low-dimensional space is emphasized. (3) An iterative algorithm is developed to solve the objective function of DA-RoLN; it is easy to implement and has a low computational cost.
Extensive experimental results show the effectiveness of DA-RoLN.}, language = {en} } @misc{GaoLuoPanetal., author = {Gao, Yunlong and Luo, Si-Zhe and Pan, Jin-Yan and Chen, Bai-Hua and Zhang, Yi-Song}, title = {Robust PCA Using Adaptive Probability Weighting}, series = {Acta Automatica Sinica}, volume = {47}, journal = {Acta Automatica Sinica}, number = {4}, issn = {0254-4156}, doi = {10.16383/j.aas.c180743}, pages = {825 -- 838}, abstract = {Principal component analysis (PCA) is an important method for processing high-dimensional data. In recent years, PCA models based on various norms have been studied extensively to improve robustness. However, on the one hand, these algorithms do not consider the relationship between the reconstruction error and the covariance; on the other hand, they do not account for the uncertainty of the principal components' description of the data. To address these problems, this paper proposes a new robust PCA algorithm. First, the L2,p-norm is used to measure the reconstruction error and the description variance of the projected data. Based on the reconstruction error and the description variance, an adaptive probability error minimization model is established to calculate the uncertainty of the principal components' description of the data, and on the basis of this uncertainty, the adaptive probability weighted PCA (RPCA-PW) is established. A corresponding optimization method is designed. Experimental results on artificial data sets, UCI data sets, and face databases show that RPCA-PW is superior to other PCA algorithms.}, language = {en} } @misc{ZhangAalst, author = {Zhang, Yisong and Aalst, Wil van der}, title = {Explorative Process Discovery Using Activity Projections}, series = {International Conference on Applications and Theory of Petri Nets and Concurrency, PETRI NETS 2023}, journal = {International Conference on Applications and Theory of Petri Nets and Concurrency, PETRI NETS 2023}, publisher = {Springer, Cham}, isbn = {978-3-031-33619-5}, doi = {10.1007/978-3-031-33620-1_13}, pages = {229 -- 239}, abstract = {This paper presents a tool for Explorative Process Discovery (EPD) using activity projections. Our EPD-Tool aims at exploring how quality measures change after removing activities from an event log. The main idea is to create a projected event log for every non-empty subset of activities and to apply process discovery and conformance checking to each of them. The tool has been implemented as a plugin in ProM. First, EPD-Tool uses a process discovery algorithm to discover a Petri net model for each projected event log. Then, EPD-Tool uses a conformance checking technique to compute conformance measures, e.g., fitness, precision, and F1-score, for each projected log-model pair (L, N). Finally, a dendrogram is generated to visualize the relationship between the log-model pairs, enabling a systematic exploration of the different models to find the best-performing node, i.e., the best log-model pair. This method prioritizes activities and detects redundancy in the process, which contributes to process enhancement. It also uncovers critical activities, helping to shorten the processing time or reduce the process cost.
This paper presents the EPD-Tool implementation and some example results.}, language = {en} } @misc{YiWuXietal., author = {Yi, Chao and Wu, Shunxiang and Xi, Bin and Ming, Daodong and Zhang, Yisong and Zhou, Zhenwen}, title = {Terrorist Video Detection System Based on Faster R-CNN and LightGBM}, series = {CSAE '20: Proceedings of the 4th International Conference on Computer Science and Application Engineering}, journal = {CSAE '20: Proceedings of the 4th International Conference on Computer Science and Application Engineering}, isbn = {978-1-4503-7772-0}, doi = {10.1145/3424978.3425121}, pages = {1 -- 8}, abstract = {Nowadays, the mobile phone has become an indispensable tool in the lives of many people. While facilitating people's lives, it also provides criminals with an important tool for spreading terrorist videos. Traditional manual detection of terrorist videos suffers from low accuracy and inefficiency. To address this issue, this paper proposes a terrorist video detection system for mobile phone forensics based on the Light Gradient Boosting Machine (LightGBM) and the Faster Region-based Convolutional Neural Network (Faster R-CNN), which is used to quickly detect whether a suspect's mobile phone contains terrorist videos. The system uses a multi-model detection method with two stages: preliminary detection and deep detection. Experimental research shows that it can effectively and accurately detect terrorist videos on mobile phones, thereby helping criminal investigators to quickly secure evidence and obtain clues for solving the case.}, language = {en} } @misc{GaoLinZhangetal., author = {Gao, Yunlong and Lin, Tingting and Zhang, Yisong and Luo, Sizhe and Nie, Feiping}, title = {Robust principal component analysis based on discriminant information}, series = {IEEE Transactions on Knowledge and Data Engineering}, volume = {35}, journal = {IEEE Transactions on Knowledge and Data Engineering}, number = {2}, publisher = {IEEE}, issn = {1558-2191}, doi = {10.1109/TKDE.2021.3093447}, pages = {1991 -- 2003}, abstract = {Recently, several robust principal component analysis (RPCA) models have been presented to enhance the robustness of PCA by using robust norms as their loss functions. However, an important problem is that they cannot discriminate outliers from correct samples. To solve this problem, we propose a robust principal component analysis based on discriminant information (RPCA-DI). RPCA-DI disentangles robust PCA in a two-step fashion: the identification and the processing of outliers. To identify outliers, a sample representation model based on entropy regularization is constructed to analyze the membership of data in the principal component space (PC) and its orthogonal complement (OC); the discriminative information of the data is then extracted by measuring the differences in the information retained on PC (or OC). In this way, correct samples can be distinguished when dealing with outliers, which makes the robustness learning more reasonable than in previous works. In the noise processing step, in addition to the level of noise, the resistance of the sample points to noise is also considered to prevent overfitting, thereby improving the generalization performance of RPCA-DI. Finally, an iterative algorithm is designed to solve the corresponding model.
Comparisons with some state-of-the-art RPCA methods on artificial datasets, UCI datasets, and face databases verify the effectiveness of the proposed algorithm.}, language = {en} } @misc{BertiKorenAdamsetal., author = {Berti, Alessandro and Koren, Istvan and Adams, Jan Niklas and Park, Gyunam and Knopp, Benedikt and Graves, Nina and Rafiei, Majid and Liß, Lukas and Tacke Genannt Unterberg, Leah and Zhang, Yisong and Schwanen, Christopher and Pegoraro, Marco and Aalst, Wil van der}, title = {OCEL (Object-Centric Event Log) 2.0 Specification}, series = {arXiv}, journal = {arXiv}, pages = {1 -- 49}, abstract = {Object-Centric Event Logs (OCELs) form the basis for Object-Centric Process Mining (OCPM). OCEL 1.0 was first released in 2020 and triggered the development of a range of OCPM techniques. OCEL 2.0 forms the new, more expressive standard, allowing for more extensive process analyses while remaining in an easily exchangeable format. In contrast to the first OCEL standard, it can depict changes in objects, provide information on object relationships, and qualify these relationships to other objects or specific events. Compared to XES, it is more expressive, less complicated, and more readable. OCEL 2.0 offers three exchange formats: a relational database format (SQLite), an XML format, and a JSON format. This OCEL 2.0 specification document provides an introduction to the standard, its metamodel, and its exchange formats, aimed at practitioners and researchers alike.}, language = {en} }