@article{VivekanandanWirthKarlbaueretal.2023,
  author   = {Vivekanandan, Deepak and Wirth, Samuel and Karlbauer, Patrick and Klarmann, Noah},
  title    = {A Reinforcement Learning Approach for Scheduling Problems with Improved Generalization through Order Swapping},
  journal  = {Machine Learning and Knowledge Extraction},
  volume   = {5},
  number   = {2},
  pages    = {418--430},
  year     = {2023},
  doi      = {10.3390/make5020025},
  abstract = {The scheduling of production resources (such as associating jobs to machines) plays a vital role in the manufacturing industry, not only for saving energy but also for increasing overall efficiency. Among the different job scheduling problems, the Job Shop Scheduling Problem (JSSP) is addressed in this work. The JSSP falls into the category of NP-hard Combinatorial Optimization Problems (COPs), for which solving the problem through exhaustive search becomes infeasible. Simple heuristics such as First-In, First-Out and Largest Processing Time First, as well as metaheuristics such as tabu search, are often adopted to solve the problem by truncating the search space. These methods become inefficient for large problem sizes, as they are either far from the optimum or time-consuming. In recent years, research on using Deep Reinforcement Learning (DRL) to solve COPs has gained interest and has shown promising results in terms of solution quality and computational efficiency. In this work, we provide a novel approach to solving the JSSP with DRL, examining the objectives of generalization and solution effectiveness. In particular, we employ the Proximal Policy Optimization (PPO) algorithm, which follows the policy-gradient paradigm and is found to perform well in the constrained dispatching of jobs. We incorporate a new method called the Order Swapping Mechanism (OSM) into the environment to achieve better generalized learning of the problem. The performance of the presented approach is analyzed in depth using a set of available benchmark instances and by comparing our results with the work of other groups.},
  language = {en}
}