| Mario Günzel, Georg von der Brüggen and Jian-Jia Chen. Suspension-Aware Earliest-Deadline-First Scheduling Analysis. IEEE Trans. Comput. Aided Des. Integr. Circuits Syst. (presented in ACM Conference on Embedded Software, EMSOFT), 39(11), pages 4205--4216, 2020 [BibTeX][Link][Abstract]@article { guenzel2020emsoft,
author = {G\"unzel, Mario and Br\"uggen, Georg and Chen, Jian-Jia},
title = {Suspension-Aware Earliest-Deadline-First Scheduling Analysis},
journal = {IEEE Trans. Comput. Aided Des. Integr. Circuits Syst. (presented in ACM Conference on Embedded Software, EMSOFT)},
year = {2020},
volume = {39},
number = {11},
pages = {4205--4216},
url = {https://ieeexplore.ieee.org/document/9211430},
keywords = {sus, mario, georg},
confidential = {n},
abstract = {While the earliest-deadline-first (EDF) scheduling algorithm has extensively been utilized in real-time systems, there is almost no literature considering EDF for task sets with dynamic self-suspension behavior. To be precise, there is no specialized result for uniprocessor systems, besides the trivial suspension-oblivious approach. The work by Liu and Anderson (in ECRTS 2013) and Dong and Liu (in RTSS 2016) for suspension-aware multiprocessor global EDF can also be applied to uniprocessor systems and therefore be considered the state-of-the-art.
In this work, two novel schedulability analyses (one for sporadic and one for periodic task sets) for suspension-aware EDF on uniprocessor systems are proposed, which outperform the state-of-the-art on such systems in empirical and theoretical comparison. We further show that the analysis by Dong and Liu is in fact not suspension-aware for uniprocessor systems.},
}
|
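For context, the "trivial suspension-oblivious approach" mentioned in the abstract treats each task's worst-case self-suspension time as if it were additional execution time and then applies a standard EDF test. The sketch below is a minimal illustration of that baseline, assuming independent, implicit-deadline sporadic tasks on a uniprocessor and the classical EDF utilization bound; the Task structure and field names are illustrative, not taken from the paper.

```python
from typing import NamedTuple, List

class Task(NamedTuple):
    wcet: float        # worst-case execution time C_i
    suspension: float  # worst-case (dynamic) self-suspension time S_i
    period: float      # minimum inter-arrival time T_i (= relative deadline)

def edf_suspension_oblivious(tasks: List[Task]) -> bool:
    """Suspension-oblivious EDF test: charge suspension as execution time.

    For independent implicit-deadline sporadic tasks on a uniprocessor,
    EDF is schedulable if the inflated utilization
    sum((C_i + S_i) / T_i) <= 1. This is safe but pessimistic, since a
    suspended task does not actually occupy the processor.
    """
    return sum((t.wcet + t.suspension) / t.period for t in tasks) <= 1.0

# Example: two tasks that pass the test even after inflating by suspension.
tasks = [Task(wcet=1.0, suspension=0.5, period=4.0),
         Task(wcet=2.0, suspension=1.0, period=8.0)]
print(edf_suspension_oblivious(tasks))  # True: 1.5/4 + 3/8 = 0.75 <= 1
```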
| Sebastian Buschjäger, Jian-Jia Chen, Kuan-Hsun Chen, Mario Günzel, Christian Hakert, Katharina Morik, Rodion Novkin, Lukas Pfahler and Mikail Yayla. Towards Explainable Bit Error Tolerance of Resistive RAM-Based Binarized Neural Networks. CoRR, abs/2002.00909, 2020 [BibTeX][Link][Abstract]@article { buschjger2020explainable,
author = {Buschj\"ager, Sebastian and Chen, Jian-Jia and Chen, Kuan-Hsun and G\"unzel, Mario and Hakert, Christian and Morik, Katharina and Novkin, Rodion and Pfahler, Lukas and Yayla, Mikail},
title = {Towards Explainable Bit Error Tolerance of Resistive RAM-Based Binarized Neural Networks},
journal = {CoRR},
year = {2020},
volume = {abs/2002.00909},
url = {https://arxiv.org/pdf/2002.00909.pdf},
keywords = {kuan, nvm-oma, mario},
confidential = {n},
abstract = {Non-volatile memory, such as resistive RAM (RRAM), is an emerging energy-efficient storage, especially for low-power machine learning models on the edge. It is reported, however, that the bit error rate of RRAMs can be up to 3.3% in the ultra low-power setting, which might be crucial for many use cases. Binary neural networks (BNNs), a resource efficient variant of neural networks (NNs), can tolerate a certain percentage of errors without a loss in accuracy and demand lower resources in computation and storage. The bit error tolerance (BET) in BNNs can be achieved by flipping the weight signs during training, as proposed by Hirtzlin et al., but their method has a significant drawback, especially for fully connected neural networks (FCNN): The FCNNs overfit to the error rate used in training, which leads to low accuracy under lower error rates. In addition, the underlying principles of BET are not investigated. In this work, we improve the training for BET of BNNs and aim to explain this property. We propose straight-through gradient approximation to improve the weight-sign-flip training, by which BNNs adapt less to the bit error rates. To explain the achieved robustness, we define a metric that aims to measure BET without fault injection. We evaluate the metric and find that it correlates with accuracy over error rate for all FCNNs tested. Finally, we explore the influence of a novel regularizer that optimizes with respect to this metric, with the aim of providing a configurable trade-off in accuracy and BET.},
}
|
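To make the bit-error model in the abstract concrete, the sketch below binarizes weights to +1/-1 and injects independent random sign flips at a given bit error rate (the abstract reports up to 3.3% for RRAM in the ultra-low-power setting). The function names and the flat NumPy representation are illustrative assumptions, not the paper's training or fault-injection pipeline.

```python
import numpy as np

def binarize(weights: np.ndarray) -> np.ndarray:
    """Binarize real-valued weights to +1/-1, as in a BNN forward pass."""
    return np.where(weights >= 0, 1.0, -1.0)

def inject_bit_errors(binary_weights: np.ndarray, error_rate: float,
                      rng: np.random.Generator) -> np.ndarray:
    """Flip each stored binary weight independently with probability
    `error_rate`, mimicking RRAM bit errors on the stored sign bits."""
    flips = rng.random(binary_weights.shape) < error_rate
    return np.where(flips, -binary_weights, binary_weights)

# Example: check the empirical flip fraction at a 3.3% bit error rate.
rng = np.random.default_rng(0)
w = binarize(rng.standard_normal(10_000))
w_faulty = inject_bit_errors(w, error_rate=0.033, rng=rng)
print((w != w_faulty).mean())  # roughly 0.033
```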
| Mario Günzel and Jian-Jia Chen. Correspondence Article: Counterexample for suspension-aware schedulability analysis of EDF scheduling. Real Time Systems Journal, 56(4), pages 490--493, 2020 [BibTeX][Link]@article { DBLP:journals/rts/GunzelC20,
author = {G\"unzel, Mario and Chen, Jian-Jia},
title = {Correspondence Article: Counterexample for suspension-aware schedulability analysis of EDF scheduling},
journal = {Real Time Systems Journal},
year = {2020},
volume = {56},
number = {4},
pages = {490--493},
url = {https://link.springer.com/article/10.1007%2Fs11241-020-09353-0},
keywords = {sus, mario},
confidential = {n},
} |
| Mario Günzel and Jian-Jia Chen. On Schedulability Analysis of EDF Scheduling by Considering Suspension as Blocking. CoRR, abs/2001.05747, 2020 [BibTeX][Link]@article { DBLP:journals/corr/abs-2001-05747,
author = {G\"unzel, Mario and Chen, Jian-Jia},
title = {On Schedulability Analysis of EDF Scheduling by Considering Suspension as Blocking},
journal = {CoRR},
year = {2020},
volume = {abs/2001.05747},
url = {https://arxiv.org/abs/2001.05747},
keywords = {sus, mario},
confidential = {n},
} |