@inproceedings{dde8b42ba5cf440aa1e0eb294d3b1bf1,
  title     = {Deep learning with visual explanation for radiotherapy-induced toxicity prediction},
  abstract  = {Deep learning models are widely studied for radiotherapy toxicity prediction; however, one of the major challenges is that they are complex models and difficult to understand. To aid in the creation of optimal dose treatment plans, it is critical to understand the mechanism and reasoning behind the network's prediction, as well as the specific anatomical regions involved in toxicity. In this work, we propose a convolutional neural network to predict the toxicity after pelvic radiotherapy that is able to explain the network's prediction. The proposed model analyses the dose treatment plan using multiple instance learning and convolutional encores. A dataset of 315 patients was included in the study, and experiments with both quantitative and qualitative approaches were conducted to assess the network's performance.},
  keywords  = {deep learning, explainable model, multiple instance learning, toxicity prediction},
  author    = {Elhaminia, Behnaz and Gilbert, Alexandra and Frangi, Alejandro F. and Scarsbrook, Andrew and Lilley, John and Appelt, Ane and Gooya, Ali},
  editor    = {Iftekharuddin, Khan M. and Chen, Weijie},
  booktitle = {Medical Imaging 2023: Computer-Aided Diagnosis},
  series    = {Progress in Biomedical Optics and Imaging - Proceedings of {SPIE}},
  publisher = {SPIE},
  address   = {United States},
  year      = {2023},
  doi       = {10.1117/12.2652481},
  language  = {English},
  note      = {Publisher Copyright: {\textcopyright} 2023 SPIE.; Medical Imaging 2023: Computer-Aided Diagnosis ; Conference date: 19-02-2023 Through 23-02-2023},
}