@article{D5DD00012B,
  author    = {Chen, Xu and Yu, Dazhou and Zhao, Liang and Liu, Fang},
  title     = {{ACES-GNN}: Can {Graph Neural Network} Learn to Explain Activity Cliffs?},
  journal   = {Digital Discovery},
  year      = {2025},
  publisher = {RSC},
  doi       = {10.1039/D5DD00012B},
  url       = {http://dx.doi.org/10.1039/D5DD00012B},
  abstract  = {Graph Neural Networks (GNNs) have revolutionized molecular property prediction by leveraging graph-based representations, yet their opaque decision-making processes hinder broader adoption in drug discovery. This study introduces the Activity-Cliff-Explanation-Supervised GNN (ACES-GNN) framework, designed to simultaneously improve predictive accuracy and interpretability by integrating explanation supervision for activity cliffs (ACs) into GNN training. ACs, defined by structurally similar molecules with significant potency differences, pose challenges for traditional models due to their reliance on shared structural features. By aligning model attributions with chemist-friendly interpretations, the ACES-GNN framework bridges the gap between prediction and explanation. Validated across 30 pharmacological targets, ACES-GNN consistently enhances both predictive accuracy and attribution quality compared to baseline methods. Our results demonstrate a strong correlation between improved predictions and accurate explanations, offering a robust and adaptable framework for addressing the ``intra-scaffold'' generalization problem. This work underscores the potential of explanation-guided learning to advance interpretable artificial intelligence in molecular modeling and drug discovery.},
  internal-note = {Advance-article export from RSC; placeholder pages="-" removed pending final pagination.},
}