<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>5</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Abhilekha Dalal</style></author><author><style face="normal" font="default" size="100%">Moumita Sen Sarma</style></author><author><style face="normal" font="default" size="100%">Avishek Das</style></author><author><style face="normal" font="default" size="100%">Samatha E. Akkamahadevi</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Neurosymbolic Hidden Neuron Analysis in Convolutional Neural Networks</style></title><secondary-title><style face="normal" font="default" size="100%">Neuro-Symbolic AI: Bridging the Gap Between Neural Networks and Symbolic Reasoning</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2026</style></year></dates><publisher><style face="normal" font="default" size="100%">Elsevier</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p class=&quot;rtejustify&quot;&gt;This tutorial introduces a step-by-step, deductive pipeline for making the inner workings of neural networks more transparent by assigning human-understandable concepts to hidden neuron activations. The approach automatically maps neuron behavior to symbolic concepts drawn from structured knowledge sources and attaches an error margin to each label, providing a measure of confidence in its precision. 
While demonstrated in detail on the ADE20k scene dataset---including single-concept neurons, multiple neurons contributing to the same concept, and multi-concept neurons---the method is also applied to the SUN2012 dataset and adapted for a text classification task, highlighting its generalizability across modalities. The chapter is designed to be practical and educational, focusing on a replicable methodology that readers can adapt to varied applications. Through worked examples, visualizations, and evaluation strategies, the tutorial offers a clear, reusable framework for concept-based neuron analysis in both vision and language models.&lt;/p&gt;
</style></abstract><section><style face="normal" font="default" size="100%">8</style></section></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Joseph Zalewski</style></author><author><style face="normal" font="default" size="100%">Sanaz Saki Norouzi</style></author><author><style face="normal" font="default" size="100%">Aaron Eberhart</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Deep Deductive Reasoning is a Hard Deep Learning Problem</style></title><secondary-title><style face="normal" font="default" size="100%">Neurosymbolic Artificial Intelligence</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2025</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Abhilekha Dalal</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Adrita Barua</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author><author><style face="normal" font="default" size="100%">Kamruzzaman Sarker</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">On the Value of Labeled Data and Symbolic Methods for Hidden Neuron Activation Analysis</style></title><secondary-title><style face="normal" font="default" size="100%">18th International Conference on Neural-Symbolic Learning and Reasoning, NeSy 2024</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">CNN</style></keyword><keyword><style  face="normal" font="default" size="100%">Concept Induction</style></keyword><keyword><style  face="normal" font="default" size="100%">Explainable AI</style></keyword><keyword><style  face="normal" font="default" size="100%">LLM</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We introduce a novel model-agnostic post-hoc Explainable AI method that provides meaningful interpretations for hidden neuron activations in a Convolutional Neural Network. Our approach uses a Wikipedia-derived concept hierarchy with approx. 2 million classes as background knowledge, and deductive reasoning based Concept Induction for explanation generation. Additionally, we explore and compare the capabilities of off-the-shelf pre-trained multimodal-based explainable methods. Our evaluation shows that our neurosymbolic method holds a competitive edge in both quantitative and qualitative aspects.&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Joseph Zalewski</style></author><author><style face="normal" font="default" size="100%">Sanaz Saki Norouzi</style></author><author><style face="normal" font="default" size="100%">Aaron Eberhart</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Deep Deductive Reasoning is a Hard Deep Learning Problem</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record></records></xml>