<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Abhilekha Dalal</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Adrita Barua</style></author><author><style face="normal" font="default" size="100%">Samatha Ereshi Akkamahadevi</style></author><author><style face="normal" font="default" size="100%">Avishek Das</style></author><author><style face="normal" font="default" size="100%">Cara Widmer</style></author><author><style face="normal" font="default" size="100%">Eugene Y Vasserman</style></author><author><style face="normal" font="default" size="100%">Kamruzzaman Sarker</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Toward a Neurosymbolic Understanding of Hidden Neuron Activations</style></title><secondary-title><style face="normal" font="default" size="100%">Neurosymbolic Artificial Intelligence</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2026</style></year></dates><volume><style face="normal" font="default" size="100%">2</style></volume><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Joseph Zalewski</style></author><author><style face="normal" font="default" size="100%">Sanaz Saki 
Norouzi</style></author><author><style face="normal" font="default" size="100%">Aaron Eberhart</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Deep Deductive Reasoning is a Hard Deep Learning Problem</style></title><secondary-title><style face="normal" font="default" size="100%">Neurosymbolic Artificial Intelligence</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2025</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Abhilekha Dalal</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Error-margin Analysis for Hidden Neuron Activation Labels</style></title><secondary-title><style face="normal" font="default" size="100%">18th International Conference on Neural-Symbolic Learning and Reasoning, NeSy 2024</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">CNN</style></keyword><keyword><style  face="normal" font="default" size="100%">Concept Induction</style></keyword><keyword><style  face="normal" font="default" size="100%">Explainable AI</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer </style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" 
size="100%">&lt;p&gt;Understanding how high-level concepts are represented with- in artificial neural networks is a fundamental challenge in the field of arti- ficial intelligence. While existing literature in explainable AI emphasizes the importance of labeling neurons with concepts to understand their functioning, they mostly focus on identifying what stimulus activates a neuron in most cases; this corresponds to the notion of recall in informa- tion retrieval. We argue that this is only the first-part of a two-part job; it is imperative to also investigate neuron responses to other stimuli, i.e., their precision. We call this the neuron label’s error margin.&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Abhilekha Dalal</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Adrita Barua</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author><author><style face="normal" font="default" size="100%">Kamruzzaman Sarker</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">On the Value of Labeled Data and Symbolic Methods for Hidden Neuron Activation Analysis</style></title><secondary-title><style face="normal" font="default" size="100%">18th International Conference on Neural-Symbolic Learning and Reasoning, NeSy 2024</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">CNN</style></keyword><keyword><style  face="normal" font="default" size="100%">Concept Induction</style></keyword><keyword><style  face="normal" font="default" size="100%">Explainable AI</style></keyword><keyword><style  face="normal" font="default" size="100%">LLM</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><publisher><style face="normal" font="default" size="100%">Springer </style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We introduce a novel model-agnostic post-hoc Explainable AI method that provides meaningful interpretations for hidden neuron activations in a Convolutional Neural Network. Our approach uses a Wikipedia-derived concept hierarchy with approx. 
2 million classes as background knowledge, and deductive reasoning based Concept Induction for explanation generation. Additionally, we explore and compare the capabilities of off-the-shelf pre-trained multimodal-based explainable methods. Our evaluation shows that our neurosymbolic method holds a competitive edge in both quantitative and qualitative aspects.&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Joseph Zalewski</style></author><author><style face="normal" font="default" size="100%">Sanaz Saki Norouzi</style></author><author><style face="normal" font="default" size="100%">Aaron Eberhart</style></author><author><style face="normal" font="default" size="100%">Eugene Y. Vasserman</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Deep Deductive Reasoning is a Hard Deep Learning Problem</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>27</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Cogan Shimizu</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">MMODS-O: A Modular Ontology for the Metadata Object Description Schema (MODS) – Documentation</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;We are presenting the documentation for MMODS-O, an ontology derived from the Metadata Object Description Schema (MODS, version 3.8), which is an XML 
Schema by The Library of Congress. The XML Schema concerns metadata pertaining to bibliographic elements; however, it is also used for other purposes: for instance, LCACommons, an interagency community that focuses on Life Cycle Analysis, and the National Agricultural Library require the metadata to be in MODS format. &amp;nbsp;Our motivation for developing this ontology -- including how it relates to previous attempts -- will be described elsewhere. This documentation is intended for readers who are familiar with the MODS XML schema.&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Cogan Shimizu</style></author><author><style face="normal" font="default" size="100%">Heidi Sieverding</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Modular Ontology for MODS – Metadata Object Description Schema</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;The Metadata Object Description Schema (MODS) was developed to describe bibliographic concepts and metadata and is maintained by the Library of Congress. Its authoritative version is given as an XML schema based on an XML mindset which means that it has significant limitations for use in a knowledge graphs context. We have therefore developed the Modular MODS Ontology (MMODS-O) which incorporates all elements and attributes of the MODS XML schema. In designing the ontology, we adopt the recent Modular Ontology Design Methodology (MOMo) with the intention to strike a balance between modularity and quality ontology design on the one hand, and conservative backward compatibility with MODS on the other.&amp;nbsp;&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Rushrukh Rayan</style></author><author><style face="normal" font="default" size="100%">Cogan Shimizu</style></author><author><style face="normal" font="default" size="100%">Pascal Hitzler</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">An Ontology Design Pattern for Role-Dependent Names</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;div class=&quot;page&quot; title=&quot;Page 1&quot;&gt;
&lt;div class=&quot;layoutArea&quot;&gt;
&lt;div class=&quot;column&quot;&gt;
&lt;p&gt;&lt;span&gt;We present an ontology design pattern for modeling&amp;nbsp;&lt;/span&gt;&lt;span&gt;Names&amp;nbsp;&lt;/span&gt;&lt;span&gt;as part of&amp;nbsp;&lt;/span&gt;&lt;span&gt;Roles&lt;/span&gt;&lt;span&gt;, to capture scenarios where an&amp;nbsp;&lt;/span&gt;&lt;span&gt;Agent&amp;nbsp;&lt;/span&gt;&lt;span&gt;performs different&amp;nbsp;&lt;/span&gt;&lt;span&gt;Roles&amp;nbsp;&lt;/span&gt;&lt;span&gt;using different&amp;nbsp;&lt;/span&gt;&lt;span&gt;Names&amp;nbsp;&lt;/span&gt;&lt;span&gt;associated with the different Roles. Examples of an Agent performing a Role using different Names are rather ubiquitous, e.g., authors who write under different pseudonyms, or different legal names for citizens of more than one country. The proposed pattern is a modified merger of a standard Agent Role and a standard Name pattern stub.&lt;/span&gt;&lt;/p&gt;
&lt;/div&gt;
&lt;/div&gt;
&lt;/div&gt;
</style></abstract></record></records></xml>