<?xml version="1.0" encoding="UTF-8"?><oembed><type>video</type><version>1.0</version><html>&lt;iframe src=&quot;https://www.loom.com/embed/67b6b7ce415a467ea44863854ddf0c62&quot; frameborder=&quot;0&quot; width=&quot;1280&quot; height=&quot;960&quot; webkitallowfullscreen mozallowfullscreen allowfullscreen&gt;&lt;/iframe&gt;</html><height>960</height><width>1280</width><provider_name>Loom</provider_name><provider_url>https://www.loom.com</provider_url><thumbnail_height>960</thumbnail_height><thumbnail_width>1280</thumbnail_width><thumbnail_url>https://cdn.loom.com/sessions/thumbnails/67b6b7ce415a467ea44863854ddf0c62-d476a7f1d7126b87.gif</thumbnail_url><duration>109.517</duration><title>NeuroSense Multimodal AI - 10 June 2025</title><description>Multimodal Affect: Unified deep learning framework for emotion and sentiment recognition from video, audio, and text. Powered by BERT, ResNet3D, and CNNs. End-to-end training, robust evaluation — built for research and real-world affective computing.</description></oembed>