[Video: "Distilling Step-by-Step! Outperforming Larger Language Models with Less Training Data and Smaller Model Sizes" — ACL Anthology, 15 March 2026 (Loom, 2:59): https://www.loom.com/embed/edd7ef43d536466f8f23228d5f4e6211]