<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="C">   <ARLID>0578508</ARLID> <utime>20240402214804.9</utime><mtime>20231124235959.9</mtime>              <title language="eng" primary="1">H-NeXt: The next step towards roto-translation invariant networks</title>  <specification> <page_count>14 s.</page_count> <media_type>E</media_type> </specification>   <serial><ARLID>cav_un_epca*0578507</ARLID><title>34th British Machine Vision Conference 2023</title><part_num/><part_title/><page_num>1-14</page_num><publisher><place>Aberdeen</place><name>BMVA</name><year>2023</year></publisher></serial>    <keyword>H-NeXT</keyword>   <keyword>robustness to unseen deformations</keyword>   <keyword>parameter-efficient roto-translation invariant network</keyword>   <keyword>classification on unaugmented training set</keyword>    <author primary="1"> <ARLID>cav_un_auth*0438860</ARLID> <name1>Karella</name1> <name2>Tomáš</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <country>CZ</country> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0101209</ARLID> <name1>Šroubek</name1> <name2>Filip</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. 
i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0254045</ARLID> <name1>Blažek</name1> <name2>Jan</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <country>CZ</country> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0101087</ARLID> <name1>Flusser</name1> <name2>Jan</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0457087</ARLID> <name1>Košík</name1> <name2>Václav</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <country>CZ</country> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author>   <source> <url>http://library.utia.cas.cz/separaty/2023/ZOI/karella-0578508.pdf</url> </source>         <cas_special> <project> <project_id>GA21-03921S</project_id> <agency>GA ČR</agency> <ARLID>cav_un_auth*0412209</ARLID> </project>  <abstract language="eng" primary="1">The widespread popularity of equivariant networks underscores the significance of parameter-efficient models and effective use of training data. 
At a time when robustness to unseen deformations is becoming increasingly important, we present H-NeXt, which bridges the gap between equivariance and invariance. H-NeXt is a parameter-efficient roto-translation invariant network that is trained without a single augmented image in the training set. Our network comprises three components: an equivariant backbone for learning roto-translation independent features, an invariant pooling layer for discarding roto-translation information, and a classification layer. H-NeXt outperforms the state of the art in classification on unaugmented training sets and augmented test sets of MNIST and CIFAR-10.</abstract>    <action target="WRD"> <ARLID>cav_un_auth*0458470</ARLID> <name>British Machine Vision Conference 2023 /34./</name> <dates>20231120</dates> <unknown tag="mrcbC20-s">20231124</unknown> <place>Aberdeen</place> <country>GB</country>  </action>  <RIV>JD</RIV> <FORD0>20000</FORD0> <FORD1>20200</FORD1> <FORD2>20206</FORD2>    <reportyear>2024</reportyear>      <num_of_auth>5</num_of_auth>  <presentation_type> PR </presentation_type> <inst_support> RVO:67985556 </inst_support>  <permalink>https://hdl.handle.net/11104/0347650</permalink>   <confidential>S</confidential>        <arlyear>2023</arlyear>       <unknown tag="mrcbU14"> SCOPUS </unknown> <unknown tag="mrcbU24"> PUBMED </unknown> <unknown tag="mrcbU34"> WOS </unknown> <unknown tag="mrcbU63"> cav_un_epca*0578507 34th British Machine Vision Conference 2023 BMVA 2023 Aberdeen 1 14 </unknown> </cas_special> </bibitem>