<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="C">   <ARLID>0576905</ARLID> <utime>20240402214559.5</utime><mtime>20231024235959.9</mtime>   <SCOPUS>85179554546</SCOPUS>  <DOI>10.1109/IPTA59101.2023.10319998</DOI>           <title language="eng" primary="1">Invariant Convolutional Networks</title>  <specification> <page_count>6 s.</page_count> <media_type>E</media_type> </specification>   <serial><ARLID>cav_un_epca*0576904</ARLID><ISBN>979-8-3503-2541-6</ISBN><title>Proceedings of The 12th International Conference on Image Processing Theory, Tools and Applications (IPTA 2023)</title><part_num/><part_title/><publisher><place>Piscataway</place><name>IEEE</name><year>2023</year></publisher></serial>    <keyword>Neural network</keyword>   <keyword>augmentation</keyword>   <keyword>blur</keyword>    <author primary="1"> <ARLID>cav_un_auth*0377447</ARLID> <name1>Lébl</name1> <name2>Matěj</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <full_dept>Department of Image Processing</full_dept> <country>CZ</country> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0101087</ARLID> <name1>Flusser</name1> <name2>Jan</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept>Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department>ZOI</department> <full_dept>Department of Image Processing</full_dept> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. 
i.</fullinstit> </author>   <source> <url>http://library.utia.cas.cz/separaty/2023/ZOI/flusser-0576905.pdf</url> </source>        <cas_special> <project> <project_id>StrategieAV21/1</project_id> <agency>AV ČR</agency> <country>CZ</country> <ARLID>cav_un_auth*0328930</ARLID> </project> <project> <project_id>GA21-03921S</project_id> <agency>GA ČR</agency> <ARLID>cav_un_auth*0412209</ARLID> </project>  <abstract language="eng" primary="1">Neural networks are often trained on datasets that are not fully representative of the expected query images. Many times, the differences stem from the query images being taken in sub-optimal conditions. The most common defects are rotation, scale, blur, noise and intensity &amp; contrast change which were all thoroughly studied and described. In this paper we propose a novel neural network architecture which is invariant to such degradations by design. We incorporate the knowledge built for classical methods directly into the network architecture providing an alternative to the augmentation of the training dataset. In the experiments, the proposed solution outperforms the classical augmentation technique in both accuracy and computational resources needed. 
</abstract>    <action target="WRD"> <ARLID>cav_un_auth*0456973</ARLID> <name>International Conference on Image Processing Theory, Tools and Applications (IPTA 2023) /12./</name> <dates>20231016</dates> <unknown tag="mrcbC20-s">20231019</unknown> <place>Paris</place> <country>FR</country>  </action>  <RIV>JD</RIV> <FORD0>20000</FORD0> <FORD1>20200</FORD1> <FORD2>20204</FORD2>    <reportyear>2024</reportyear>      <num_of_auth>2</num_of_auth>  <presentation_type> PR </presentation_type> <inst_support> RVO:67985556 </inst_support>  <permalink>https://hdl.handle.net/11104/0346495</permalink>   <confidential>S</confidential>  <article_num> 10319998 </article_num>       <arlyear>2023</arlyear>       <unknown tag="mrcbU14"> 85179554546 SCOPUS </unknown> <unknown tag="mrcbU24"> PUBMED </unknown> <unknown tag="mrcbU34"> WOS </unknown> <unknown tag="mrcbU63"> cav_un_epca*0576904 Proceedings of The 12th International Conference on Image Processing Theory, Tools and Applications (IPTA 2023) IEEE 2023 Piscataway 979-8-3503-2541-6 </unknown> </cas_special> </bibitem>