<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="C">   <ARLID>0575759</ARLID> <utime>20240402214439.5</utime><mtime>20230922235959.9</mtime>    <DOI>10.1109/ICIP49359.2023.10221948</DOI>           <title language="eng" primary="1">NeRD: Neural field-based Demosaicking</title>  <specification> <page_count>5 s.</page_count> <media_type>E</media_type> </specification>   <serial><ARLID>cav_un_epca*0575755</ARLID><ISBN>978-1-7281-9835-4</ISBN><title>Proceedings of the 2023 IEEE International Conference on Image Processing (ICIP)</title><part_num/><part_title/><page_num>1735-1739</page_num><publisher><place>Piscataway</place><name>IEEE</name><year>2023</year></publisher></serial>    <keyword>Demosaicking</keyword>   <keyword>neural field</keyword>   <keyword>implicit neural representation</keyword>    <author primary="1"> <ARLID>cav_un_auth*0379363</ARLID> <name1>Kerepecký</name1> <name2>Tomáš</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept language="eng">Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department language="eng">ZOI</department> <full_dept>Department of Image Processing</full_dept> <country>CZ</country> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0101209</ARLID> <name1>Šroubek</name1> <name2>Filip</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept>Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department>ZOI</department> <full_dept>Department of Image Processing</full_dept> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. 
i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0283562</ARLID> <name1>Novozámský</name1> <name2>Adam</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept>Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department>ZOI</department> <full_dept>Department of Image Processing</full_dept> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0101087</ARLID> <name1>Flusser</name1> <name2>Jan</name2> <institution>UTIA-B</institution> <full_dept language="cz">Zpracování obrazové informace</full_dept> <full_dept>Department of Image Processing</full_dept> <department language="cz">ZOI</department> <department>ZOI</department> <full_dept>Department of Image Processing</full_dept> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author>   <source> <url>http://library.utia.cas.cz/separaty/2023/ZOI/kerepecky-0575759.pdf</url> </source>        <cas_special> <project> <project_id>GA21-03921S</project_id> <agency>GA ČR</agency> <ARLID>cav_un_auth*0412209</ARLID> </project> <project> <project_id>StrategieAV21/1</project_id> <agency>AV ČR</agency> <country>CZ</country> <ARLID>cav_un_auth*0441412</ARLID> </project>  <abstract language="eng" primary="1">We introduce NeRD, a new demosaicking method for generating full-color images from Bayer patterns. Our approach leverages advancements in neural fields to perform demosaicking by representing an image as a coordinate-based neural network with sine activation functions. The inputs to the network are spatial coordinates and a low-resolution Bayer pattern, while the outputs are the corresponding RGB values. 
An encoder network, which is a blend of ResNet and U-net, enhances the implicit neural representation of the image to improve its quality and ensure spatial consistency through prior learning. Our experimental results demonstrate that NeRD outperforms traditional and state-of-the-art CNN-based methods and significantly closes the gap to transformer-based methods.</abstract>    <action target="WRD"> <ARLID>cav_un_auth*0455338</ARLID> <name>IEEE International Conference on Image Processing 2023 (ICIP 2023)</name> <dates>20231008</dates> <unknown tag="mrcbC20-s">20231011</unknown> <place>Kuala Lumpur</place> <country>MY</country>  </action>  <RIV>JC</RIV> <FORD0>10000</FORD0> <FORD1>10200</FORD1> <FORD2>10201</FORD2>    <reportyear>2024</reportyear>      <num_of_auth>4</num_of_auth>  <presentation_type> PR </presentation_type> <inst_support> RVO:67985556 </inst_support>  <permalink>https://hdl.handle.net/11104/0345842</permalink>  <cooperation> <ARLID>cav_un_auth*0329918</ARLID> <name>FJFI ČVUT Praha</name> <country>CZ</country> </cooperation>  <confidential>S</confidential>        <arlyear>2023</arlyear>       <unknown tag="mrcbU14"> SCOPUS </unknown> <unknown tag="mrcbU24"> PUBMED </unknown> <unknown tag="mrcbU34"> WOS </unknown> <unknown tag="mrcbU63"> cav_un_epca*0575755 Proceedings of the 2023 IEEE International Conference on Image Processing (ICIP) 978-1-7281-9835-4 1735 1739 Piscataway IEEE 2023 </unknown> </cas_special> </bibitem>