<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="J">   <ARLID>0567218</ARLID> <utime>20240402213552.6</utime><mtime>20230120235959.9</mtime>   <SCOPUS>85146315332</SCOPUS> <WOS>000960827100001</WOS>  <DOI>10.1016/j.orl.2023.01.008</DOI>           <title language="eng" primary="1">Contractivity of Bellman operator in risk averse dynamic programming with infinite horizon</title>  <specification> <page_count>4 s.</page_count> <media_type>P</media_type> </specification>   <serial><ARLID>cav_un_epca*0254574</ARLID><ISSN>0167-6377</ISSN><title>Operations Research Letters</title><part_num/><part_title/><volume_id>51</volume_id><volume>2 (2023)</volume><page_num>133-136</page_num><publisher><place/><name>Elsevier</name><year/></publisher></serial>    <keyword>Risk aversion</keyword>   <keyword>Dynamic programming</keyword>   <keyword>Infinite horizon</keyword>    <author primary="1"> <ARLID>cav_un_auth*0289084</ARLID> <name1>Kopa</name1> <name2>M.</name2> <country>CZ</country> </author> <author primary="0"> <ARLID>cav_un_auth*0101206</ARLID> <name1>Šmíd</name1> <name2>Martin</name2> <institution>UTIA-B</institution> <full_dept language="cz">Ekonometrie</full_dept> <full_dept>Department of Econometrics</full_dept> <department language="cz">E</department> <department>E</department>  <share>50</share> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author>   <source> <url>http://library.utia.cas.cz/separaty/2023/E/smid-0567218.pdf</url> </source> <source> <url>https://www.sciencedirect.com/science/article/pii/S0167637723000081?via%3Dihub</url>  </source>        <cas_special> <project> <project_id>GA19-11062S</project_id> <agency>GA ČR</agency> <country>CZ</country> <ARLID>cav_un_auth*0385133</ARLID> </project>  <abstract language="eng" primary="1">The paper deals with a risk averse dynamic programming problem with infinite horizon. First, the required assumptions are formulated to have the problem well defined. 
Then the Bellman equation is derived, which may also be seen as a standalone reinforcement learning problem. The fact that the Bellman operator is a contraction is proved, guaranteeing convergence of various solution algorithms used for dynamic programming as well as reinforcement learning problems, which we demonstrate on the value iteration and the policy iteration algorithms.</abstract>     <result_subspec>WOS</result_subspec> <RIV>BB</RIV> <FORD0>10000</FORD0> <FORD1>10100</FORD1> <FORD2>10103</FORD2>    <reportyear>2024</reportyear>      <num_of_auth>2</num_of_auth>  <inst_support> RVO:67985556 </inst_support>  <permalink>https://hdl.handle.net/11104/0340876</permalink>   <confidential>S</confidential>  <unknown tag="mrcbC86"> Article Operations Research Management Science </unknown> <unknown tag="mrcbC91"> C </unknown>         <unknown tag="mrcbT16-e">OPERATIONSRESEARCH&amp;MANAGEMENTSCIENCE</unknown> <unknown tag="mrcbT16-f">1.1</unknown> <unknown tag="mrcbT16-g">0.2</unknown> <unknown tag="mrcbT16-h">13.6</unknown> <unknown tag="mrcbT16-i">0.00271</unknown> <unknown tag="mrcbT16-j">0.502</unknown> <unknown tag="mrcbT16-k">3313</unknown> <unknown tag="mrcbT16-q">85</unknown> <unknown tag="mrcbT16-s">0.449</unknown> <unknown tag="mrcbT16-y">19</unknown> <unknown tag="mrcbT16-x">0.97</unknown> <unknown tag="mrcbT16-3">474</unknown> <unknown tag="mrcbT16-4">Q2</unknown> <unknown tag="mrcbT16-5">0.800</unknown> <unknown tag="mrcbT16-6">110</unknown> <unknown tag="mrcbT16-7">Q4</unknown> <unknown tag="mrcbT16-C">8</unknown> <unknown tag="mrcbT16-D">Q4</unknown> <unknown tag="mrcbT16-E">Q3</unknown> <unknown tag="mrcbT16-M">0.21</unknown> <unknown tag="mrcbT16-N">Q4</unknown> <unknown tag="mrcbT16-P">8</unknown> <arlyear>2023</arlyear>       <unknown tag="mrcbU14"> 85146315332 SCOPUS </unknown> <unknown tag="mrcbU24"> PUBMED </unknown> <unknown tag="mrcbU34"> 000960827100001 WOS </unknown> <unknown tag="mrcbU63"> cav_un_epca*0254574 Operations Research Letters Roč. 
51 č. 2 2023 133 136 0167-6377 1872-7468 Elsevier </unknown> </cas_special> </bibitem>