<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="C">   <ARLID>0583563</ARLID> <utime>20250131155208.1</utime><mtime>20240304235959.9</mtime>              <title language="eng" primary="1">Average Reward Optimality in Semi-Markov Decision Processes with Costly Interventions</title>  <specification> <page_count>6 s.</page_count> <media_type>P</media_type> </specification>   <serial><ARLID>cav_un_epca*0583562</ARLID><ISBN>978-80-11-04132-8</ISBN><ISSN>2788-3965</ISSN><title>Proceedings of the 41st International Conference on Mathematical Methods in Econometrics</title><part_num/><part_title/><page_num>378-383</page_num><publisher><place>Praha</place><name>The Czech Society of Operations Research</name><year>2023</year></publisher><editor><name1>Sekničková</name1><name2>Jana</name2></editor><editor><name1>Holý</name1><name2>Vladimír</name2></editor></serial>    <keyword>controlled semi-Markov reward processes</keyword>   <keyword>long-run optimality</keyword>   <keyword>intervention of the decision maker</keyword>    <author primary="1"> <ARLID>cav_un_auth*0101196</ARLID> <name1>Sladký</name1> <name2>Karel</name2> <institution>UTIA-B</institution> <full_dept language="cz">Ekonometrie</full_dept> <full_dept language="eng">Department of Econometrics</full_dept> <department language="cz">E</department> <department language="eng">E</department>  <share>100</share> <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author>   <source> <url>http://library.utia.cas.cz/separaty/2023/E/sladky-0583563.pdf</url> </source>        <cas_special>  <abstract language="eng" primary="1">In this note we consider semi-Markov reward decision processes evolving on finite state spaces. We focus attention on average reward models, i.e. we establish explicit formulas for the growth rate of the total expected reward. In contrast to the standard models we assume that the decision maker can also change the running process by some (costly) intervention. 
Recall that the results for optimality criteria for the classical Markov decision chains in discrete and continuous time settings turn out to be a very specific case of the considered model. The aim is to formulate optimality conditions for semi-Markov models with interventions and present algorithmic procedures for finding optimal solutions.</abstract>    <action target="WRD"> <ARLID>cav_un_auth*0464093</ARLID> <name>MME 2023: Mathematical Methods in Economics /41./</name> <dates>20230913</dates> <unknown tag="mrcbC20-s">20230915</unknown> <place>Prague</place> <url>mme2023.vse.cz</url> <country>CZ</country>  </action>  <RIV>BB</RIV> <FORD0>10000</FORD0> <FORD1>10100</FORD1> <FORD2>10103</FORD2>    <reportyear>2024</reportyear>      <num_of_auth>1</num_of_auth>  <presentation_type> PR </presentation_type> <inst_support> RVO:67985556 </inst_support>  <permalink>https://hdl.handle.net/11104/0351597</permalink>   <confidential>S</confidential>        <arlyear>2023</arlyear>       <unknown tag="mrcbU02"> C </unknown> <unknown tag="mrcbU14"> SCOPUS </unknown> <unknown tag="mrcbU24"> PUBMED </unknown> <unknown tag="mrcbU34"> WOS </unknown> <unknown tag="mrcbU63"> cav_un_epca*0583562 Proceedings of the 41st International Conference on Mathematical Methods in Econometrics The Czech Society of Operations Research 2023 Praha 378 383 978-80-11-04132-8 2788-3965 </unknown> <unknown tag="mrcbU67"> Sekničková Jana 340 </unknown> <unknown tag="mrcbU67"> Holý Vladimír 340 </unknown> </cas_special> </bibitem>