<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="A">   <ARLID>0410869</ARLID> <utime>20240103182245.0</utime><mtime>20060210235959.9</mtime>        <title language="eng" primary="1">Algorithmic procedures for mean-variance optimality in Markov decision chains. Abstract</title>  <publisher> <place>Prague</place> <name>Institute of Information Theory and Automation</name> <pub_time>2002</pub_time> </publisher> <specification> <page_count>1 s.</page_count> </specification>   <serial><title>Abstracts of the 24th European Meeting of Statisticians &amp; 14th Prague Conference on Information Theory, Statistical Decision Functions and Random Processes</title><part_num/><part_title/><page_num>322</page_num><editor><name1>Janžura</name1><name2>M.</name2></editor><editor><name1>Mikosch</name1><name2>T.</name2></editor></serial>    <keyword>Markov decision chains</keyword>   <keyword>mean-variance</keyword>   <keyword>policy iteration</keyword>    <author primary="1"> <ARLID>cav_un_auth*0101196</ARLID> <name1>Sladký</name1> <name2>Karel</name2> <institution>UTIA-B</institution> <full_dept>Department of Econometrics</full_dept>  <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author> <author primary="0"> <ARLID>cav_un_auth*0101193</ARLID> <name1>Sitař</name1> <name2>Milan</name2> <institution>UTIA-B</institution>  <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author>     <COSATI>12B</COSATI>    <cas_special> <project> <project_id>GA402/02/1015</project_id> <agency>GA ČR</agency> <ARLID>cav_un_auth*0000527</ARLID> </project> <project> <project_id>GA402/01/0539</project_id> <agency>GA ČR</agency> <ARLID>cav_un_auth*0008959</ARLID> </project> <research> <research_id>CEZ:AV0Z1075907</research_id> </research>  <abstract language="eng" primary="1">We investigate how the mean-variance selection rule, originally proposed for portfolio selection problems, can work in Markovian decision models. 
We consider a Markov decision chain with finite state and action spaces; however, instead of average expected reward or average expected variance optimality we consider mean variance optimality, square mean variance optimality or weighted difference of average expected rewards and variances. Optimality conditions and algorithmic procedures are presented.</abstract>  <action target="WRD"> <ARLID>cav_un_auth*0212933</ARLID> <name>EMS 2002</name> <place>Prague</place> <country>CZ</country> <dates>19.08.2002-23.08.2002</dates>  </action>    <RIV>BB</RIV>   <department>E</department>    <permalink>http://hdl.handle.net/11104/0130956</permalink>   <ID_orig>UTIA-B 20020083</ID_orig>     <arlyear>2002</arlyear>       <unknown tag="mrcbU10"> 2002 </unknown> <unknown tag="mrcbU10"> Prague Institute of Information Theory and Automation </unknown> <unknown tag="mrcbU63"> Abstracts of the 24th European Meeting of Statisticians &amp; 14th Prague Conference on Information Theory, Statistical Decision Functions and Random Processes 322 </unknown> <unknown tag="mrcbU67"> Janžura M. 340 </unknown> <unknown tag="mrcbU67"> Mikosch T. 340 </unknown> </cas_special> </bibitem>