<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="style/detail_T.xsl"?>
<bibitem type="J">   <ARLID>0432661</ARLID> <utime>20240103204735.9</utime><mtime>20141013235959.9</mtime>   <WOS>000343160900017</WOS>  <DOI>10.1007/s10957-013-0474-6</DOI>           <title language="eng" primary="1">A Counterexample on Sample-Path Optimality in Stable Markov Decision Chains with the Average Reward Criterion</title>  <specification> <page_count>11 s.</page_count> <media_type>P</media_type> </specification>   <serial><ARLID>cav_un_epca*0257061</ARLID><ISSN>0022-3239</ISSN><title>Journal of Optimization Theory and Applications</title><part_num/><part_title/><volume_id>163</volume_id><volume>2 (2014)</volume><page_num>674-684</page_num><publisher><place/><name>Springer</name><year/></publisher></serial>    <keyword>Strong sample-path optimality</keyword>   <keyword>Lyapunov function condition</keyword>   <keyword>Stationary policy</keyword>   <keyword>Expected average reward criterion</keyword>    <author primary="1"> <ARLID>cav_un_auth*0307645</ARLID> <name1>Cavazos-Cadena</name1> <name2>R.</name2> <country>MX</country>  </author> <author primary="0"> <ARLID>cav_un_auth*0238984</ARLID> <name1>Montes-de-Oca</name1> <name2>R.</name2> <country>MX</country>  </author> <author primary="0"> <ARLID>cav_un_auth*0101196</ARLID> <name1>Sladký</name1> <name2>Karel</name2> <full_dept language="cz">Ekonometrie</full_dept> <full_dept>Department of Econometrics</full_dept> <department language="cz">E</department> <department>E</department> <institution>UTIA-B</institution>  <fullinstit>Ústav teorie informace a automatizace AV ČR, v. v. i.</fullinstit> </author>
<source> <url>http://library.utia.cas.cz/separaty/2014/E/sladky-0432661.pdf</url> </source>        <cas_special> <project> <project_id>012/300/02</project_id> <agency>PSF Organization</agency> <country>US</country> <ARLID>cav_un_auth*0307566</ARLID> </project> <project> <project_id>171396</project_id> <agency>CONACYT (México) and ASCR (Czech Republic)</agency> <country>MX</country> <ARLID>cav_un_auth*0307567</ARLID> </project>  <abstract language="eng" primary="1">This note deals with Markov decision chains evolving on a denumerable  state space. Under standard continuity compactness requirements, an explicit example is provided to show that, with respect to a strong sample-path average reward criterion, the Lyapunov function condition does not ensure the existence of an optimal stationary policy.</abstract>     <reportyear>2015</reportyear>  <RIV>BB</RIV>      <num_of_auth>3</num_of_auth>  <inst_support> RVO:67985556 </inst_support>  <permalink>http://hdl.handle.net/11104/0237102</permalink>  <cooperation> <ARLID>cav_un_auth*0307568</ARLID> <institution>UAA</institution> <name>Departamento de Estadística y Cálculo, Universidad Autónoma Agraria Antonio Narro, Saltillo Coah</name> <country>MX</country> </cooperation> <cooperation> <ARLID>cav_un_auth*0307569</ARLID> <institution>UAM</institution> <name>Departamento de Matemáticas, Universidad Autónoma Metropolitana, Campus Iztapalapa</name> <country>MX</country> </cooperation>  <confidential>S</confidential>          <unknown tag="mrcbT16-e">MATHEMATICSAPPLIED|OPERATIONSRESEARCHMANAGEMENTSCIENCE</unknown> <unknown tag="mrcbT16-j">0.773</unknown> <unknown tag="mrcbT16-s">1.140</unknown> <unknown tag="mrcbT16-4">Q1</unknown> <unknown tag="mrcbT16-B">66.559</unknown> <unknown tag="mrcbT16-C">77.581</unknown> <unknown tag="mrcbT16-D">Q2</unknown> <unknown tag="mrcbT16-E">Q1</unknown> <arlyear>2014</arlyear>       <unknown tag="mrcbU34"> 000343160900017 WOS </unknown> <unknown tag="mrcbU63"> 
cav_un_epca*0257061 Journal of Optimization Theory and Applications 0022-3239 1573-2878 Roč. 163 č. 2 2014 674 684 Springer </unknown> </cas_special> </bibitem>