<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
    <record>
        <controlfield tag="001">1925</controlfield>
        <controlfield tag="005">20190219081244.0</controlfield>
        <datafield tag="037" ind1=" " ind2=" ">
            <subfield code="a">INPRO--2008-022</subfield>
        </datafield>
        <datafield tag="041" ind1=" " ind2=" ">
            <subfield code="a">eng</subfield>
        </datafield>
        <datafield tag="100" ind1=" " ind2=" ">
            <subfield code="a">Montijano, Eduardo</subfield>
        </datafield>
        <datafield tag="245" ind1=" " ind2=" ">
            <subfield code="a">Position-Based Navigation Using Multiple Homographies</subfield>
        </datafield>
        <datafield tag="260" ind1=" " ind2=" ">
            <subfield code="c">2008-10-15</subfield>
        </datafield>
        <datafield tag="300" ind1=" " ind2=" ">
            <subfield code="a">8</subfield>
        </datafield>
        <datafield tag="500" ind1=" " ind2=" ">
            <subfield code="a">homography, visual navigation, motion estimation, reactive navigation, metric reconstruction.</subfield>
        </datafield>
        <datafield tag="520" ind1=" " ind2=" ">
            <subfield code="a">In this paper we address the problem of visual navigation of a mobile robot which simultaneously obtains metric localization and scene reconstruction using homographies. Initially, the robot is guided by a human and some scenes during the trip are stored from known reference locations. The interest of this paper consists in the possibility of getting real and precise data of the robot motion and the scene, which presents some advantages over other existing approaches. For example, it allows the robot to carry out other trajectories than those executed during the teaching phase. We show an extensive analysis of the output in presence of errors in some of the inputs.</subfield>
        </datafield>
        <datafield tag="540" ind1=" " ind2=" ">
            <subfield code="9">info:eu-repo/semantics/openAccess</subfield>
            <subfield code="a">Esta obra está sujeta a una licencia de uso Creative Commons. Se permite la reproducción total o parcial, la distribución, la comunicación pública de la obra y la creación de obras derivadas, siempre que no sea con finalidades comerciales, y siempre que se reconozca la autoría de la obra original.</subfield>
            <subfield code="u">https://creativecommons.org/licenses/by-nc/4.0/</subfield>
        </datafield>
        <datafield tag="653" ind1="1" ind2=" ">
            <subfield code="a">robotics</subfield>
            <subfield code="a">image reconstruction</subfield>
            <subfield code="a">mobile robots</subfield>
            <subfield code="a">motion estimation</subfield>
            <subfield code="a">path planning</subfield>
            <subfield code="a">position control</subfield>
            <subfield code="a">robot vision</subfield>
            <subfield code="a">homography</subfield>
            <subfield code="a">metric localization</subfield>
            <subfield code="a">position-based navigation</subfield>
            <subfield code="a">robot motion</subfield>
            <subfield code="a">scene reconstruction</subfield>
            <subfield code="a">visual navigation</subfield>
        </datafield>
        <datafield tag="700" ind1=" " ind2=" ">
            <subfield code="a">Sagüés Blázquiz, Carlos</subfield>
        </datafield>
        <datafield tag="773" ind1=" " ind2=" ">
            <subfield code="c">994-1001</subfield>
            <subfield code="p">Proceedings of 13th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)</subfield>
            <subfield code="y">2008</subfield>
        </datafield>
        <datafield tag="856" ind1="0" ind2=" ">
            <subfield code="f">emonti@unizar.es</subfield>
        </datafield>
        <datafield tag="856" ind1="4" ind2=" ">
            <subfield code="s">250167</subfield>
            <subfield code="u">http://zaguan.unizar.es/record/1925/files/login.pdf</subfield>
            <subfield code="z">Texto completo (Para personal UZ)</subfield>
        </datafield>
        <datafield tag="980" ind1=" " ind2=" ">
            <subfield code="a">PREPRINT</subfield>
        </datafield>
    </record>

    
</collection>