<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">144803</controlfield>
  <controlfield tag="005">20251113150205.0</controlfield>
  <datafield tag="024" ind1="7" ind2=" ">
    <subfield code="2">doi</subfield>
    <subfield code="a">10.1109/TIV.2024.3418525</subfield>
  </datafield>
  <datafield tag="024" ind1="8" ind2=" ">
    <subfield code="2">sideral</subfield>
    <subfield code="a">139594</subfield>
  </datafield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">ART-2025-139594</subfield>
  </datafield>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Liu, Changxiang</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">PE-VINS: Accurate Monocular Visual-Inertial SLAM With Point-Edge Features</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2025</subfield>
  </datafield>
  <datafield tag="506" ind1="0" ind2=" ">
    <subfield code="a">Access copy available to the general public</subfield>
    <subfield code="f">Unrestricted</subfield>
  </datafield>
  <datafield tag="520" ind1="3" ind2=" ">
    <subfield code="a">Visual-Inertial Navigation Systems (VINS) are a significant undertaking in computer vision, robotics, and autonomous driving. Currently, point-line VINS have attracted significant attention due to their increased robustness and accuracy compared to point-only VINS. However, their effectiveness relies on the existence of clear line structures within the scene. Point-line VINS may become inaccurate or fail when scenes contain scattered lines or other features like arcs. Moreover, extracting and matching line features can bring computational overheads due to complex geometric models. In order to address VINS challenges without the overheads related to lines, we propose a novel approach, denoted as PE-VINS, which adds edge features to point-based VINS. Our proposed method employs edge features in scenes to establish extra correspondences between views and then enhance its accuracy and robustness. Our method identifies edge features using image gradients and selects the most informative ones in the front end. We leverage sparse optical flow to track selected edge features and triangulate them using the initial pose predicted by the Inertial Measurement Unit (IMU). In the back end, we present a novel edge feature residual formulation that differs from the traditional reprojection residual. We tightly couple the new edge residual with the reprojection and IMU preintegration residual to better refine camera poses. We test our PE-VINS on public datasets, and our results show that it outperforms existing point-line-based methods and achieves state-of-the-art VINS performance. The code will be released at https://github.com/BlueAkoasm/PE-VINS .</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
    <subfield code="9">info:eu-repo/semantics/openAccess</subfield>
    <subfield code="a">All rights reserved</subfield>
    <subfield code="u">http://www.europeana.eu/rights/rr-f/</subfield>
  </datafield>
  <datafield tag="590" ind1=" " ind2=" ">
    <subfield code="a">14.3</subfield>
    <subfield code="b">2024</subfield>
  </datafield>
  <datafield tag="591" ind1=" " ind2=" ">
    <subfield code="a">COMPUTER SCIENCE, ARTIFICIAL INTELLIGENCE</subfield>
    <subfield code="b">6 / 204 = 0.029</subfield>
    <subfield code="c">2024</subfield>
    <subfield code="d">Q1</subfield>
    <subfield code="e">T1</subfield>
  </datafield>
  <datafield tag="591" ind1=" " ind2=" ">
    <subfield code="a">TRANSPORTATION SCIENCE &amp; TECHNOLOGY</subfield>
    <subfield code="b">4 / 77 = 0.052</subfield>
    <subfield code="c">2024</subfield>
    <subfield code="d">Q1</subfield>
    <subfield code="e">T1</subfield>
  </datafield>
  <datafield tag="591" ind1=" " ind2=" ">
    <subfield code="a">ENGINEERING, ELECTRICAL &amp; ELECTRONIC</subfield>
    <subfield code="b">7 / 366 = 0.019</subfield>
    <subfield code="c">2024</subfield>
    <subfield code="d">Q1</subfield>
    <subfield code="e">T1</subfield>
  </datafield>
  <datafield tag="592" ind1=" " ind2=" ">
    <subfield code="a">2.821</subfield>
    <subfield code="b">2024</subfield>
  </datafield>
  <datafield tag="593" ind1=" " ind2=" ">
    <subfield code="a">Artificial Intelligence</subfield>
    <subfield code="c">2024</subfield>
    <subfield code="d">Q1</subfield>
  </datafield>
  <datafield tag="593" ind1=" " ind2=" ">
    <subfield code="a">Control and Optimization</subfield>
    <subfield code="c">2024</subfield>
    <subfield code="d">Q1</subfield>
  </datafield>
  <datafield tag="593" ind1=" " ind2=" ">
    <subfield code="a">Automotive Engineering</subfield>
    <subfield code="c">2024</subfield>
    <subfield code="d">Q1</subfield>
  </datafield>
  <datafield tag="655" ind1=" " ind2="4">
    <subfield code="a">info:eu-repo/semantics/article</subfield>
    <subfield code="v">info:eu-repo/semantics/acceptedVersion</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Yu, Hongshan</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Cheng, Panfei</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Sun, Wei</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Civera, Javier</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
    <subfield code="0">(orcid)0000-0003-1368-1151</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Chen, Xieyuanli</subfield>
  </datafield>
  <datafield tag="710" ind1="2" ind2=" ">
    <subfield code="1">5007</subfield>
    <subfield code="2">520</subfield>
    <subfield code="a">Universidad de Zaragoza</subfield>
    <subfield code="b">Dpto. Informát.Ingenie.Sistms.</subfield>
    <subfield code="c">Área Ingen.Sistemas y Automát.</subfield>
  </datafield>
  <datafield tag="773" ind1=" " ind2=" ">
    <subfield code="g">10, 2 (2025), 808 - 818</subfield>
    <subfield code="p">IEEE trans. intell. veh.</subfield>
    <subfield code="t">IEEE transactions on intelligent vehicles</subfield>
    <subfield code="x">2379-8858</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">14016338</subfield>
    <subfield code="u">http://zaguan.unizar.es/record/144803/files/texto_completo.pdf</subfield>
    <subfield code="y">Postprint</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">3661713</subfield>
    <subfield code="u">http://zaguan.unizar.es/record/144803/files/texto_completo.jpg?subformat=icon</subfield>
    <subfield code="x">icon</subfield>
    <subfield code="y">Postprint</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="O">
    <subfield code="o">oai:zaguan.unizar.es:144803</subfield>
    <subfield code="p">articulos</subfield>
    <subfield code="p">driver</subfield>
  </datafield>
  <datafield tag="951" ind1=" " ind2=" ">
    <subfield code="a">2025-11-13-15:00:48</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
</record>
</collection>