<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">168363</controlfield>
  <controlfield tag="005">20260204153543.0</controlfield>
  <datafield tag="024" ind1="7" ind2=" ">
    <subfield code="2">doi</subfield>
    <subfield code="a">10.1007/s00371-025-04340-7</subfield>
  </datafield>
  <datafield tag="024" ind1="8" ind2=" ">
    <subfield code="2">sideral</subfield>
    <subfield code="a">147867</subfield>
  </datafield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">ART-2026-147867</subfield>
  </datafield>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Luesia-Lahoz, Pablo</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
    <subfield code="0">(orcid)0000-0002-5778-1819</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">Stereo non-line-of-sight imaging</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2026</subfield>
  </datafield>
  <datafield tag="520" ind1="3" ind2=" ">
    <subfield code="a">Transient non-line-of-sight imaging techniques reconstruct hidden scenes by analyzing the time of flight of light scattered off a visible secondary surface, or relay wall. Despite many promising approaches, all face the inherent problem of the missing cone, which restricts the visibility of surfaces based on their positions and orientations relative to the relay wall. Drawing inspiration from stereo technologies from computer vision, we devise a setup consisting of two distinct relay walls. We leverage phasor fields that computationally model both relay walls as generalized virtual camera apertures. This approach allows us to combine the contributions from each relay wall, including the signal obtained by illuminating one wall and capturing the other, information that would be lost otherwise. Our results demonstrate that our proposal diminishes the effect of the missing cone by making the problem better posed. Additionally, by analyzing the visibility conditions of the missing cone, we extract orientation cues from each relay wall contribution. We use this information to enhance visualizations.</subfield>
  </datafield>
  <datafield tag="506" ind1="0" ind2=" ">
    <subfield code="a">Access copy available to the general public</subfield>
    <subfield code="f">Unrestricted</subfield>
  </datafield>
  <datafield tag="536" ind1=" " ind2=" ">
    <subfield code="9">info:eu-repo/grantAgreement/EC/HORIZON EUROPE/101070310/EU/Physical Cognition for Intelligent Control and Safe Human-Robot Interaction/Sestosenso</subfield>
    <subfield code="9">info:eu-repo/grantAgreement/ES/MICIU/PID2019-105004GB-I00</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
    <subfield code="9">info:eu-repo/semantics/openAccess</subfield>
    <subfield code="a">by-nc-nd</subfield>
    <subfield code="u">https://creativecommons.org/licenses/by-nc-nd/4.0/deed.es</subfield>
  </datafield>
  <datafield tag="655" ind1=" " ind2="4">
    <subfield code="a">info:eu-repo/semantics/article</subfield>
    <subfield code="v">info:eu-repo/semantics/publishedVersion</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Cartiel, Sergio</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Muñoz, Adolfo</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
    <subfield code="0">(orcid)0000-0002-8160-7159</subfield>
  </datafield>
  <datafield tag="710" ind1="2" ind2=" ">
    <subfield code="1">5007</subfield>
    <subfield code="2">570</subfield>
    <subfield code="a">Universidad de Zaragoza</subfield>
    <subfield code="b">Dpto. Informát.Ingenie.Sistms.</subfield>
    <subfield code="c">Área Lenguajes y Sistemas Inf.</subfield>
  </datafield>
  <datafield tag="773" ind1=" " ind2=" ">
    <subfield code="g">42, 148 (2026), [12 pp.]</subfield>
    <subfield code="p">Vis. comput.</subfield>
    <subfield code="t">VISUAL COMPUTER</subfield>
    <subfield code="x">0178-2789</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">3172165</subfield>
    <subfield code="u">http://zaguan.unizar.es/record/168363/files/texto_completo.pdf</subfield>
    <subfield code="y">Versión publicada</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">2341252</subfield>
    <subfield code="u">http://zaguan.unizar.es/record/168363/files/texto_completo.jpg?subformat=icon</subfield>
    <subfield code="x">icon</subfield>
    <subfield code="y">Versión publicada</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="O">
    <subfield code="o">oai:zaguan.unizar.es:168363</subfield>
    <subfield code="p">articulos</subfield>
    <subfield code="p">driver</subfield>
  </datafield>
  <datafield tag="951" ind1=" " ind2=" ">
    <subfield code="a">2026-02-04-13:14:33</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
</record>
</collection>