<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">132130</controlfield>
  <controlfield tag="005">20260113073156.0</controlfield>
  <datafield tag="024" ind1="7" ind2=" ">
    <subfield code="2">doi</subfield>
    <subfield code="a">10.1109/ICCV51070.2023.00483</subfield>
  </datafield>
  <datafield tag="024" ind1="8" ind2=" ">
    <subfield code="2">sideral</subfield>
    <subfield code="a">137283</subfield>
  </datafield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">ART-2024-137283</subfield>
  </datafield>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Mur-Labadia, Lorenzo</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">Multi-label affordance mapping from egocentric vision</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2024</subfield>
  </datafield>
  <datafield tag="506" ind1="0" ind2=" ">
    <subfield code="a">Access copy available to the general public</subfield>
    <subfield code="f">Unrestricted</subfield>
  </datafield>
  <datafield tag="520" ind1="3" ind2=" ">
    <subfield code="a">Accurate affordance detection and segmentation with pixel precision is an important piece in many complex systems based on interactions, such as robots and assitive devices. We present a new approach to affordance perception which enables accurate multi-label segmentation. Our approach can be used to automatically extract grounded affordances from first person videos of interactions using a 3D map of the environment providing pixel level precision for the affordance location. We use this method to build the largest and most complete dataset on affordances based on the EPIC-Kitchen dataset, EPIC-Aff, which provides interaction-grounded, multi-label, metric and spatial affordance annotations. Then, we propose a new approach to affordance segmentation based on multi-label detection which enables multiple affordances to co-exists in the same space, for example if they are associated with the same object. We present several strategies of multi-label detection using several segmentation architectures. The experimental results highlight the importance of the multi-label detection. Finally, we show how our metric representation can be exploited for build a map of interaction hotspots in spatial action-centric zones and use that representation to perform a task-oriented navigation.</subfield>
  </datafield>
  <datafield tag="536" ind1=" " ind2=" ">
    <subfield code="9">info:eu-repo/grantAgreement/ES/DGA/T45-23R</subfield>
    <subfield code="9">info:eu-repo/grantAgreement/ES/MICINN-AEI/PID2021-125209OB-I00</subfield>
    <subfield code="9">info:eu-repo/grantAgreement/EUR/MICINN/TED2021-129410B-I00</subfield>
    <subfield code="9">info:eu-repo/grantAgreement/EUR/MICINN/TED2021-131150B-I00</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
    <subfield code="9">info:eu-repo/semantics/openAccess</subfield>
    <subfield code="a">All rights reserved</subfield>
    <subfield code="u">http://www.europeana.eu/rights/rr-f/</subfield>
  </datafield>
  <datafield tag="592" ind1=" " ind2=" ">
    <subfield code="a">3.544</subfield>
    <subfield code="b">2024</subfield>
  </datafield>
  <datafield tag="593" ind1=" " ind2=" ">
    <subfield code="a">Software</subfield>
    <subfield code="c">2024</subfield>
  </datafield>
  <datafield tag="593" ind1=" " ind2=" ">
    <subfield code="a">Computer Vision and Pattern Recognition</subfield>
    <subfield code="c">2024</subfield>
  </datafield>
  <datafield tag="594" ind1=" " ind2=" ">
    <subfield code="a">38.1</subfield>
    <subfield code="b">2024</subfield>
  </datafield>
  <datafield tag="655" ind1=" " ind2="4">
    <subfield code="a">info:eu-repo/semantics/article</subfield>
    <subfield code="v">info:eu-repo/semantics/acceptedVersion</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="0">(orcid)0000-0001-5209-2267</subfield>
    <subfield code="a">Guerrero, José J.</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="0">(orcid)0000-0002-6741-844X</subfield>
    <subfield code="a">Martínez-Cantín, Rubén</subfield>
    <subfield code="u">Universidad de Zaragoza</subfield>
  </datafield>
  <datafield tag="710" ind1="2" ind2=" ">
    <subfield code="1">5007</subfield>
    <subfield code="2">520</subfield>
    <subfield code="a">Universidad de Zaragoza</subfield>
    <subfield code="b">Dpto. Informát.Ingenie.Sistms.</subfield>
    <subfield code="c">Área Ingen.Sistemas y Automát.</subfield>
  </datafield>
  <datafield tag="773" ind1=" " ind2=" ">
    <subfield code="g">2023 (2024), 5215-5226</subfield>
    <subfield code="p">Proceedings (IEEE Int. Conf. Comput. Vision)</subfield>
    <subfield code="t">Proceedings (IEEE International Conference on Computer Vision)</subfield>
    <subfield code="x">1550-5499</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">7690358</subfield>
    <subfield code="u">http://zaguan.unizar.es/record/132130/files/texto_completo.pdf</subfield>
    <subfield code="y">Postprint</subfield>
    <subfield code="z">info:eu-repo/date/embargoEnd/2025-01-15</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="s">2650642</subfield>
    <subfield code="u">http://zaguan.unizar.es/record/132130/files/texto_completo.jpg?subformat=icon</subfield>
    <subfield code="x">icon</subfield>
    <subfield code="y">Postprint</subfield>
    <subfield code="z">info:eu-repo/date/embargoEnd/2025-01-15</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="O">
    <subfield code="o">oai:zaguan.unizar.es:132130</subfield>
    <subfield code="p">articulos</subfield>
    <subfield code="p">driver</subfield>
  </datafield>
  <datafield tag="951" ind1=" " ind2=" ">
    <subfield code="a">2026-01-12-12:38:27</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
</record>
</collection>