We propose 360° Volumetric Portrait (3VP) Avatar, a novel method for reconstructing 360° photo-realistic portrait avatars of human subjects from monocular video input alone. State-of-the-art monocular avatar reconstruction methods rely on stable facial performance capture. However, the commonly used 3D Morphable Model (3DMM)-based facial tracking has its limits: side views can hardly be captured, and tracking fails entirely for back views, where required inputs such as facial landmarks or human parsing masks are missing. This results in incomplete avatar reconstructions that cover only the frontal hemisphere. In contrast, we propose template-based tracking of the torso, head, and facial expressions, which allows us to cover the appearance of a human subject from all sides. Given a sequence of a subject rotating in front of a single camera, we train a neural volumetric representation based on neural radiance fields. A key challenge in constructing this representation is modeling appearance changes, especially in the mouth region (i.e., lips and teeth). We therefore propose a deformation-field-based blend basis that allows us to interpolate between different appearance states. We evaluate our approach on captured real-world data and compare against state-of-the-art monocular reconstruction methods. In contrast to those, our method is the first monocular technique to reconstruct an entire 360° avatar.
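To make the blend-basis idea concrete, the following minimal PyTorch sketch illustrates one plausible reading of the mechanism described above; the class name DeformationBasis, the per-basis offset MLPs, and the weight shapes are our own assumptions, not the authors' released code. A small set of learned deformation fields is blended with per-frame weights to warp sampled ray points into a canonical space before a NeRF-style radiance field would be queried; interpolating the weights then interpolates between appearance states such as a closed and an open mouth.

    # Illustrative sketch only (hypothetical names, not the authors' code).
    # Each basis element D_i is a small MLP predicting a 3D offset; per-frame
    # blend weights w interpolate between deformation/appearance states.
    import torch
    import torch.nn as nn

    class DeformationBasis(nn.Module):
        def __init__(self, num_basis: int = 4, hidden: int = 64):
            super().__init__()
            # One offset MLP per basis element: R^3 -> R^3.
            self.fields = nn.ModuleList(
                nn.Sequential(
                    nn.Linear(3, hidden), nn.ReLU(),
                    nn.Linear(hidden, hidden), nn.ReLU(),
                    nn.Linear(hidden, 3),
                )
                for _ in range(num_basis)
            )

        def forward(self, x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
            """Warp sampled points x (N, 3) toward canonical space.

            w (num_basis,) are per-frame blend weights, e.g. derived from the
            tracked expression state; intermediate appearance states are
            reached by interpolating w between observed configurations.
            """
            offsets = torch.stack([f(x) for f in self.fields], dim=0)  # (B, N, 3)
            return x + torch.einsum("b,bnd->nd", w, offsets)

    # Usage: blend halfway between two deformation states
    # (e.g. closed mouth vs. open mouth).
    basis = DeformationBasis(num_basis=4)
    points = torch.rand(1024, 3)                     # sampled ray points
    w_closed = torch.tensor([1.0, 0.0, 0.0, 0.0])
    w_open = torch.tensor([0.0, 1.0, 0.0, 0.0])
    canonical = basis(points, 0.5 * (w_closed + w_open))

In the paper's actual pipeline the warped points would be fed to the canonical radiance field; the sketch stops at the warp, which is the part the abstract describes.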
@article{nehvi2023360deg,
title={360$^\circ$ Volumetric Portrait Avatar},
author={Jalees Nehvi and Berna Kabadayi and Julien Valentin and Justus Thies},
year={2023},
eprint={2312.05311},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@inproceedings{10.1007/978-3-031-85187-2_1,
author = {Nehvi, Jalees and Kabadayi, Berna and Valentin, Julien and Thies, Justus},
title = {360$^\circ$ Volumetric Portrait Avatar},
year = {2025},
isbn = {978-3-031-85186-5},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
url = {https://doi.org/10.1007/978-3-031-85187-2_1},
doi = {10.1007/978-3-031-85187-2_1},
booktitle = {Pattern Recognition: 46th DAGM German Conference, DAGM GCPR 2024, Munich, Germany, September 10–13, 2024, Proceedings, Part II},
pages = {3--19},
numpages = {17},
keywords = {Volumetric Portrait Avatars, 360-degree, Monocular},
location = {Munich, Germany}
}