From d18fe06b7de0cf33b1f25d721342f5ba1c153ca7 Mon Sep 17 00:00:00 2001
From: RockyZuo <1240582726@qq.com>
Date: Tue, 6 Feb 2024 13:02:32 -0800
Subject: [PATCH] update news

---
 _bibliography/papers.bib  | 14 +++++++-------
 _config.yml               |  2 +-
 _news/20230910_Google.md  |  7 +++++++
 _news/20231220_NerfVO.md  |  7 +++++++
 _news/20240103_FMGS.md    |  7 +++++++
 _news/20240116_AAAI.md    |  7 +++++++
 _news/20240120_Caltech.md |  7 +++++++
 _news/20240129_ICRA.md    |  7 +++++++
 _pages/about.md           |  2 +-
 9 files changed, 51 insertions(+), 9 deletions(-)
 create mode 100644 _news/20230910_Google.md
 create mode 100644 _news/20231220_NerfVO.md
 create mode 100644 _news/20240103_FMGS.md
 create mode 100644 _news/20240116_AAAI.md
 create mode 100644 _news/20240120_Caltech.md
 create mode 100644 _news/20240129_ICRA.md

diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib
index 7e60e4d..699870e 100644
--- a/_bibliography/papers.bib
+++ b/_bibliography/papers.bib
@@ -2,7 +2,7 @@
 ---
 @article{zuo2023ral,
   title={ Coco-LIC: Continuous-Time Tightly-Coupled LiDAR-Inertial-Camera Odometry using Non-Uniform B-spline },
-  author={ Lang, Xiaolei and Chen, Chao and Tang, Kai and Ma, Yukai and Lv, Jiajun and Liu, Yong and Zuo#, Xingxing},
+  author={ Lang, Xiaolei and Chen, Chao and Tang, Kai and Ma, Yukai and Lv, Jiajun and Liu#, Yong and Zuo#, Xingxing},
   journal={ IEEE Robotics and Automation Letters},
   year={2023},
   publisher={IEEE},
@@ -15,7 +15,7 @@ @article{zuo2023ral

 @InProceedings{xin2023ismar,
   title={SimpleMapping: Real-Time Visual-Inertial Dense Mapping with Deep Multi-View Stereo},
-  author={Xin*, Yingye and Zuo*#, Xingxing, and Lu, Dongyue and Leutenegger, Stefan},
+  author={Xin*, Yingye and Zuo*#, Xingxing and Lu, Dongyue and Leutenegger, Stefan},
   Booktitle = {IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
   year={2023},
   abstract={We present a real-time visual-inertial dense mapping method capable of performing incremental 3D mesh reconstruction with high quality using only sequential monocular images and inertial measurement unit (IMU) readings. 6-DoF camera poses are estimated by a robust feature-based visual-inertial odometry (VIO), which also generates noisy sparse 3D map points as a by-product. We propose a sparse point aided multi-view stereo neural network (SPA-MVSNet) that can effectively leverage the informative but noisy sparse points from the VIO system. The sparse depth from VIO is firstly completed by a single-view depth completion network. This dense depth map, although naturally limited in accuracy, is then used as a prior to guide our MVS network in the cost volume generation and regularization for accurate dense depth prediction. Predicted depth maps of keyframe images by the MVS network are incrementally fused into a global map using TSDF-Fusion. We extensively evaluate both the proposed SPA-MVSNet and the entire visual-inertial dense mapping system on several public datasets as well as our own dataset, demonstrating the system's impressive generalization capabilities and its ability to deliver high-quality 3D mesh reconstruction online. Our proposed dense mapping system achieves a 39.7% improvement in F-score over existing systems when evaluated on the challenging scenarios of the EuRoC dataset.},
@@ -39,7 +39,7 @@ @article{zuo2023ral

 @article{lv2023tmech,
   title={Continuous-Time Fixed-Lag Smoothing for LiDAR-Inertial-Camera SLAM },
-  author={Lv, Jiajun and Lang, Xiaolei and Xu, Jinhong and Wang, Mengmeng and Liu, Yong and Zuo#, Xingxing},
+  author={Lv, Jiajun and Lang, Xiaolei and Xu, Jinhong and Wang, Mengmeng and Liu#, Yong and Zuo#, Xingxing},
   journal={IEEE/ASME Transactions on Mechatronics},
   year={2023},
   publisher={IEEE},
@@ -51,7 +51,7 @@ @article{lv2023tmech

 @article{lang2022ral,
   title={Ctrl-VIO: Continuous-Time Visual-Inertial Odometry for Rolling Shutter Cameras },
-  author={Lang, Xiaolei and Lv, Jiajun and Huang, Jianxin and Ma, Yukai and Liu, Yong and Zuo#, Xingxing},
+  author={Lang, Xiaolei and Lv, Jiajun and Huang, Jianxin and Ma, Yukai and Liu#, Yong and Zuo#, Xingxing},
   journal={ IEEE Robotics and Automation Letters},
   year={2022},
   publisher={IEEE},
@@ -86,7 +86,7 @@ @InProceedings{nate2022cvpr

 @inproceedings{zuo2021codevio,
   title={ CodeVIO: Visual-inertial odometry with learned optimizable dense depth },
-  author={Zuo, Xingxing and Merrill, Nathaniel and Li, Wei and Liu, Yong and Pollefeys, Marc and Huang, Guoquan},
+  author={Zuo*, Xingxing and Merrill*, Nathaniel and Li, Wei and Liu, Yong and Pollefeys, Marc and Huang, Guoquan},
   booktitle={IEEE International Conference on Robotics and Automation (ICRA)},
   comments={Nominated for the "Best Paper Award in Robot Vision (Finalist) of ICRA 2021"},
   pages={14382--14388},
@@ -196,7 +196,7 @@ @InProceedings{Zuo2019IROS

 @InProceedings{Zuo2019ISRR,
   Title = {Visual-Inertial Localization for Skid-Steering Robots with Kinematic Constraints},
-  Author = {Zuo, Xingxing and Zhang, Mingming and Chen, Yiming and Liu, Yong and Huang, Guoquan and Li, Mingyang},
+  Author = {Zuo*, Xingxing and Zhang*, Mingming and Chen, Yiming and Liu, Yong and Huang, Guoquan and Li, Mingyang},
   Booktitle = {International Symposium on Robotics Research (ISRR)},
   Year = {2019},
   Address = {Hanoi, Vietnam},
@@ -208,7 +208,7 @@ @InProceedings{Zuo2019ISRR

 @article{zuo2019visual,
   title={Visual-Inertial Localization With Prior LiDAR Map Constraints},
-  author={Zuo, Xingxing and Geneva, Patrick and Yang, Yulin and Ye, Wenlong and Liu, Yong and Huang, Guoquan},
+  author={Zuo*, Xingxing and Geneva*, Patrick and Yang, Yulin and Ye, Wenlong and Liu, Yong and Huang, Guoquan},
   journal={IEEE Robotics and Automation Letters},
   volume={4},
   number={4},
diff --git a/_config.yml b/_config.yml
index 9f8c6f0..caa85a0 100644
--- a/_config.yml
+++ b/_config.yml
@@ -104,7 +104,7 @@ collections:
 # output: true
 # permalink: /projects/:path/

-news_limit: 10
+news_limit: 15

 # -----------------------------------------------------------------------------
 # Jekyll settings
diff --git a/_news/20230910_Google.md b/_news/20230910_Google.md
new file mode 100644
index 0000000..e70a5bf
--- /dev/null
+++ b/_news/20230910_Google.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2023-09-10 10:00:00-0400
+inline: true
+---
+
+I join [Google](https://arvr.google.com/) as a full-time Visiting Faculty Researcher (Scientist) in Mountain View, USA!
diff --git a/_news/20231220_NerfVO.md b/_news/20231220_NerfVO.md
new file mode 100644
index 0000000..322b768
--- /dev/null
+++ b/_news/20231220_NerfVO.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2023-12-20 10:00:00-0400
+inline: true
+---
+
+Check out our research work "[NeRF-VO: Real-Time Sparse Visual Odometry with Neural Radiance Fields](https://arxiv.org/abs/2312.13471)"!
diff --git a/_news/20240103_FMGS.md b/_news/20240103_FMGS.md
new file mode 100644
index 0000000..845671d
--- /dev/null
+++ b/_news/20240103_FMGS.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2024-01-03 10:00:00-0400
+inline: true
+---
+
+Check out our research work "[FMGS: Foundation Model Embedded 3D Gaussian Splatting for Holistic 3D Scene Understanding](https://arxiv.org/abs/2401.01970)", completed at Google!
diff --git a/_news/20240116_AAAI.md b/_news/20240116_AAAI.md
new file mode 100644
index 0000000..79d6625
--- /dev/null
+++ b/_news/20240116_AAAI.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2024-01-16 10:00:00-0400
+inline: true
+---
+
+Our paper "[M2-CLIP: A Multimodal, Multi-task Adapting Framework for Video Action Recognition](https://arxiv.org/pdf/2401.11649.pdf)" is accepted by [AAAI](https://aaai.org/aaai-conference/) and selected for oral presentation!
diff --git a/_news/20240120_Caltech.md b/_news/20240120_Caltech.md
new file mode 100644
index 0000000..f4981cd
--- /dev/null
+++ b/_news/20240120_Caltech.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2024-01-20 10:00:00-0400
+inline: true
+---
+
+I join the CMS department at [Caltech](https://www.cms.caltech.edu/) as a Postdoctoral Research Associate!
diff --git a/_news/20240129_ICRA.md b/_news/20240129_ICRA.md
new file mode 100644
index 0000000..76d7071
--- /dev/null
+++ b/_news/20240129_ICRA.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2024-01-29 10:00:00-0400
+inline: true
+---
+
+Our paper "[RadarCam-Depth: Radar-Camera Fusion for Depth Estimation with Learned Metric Scale](https://arxiv.org/abs/2401.04325)" is accepted by ICRA!
diff --git a/_pages/about.md b/_pages/about.md
index 889ea5a..bed0331 100644
--- a/_pages/about.md
+++ b/_pages/about.md
@@ -17,7 +17,7 @@ years: [2023, 2022, 2021, 2020, 2019, 2018, 2017]

 I'm a Postdoctoral Research Associate at the Department of Computing + Mathematical Sciences, Caltech, since January 2024. Prior to this, I worked at Google in the USA as a full-time Visiting Faculty Researcher (Scientist) in 2023. I was a Postdoc Researcher (2021 - 2023) at the School of Computation, Information and Technology, Technical University of Munich, Germany, working with [Prof. Stefan Leutenegger](https://www.professoren.tum.de/en/leutenegger-stefan). I was an academic guest (2019 - 2021) in the [Computer Vision and Geometry Group](https://cvg.ethz.ch/) (CVG) at ETH Zurich, Switzerland, working with [Prof. Marc Pollefeys](http://people.inf.ethz.ch/pomarc/). I was also a visiting scholar (2018) in the [Robot Perception and Navigation Group](http://sites.udel.edu/robot) (RPNG) of [Prof. Guoquan (Paul) Huang](https://udel.edu/~ghuang/) at the University of Delaware, USA, and a visiting scholar (2017) in the [UTS Robotics Institute](https://www.uts.edu.au/research/robotics-institute), University of Technology Sydney, Australia, working with [Prof. Teresa Vidal Calleja](https://profiles.uts.edu.au/Teresa.VidalCalleja). I received my doctoral degree in 2021 from the Department of Control Science and Engineering at [Zhejiang University](https://www.zju.edu.cn/english/), China, under the co-supervision of [Prof. Guoquan (Paul) Huang](https://udel.edu/~ghuang/) and [Prof. Yong Liu](https://april.zju.edu.cn/team/dr-yong-liu/), and graduated with honors. Previously, I graduated with honors and obtained my Bachelor's degree in Mechanical Engineering at the [University of Electronic Science and Technology of China](https://en.uestc.edu.cn/) (UESTC), Chengdu, China, in 2016.

-I'm open to collaborations on cutting-edge research about robotic perception and intelligence, and co-supervise highly-qualified students. Feel free to reach out through [email](mailto:zuox@caltech.edu)!
+**I'm open to collaborations on cutting-edge research in robotic perception and intelligence, and to co-supervising highly qualified students. Feel free to reach out through [email](mailto:zuox@caltech.edu)!**
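
A note on the news_limit change in _config.yml (10 to 15): news entries on this Jekyll site form a collection, and the page's news include renders only the most recent site.news items up to that cap, so raising it to 15 keeps the six newly added posts visible alongside the older ones. The snippet below is a minimal, hypothetical Liquid sketch of how such a cap is typically applied; it is not this site's actual include, and the markup and variable names are assumptions.

    {% comment %} Hypothetical news include: render the newest entries, capped by site.news_limit. {% endcomment %}
    {% assign recent_news = site.news | sort: "date" | reverse %}
    {% for item in recent_news limit: site.news_limit %}
      <div class="news-item">
        <span class="news-date">{{ item.date | date: "%b %-d, %Y" }}</span>
        {% if item.inline %}{{ item.content | markdownify }}{% endif %}
      </div>
    {% endfor %}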