Skip to content

Commit

Permalink
update news
Browse files Browse the repository at this point in the history
  • Loading branch information
ZuoJiaxing committed Feb 6, 2024
1 parent ad362bc commit d18fe06
Show file tree
Hide file tree
Showing 9 changed files with 51 additions and 9 deletions.
14 changes: 7 additions & 7 deletions _bibliography/papers.bib
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
---
@article{zuo2023ral,
title={<a name="lnk_lang2023ral"> Coco-LIC: Continuous-Time Tightly-Coupled LiDAR-Inertial-Camera Odometry using Non-Uniform B-spline </a>},
author={ Lang, Xiaolei and Chen, Chao and Tang, Kai and Ma, Yukai and Lv, Jiajun and Liu, Yong and <em><b>Zuo<sup>#</sup></b></em>, <em><b>Xingxing</b></em>},
author={ Lang, Xiaolei and Chen, Chao and Tang, Kai and Ma, Yukai and Lv, Jiajun and Liu<sup>#</sup>, Yong and <em><b>Zuo<sup>#</sup></b></em>, <em><b>Xingxing</b></em>},
journal={ IEEE Robotics and Automation Letters},
year={2023},
publisher={IEEE},
Expand All @@ -15,7 +15,7 @@ @article{zuo2023ral

@InProceedings{xin2023ismar,
title={<a name="lnk_xin2023ismar">SimpleMapping: Real-Time Visual-Inertial Dense Mapping with Deep Multi-View Stereo</a>},
author={Xin*, Yingye and <em><b>Zuo*<sup>#</sup></b></em>, <em><b>Xingxing</b></em>, and Lu, Dongyue and Leutenegger, Stefan},
author={Xin*, Yingye and <em><b>Zuo*<sup>#</sup></b></em>, <em><b>Xingxing</b></em> and Lu, Dongyue and Leutenegger, Stefan},
Booktitle = {IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
year={2023},
abstract={We present a real-time visual-inertial dense mapping method capable of performing incremental 3D mesh reconstruction with high quality using only sequential monocular images and inertial measurement unit (IMU) readings. 6-DoF camera poses are estimated by a robust feature-based visual-inertial odometry (VIO), which also generates noisy sparse 3D map points as a by-product. We propose a sparse point aided multi-view stereo neural network (SPA-MVSNet) that can effectively leverage the informative but noisy sparse points from the VIO system. The sparse depth from VIO is firstly completed by a single-view depth completion network. This dense depth map, although naturally limited in accuracy, is then used as a prior to guide our MVS network in the cost volume generation and regularization for accurate dense depth prediction. Predicted depth maps of keyframe images by the MVS network are incrementally fused into a global map using TSDF-Fusion. We extensively evaluate both the proposed SPA-MVSNet and the entire visual-inertial dense mapping system on several public datasets as well as our own dataset, demonstrating the system's impressive generalization capabilities and its ability to deliver high-quality 3D mesh reconstruction online. Our proposed dense mapping system achieves a 39.7% improvement in F-score over existing systems when evaluated on the challenging scenarios of the EuRoC dataset.},
Expand All @@ -39,7 +39,7 @@ @article{zuo2023ral

@article{lv2023tmech,
title={<a name="lnk_lv2023tmech">Continuous-Time Fixed-Lag Smoothing for LiDAR-Inertial-Camera SLAM </a>},
author={Lv, Jiajun and Lang, Xiaolei and Xu, Jinhong and Wang, Mengmeng and Liu, Yong and <em><b>Zuo<sup>#</sup></b></em>, <em><b>Xingxing</b></em>},
author={Lv, Jiajun and Lang, Xiaolei and Xu, Jinhong and Wang, Mengmeng and Liu<sup>#</sup>, Yong and <em><b>Zuo<sup>#</sup></b></em>, <em><b>Xingxing</b></em>},
journal={IEEE/ASME Transactions on Mechatronics},
year={2023},
publisher={IEEE},
Expand All @@ -51,7 +51,7 @@ @article{lv2023tmech

@article{lang2022ral,
title={<a name="lnk_lang2022ral">Ctrl-VIO: Continuous-Time Visual-Inertial Odometry for Rolling Shutter Cameras </a>},
author={Lang, Xiaolei and Lv, Jiajun and Huang, Jianxin and Ma, Yukai and Liu, Yong and <em><b>Zuo<sup>#</sup></b></em>, <em><b>Xingxing</b></em>},
author={Lang, Xiaolei and Lv, Jiajun and Huang, Jianxin and Ma, Yukai and Liu<sup>#</sup>, Yong and <em><b>Zuo<sup>#</sup></b></em>, <em><b>Xingxing</b></em>},
journal={ IEEE Robotics and Automation Letters},
year={2022},
publisher={IEEE},
Expand Down Expand Up @@ -86,7 +86,7 @@ @InProceedings{nate2022cvpr

@inproceedings{zuo2021codevio,
title={<a name="lnk_codevio"> CodeVIO: Visual-inertial odometry with learned optimizable dense depth </a>},
author={Zuo, Xingxing and Merrill, Nathaniel and Li, Wei and Liu, Yong and Pollefeys, Marc and Huang, Guoquan},
author={<em><b>Zuo*</b></em>, <em><b>Xingxing</b></em> and Merrill*, Nathaniel and Li, Wei and Liu, Yong and Pollefeys, Marc and Huang, Guoquan},
booktitle={IEEE International Conference on Robotics and Automation (ICRA)},
comments={<b style="color:red;">Nominated for the "Best Paper Award in Robot Vision (Finalist) of ICRA 2021"</b>},
pages={14382--14388},
Expand Down Expand Up @@ -196,7 +196,7 @@ @InProceedings{Zuo2019IROS

@InProceedings{Zuo2019ISRR,
Title = {Visual-Inertial Localization for Skid-Steering Robots with Kinematic Constraints},
Author = {Zuo, Xingxing and Zhang, Mingming and Chen, Yiming and Liu, Yong and Huang, Guoquan and Li, Mingyang},
Author = {<em><b>Zuo*</b></em>, <em><b>Xingxing</b></em> and Zhang*, Mingming and Chen, Yiming and Liu, Yong and Huang, Guoquan and Li, Mingyang},
Booktitle = {International Symposium on Robotics Research (ISRR)},
Year = {2019},
Address = {Hanoi, Vietnam},
Expand All @@ -208,7 +208,7 @@ @InProceedings{Zuo2019ISRR

@article{zuo2019visual,
title={Visual-Inertial Localization With Prior LiDAR Map Constraints},
author={Zuo, Xingxing and Geneva, Patrick and Yang, Yulin and Ye, Wenlong and Liu, Yong and Huang, Guoquan},
author={<em><b>Zuo*</b></em>, <em><b>Xingxing</b></em> and Geneva*, Patrick and Yang, Yulin and Ye, Wenlong and Liu, Yong and Huang, Guoquan},
journal={IEEE Robotics and Automation Letters},
volume={4},
number={4},
Expand Down
2 changes: 1 addition & 1 deletion _config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ collections:
# output: true
# permalink: /projects/:path/

news_limit: 10
news_limit: 15

# -----------------------------------------------------------------------------
# Jekyll settings
Expand Down
7 changes: 7 additions & 0 deletions _news/20230910_Google.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
layout: post
date: 2023-09-10 10:00:00-0400
inline: true
---

I join [Google](https://arvr.google.com/) as a full-time Visiting Faculty Researcher (Scientist) in Mountain View, USA!
7 changes: 7 additions & 0 deletions _news/20231220_NerfVO.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
layout: post
date: 2023-12-20 10:00:00-0400
inline: true
---

Check out our research work "[NeRF-VO: Real-Time Sparse Visual Odometry with Neural Radiance Fields](https://arxiv.org/abs/2312.13471)"!
7 changes: 7 additions & 0 deletions _news/20240103_FMGS.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
layout: post
date: 2024-01-03 10:00:00-0400
inline: true
---

Check out our research work "[FMGS: Foundation Model Embedded 3D Gaussian Splatting for Holistic 3D Scene Understanding](https://arxiv.org/abs/2401.01970)" finished at Google!
7 changes: 7 additions & 0 deletions _news/20240116_AAAI.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
layout: post
date: 2024-01-16 10:00:00-0400
inline: true
---

A paper about "[M2-CLIP: A Multimodal, Multi-task Adapting Framework for Video Action Recognition](https://arxiv.org/pdf/2401.11649.pdf)" is accepted by [AAAI](https://aaai.org/aaai-conference/) and featured as an oral presentation!
7 changes: 7 additions & 0 deletions _news/20240120_Caltech.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
layout: post
date: 2024-01-20 10:00:00-0400
inline: true
---

I join [Caltech](https://www.cms.caltech.edu/) as a Postdoctoral Research Associate in the CMS department at Caltech, USA!
7 changes: 7 additions & 0 deletions _news/20240129_ICRA.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
layout: post
date: 2024-01-29 10:00:00-0400
inline: true
---

A paper about "[RadarCam-Depth: Radar-Camera Fusion for Depth Estimation with Learned Metric Scale](https://arxiv.org/abs/2401.04325)" is accepted by ICRA!
2 changes: 1 addition & 1 deletion _pages/about.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ years: [2023, 2022, 2021, 2020, 2019, 2018, 2017]

I'm a Postdoctoral Research Associate at the Department of Computing + Mathematical Sciences, <a href="https://www.cms.caltech.edu/"> Caltech</a> since January 2024. Prior to this, I worked at Google in the USA as a full-time Visiting Faculty Researcher (Scientist) in 2023. I was a Postdoctoral Researcher (2021 - 2023) at the School of Computation, Information and Technology, <a href="https://www.tum.de/en">Technical University of Munich</a>, Germany, working with [Prof. Stefan Leutenegger](https://www.professoren.tum.de/en/leutenegger-stefan). I was an academic guest (2019 - 2021) in the [Computer Vision and Geometry Group](https://cvg.ethz.ch/) (CVG) at ETH Zurich, Switzerland, working with [Prof. Marc Pollefeys](http://people.inf.ethz.ch/pomarc/). I was also a visiting scholar (2018) in the [Robot Perception and Navigation Group](http://sites.udel.edu/robot) (RPNG) of [Prof. Guoquan (Paul) Huang](https://udel.edu/~ghuang/) at the University of Delaware, USA, and a visiting scholar (2017) in the [UTS Robotics Institute](https://www.uts.edu.au/research/robotics-institute), University of Technology Sydney, Australia, working with [Prof. Teresa Vidal Calleja](https://profiles.uts.edu.au/Teresa.VidalCalleja). I received my doctoral degree in 2021 from the Department of Control Science and Engineering at [Zhejiang University](https://www.zju.edu.cn/english/), China, under the co-supervision of [Prof. Guoquan (Paul) Huang](https://udel.edu/~ghuang/) and [Prof. Yong Liu](https://april.zju.edu.cn/team/dr-yong-liu/), and graduated with honors. Previously, I graduated with honors and obtained my Bachelor's degree in Mechanical Engineering at the [University of Electronic Science and Technology of China](https://en.uestc.edu.cn/) (UESTC), Chengdu, China, in 2016.

I'm open to collaborations on cutting-edge research about robotic perception and intelligence, and co-supervise highly-qualified students. Feel free to reach out through [email](mailto:[email protected])!
**I'm open to collaborations on cutting-edge research about robotic perception and intelligence, and to co-supervising highly-qualified students. Feel free to reach out through [email](mailto:[email protected])!**

<div class="row align-items-center">
<div class="col-sm mt-3 mt-md-0">
Expand Down

0 comments on commit d18fe06

Please sign in to comment.