@inproceedings{6c8a344f3c5e43198f1a9b95b841220e,
  title     = {{InCLET}: Large Language Model In-context Learning can Improve Embodied Instruction-following},
  author    = {Wang, {Peng Yuan} and Pang, {Jing Cheng} and Wang, {Chen Yang} and Liu, Xuhui and Liu, {Tian Shuo} and Yang, {Si Hang} and Qian, Hong and Yu, Yang},
  editor    = {Vorobeychik, Yevgeniy and Das, Sanmay and Now{\'e}, Ann},
  booktitle = {Proceedings of the 24th International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2025},
  series    = {Proceedings of the International Joint Conference on Autonomous Agents and Multiagent Systems, AAMAS},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
  address   = {Richland, SC},
  year      = {2025},
  pages     = {2134--2142},
  language  = {English},
  keywords  = {Embodiment Agent, In-context Learning, Reinforcement Learning},
  abstract  = {Natural language-conditioned reinforcement learning (NLC-RL) empowers embodied agent to complete various tasks following human instruction. However, the unbounded natural language examples still introduce much complexity for the agent that solves concrete RL tasks, which can distract policy learning from completing the task. Consequently, extracting effective task representation from human instruction emerges as the critical component of NLC-RL. While previous methods have attempted to address this issue by learning task-related representation using large language models (LLMs), they highly rely on pre-collected task data and require extra training procedure. In this study, we uncover the inherent capability of LLMs to generate task representations and present a novel method, in-context learning embedding as task representation (InCLET). InCLET is grounded on a foundational finding that LLM in-context learning using trajectories can greatly help represent tasks. We thus firstly employ LLM to imagine task trajectories following the natural language instruction, then use in-context learning of LLM to generate task representations, and finally aggregate and project into a compact low-dimensional task representation. This representation is then used to train a human instruction-following agent. We conduct experiments on various embodied control environments and results show that InCLET creates effective task representations. Furthermore, this representation can significantly improve the RL training efficiency, compared to the baseline methods.},
  note      = {Publisher Copyright: {\textcopyright} 2025 International Foundation for Autonomous Agents and Multiagent Systems (www.ifaamas.org).; 24th International Conference on Autonomous Agents and Multiagent Systems, AAMAS 2025 ; Conference date: 19-05-2025 Through 23-05-2025},
}