people:beetz
% Publications of Beetz
% Encoding: utf-8
% Book chapter: was mis-typed @article with the containing book's title in "journal".
% NOTE(review): citation key retained unchanged (including its "Srpinger" typo) because
% existing documents may cite it.
@incollection{Beetz2015RoboSherlockSrpinger,
  author    = {Beetz, Michael and B{\'a}lint-Bencz{\'e}di, Ferenc and Blodow, Nico and Kerl, Christian and M{\'a}rton, Zolt{\'a}n-Csaba and Nyga, Daniel and Seidel, Florian and Wiedemeyer, Thiemo and Worch, Jan-Hendrik},
  editor    = {Busoniu, Lucian and Tam{\'a}s, Levente},
  title     = {{RoboSherlock}: Unstructured Information Processing Framework for Robotic Perception},
  booktitle = {Handling Uncertainty and Networked Structure in Robot Control},
  year      = {2015},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {181--208},
  isbn      = {978-3-319-26327-4},
  doi       = {10.1007/978-3-319-26327-4_8},
  url       = {http://dx.doi.org/10.1007/978-3-319-26327-4_8}
}
% ICRA 2015 conference paper (award winner, see note). Whole-title double-bracing
% removed (it defeats style casing); only the system name is brace-protected.
% Author accents restored to match the sibling entry Beetz2015RoboSherlockSrpinger.
@inproceedings{beetz15robosherlock,
  title = {{RoboSherlock}: Unstructured Information Processing for Robot Perception},
  author = {Michael Beetz and Ferenc B{\'a}lint-Bencz{\'e}di and Nico Blodow and Daniel Nyga and Thiemo Wiedemeyer and Zolt{\'a}n-Csaba M{\'a}rton},
  year = {2015},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  address = {Seattle, Washington, USA},
  note = {Best Service Robotics Paper Award},
  keywords = {openease_perception},
}
% AIPS-92 conference paper. Fixes: page range uses -- (en-dash); the publisher
% (Morgan Kaufmann) was misfiled in ADDRESS; whole-title double-bracing removed.
@inproceedings{Bee92Dec,
  author = {M. Beetz and D. McDermott},
  title = {Declarative Goals in Reactive Plans},
  booktitle = {First International Conference on AI Planning Systems},
  pages = {3--12},
  year = {1992},
  editor = {J. Hendler},
  publisher = {Morgan Kaufmann},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control}
}
% AIPS-94 conference paper. Fixes: page range uses --; publisher was misfiled
% in ADDRESS.
@inproceedings{Bee94Imp,
  author = {M. Beetz and D. McDermott},
  title = {Improving Robot Plans During Their Execution},
  booktitle = {Second International Conference on AI Planning Systems},
  pages = {3--12},
  year = {1994},
  editor = {K. Hammond},
  publisher = {Morgan Kaufmann},
  internal-note = {NOTE(review): pages identical to Bee92Dec (3--12) -- verify against the proceedings},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control}
}
% PhD thesis (Yale, 1996); TYPE overrides the default "PhD thesis" label with the
% technical-report designation.
@phdthesis{Bee96Ant,
  author = {M. Beetz},
  title = {Anticipating and Forestalling Execution Failures in Structured Reactive Plans},
  school = {Yale University},
  year = {1996},
  type = {Technical Report, YALE/DCS/RR1097},
  bib2html_pubtype = {ignore},
  bib2html_rescat = {ignore}
}
% AAAI Fall Symposium paper, 1996 (marked "ignore" for the bib2html listing).
@inproceedings{Bee96Exe,
  author = {M. Beetz and D. McDermott},
  title = {Executing Structured Reactive Plans},
  booktitle = {AAAI Fall Symposium: Issues in Plan Execution},
  year = {1996},
  editor = {L. Pryor and S. Steel},
  bib2html_pubtype = {ignore},
  bib2html_rescat = {ignore}
}
% AIPS-96 conference paper. Fixes: page range uses --; publisher was misfiled
% in ADDRESS.
@inproceedings{Bee96RTP,
  author = {M. Beetz and D. McDermott},
  title = {Local Planning of Ongoing Activities},
  booktitle = {Third International Conference on AI Planning Systems},
  pages = {19--26},
  year = {1996},
  editor = {Brian Drabble},
  publisher = {Morgan Kaufmann},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control}
}
% ECP-97 paper. Fixes: page range uses --; mixed quote/brace delimiters
% normalized to braces.
@inproceedings{Bee97Exp,
  author = {M. Beetz and D. McDermott},
  title = {Expressing Transformations of Structured Reactive Plans},
  booktitle = {Recent Advances in AI Planning. Proceedings of the 1997 European Conference on Planning},
  publisher = {Springer Publishers},
  year = {1997},
  pages = {64--76},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control}
}
% ECP-97 paper (companion to Bee97Exp, same proceedings). Fixes: page range
% uses --; mixed quote/brace delimiters normalized to braces.
@inproceedings{Bee97FPPD,
  author = {M. Beetz and D. McDermott},
  title = {Fast Probabilistic Plan Debugging},
  booktitle = {Recent Advances in AI Planning. Proceedings of the 1997 European Conference on Planning},
  publisher = {Springer Publishers},
  year = {1997},
  pages = {77--90},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control}
}
% AIPS-98 conference paper. Fixes: page range uses --; publisher was misfiled
% in ADDRESS.
@inproceedings{Bee98Cau,
  author = {M. Beetz and H. Grosskreutz},
  title = {Causal Models of Mobile Service Robot Behavior},
  booktitle = {Fourth International Conference on AI Planning Systems},
  year = {1998},
  pages = {163--170},
  editor = {R. Simmons and M. Veloso and S. Smith},
  publisher = {Morgan Kaufmann},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control}
}
% CVMR-98 workshop paper introducing the RECIPE image-processing system
% (see also the follow-up IROS-99 entries Arb99Con and Arb99Ext).
@InProceedings{Arb98Sys,
author = {Tom Arbuckle and Michael Beetz},
title = {{RECIPE} - A System for Building Extensible, Run-time Configurable, Image Processing Systems},
booktitle = {Proceedings of Computer Vision and Mobile Robotics (CVMR) Workshop},
pages = {91--98},
year = {1998},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Vision},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Vision},
abstract = {This paper describes the design, and implementation of RECIPE, an extensible, run-time
configurable, image capture and processing system specifically designed for use with robotic
systems and currently under active development here at Bonn. Robotic systems, particularly
autonomous robotic systems, present both challenges and opportunities to the implementors of their
vision systems. On the one hand, robotic systems constrain the vision systems in terms of their
available resources and in the specific form of the hardware to be employed. On the other hand,
intelligent processes can employ sensory input to modify the image capture and image processing to
fit the current context of the robot. RECIPE meets these challenges while facilitating the modular
development of efficient image processing operations. Implementing all of its functionality
within a platform and compiler neutral framework as scriptable, active objects which are
dynamically loaded at run-time, RECIPE provides a common basis for the development of image
processing systems on robots. At the same time, it permits the image processing operations being
employed by the robot system to be monitored and adjusted according to all of the sensory
information available to the robot, encouraging the deployment of efficient, context specific,
algorithms. Finally, it has been designed to encourage robust, fault-tolerant approaches to the
action of image processing.}
}
% AIPS-98 workshop paper; the volume field holds the workshop-notes number (98-02),
% not a proceedings volume.
@InProceedings{Bee98Pla,
author = {Michael Beetz and Maren Bennewitz},
title = {Planning, Scheduling, and Plan Execution for Autonomous Robot Office Couriers},
booktitle = {Proceedings of the workshop ``Integrating Planning, Scheduling and Execution in Dynamic and Uncertain Environments'' at the Fourth International Conference on AI in Planning Systems (AIPS)},
editor = {R. Bergmann and A. Kott},
volume = {Workshop Notes 98-02},
year = {1998},
publisher = {AAAI Press},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Plan-based Robot Control},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Robot, Planning},
abstract = {Scheduling the tasks of an autonomous robot office courier and carrying out the scheduled tasks
reliably and efficiently pose challenging problems for autonomous robot control. The controller has
to accomplish longterm efficiency rather than optimize problem-solving episodes. It also has to
exploit opportunities and avoid problems flexibly because often the robot is forced to generate
schedules based on partial information. We propose to implement the controller for scheduled
activity by employing concurrent reactive plans that reschedule the course of action whenever
necessary and while performing their actions. The plans are represented modularly and transparently
to allow for easy transformation. Scheduling and schedule repair methods are implemented as plan
transformation rules.}
}
% ECAI-98 conference paper. Fix: "Procs." abbreviation expanded to "Proceedings"
% for consistency with the venue naming used elsewhere in this file.
@inproceedings{Bee98Tra,
  author = {Michael Beetz and Tom Arbuckle and Armin Cremers and Markus Mann},
  title = {Transparent, Flexible, and Resource-adaptive Image Processing for Autonomous Service Robots},
  booktitle = {Proceedings of the 13th European Conference on Artificial Intelligence (ECAI-98)},
  editor = {H. Prade},
  year = {1998},
  pages = {632--636},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, Robot Vision},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning, Vision},
  abstract = {We present the design of a programming system for IP routines which satisfies the requirements
above. Our solution consists of RECIPE, a dynamically loadable, modular architecture in a
distributed robot control system that provides the basic IP functionality and manages images
and other IP data structures. It provides a variety of standard IP routines such as edge detectors,
convolutions, noise reduction, segmentation, etc. RPLIP, an extension of the abstract machine
provided by the robot control/plan language RPL. RPLIP provides suitable abstractions for images,
regions of interest, etc, and supports a tight integration of the vision routines into the robot
control system. Image Processing Plans that provide various methods for combining IP methods into
IP pipelines. IP plans support the implementation of robust vision routines and the integration of
other sensors such as laser range finders and sonars for object recognition tasks and scene
analysis. Since vision routines are RPL programs, they can be constructed, revised, and reasoned
about while the robot control program is being executed.}
}
% KI-98 conference paper; delimiters normalized to braces, field values unchanged.
@inproceedings{Bee98Str,
  author = {Michael Beetz and Hanno Peters},
  title = {Structured Reactive Communication Plans --- Integrating Conversational Actions into High-level Robot Control Systems},
  booktitle = {Proceedings of the 22nd German Conference on Artificial Intelligence (KI 98), Bremen, Germany},
  year = {1998},
  publisher = {Springer Verlag},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning}
}
% Journal article (Robotics and Autonomous Systems, 1998); delimiters normalized
% to braces, field values unchanged.
@article{Bee98Int,
  author = {Michael Beetz and Wolfram Burgard and Dieter Fox and Armin Cremers},
  title = {Integrating Active Localization into High-level Control Systems},
  journal = {Robotics and Autonomous Systems},
  volume = {23},
  pages = {205--220},
  year = {1998},
  bib2html_pubtype = {Journal},
  bib2html_rescat = {Plan-based Robot Control, State Estimation},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning, State Estimation}
}
% IROS-99 paper on RECIPE. NOTE(review): Arb99Ext below cites the same authors,
% venue, and year with a near-identical topic -- check whether these are duplicates.
@inproceedings{Arb99Con,
author = {Tom Arbuckle and Michael Beetz},
title = {Controlling Image Processing: Providing Extensible, Run-time Configurable Functionality on Autonomous Robots},
booktitle = {Proceedings of the 1999 IEEE/RSJ International Conference on Intelligent Robots and Systems},
year = {1999},
volume = {2},
pages = {787--792},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Vision},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Vision},
abstract = {The dynamic nature of autonomous robots' tasks requires that their image processing operations are
tightly coupled to those actions within their control systems which require the visual information.
While there are many image processing libraries that provide the raw image processing functionality
required for autonomous robot applications, these libraries do not provide the additional
functionality necessary for transparently binding image processing operations within a robot's
control system. In particular such libraries lack facilities for process scheduling, sequencing,
concurrent execution and resource management. The paper describes the design and implementation of
an enabling extensible system-RECIPE-for providing image processing functionality in a form that is
convenient for robot control together with concrete implementation examples}
}
% IJCAI-99 workshop paper; the note field repeats the workshop name already given
% in booktitle. NOTE(review): abstract names the system both "Xfrml" and
% "XfrmLearn" -- presumably the same system; confirm against the paper.
@InProceedings{Bee99Exp,
author = {Michael Beetz and Thorsten Belker},
title = {Experience- and Model-based Transformational Learning of Symbolic Behavior Specifications},
booktitle = {Proceedings of the IJCAI Workshop on Robot Action Planning},
note = {IJCAI Workshop on Robot Action Planning},
year = {1999},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Robot Learning},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Learning},
abstract = {The paper describes Xfrml, a system that learns symbolic behavior specifications to control and
improve the continuous sensor-driven navigation behavior of an autonomous mobile robot. The robot
is to navigate between a set of predefined locations in an office environment and employs a
navigation system consisting of a path planner and a reactive collision avoidance system. XfrmLearn
rationally reconstructs the continuous sensor-driven navigation behavior in terms of task
hierarchies by identifying significant structures and commonalities in behaviors. It also
constructs a statistical behavior model for typical navigation tasks. The behavior model together
with a model of how the collision avoidance module should "perceive" the environment is used to
detect behavior "flaws", diagnose them, and revise the plans to improve their performance. The
learning method is implemented on an autonomous mobile robot.}
}
% IROS-99 paper. Fixes: empty abstract and bib2html_groups fields removed (empty
% fields trigger BibTeX warnings and carry no information); continuation lines
% joined; year braced for consistency.
@inproceedings{Arb99Ext,
  author = {T. Arbuckle and M. Beetz},
  title = {Extensible, Runtime-configurable Image Processing on Robots --- the {RECIPE} system},
  booktitle = {Proceedings of the 1999 IEEE/RSJ International Conference on Intelligent Robots and Systems},
  year = {1999},
  internal-note = {NOTE(review): same authors, venue, and year as Arb99Con -- check for duplicate entries},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Robot Vision}
}
% KI-99 conference paper; delimiters normalized to braces, field values unchanged.
@inproceedings{Bee99Pro,
  author = {Michael Beetz and Maren Bennewitz and Henrik Grosskreutz},
  title = {Probabilistic, Prediction-based Schedule Debugging for Autonomous Robot Office Couriers},
  booktitle = {Proceedings of the 23rd German Conference on Artificial Intelligence (KI 99)},
  address = {Bonn, Germany},
  year = {1999},
  publisher = {Springer Verlag},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning},
  abstract = {Acting efficiently and meeting deadlines requires autonomous robots to schedule their activities.
It also requires them to act flexibly: to exploit opportunities and avoid problems as they occur.
Scheduling activities to meet these requirements is an important research problem in its own right.
In addition, it provides us with a problem domain where modern symbolic AI planning techniques can
enable robots to exhibit better performance than they possibly could without planning. This paper
describes PPSD, a novel planning technique that enables autonomous robots to impose order
constraints on concurrent percept-driven plans to increase the plans' efficiency. The basic idea is
to generate a schedule under simplified conditions and then to iteratively detect, diagnose, and
eliminate behavior flaws caused by the schedule based on a small number of randomly sampled
symbolic execution scenarios. The paper discusses the integration of PPSD into the controller of
an autonomous robot office courier and gives an example of its use.}
}
% Agents-99 conference paper (award winner per bib2html_pubtype). Fixes:
% single-letter brace protection ({R}eactive) replaced by whole-word bracing,
% which avoids kerning/hyphenation problems; Title Case restored ("Computational").
@inproceedings{Bee99Str,
  author = {Michael Beetz},
  title = {Structured {Reactive} {Controllers} --- A Computational Model of Everyday Activity},
  booktitle = {Proceedings of the Third International Conference on Autonomous Agents},
  editor = {O. Etzioni and J. M{\"u}ller and J. Bradshaw},
  pages = {228--235},
  year = {1999},
  bib2html_pubtype = {Award Winner, Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning}
}
% ICRA-99 conference paper. Fix: "3D" brace-protected in the title so sentence-casing
% styles cannot downcase it to "3d".
@inproceedings{Bee99Sem,
  author = {Michael Beetz and Markus Giesenschlag and Roman Englert and Eberhard G{\"u}lch and Armin Cremers},
  title = {Semi-automatic Acquisition of Symbolically-annotated {3D} Models of Office Environments},
  booktitle = {International Conference on Robotics and Automation (ICRA-99)},
  year = {1999},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, Robot Vision},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning, Vision}
}
% Journal article (KI special issue on autonomous robots, 2000).
% NOTE(review): volume/number/pages are not recorded -- add if known.
@Article{Bee00Ena,
author = {Michael Beetz and Tom Arbuckle and Thorsten Belker and Maren Bennewitz and Armin Cremers and Dirk H{\"a}hnel and Dirk Schulz},
title = {Enabling Autonomous Robots to Perform Complex Tasks},
journal = {KI - K{\"u}nstliche Intelligenz; Special Issue on Autonomous Robots},
year = {2000},
bib2html_pubtype = {Journal},
bib2html_rescat = {Plan-based Robot Control},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Planning, Robot},
abstract = {Recent extensions of the RHINO control system, a system for controlling autonomous mobile robots,
have further enhanced its ability to perform complex, dynamically changing, tasks. We present an
overview of the extended RHINO system, sketching the functionality of its main components and their
inter-relationships as well as long-term experiments demonstrating the practicality of its
approach. Pointers are also provided to the detailed technical references.}
}
% Springer LNAI monograph. Fix: volume is a plain number; the "LNAI" prefix
% duplicated the series field and would render as "volume LNAI 1772 of Lecture
% Notes in Artificial Intelligence".
@book{Bee00Con,
  author = {Michael Beetz},
  title = {Concurrent Reactive Plans: Anticipating and Forestalling Execution Failures},
  publisher = {Springer Publishers},
  year = {2000},
  volume = {1772},
  series = {Lecture Notes in Artificial Intelligence},
  bib2html_pubtype = {Book},
  bib2html_rescat = {Robot Learning, Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Planning, Robot, Learning}
}
% ECAI-2000 conference paper. Fix: "Procs." abbreviation expanded to "Proceedings"
% for consistency with the venue naming used elsewhere in this file.
@inproceedings{Bee00Env,
  author = {Michael Beetz and Thorsten Belker},
  title = {Environment and Task Adaptation for Robotic Agents},
  booktitle = {Proceedings of the 14th European Conference on Artificial Intelligence (ECAI-2000)},
  editor = {W. Horn},
  year = {2000},
  pages = {648--652},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Robot Learning},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Learning},
  abstract = {This paper investigates the problem of improving the performance of general state-of-the-art robot
control systems by autonomously adapting them to specific tasks and environments. We propose model-
and test-based transformational learning (MTTL) as a computational model for performing this task.
MTTL uses abstract models of control systems and environments in order to propose promising
adaptations. To account for model deficiencies resulting from abstraction, hypotheses are
statistically tested based on experimentation in the physical world.
We describe XfrmLearn, an implementation of MTTL, and apply it to the problem of indoor navigation.
We present experiments in which XfrmLearn improves the navigation performance of a state-of-the-art
high-speed navigation system for a given set of navigation tasks by up to 44 percent.}
}
% SIRS 2000 symposium paper. NOTE(review): editor field holds a bare surname
% ("Ferryman") -- the full name should be confirmed and added.
@InProceedings{Bee00Lea,
author = {Michael Beetz and Thorsten Belker},
title = {Learning Structured Reactive Navigation Plans from Executing MDP Navigation Policies},
booktitle = {8th International Symposium on Intelligent Robotic Systems, SIRS 2000},
editor = "Ferryman",
year = {2000},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Robot Learning},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Robot, Learning},
abstract = {Autonomous robots, such as robot office couriers, need navigation routines that support flexible
task execution and effective action planning. This paper describes XfrmLearn, a system that learns
structured symbolic navigation plans. Given a navigation task, XfrmLearn learns to structure
continuous navigation behavior and represents the learned structure as compact and transparent
plans. The structured plans are obtained by starting with monolithic default plans that are
optimized for average performance and adding subplans to improve the navigation performance for the
given task. Compactness is achieved by incorporating only subplans that achieve significant
performance gains. The resulting plans support action planning and opportunistic task execution.
XfrmLearn is implemented and extensively evaluated on an autonomous mobile robot.}
}
% IJRR journal article on the Minerva tour-guide robot. Fix: garbled word
% "successful ly" in the abstract repaired; delimiters normalized to braces.
% NOTE(review): volume/number/pages are not recorded -- add if known.
@article{Thr00Prob,
  author = {Sebastian Thrun and Michael Beetz and Maren Bennewitz and Armin Cremers and Frank Dellaert
and Dieter Fox and Dirk H{\"a}hnel and Charles Rosenberg and Nicholas Roy and Jamieson Schulte and Dirk Schulz},
  title = {Probabilistic Algorithms and the Interactive Museum Tour-Guide Robot {M}inerva},
  journal = {International Journal of Robotics Research},
  year = {2000},
  bib2html_pubtype = {Journal},
  bib2html_rescat = {Plan-based Robot Control, State Estimation},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning},
  abstract = {This paper describes Minerva, an interactive tour-guide robot that was successfully deployed in a
Smithsonian museum. Minerva's software is pervasively probabilistic, relying on explicit
representations of uncertainty in perception and control. This article describes Minerva's major
software components, and provides a comparative analysis of the results obtained in the Smithsonian
museum. During two weeks of highly successful operation, the robot interacted with thousands of
people, both in the museum and through the Web, traversing more than 44km at speeds of up to
163 cm/sec in the unmodified museum.}
}
% Habilitation thesis (per the note field). Fix: add a type field so styles label
% it "Habilitationsschrift" instead of the default "PhD thesis".
@phdthesis{Bee00Pla,
  author = {Michael Beetz},
  title = {Plan-based Control of Robotic Agents},
  school = {University of Bonn},
  year = {2000},
  type = {Habilitationsschrift},
  note = {Habilitationsschrift, eingereicht im Oktober 2000.},
  bib2html_pubtype = {Others},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning}
}
% ECAI-2000 workshop paper; delimiters normalized to braces, field values unchanged.
@inproceedings{Bee00Per,
  author = {Michael Beetz and J{\"u}rgen Schumacher and Armin Cremers and Bernd Hellingrath and Christian Mazzocco},
  title = {Perspectives on Plan-based Multiagent Systems for Distributed Supply Chain Management in the Steel Industry},
  year = {2000},
  booktitle = {Proceedings of the ECAI2000 Workshop on Agent Technologies and Their Application Scenarios in Logistics},
  editor = {I. Timm},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning}
}
% AIPS-2000 conference paper. Fix: missing space in the abstract ("plans.PHAMs");
% delimiters normalized to braces.
@inproceedings{Bee00Pro,
  author = {Michael Beetz and Henrik Grosskreutz},
  title = {Probabilistic Hybrid Action Models for Predicting Concurrent Percept-driven Robot Behavior},
  booktitle = {Proceedings of the Sixth International Conference on AI Planning Systems},
  year = {2000},
  publisher = {AAAI Press},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning},
  abstract = {This paper develops Probabilistic Hybrid Action Models (PHAMs), a realistic causal model for
predicting the behavior generated by modern concurrent percept-driven robot plans. PHAMs
represent aspects of robot behavior that cannot be represented by most action models used in AI
planning: the temporal structure of continuous control processes, their non-deterministic effects,
and several modes of their interferences. The main contributions of the paper are: (1) PHAMs, a
model of concurrent percept-driven behavior, its formalization, and proofs that the model generates
probably, qualitatively accurate predictions; and (2) a resource-efficient inference method for
PHAMs based on sampling projections from probabilistic action models and state descriptions. We
discuss how PHAMs can be applied to planning the course of action of an autonomous robot office
courier based on analytical and experimental results.}
}
% Agents-2000 conference paper; layout normalized, field values unchanged.
@inproceedings{Bee00Run,
  author = {Michael Beetz},
  title = {Runtime Plan Adaptation in Structured Reactive Controllers},
  booktitle = {Proceedings of the Fourth International Conference on Autonomous Agents},
  editor = {M. Gini and J. Rosenschein},
  year = {2000},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control},
  bib2html_groups = {IAS},
  bib2html_funding = {ignore},
  bib2html_keywords = {Robot, Planning}
}
% IROS 2001 conference paper. Fixes: hyphenation artifact "autono-mous" in the
% abstract repaired; missing comma in the keywords list (cf. Sch01Coo3);
% delimiters normalized to braces.
@inproceedings{Sch01Coo1,
  author = {Thorsten Schmitt and Robert Hanek and Sebastian Buck and Michael Beetz},
  title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous Mobile Robots},
  booktitle = {Proc. of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  address = {Maui, Hawaii},
  pages = {1630--1638},
  year = {2001},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, State Estimation},
  bib2html_groups = {IAS, AGILO},
  bib2html_funding = {AGILO},
  bib2html_keywords = {Robot, State Estimation, Vision},
  abstract = {With the services that autonomous robots are to provide becoming more demanding, the states that
the robots have to estimate become more complex. In this paper, we develop and analyze a
probabilistic, vision-based state estimation method for individual, autonomous robots. This method
enables a team of mobile robots to estimate their joint positions in a known environment and track
the positions of autonomously moving objects. The state estimators of different robots cooperate to
increase the accuracy and reliability of the estimation process. This cooperation between the
robots enables them to track temporarily occluded objects and to faster recover their position
after they have lost track of it. The method is empirically validated based on experiments with a
team of physical robots.}
}
% DAGM 2001 (LNCS 2191) paper. Fixes: whole-title double-bracing removed; missing
% comma in the keywords list (cf. Sch01Coo3); delimiters normalized to braces.
@inproceedings{Sch01Coo2,
  author = {Thorsten Schmitt and Robert Hanek and Sebastian Buck and Michael Beetz},
  title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous Soccer Robots},
  booktitle = {DAGM Symposium},
  volume = {2191},
  year = {2001},
  publisher = {Springer},
  pages = {321--328},
  series = {Lecture Notes in Computer Science},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {State Estimation, Robot Vision},
  bib2html_groups = {IAS, AGILO},
  bib2html_funding = {AGILO},
  bib2html_keywords = {Robot, State Estimation, Vision}
}
% RoboCup Symposium 2001 paper. Fixes: title typo "fo" -> "for"; hyphenation
% artifact "autono-mous" in the abstract repaired.
@inproceedings{Sch01Coo3,
  author = {Thorsten Schmitt and Robert Hanek and Sebastian Buck and Michael Beetz},
  title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous Soccer Robots},
  booktitle = {RoboCup International Symposium 2001},
  address = {Seattle, USA},
  year = {2001},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, State Estimation},
  bib2html_groups = {IAS, AGILO},
  bib2html_funding = {AGILO},
  bib2html_keywords = {Robot, State Estimation, Vision},
  abstract = {With the services that autonomous robots are to provide becoming more demanding, the states that
the robots have to estimate become more complex. In this paper, we develop and analyze a
probabilistic, vision-based state estimation method for individual, autonomous robots. This method
enables a team of mobile robots to estimate their joint positions in a known environment and track
the positions of autonomously moving objects. The state estimators of different robots cooperate to
increase the accuracy and reliability of the estimation process. This cooperation between the
robots enables them to track temporarily occluded objects and to faster recover their position
after they have lost track of it. The method is empirically validated based on experiments with a
team of physical robots.}
}
% RoboCup 2001 workshop paper; layout normalized, field values unchanged.
@inproceedings{Buc01Pla,
  author = {Sebastian Buck and Michael Beetz and Thorsten Schmitt},
  title = {Planning and Executing Joint Navigation Tasks in Autonomous Robot Soccer},
  booktitle = {5th International Workshop on RoboCup (Robot World Cup Soccer Games and Conferences)},
  year = {2001},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, Robocup},
  bib2html_groups = {IAS, AGILO},
  bib2html_funding = {AGILO},
  bib2html_keywords = {Planning, Robot}
}
% IROS 2001 paper. Fixes: title typo "Evironments" -> "Environments"; Title Case
% restored for "A Case Study". NOTE(review): "U. Weber" is initials-only, unlike
% the other names -- full given name should be confirmed.
@inproceedings{Buc01Mul,
  author = {Sebastian Buck and U. Weber and Michael Beetz and Thorsten Schmitt},
  title = {Multi Robot Path Planning for Dynamic Environments: A Case Study},
  booktitle = {Proc. of the IEEE Intl. Conf. on Intelligent Robots and Systems},
  year = {2001},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, Robot Learning},
  bib2html_groups = {IAS, AGILO},
  bib2html_funding = {AGILO},
  bib2html_keywords = {Planning, Robot}
}
% RoboCup 2001 team-description paper. Fixes: extraction artifacts in the abstract
% repaired -- a stray footnote marker ("RoboCuppers 1 the" -> "RoboCuppers, the")
% and a garbled token ("M{\"u}nchen. a? With" -> "M{\"u}nchen. With").
@inproceedings{Sch01AGI,
  author = {Thorsten Schmitt and Sebastian Buck and Michael Beetz},
  title = {{AGILO} {RoboCuppers} 2001: Utility- and Plan-based Action Selection based on Probabilistically Estimated Game Situations},
  booktitle = {5th International Workshop on RoboCup (Robot World Cup Soccer Games and Conferences)},
  year = {2001},
  publisher = {Springer Verlag},
  editor = {P. Stone and T. Balch and G. Kraetzschmar},
  series = {Lecture Notes in Computer Science},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Plan-based Robot Control, State Estimation},
  bib2html_groups = {IAS, AGILO},
  bib2html_funding = {AGILO},
  bib2html_keywords = {Planning, Robot, State Estimation},
  abstract = {This paper describes the AGILO RoboCuppers, the RoboCup team of the image understanding group (FG
BV) at the Technische Universit{\"a}t M{\"u}nchen. With a team of four Pioneer I robots, all
equipped with CCD camera and a single board computer, we've participated in all international
middle size league tournaments from 1998 until 2001. We use a modular approach of concurrent
subprograms for image processing, self localization, object tracking, action selection, path
planning and basic robot control. A fast feature extraction process provides the data necessary for
the on-board scene interpretation. All robot observations are fused into a single environmental
model, which forms the basis for action selection, path planning and low-level robot control.}
}
% Agents 2001 short paper (2 pages). NOTE(review): abstract is identical to
% Bee00Lea, which reports the same XfrmLearn system at SIRS 2000.
@InProceedings{Bee01Lea,
author = {Michael Beetz and Thorsten Belker},
title = {Learning Structured Reactive Navigation Plans from Executing MDP policies},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {19--20},
year = {2001},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Robot Learning},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Robot, Learning},
abstract = {Autonomous robots, such as robot office couriers, need navigation routines that support flexible
task execution and effective action planning. This paper describes XfrmLearn, a system that learns
structured symbolic navigation plans. Given a navigation task, XfrmLearn learns to structure
continuous navigation behavior and represents the learned structure as compact and transparent
plans. The structured plans are obtained by starting with monolithic default plans that are
optimized for average performance and adding subplans to improve the navigation performance for the
given task. Compactness is achieved by incorporating only subplans that achieve significant
performance gains. The resulting plans support action planning and opportunistic task execution.
XfrmLearn is implemented and extensively evaluated on an autonomous mobile robot.}
}
% KI-01 conference paper; address holds the conference venue (Wien), following
% the convention used throughout this file.
@InProceedings{Bel01Lea,
author = {Thorsten Belker and Michael Beetz},
title = {Learning to Execute Robot Navigation Plans},
booktitle = {Proceedings of the 25th German Conference on Artificial Intelligence (KI 01)},
year = {2001},
address = {Wien, Austria},
publisher = "Springer Verlag",
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Robot Learning, Plan-based Robot Control},
bib2html_groups = {IAS},
bib2html_funding = {ignore},
bib2html_keywords = {Robot, Learning},
abstract = {Most state-of-the-art navigation systems for autonomous service robots decompose navigation into
global navigation planning and local reactive navigation. While the methods for navigation planning
and local navigation are well understood, the plan execution problem, the problem of how to
generate and parameterize local navigation tasks from a given navigation plan, is largely unsolved.
This article describes how a robot can autonomously learn to execute navigation plans. We formalize
the problem as a Markov Decision Problem (MDP), discuss how it can be simplified to make its
solution feasible, and describe how the robot can acquire the necessary action models. We show,
both in simulation and on a RWI B21 mobile robot, that the learned models are able to produce
competent navigation behavior.}
}
@article{Bee01Int,
  author  = {Michael Beetz and Tom Arbuckle and Maren Bennewitz and Wolfram Burgard and Armin Cremers and Dieter Fox and Henrik Grosskreutz and Dirk H{\"a}hnel and Dirk Schulz},
  title   = {Integrated Plan-based Control of Autonomous Service Robots in Human Environments},
  journal = {{IEEE} Intelligent Systems},
  volume  = {16},
  number  = {5},
  pages   = {56--65},
  year    = {2001},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning},
  abstract = {The authors extend the Rhino robot by adding the means for plan-based high-level control and plan
transformation, further enhancing its probabilistic reasoning capabilities. The result: an
autonomous robot capable of accomplishing prolonged, complex, and dynamically changing tasks in the
real world.}
}
@article{Bee01Pla,
  author  = {Michael Beetz},
  title   = {Plan Management for Robotic Agents},
  journal = {KI - K{\"u}nstliche Intelligenz; Special Issue on Planning and Scheduling},
  volume  = {15},
  number  = {2},
  year    = {2001},
  pages   = {12--17},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning},
  abstract = {Autonomous robots that perform complex jobs in changing environments must be capable of managing
their plans as the environmental conditions or their tasks change. This raises the problem of
deciding whether, when, where, and how to revise the plans as the robots' beliefs change. This
article investigates an approach to execution time plan management in which the plans themselves
specify the plan adaptation processes. In this approach the robot makes strategical (farsighted)
adaptations while it executes a plan using tactical (immediate) decisions and overwrites tactical
adaptations after strategical decisions have been reached (if necessary). We present experiments in
which the plan adaptation technique is used for the control of two autonomous mobile robots. In one
of them it controlled the course of action of a museums tourguide robot that has operated for
thirteen days and performed about 3200 plan adaptations reliably.}
}
@inproceedings{Bee01Run,
  author    = {Michael Beetz},
  title     = {Runtime Plan Adaptation in Structured Reactive Controllers},
  booktitle = {Proceedings of the Fourth International Conference on Autonomous Agents},
  year      = {2001},
  editor    = {E. Andre and S. Sen},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning},
}
@article{Bee01Str,
  author    = {Michael Beetz},
  title     = {Structured {Reactive} {Controllers}},
  journal   = {Journal of Autonomous Agents and Multi-Agent Systems. Special Issue: Best Papers of the International Conference on Autonomous Agents '99},
  publisher = {Kluwer Academic Publishers},
  month     = mar # "/" # jun,
  year      = {2001},
  volume    = {4},
  pages     = {25--55},
  bib2html_pubtype  = {Journal, Award Winner},
  bib2html_rescat   = {Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning},
}
@inproceedings{Sch01Age,
  author    = {J{\"u}rgen Schumacher and Michael Beetz},
  title     = {Ein agentenbasiertes Verfahren zur effizienten Beantwortung von Lieferterminanfragen in einer Supply-Chain},
  booktitle = {Proceedings der Verbundtagung VertIS 2001},
  year      = {2001},
  bib2html_pubtype  = {ignore},
  bib2html_rescat   = {ignore},
  bib2html_groups   = {ignore},
  bib2html_funding  = {ignore},
  bib2html_keywords = {ignore},
}
@inproceedings{Han02Tow,
  author    = {Robert Hanek and Thorsten Schmitt and Sebastian Buck and Michael Beetz},
  title     = {Towards {RoboCup} without Color Labeling},
  booktitle = {RoboCup International Symposium 2002},
  year      = {2002},
  series    = {Lecture Notes in Artificial Intelligence (LNAI)},
  address   = {Fukuoka, Japan},
  publisher = {Springer Publishers},
  bib2html_pubtype  = {Award Winner, Refereed Conference Paper},
  bib2html_rescat   = {RoboCup, Vision},
  bib2html_groups   = {AGILO, IU},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Vision, State Estimation},
}
@inproceedings{Sch02Wat,
  author    = {Thorsten Schmitt and Michael Beetz and Robert Hanek and Sebastian Buck},
  title     = {Watch their Moves: Applying Probabilistic Multiple Object Tracking to Autonomous Robot Soccer},
  booktitle = {The Eighteenth National Conference on Artificial Intelligence},
  address   = {Edmonton, Canada},
  year      = {2002},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {State Estimation, Robot Vision, RoboCup},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Vision, State Estimation},
  abstract  = {In many autonomous robot applications robots must be capable of estimating the positions and
motions of moving objects in their environments. In this paper, we apply probabilistic multiple
object tracking to estimating the positions of opponent players in autonomous robot soccer. We
extend an existing tracking algorithm to handle multiple mobile sensors with uncertain positions,
discuss the specification of probabilistic models needed by the algorithm, and describe the
required vision-interpretation algorithms. The multiple object tracking has been successfully
applied throughout the RoboCup 2001 world championship.}
}
@inproceedings{Han02Fas,
  author    = {Robert Hanek and Thorsten Schmitt and Sebastian Buck and Michael Beetz},
  title     = {Fast Image-based Object Localization in Natural Scenes},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) 2002},
  address   = {Lausanne, Switzerland},
  year      = {2002},
  pages     = {116--122},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Vision, Image Understanding},
  bib2html_groups   = {IAS, IU},
  bib2html_funding  = {BV},
  bib2html_keywords = {Vision},
  abstract  = {In many robot applications, autonomous robots must be capable of localizing the objects they are to
manipulate. In this paper we address the object localization problem by fitting a parametric curve
model to the object contour in the image. The initial prior of the object pose is iteratively
refined to the posterior distribution by optimizing the separation of the object and the background.
The local separation criteria are based on local statistics which are iteratively computed from the
object and the background region. No prior knowledge on color distributions is needed. Experiments
show that the method is capable of localizing objects in a cluttered and textured scene even under
strong variations of illumination. The method is able to localize a soccer ball within frame rate.}
}
@inproceedings{Bee02Agi1,
  author    = {Michael Beetz and Sebastian Buck and Robert Hanek and Thorsten Schmitt and Bernd Radig},
  title     = {The {AGILO} Autonomous Robot Soccer Team: Computational Principles, Experiences, and Perspectives},
  booktitle = {International Joint Conference on Autonomous Agents and Multi Agent Systems (AAMAS) 2002},
  year      = {2002},
  pages     = {805--812},
  address   = {Bologna, Italy},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Robot Learning, State Estimation, RoboCup},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Learning, Robot, State Estimation, Vision, Reasoning},
  abstract  = {This paper describes the computational model underlying the AGILO autonomous robot soccer team, its
implementation, and our experiences with it. The most salient aspects of the AGILO control software
are that it includes (1) a cooperative probabilistic game state estimator working with a simple
off-the-shelf camera system; (2) a situated action selection module that makes amble use of
experience-based learning and produces coherent team behavior even if inter-robot communication is
perturbed; and (3) a playbook executor that can perform preprogrammed complex soccer plays in
appropriate situations by employing plan-based control techniques. The use of such sophisticated
state estimation and control techniques distinguishes the AGILO software from many others applied
to mid-size autonomous robot soccer. The paper discusses the computational techniques and necessary
extensions based on experimental data from the 2001 robot soccer world championship.}
}
@inproceedings{Buc02App,
  author    = {Sebastian Buck and Michael Beetz and Thorsten Schmitt},
  title     = {Approximating the Value Function for Continuous Space Reinforcement Learning in Robot Control},
  booktitle = {Proc. of the IEEE Intl. Conf. on Intelligent Robots and Systems},
  year      = {2002},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Robot Learning, RoboCup},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Learning, Robot},
  abstract  = {Many robot learning tasks are very difficult to solve: their state spaces are high dimensional,
variables and command parameters are continuously valued, and system states are only partly
observable. In this paper, we propose to learn a continuous space value function for reinforcement
learning using neural networks trained from data of exploration runs. The learned function is
guaranteed to be a lower bound for, and reproduces the characteristic shape of, the accurate value
function. We apply our approach to two robot navigation tasks, discuss how to deal with possible
problems occurring in practice, and assess its performance.}
}
@inproceedings{Buck02MAM,
  author    = {Sebastian Buck and Michael Beetz and Thorsten Schmitt},
  title     = {{M-ROSE}: A Multi Robot Simulation Environment for Learning Cooperative Behavior},
  booktitle = {Distributed Autonomous Robotic Systems 5},
  editor    = {H. Asama and T. Arai and T. Fukuda and T. Hasegawa},
  series    = {Lecture Notes in Artificial Intelligence (LNAI)},
  year      = {2002},
  publisher = {Springer-Verlag},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Robot Learning, RoboCup},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Learning, Robot},
}
@inproceedings{Buc02Rel,
  author    = {Sebastian Buck and Michael Beetz and Thorsten Schmitt},
  title     = {Reliable Multi Robot Coordination Using Minimal Communication and Neural Prediction},
  booktitle = {Advances in Plan-based Control of Autonomous Robots. Selected Contributions of the Dagstuhl Seminar ``Plan-based Control of Robotic Agents''},
  editor    = {M. Beetz and J. Hertzberg and M. Ghallab and M. Pollack},
  publisher = {Springer},
  series    = {Lecture Notes in Artificial Intelligence},
  year      = {2002},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Robot Learning, RoboCup},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Learning, Robot},
}
@inproceedings{Buc02Mac,
  author    = {Sebastian Buck and Freek Stulp and Michael Beetz and Thorsten Schmitt},
  title     = {Machine Control Using Radial Basis Value Functions and Inverse State Projection},
  booktitle = {Proc. of the IEEE Intl. Conf. on Automation, Robotics, Control, and Vision},
  year      = {2002},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Models, Learning, Action},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot},
  abstract  = {Typical real world machine control tasks have some characteristics
which makes them difficult to solve: Their state spaces are
high-dimensional and continuous, and it may be impossible to reach a
satisfying target state by exploration or human control. To overcome
these problems, in this paper, we propose (1) to use radial basis
functions for value function approximation in continuous space
reinforcement learning and (2) the use of learned inverse projection
functions for state space exploration. We apply our approach to path
planning in dynamic environments and to an aircraft autolanding
simulation, and evaluate its performance.}
}
@inproceedings{Bee02Agi2,
  author    = {Michael Beetz and Sebastian Buck and Robert Hanek and Andreas Hofhauser and Thorsten Schmitt},
  title     = {{AGILO RoboCuppers} 2002: Applying Cooperative Game State Estimation, Experience-based Learning, and Plan-based Control to Autonomous Robot Soccer},
  booktitle = {RoboCup International Symposium 2002},
  series    = {Lecture Notes in Computer Science},
  year      = {2002},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {RoboCup, State Estimation, Robot Vision, Robot Learning, Plan-based Robot Control},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Planning, Learning, State Estimation, Vision},
  abstract  = {This paper describes the computational model underlying the AGILO autonomous robot soccer team and
its implementation. The most salient aspects of the AGILO control software are that it includes (1)
a cooperative probabilistic game state estimator working with a simple off-the-shelf camera system;
(2) a situated action selection module that makes amble use of experience-based learning and
produces coherent team behavior even if inter-robot communication is perturbed; and (3) a playbook
executor that can perform preprogrammed complex soccer plays in appropriate situations by employing
plan-based control techniques. The use of such sophisticated state estimation and control
techniques characterizes the AGILO software. The paper discusses the computational techniques and
necessary extensions based on experimental data from the 2001 robot soccer world championship.}
}
@article{Bel02Lea,
  author  = {Thorsten Belker and Michael Beetz and Armin Cremers},
  title   = {Learning Action Models for the Improved Execution of Navigation Plans},
  journal = {Robotics and Autonomous Systems},
  volume  = {38},
  number  = {3--4},
  pages   = {137--148},
  month   = mar,
  year    = {2002},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {Robot Learning, Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Learning, Robot, Planning},
  abstract = {Most state-of-the-art navigation systems for autonomous service robots decompose navigation into
global navigation planning and local reactive navigation. While the methods for navigation planning
and local navigation themselves are well understood, the plan execution problem, the problem of how
to generate and parameterize local navigation tasks from a given navigation plan, is largely
unsolved.
This article describes how a robot can autonomously learn to execute navigation plans. We formalize
the problem as a Markov Decision Process (MDP) and derive a decision theoretic action selection
function from it. The action selection function employs models of the robot's navigation actions,
which are autonomously acquired from experience using neural network or regression tree learning
algorithms. We show, both in simulation and on a RWI B21 mobile robot, that the learned models
together with the derived action selection function achieve competent navigation behavior.}
}
@inproceedings{Bee02Robocup,
  author    = {Michael Beetz and Andreas Hofhauser},
  title     = {Plan-based control for autonomous robot soccer},
  booktitle = {Advances in Plan-based Control of Autonomous Robots. Selected Contributions of the Dagstuhl Seminar Plan-based Control of Robotic Agents, Lecture Notes in Artificial Intelligence (LNAI)},
  year      = {2002},
  publisher = {Springer-Verlag},
  bib2html_pubtype  = {Book Chapter},
  bib2html_rescat   = {RoboCup, Plan-based Robot Control},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Planning},
}
@incollection{Bee02Tow,
  author    = {Michael Beetz},
  title     = {Towards integrated computational models for the plan-based control of robotic agents},
  booktitle = {Festschrift zum 60.~Geburtstag von Prof.~J.~Siekmann},
  publisher = {Springer Publishers},
  year      = {2002},
  series    = {Lecture Notes in Artificial Intelligence},
  bib2html_pubtype  = {Book Chapter},
  bib2html_rescat   = {Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning},
}
@book{Bee02Pla,
  author    = {Michael Beetz},
  title     = {Plan-based Control of Robotic Agents},
  publisher = {Springer Publishers},
  year      = {2002},
  volume    = {2554},
  series    = {Lecture Notes in Artificial Intelligence},
  bib2html_pubtype  = {Book},
  bib2html_rescat   = {Robot Learning, Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning, Learning},
}
@book{Bee02Adv,
  editor    = {Michael Beetz and Joachim Hertzberg and Malik Ghallab and Martha Pollack},
  title     = {Advances in Plan-based Control of Robotic Agents},
  publisher = {Springer Publishers},
  year      = {2002},
  volume    = {2554},
  series    = {Lecture Notes in Artificial Intelligence},
  internal-note = {NOTE(review): names moved from author to editor -- this is the edited Dagstuhl volume (cf. the editor list of entry Buc02Rel). Volume number is identical to Bee02Pla; verify the LNAI number of this volume.},
  bib2html_pubtype  = {Book},
  bib2html_rescat   = {Robot Learning, Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning, Learning},
}
@inproceedings{Bee02Rep,
  author    = {Michael Beetz},
  title     = {{Plan Representation for Robotic Agents}},
  booktitle = {Proceedings of the Sixth International Conference on AI Planning and Scheduling},
  pages     = {223--232},
  year      = {2002},
  address   = {Menlo Park, CA},
  publisher = {AAAI Press},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Plan-based Robot Control},
  bib2html_groups   = {IAS},
  bib2html_funding  = {ignore},
  bib2html_keywords = {Robot, Planning, Representation},
}
@article{Sch02Coo,
  author  = {Thorsten Schmitt and Robert Hanek and Michael Beetz and Sebastian Buck and Bernd Radig},
  title   = {Cooperative Probabilistic State Estimation for Vision-based Autonomous Mobile Robots},
  journal = {IEEE Transactions on Robotics and Automation},
  year    = {2002},
  month   = oct,
  volume  = {18},
  number  = {5},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {State Estimation, Robot Vision, RoboCup},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Vision, State Estimation},
  abstract = {With the services that autonomous robots are to provide becoming more demanding, the states that
the robots have to estimate become more complex. In this article, we develop and analyze a probabilistic, vision-based state estimation method for individual, autonomous robots. This method enables a team of mobile robots to estimate their joint positions in a known environment and track the positions of autonomously moving objects. The state estimators of different robots cooperate to increase the accuracy and reliability of the estimation process. This cooperation between the robots enables them to track temporarily occluded objects and to faster recover their position after they have lost track of it. The method is empirically validated based on experiments with a team of physical robots.}
}
@inproceedings{SchroeterRoman02RGML,
  author    = {D. Schr{\"o}ter and M. Beetz and J.-S. Gutmann},
  title     = {{RG Mapping: Learning Compact and Structured 2D Line Maps of Indoor Environments}},
  booktitle = {11th IEEE International Workshop on Robot and Human Interactive Communication (ROMAN), Berlin/Germany},
  year      = {2002},
  abstract  = {In this paper we present
Region \& Gateway (RG) Mapping,
a novel approach to laser-based 2D line mapping of indoor environments.
RG Mapping is capable of acquiring very compact, structured, and
semantically annotated maps. We present and empirically analyze
the method based on map acquisition experiments with autonomous
mobile robots. The experiments show that RG mapping drastically
compresses the data contained in line scan maps without substantial
loss of accuracy.},
  bib2html_pubtype  = {Refereed Workshop Paper},
  bib2html_rescat   = {Robot Mapping},
  bib2html_groups   = {IAS,EvI},
  bib2html_funding  = {EvI},
  bib2html_keywords = {Environment Mapping},
}
@inproceedings{Bee03Aut,
  author    = {Michael Beetz and Freek Stulp and Alexandra Kirsch and Armin M{\"u}ller and Sebastian Buck},
  title     = {Autonomous Robot Controllers Capable of Acquiring Repertoires of Complex Skills},
  booktitle = {RoboCup International Symposium 2003},
  address   = {Padova, Italy},
  year      = {2003},
  month     = jul,
  bib2html_pubtype  = {Conference Paper},
  bib2html_rescat   = {Learning, Planning, Action},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Language, Learning},
  abstract  = {Due to the complexity and sophistication of the skills needed in real world tasks, the development
of autonomous robot controllers requires an ever increasing application of learning techniques. To
date, however, learning steps are mainly executed in isolation and only the learned code pieces
become part of the controller. This approach has several drawbacks: the learning steps themselves
are undocumented and not executable. In this paper, we extend an existing control language with
constructs for specifying control tasks, process models, learning problems, exploration strategies,
etc. Using these constructs, the learning problems can be represented explicitly and transparently
and, as they are part of the overall program implementation, become executable. With the extended
language we rationally reconstruct large parts of the action selection module of the AGILO2001
autonomous soccer robots.}
}
@inproceedings{Sch03Des,
  author    = {Thorsten Schmitt and Michael Beetz},
  title     = {{Designing Probabilistic State Estimators for Autonomous Robot Control}},
  booktitle = {IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS)},
  year      = {2003},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {RoboCup, State Estimation},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, State Estimation},
  abstract  = {This paper sketches and discusses design options for complex probabilistic state estimators and
investigates their interactions and their impact on performance. We consider, as an example, the
estimation of game states in autonomous robot soccer. We show that many factors other than the
choice of algorithms determine the performance of the estimation systems. We propose empirical
investigations and learning as necessary tools for the development of successful state estimation
systems.}
}
@inproceedings{Sch03Dev,
  author    = {Thorsten Schmitt and Robert Hanek and Michael Beetz},
  title     = {{Developing Comprehensive State Estimators for Robot Soccer}},
  booktitle = {RoboCup International Symposium 2003},
  address   = {Padova, Italy},
  year      = {2003},
  internal-note = {NOTE(review): abstract is identical to entry Sch03Des -- likely a copy-paste; confirm against the actual paper.},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {RoboCup, State Estimation},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, State Estimation},
  abstract  = {This paper sketches and discusses design options for complex probabilistic state estimators and
investigates their interactions and their impact on performance. We consider, as an example, the
estimation of game states in autonomous robot soccer. We show that many factors other than the
choice of algorithms determine the performance of the estimation systems. We propose empirical
investigations and learning as necessary tools for the development of successful state estimation
systems.}
}
@article{Han03Tow,
  author  = {Robert Hanek and Thorsten Schmitt and Sebastian Buck and Michael Beetz},
  title   = {Towards {RoboCup} without color labeling},
  journal = {AI Magazine},
  year    = {2003},
  volume  = {24},
  number  = {2},
  pages   = {37--40},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {RoboCup, Vision, Image Understanding},
  bib2html_groups   = {AGILO, IU},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Vision, State Estimation},
}
@inproceedings{Bee03Agi,
  author    = {Michael Beetz and Suat Gedikli and Robert Hanek and Thorsten Schmitt and Freek Stulp},
  title     = {{AGILO RoboCuppers} 2003: Computational Principles and Research Directions},
  booktitle = {RoboCup International Symposium 2003},
  address   = {Padova, Italy},
  year      = {2003},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Perception, Models, Learning, Planning, Action},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, State Estimation},
  abstract  = {This paper gives an overview about the approaches chosen by the
middle size robot soccer team of the Munich University of Technology, the
AGILO RoboCuppers. First a brief system overview will be given. Then the
computational principles are described. Finally the directions for further
research are outlined.}
}
@inproceedings{SchroeterOgrw03,
  author    = {Derik Schr{\"o}ter and Michael Beetz},
  title     = {{RG~Mapping: Building Object-Oriented Representations of Structured Human Environments}},
  booktitle = {6-th Open Russian-German Workshop on Pattern Recognition and Image Understanding (OGRW), Katun/Russia},
  note      = {Best Paper Award},
  year      = {2004},
  abstract  = {We present a new approach to mapping of indoor environments, where the environment structure in terms of regions and gateways is automatically extracted, while the robot explores. Objects, both in 2D and 3D, are modelled explicitly in those maps and allow for robust localization. We refer to those maps as object-oriented environment representations or Region \& Gateway~Maps. Region \& Gateway~Mapping is capable of acquiring very compact, structured, and semantically annotated maps. We show that those maps can be built online and that they are extremely useful in plan-based control of autonomous robots as well as for robot-human interaction.},
  bib2html_pubtype  = {Refereed Workshop Paper, Award Winner},
  bib2html_rescat   = {Robot Mapping},
  bib2html_groups   = {IAS,EvI},
  bib2html_funding  = {EvI},
  bib2html_keywords = {Environment Mapping},
}
@inproceedings{Bee04Mot,
  author    = {Michael Beetz and Sven Flossmann and Thomas Stammeier},
  title     = {Motion and Episode Models for (Simulated) Football Games: Acquisition, Representation, and Use},
  booktitle = {3rd International Joint Conference on Autonomous Agents \& Multi Agent Systems (AAMAS)},
  year      = {2004},
  bib2html_pubtype  = {Refereed Conference Paper},
  bib2html_rescat   = {Game Analysis},
  bib2html_groups   = {IAS, FIPM, Aspogamo},
  bib2html_funding  = {FIPM},
  bib2html_domain   = {Soccer Analysis},
  bib2html_keywords = {Game Analysis},
}
@article{Han04CCD,
  author  = {Robert Hanek and Michael Beetz},
  title   = {The Contracting Curve Density Algorithm: Fitting Parametric Curve Models to Images Using Local Self-adapting Separation Criteria},
  journal = {International Journal of Computer Vision},
  year    = {2004},
  volume  = {59},
  number  = {3},
  pages   = {233--258},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {RoboCup, Vision},
  bib2html_groups   = {IAS, IU},
  bib2html_funding  = {BV},
  bib2html_keywords = {Robot, Vision},
  abstract = {The task of fitting parametric curve models to the boundaries of perceptually meaningful image
regions is a key problem in computer vision with numerous applications, such as image segmentation,
pose estimation, object tracking, and 3-D reconstruction. In this article, we propose the
Contracting Curve Density (CCD) algorithm as a solution to the curve-fitting problem. The CCD
algorithm extends the state-of-the-art in two important ways. First, it applies a novel likelihood
function for the assessment of a fit between the curve model and the image data. This likelihood
function can cope with highly inhomogeneous image regions, because it is formulated in terms of
local image statistics. The local image statistics are learned on the fly from the vicinity of the
expected curve. They provide therefore locally adapted criteria for separating the adjacent image
regions. These local criteria replace often used predefined fixed criteria that rely on homogeneous
image regions or specific edge properties. The second contribution is the use of blurred curve
models as efficient means for iteratively optimizing the posterior density over possible model
parameters. These blurred curve models enable the algorithm to trade-off two conflicting
objectives, namely having a large area of convergence and achieving high accuracy. We apply the
CCD algorithm to several challenging image segmentation and 3-D pose estimation problems. Our
experiments with RGB images show that the CCD algorithm achieves a high level of robustness and
subpixel accuracy even in the presence of severe texture, shading, clutter, partial occlusion, and
strong changes of illumination.}
}
@article{Bee04AGILO,
  author  = {Michael Beetz and Thorsten Schmitt and Robert Hanek and Sebastian Buck and Freek Stulp and Derik Schr{\"o}ter and Bernd Radig},
  title   = {The {AGILO} Robot Soccer Team -- Experience-based Learning and Probabilistic Reasoning in Autonomous Robot Control},
  journal = {Autonomous Robots},
  year    = {2004},
  volume  = {17},
  number  = {1},
  pages   = {55--77},
  bib2html_pubtype  = {Journal},
  bib2html_rescat   = {Perception, Models, Learning, Planning, Action},
  bib2html_groups   = {AGILO},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Learning, Robot, Reasoning},
  abstract = {This article describes the computational model underlying the AGILO autonomous robot soccer team,
its implementation, and our experiences with it. According to our model the control system of an
autonomous soccer robot consists of a probabilistic game state estimator and a situated action
selection module. The game state estimator computes the robot's belief state with respect to the
current game situation using a simple off-the-shelf camera system. The estimated game state
comprises the positions and dynamic states of the robot itself and its teammates as well as the
positions of the ball and the opponent players. Employing sophisticated probabilistic reasoning
techniques and exploiting the cooperation between team mates, the robot can estimate complex game
states reliably and accurately despite incomplete and inaccurate state information. The action
selection module selects actions according to specified selection criteria as well as learned
experiences. Automatic learning techniques made it possible to develop fast and skillful routines
for approaching the ball, assigning roles, and performing coordinated plays. The paper discusses
the computational techniques based on experimental data from the 2001 robot soccer world
championship.}
}
@inproceedings{Bee04RPL,
  author    = {Michael Beetz and Alexandra Kirsch and Armin M{\"u}ller},
  title     = {{RPL-LEARN}: Extending an Autonomous Robot Control Language to Perform Experience-based Learning},
  booktitle = {3rd International Joint Conference on Autonomous Agents \& Multi Agent Systems (AAMAS)},
  year      = {2004},
  bib2html_pubtype  = {Conference Paper},
  bib2html_rescat   = {Learning},
  bib2html_groups   = {AGILO,Cogito},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Learning, Robot, Language},
  abstract  = {In this paper, we extend the autonomous robot control and plan language RPL with constructs for
specifying experiences, control tasks, learning systems and their parameterization, and exploration
strategies. Using these constructs, the learning problems can be represented explicitly and
transparently and become executable. With the extended language we rationally reconstruct parts of
the AGILO autonomous robot soccer controllers and show the feasibility and advantages of our
approach.}
}
@inproceedings{Mue04Obj,
  author    = {Armin M{\"u}ller and Alexandra Kirsch and Michael Beetz},
  title     = {Object-oriented Model-based Extensions of Robot Control Languages},
  booktitle = {27th German Conference on Artificial Intelligence},
  year      = {2004},
  bib2html_pubtype  = {Conference Paper},
  bib2html_rescat   = {Planning,Action},
  bib2html_groups   = {AGILO,Cogito},
  bib2html_funding  = {AGILO},
  bib2html_keywords = {Robot, Language, Representation},
  abstract  = {More than a decade after mobile robots arrived in many research labs it is still difficult to find
plan-based autonomous robot controllers that perform, beyond doubt, better than they possibly could
without applying AI methods. One of the main reasons for this situation is abstraction. AI based
control techniques typically abstract away from the mechanisms that generate the physical behavior
and refuse the use of control structures that have proven to be necessary for producing flexible
and reliable robot behavior. The consequence is: AI-based control mechanisms can neither explain
and diagnose how a certain behavior resulted from a given plan nor can they revise the plans to
improve its physical performance. In our view, a substantial improvement on this situation is not
possible without having a new generation of robot control languages. These languages must, on the
one hand, be expressive enough for specifying and producing high performance robot behavior and, on
the other hand, be transparent and explicit enough to enable execution time inference mechanisms to
reason about, and manipulate these control programs. This paper reports on aspects of the design of
RPL-II, which we propose as such a next generation control language. We describe the nuts and bolts
of extending our existing language RPL to support explicit models of physical systems, and
object-oriented modeling of control tasks and programs. We show the application of these concepts
in the context of autonomous robot soccer.}
}
% RoboCup International Symposium 2004 team paper on the AGILO RoboCuppers.
% Fixes: the original had "series = {Lisbon}" -- Lisbon is the conference
% location, not a book series, so it is moved to the address field (matching
% the convention used by other entries in this file, e.g. beetz15robosherlock);
% "month = {July}" is replaced by the predefined macro "month = jul" so styles
% can format/abbreviate it.
@InProceedings{stulp04agilo,
author = {Freek Stulp and Alexandra Kirsch and Suat Gedikli and Michael Beetz},
title = {{AGILO RoboCuppers} 2004},
booktitle = {RoboCup International Symposium 2004},
address = {Lisbon},
year = {2004},
month = jul,
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models, Learning, Planning, Action},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {Robot, State Estimation, Learning, Vision},
abstract = {The Agilo RoboCup team is the primary platform for our
research on the semi-automatic acquisition of visuo-motoric plans. It is
realized using inexpensive, off the shelf, easily extendible hardware
components and a standard software environment. The control system of an
autonomous soccer robot consists of a probabilistic game state estimator and a
situated action selection module. The game state estimator computes the
robot's belief state with respect to the current game situation. The action
selection module selects actions according to specified goals as well as
learned experiences. Automatic learning techniques made it possible to develop
fast and skillful routines for approaching the ball, assigning roles, and
performing coordinated plays.}
}
% Workshop paper (MTEE 2004) on ground-truth evaluation of multi-robot systems.
% bib2html_* fields are non-standard website-generator metadata; the empty
% bib2html_keywords field appears deliberate (other entries do the same).
@InProceedings{stulp04evaluating,
author = {Freek Stulp and Suat Gedikli and Michael Beetz},
title = {Evaluating Multi-Agent Robotic Systems Using Ground Truth},
year = {2004},
booktitle = {Proceedings of the Workshop on Methods and Technology for Empirical Evaluation of Multi-agent Systems and Multi-robot Teams (MTEE)},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {},
abstract = {A thorough empirical evaluation of multi-agent robotic
systems is greatly facilitated if the {\em true} state of the world over time
can be obtained. The accuracy of the beliefs as well as the overall
performance can then be measured objectively and efficiently. In this paper we
present a system for determining the {\em ground truth} state of the world,
similar to the ceiling cameras used in RoboCup small-size league. We have used
this ground truth data to evaluate the accuracy of the self- and
object-localization of the robots in our RoboCup mid-size league team, the
Agilo RoboCuppers. More complex models of the state estimation module have
also been learned. These models provide insight into the workings and
shortcomings of this module, and can be used to improve it.}
}
% Conference paper (dvs-Section Computer Science in Sport, 2004) on automatic
% football-game analysis. Field names are case-insensitive in BibTeX, so the
% mixed AUTHOR/TITLE vs. lowercase style here is harmless.
% NOTE(review): authors are stored with initials only; full given names would
% be more informative if they can be recovered.
@INPROCEEDINGS{Beetz04Wat,
AUTHOR = {M. Beetz and F. Fischer and S. Flossmann and B. Kirchlechner and A. Unseld and C. Holzer},
TITLE = {Watching Football with the Eyes of Experts: Integrated Intelligent Systems for the Automatic Analysis of (Simulated) Football Games},
BOOKTITLE = {5th Annual Conference dvs-Section Computer Science in Sport},
year = {2004},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Game Analysis},
bib2html_groups = {IAS,FIPM,Aspogamo},
bib2html_domain = {Soccer Analysis},
bib2html_funding = {FIPM}
}
% Workshop paper (KI 2004) on interpreting position data of simulation-league
% RoboCup teams. bib2html_* fields are non-standard website-generator metadata.
@INPROCEEDINGS{Beetz04Int,
AUTHOR = {M. Beetz and B. Kirchlechner and F. Fischer},
TITLE = {Interpretation and Processing of Position Data for the Empirical Study of the Behavior of Simulation League Robocup Teams},
BOOKTITLE = {KI 2004 Workshop},
year = {2004},
bib2html_pubtype = {Refereed Workshop Paper},
bib2html_rescat = {Game Analysis},
bib2html_groups = {IAS,FIPM,Aspogamo},
bib2html_domain = {Soccer Analysis},
bib2html_funding = {FIPM}
}
% ICRA 2004 paper on acquiring object models for robot maps.
% Fixes: title typo "Modells" -> "Models" (German/English mix-up; the braces
% already protect the capitalization); removed the stray space before the
% citation key, which some BibTeX tooling mishandles.
% NOTE(review): the conference location is embedded in booktitle rather than
% an address field -- left as-is to match neighboring Schroeter entries.
@InProceedings{SchroeterIcra04RectObjects,
author = {D. Schr{\"o}ter and M. Beetz},
title = {{Acquiring Models of Rectangular Objects for Robot Maps}},
booktitle = {Proc. of IEEE International Conference on Robotics and Automation (ICRA), New Orleans/USA},
year = {2004},
abstract = {{State-of-the-art robot mapping approaches are capable of acquiring
impressively accurate 2D and 3D models of their environments. To the best of
our knowledge few of them can acquire models of task-relevant objects.
In this paper, we introduce a novel method for acquiring models of
task-relevant objects from stereo images. The proposed algorithm
applies methods from projective geometry and works for rectangular
objects, which are, in office- and museum-like environments,
the most commonly found subclass of geometric objects.
The method is shown to work accurately and for a wide range of
viewing angles and distances.}},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Robot Mapping},
bib2html_groups = {IAS,EvI},
bib2html_funding = {EvI},
bib2html_keywords = {Environment Mapping},
}
% DAGM 2004 paper on gateway detection/classification for structured robot maps.
% Fixes: "T\"ubingen" -> "T{\"u}bingen" so the umlaut is a proper BibTeX
% special character (correct sorting/labeling under classic BibTeX); removed
% the stray space before the citation key.
@InProceedings{SchroeterDagm04DetectClassGW,
author = {D. Schr{\"o}ter and T. Weber and M. Beetz and B. Radig},
title = {{Detection and Classification of Gateways for the Acquisition of Structured Robot Maps}},
booktitle = {Proc. of 26th Pattern Recognition Symposium (DAGM), T{\"u}bingen/Germany},
year = {2004},
abstract = {{The automatic acquisition of structured object maps
requires sophisticated perceptual mechanisms that enable the robot
to recognize the objects that are to be stored in the robot map.
This paper investigates a particular object recognition problem:
the automatic detection and classification of gateways in office
environments based on laser range data. We will propose, discuss,
and empirically evaluate a sensor model for crossing gateways and
different approaches to gateway classification including simple
maximum classifiers and HMM-based classification
of observation sequences.}},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Robot Mapping},
bib2html_groups = {IAS,EvI},
bib2html_funding = {EvI},
bib2html_keywords = {Environment Mapping},
}
% AMS 2005 conference paper introducing the ROLL robot learning language.
% bib2html_* fields are non-standard website-generator metadata.
@inproceedings{kirsch05combining,
author = {Alexandra Kirsch and Michael Beetz},
title = {Combining Learning and Programming for High-Performance Robot Controllers},
booktitle = {Tagungsband Autonome Mobile Systeme 2005},
series = {Reihe Informatik aktuell},
publisher = {Springer Verlag},
year = {2005},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Learning,Planning},
bib2html_groups = {Cogito,AGILO},
abstract = {The implementation of high-performance robot controllers for complex
control tasks such as playing autonomous robot soccer is tedious,
error-prone, and a never ending programming task. In this paper we
propose programmers to write autonomous controllers that optimize and
automatically adapt themselves to changing circumstances of task execution
using explicit perception, dynamics and action models.
To this end we develop ROLL (Robot Learning Language), a control language
allowing for model-based robot programming. ROLL provides language constructs
for specifying executable code pieces of how to learn and update these
models. We are currently using ROLL's mechanisms for implementing a rational
reconstruction of our soccer robot controllers.}
}
% ICAPS 2005 workshop paper: case study applying RoLL to robot navigation.
% bib2html_* fields are non-standard website-generator metadata.
@inproceedings{kirsch05making,
author = {Alexandra Kirsch and Michael Schweitzer and Michael Beetz},
title = {Making Robot Learning Controllable: A Case Study in Robot Navigation},
booktitle = {Proceedings of the ICAPS Workshop on Plan Execution: A Reality Check},
year = {2005},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Learning,Planning,Action},
bib2html_groups = {Cogito, AGILO},
abstract = {In many applications the performance of learned robot controllers
drags behind those of the respective hand-coded ones. In our view,
this situation is caused not mainly by deficiencies of the learning
algorithms but rather by an insufficient embedding of learning in
robot control programs.
This paper presents a case study in which RoLL, a robot control
language that allows for explicit representations of learning
problems, is applied to learning robot navigation tasks. The case
study shows that RoLL's constructs for specifying learning
problems (1) make aspects of autonomous robot learning explicit
and controllable; (2) have an enormous impact on the
performance of the learned controllers and therefore encourage the
engineering of high performance learners; (3) make the learning
processes repeatable and allow for writing bootstrapping robot
controllers. Taken together the approach constitutes an important
step towards engineering controllers of autonomous learning
robots.}
}
% ICAPS 2005 workshop contribution on subgoal refinement; filed as @Misc with
% the venue in the note field. NOTE(review): @inproceedings with the workshop
% as booktitle may be a better fit, but the note/url form is kept as-is.
@Misc{stulp05optimizeda,
author = {Freek Stulp and Michael Beetz},
title = {Optimized Execution of Action Chains through Subgoal Refinement},
year = {2005},
note = {ICAPS Workshop ``Plan Execution: A Reality Check''},
url = {http://ic.arc.nasa.gov/people/sailesh/icaps2005wksp/},
bib2html_pubtype = {Refereed Workshop Paper},
bib2html_rescat = {Models, Learning, Planning, Action},
bib2html_groups = {IAS, AGILO},
bib2html_keywords = {},
abstract = {In this paper we propose a novel computation model for
the execution of abstract action chains. In this computation model a robot
first learns situation-specific performance models of abstract actions. It
then uses these models to automatically specialize the abstract actions for
their execution in a given action chain. This specialization results in
refined chains that are optimized for performance. As a side effect this
behavior optimization also appears to produce action chains with seamless
transitions between actions.}
}
% IJCAI 2005 workshop contribution on task-context-specific action parameters.
% Fix: the url contained "\~visser" -- in a BibTeX field "\~" is the tilde
% accent command (it would typeset a tilde over the "v"), not a literal "~".
% The raw "~" is stored instead; url-aware styles render it verbatim.
@Misc{stulp05tailoring,
author = {Freek Stulp and Michael Beetz},
title = {Tailoring Action Parameterizations to Their Task Contexts},
note = {IJCAI Workshop ``Agents in Real-Time and Dynamic Environments''},
year = {2005},
url = {http://www.tzi.de/~visser/ijcai05/},
bib2html_pubtype = {Refereed Workshop Paper},
bib2html_rescat = {Models, Learning, Planning, Action},
bib2html_groups = {IAS, AGILO},
bib2html_keywords = {},
abstract = {Solving complex tasks successfully and efficiently not
only depends on {\em what} you do, but also {\em how} you do it. Different
task contexts have different performance measures, and thus require different
ways of executing an action to optimize performance. Simply adding new actions
that are tailored to perform well within a specific task context makes
planning or action selection programming more difficult, as generality and
adaptivity is lost. Rather, existing actions should be parametrized such that
they optimize the task-specific performance measure. In this paper we propose
a novel computation model for the execution of abstract action chains. In this
computation model, a robot first learns situation-specific performance models
of abstract actions. It then uses these models to automatically specialize the
abstract actions for their execution in a given action chain. This
specialization results in refined chains that are optimized for performance.
As a side effect this behavior optimization also appears to produce action
chains with seamless transitions between actions.}
}
% IJCAI 2005 conference paper (full version of the subgoal-refinement work).
% bib2html_* fields are non-standard website-generator metadata.
@InProceedings{stulp05optimized,
author = {Freek Stulp and Michael Beetz},
title = {Optimized Execution of Action Chains Using Learned Performance Models of Abstract Actions},
booktitle = {Proceedings of the Nineteenth International Joint Conference on Artificial Intelligence (IJCAI)},
year = {2005},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Models, Learning, Planning, Action},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {},
abstract = {Many plan-based autonomous robot controllers generate chains of
abstract actions in order to achieve complex, dynamically changing,
and possibly interacting goals. The execution of these action chains
often results in robot behavior that shows abrupt transitions between
subsequent actions, causing suboptimal performance. The resulting motion
patterns are so characteristic for robots that people imitating robotic behavior will do so by making abrupt movements between
actions. In this paper we propose a novel computation model for the execution
of abstract action chains. In this computation model a robot first
learns situation-specific performance models of abstract actions. It
then uses these models to automatically specialize the abstract
actions for their execution in a given action chain. This
specialization results in refined chains that are optimized for
performance. As a side effect this behavior optimization also appears
to produce action chains with seamless transitions between actions.}
}
% Journal article (IEEE Pervasive Computing 4(3), 2005) on the FIPM football
% analysis project. Fix: page range uses an en-dash ("33--39"); a single
% hyphen is a typographical error in BibTeX page ranges.
@Article{Bee05FIPM,
author = {Michael Beetz and Bernhard Kirchlechner and Martin Lames},
title = {Computerized Real-Time Analysis of Football Games},
journal = {IEEE Pervasive Computing},
year = 2005,
volume = {4},
number = {3},
pages = {33--39},
bib2html_pubtype = {Journal},
bib2html_rescat = {Game Analysis},
bib2html_groups = {IAS,FIPM,Aspogamo},
bib2html_funding = {FIPM},
bib2html_domain = {Soccer Analysis},
bib2html_keywords = {Game Analysis},
abstract = {The research reported in this article is part of an ambitious,
mid-term project that studies the automated analysis of football games.
The input for game analysis is position data provided by tiny microwave senders
that are placed into the ball and the shin guards of football players. The
main objectives of the project are (1) the investigation of novel computational
mechanisms that enable computer systems to recognize intentional activities
based on position data, (2) the development of an integrated software system to
automate game interpretation and analysis, and (3) the demonstration of the
impact of automatic game analysis on sport science, football coaching, and
sports entertainment. The results are to be showcased in the form of an
intelligent information system for the matches at the Football World
Championship 2006 in Germany.}
}
% Book chapter in the Siekmann festschrift (2005).
% Fix: the editor field separated the two editors with a comma -- BibTeX
% requires " and " between names and would otherwise parse the whole string
% as one garbled name. Rewritten in unambiguous "Last, First" form.
% NOTE(review): publisher = {Springer LNCS 2605} mixes publisher and series
% data; splitting into publisher/series/volume would be cleaner -- confirm the
% exact series (LNCS vs. LNAI) before changing.
@incollection{Bee05Tow,
author = {Michael Beetz},
title = {Towards Comprehensive Computational Models for Plan-Based Control of Autonomous Robots},
booktitle = {Mechanizing Mathematical Reasoning:
Essays in Honor of J{\"o}rg H. Siekmann on the Occasion of His 60th Birthday},
pages = {514--527},
editor = {Hutter, Dieter and Stephan, Werner},
year = {2005},
publisher = {Springer LNCS 2605},
bib2html_pubtype = {Book Chapter},
bib2html_rescat = {Plan-based Robot Control},
bib2html_groups = {IAS},
bib2html_funding = {},
bib2html_keywords = {}
}
% JAIR 24 (2005) journal article on Probabilistic Hybrid Action Models (PHAMs).
% Empty bib2html_* fields appear deliberate (placeholders used file-wide).
@Article{beetz05probabilistic,
author = {Michael Beetz and Henrik Grosskreutz},
title = {Probabilistic Hybrid Action Models for Predicting
Concurrent Percept-driven Robot Behavior},
journal = {Journal of Artificial Intelligence Research},
year = {2005},
volume = {24},
pages = {799--849},
bib2html_pubtype = {Journal},
bib2html_rescat = {},
bib2html_groups = {},
bib2html_funding = {},
bib2html_keywords = {},
abstract = {This article develops Probabilistic Hybrid Action
Models (PHAMs), a realistic causal model for
predicting the behavior generated by modern
percept-driven robot plans. PHAMs represent aspects
of robot behavior that cannot be represented by most
action models used in AI planning: the temporal
structure of continuous control processes, their
non-deterministic effects, several modes of their
interferences, and the achievement of triggering
conditions in closed-loop robot plans. The main
contributions of this article are: (1) PHAMs, a
model of concurrent percept-driven behavior, its
formalization, and proofs that the model generates
probably, qualitatively accurate predictions; and
(2) a resource-efficient inference method for PHAMs
based on sampling projections from probabilistic
action models and state descriptions. We show how
PHAMs can be applied to planning the course of
action of an autonomous robot office courier based
on analytical and experimental results.}
}
% Ubicomp 2006 poster on Player/Stage as ubiquitous-computing middleware.
% Fix: abstract typo "We emphasize they key features" -> "the key features".
@InProceedings{Rusu06UbiCompPoster,
author = {Radu Bogdan Rusu and Alexis Maldonado and Michael Beetz and
Matthias Kranz and Lorenz M{\"o}senlechner and Paul Holleis and
Albrecht Schmidt},
title = {Player/Stage as Middleware for Ubiquitous Computing},
booktitle = {Proceedings of the 8th Annual Conference on Ubiquitous Computing
(Ubicomp 2006), Orange County California, September 17-21},
year = {2006},
abstract = {
The effective development and deployment of
comprehensive and heterogeneous ubiquitous computing
applications is hindered by the lack of a
comprehensive middleware infrastructure: interfaces
to sensors are company specific and sometimes even
product specific. Typically, these interfaces also
do not sustain the development of robust systems that
make use of sensor data fusion. In this paper, we
propose the use of Player/Stage, a middleware
commonly used as a defacto standard by the robotics
community, as the backbone of a heterogeneous
ubiquitous system. Player/Stage offers many features
needed in ubicomp, mostly because dealing with
uncertainty and many different sensor and actuator
systems has been a long term problem in robotics as
well. We emphasize the key features of the
Player/Stage project, and show how ubicomp devices
can be integrated into the system, as well as how
existing devices can be used. On top of that, we
present our sensor-enabled AwareKitchen environment
which makes use of automatic data analysis algorithms
integrated as drivers in the Player/Stage platform.
All our work is released as open source software
under the Player/Stage package, of which we are
active developers.
},
bib2html_pubtype = {Others},
bib2html_rescat = {Activity Recognition, Robotics},
bib2html_groups = {IAS},
bib2html_keywords = {Ubiquitous computing middleware, Player/Stage, sensor fusion},
}
% UbiSys'06 workshop paper (at Ubicomp 2006) on Player/Stage for
% context-aware intelligent environments.
@InProceedings{Rusu06UbiSys,
author = {Matthias Kranz and Radu Bogdan Rusu and Alexis Maldonado and
Michael Beetz and Albrecht Schmidt},
title = {A Player/Stage System for Context-Aware Intelligent Environments},
booktitle = {Proceedings of UbiSys'06, System Support for Ubiquitous Computing
Workshop, at the 8th Annual Conference on Ubiquitous Computing
(Ubicomp 2006), Orange County California, September 17-21, 2006},
year = {2006},
bib2html_pubtype = {Refereed Workshop Paper},
bib2html_rescat = {Activity Recognition, Robotics},
bib2html_groups = {IAS},
bib2html_keywords = {Ubiquitous computing, Player/Stage, sensor fusion, AwareKitchen},
abstract = {
We propose Player/Stage, a well-known platform widely
used in robotics, as middleware for ubiquitous
computing. Player/Stage provides uniform interfaces
to sensors and actuators and allows the computational
matching of input and output. Player/Stage exactly
addresses the issues of dealing with heterogeneous
hardware but currently only with a focus towards
robotics. We show how to integrate ubiquitous
computing platforms into Player/Stage and propose
Player/Stage as middleware for ubiquitous computing
projects.},
}
% KI 2006 conference paper on feature space generation via equation discovery.
% Fix: "Pfl\"uger" -> "Pfl{\"u}ger" so the umlaut is a proper BibTeX special
% character (correct sorting/labeling under classic BibTeX).
% NOTE(review): the empty abstract/keywords fields look like placeholders;
% left in place to match the file's convention.
@InProceedings{stulp06feature,
author = {Freek Stulp and Mark Pfl{\"u}ger and Michael Beetz},
title = {Feature Space Generation using Equation Discovery},
booktitle = {Proceedings of the 29th German Conference on Artificial Intelligence (KI)},
year = {2006},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Models, Learning},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {},
abstract = {}
}
% AAMAS 2006 conference paper on camera-based football-game observation.
% Field-name case is mixed (AUTHOR/TITLE vs. lowercase) -- harmless to BibTeX.
@InProceedings{Bee06camera,
AUTHOR = {Michael Beetz and Jan Bandouch and Suat Gedikli and
Nico von Hoyningen-Huene and Bernhard Kirchlechner
and Alexis Maldonado},
TITLE = {Camera-based Observation of Football Games for
Analyzing Multi-agent Activities},
booktitle = {Proceedings of the Fifth International Joint
Conference on Autonomous Agents and Multiagent
Systems (AAMAS)},
year = {2006},
bib2html_pubtype ={Refereed Conference Paper},
bib2html_rescat ={Game analysis},
bib2html_groups ={IAS, FIPM, Aspogamo},
bib2html_funding ={FIPM},
bib2html_domain = {Soccer Analysis},
bib2html_keywords ={},
abstract = {This paper describes a camera-based observation
system for football games that is used for the
automatic analysis of football games and reasoning
about multi-agent activity. The observation system
runs on video streams produced by cameras set up for
TV broadcasting. The observation system achieves
reliability and accuracy through various mechanisms
for adaptation, probabilistic estimation, and
exploiting domain constraints. It represents motions
compactly and segments them into classified ball
actions.}
}
% ICRA 2006 conference paper on implicit coordination via learned prediction
% models. Fix: page range uses an en-dash ("1330--1335") instead of a single
% hyphen, the correct BibTeX form for ranges.
@InProceedings{stulp06implicit,
author = {Freek Stulp and Michael Isik and Michael Beetz},
title = {Implicit Coordination in Robotic Teams using Learned Prediction Models},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
year = {2006},
pages = {1330--1335},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Models, Learning, Planning, Action},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {},
abstract = {Many application tasks require the cooperation of two or more robots. Humans are good at cooperation in shared workspaces, because they anticipate and adapt to the intentions and actions of others. In contrast, multi-agent and multi-robot systems rely on communication to exchange their intentions. This causes problems in domains where perfect communication is not guaranteed, such as rescue robotics, autonomous vehicles participating in traffic, or robotic soccer.
In this paper, we introduce a computational model for implicit coordination, and apply it to a typical coordination task from robotic soccer: regaining ball possession. The computational model specifies that performance prediction models are necessary for coordination, so we learn them off-line from observed experience. By taking the perspective of the team mates, these models are then used to predict utilities of others, and optimize a shared performance model for joint actions. In several experiments conducted with our robotic soccer team, we evaluate the performance of implicit coordination.}
}
% AAMAS 2006 conference paper introducing the "action awareness" model.
@InProceedings{stulp06actionawareness,
author = {Freek Stulp and Michael Beetz},
title = {Action Awareness -- Enabling Agents to Optimize, Transform, and Coordinate Plans},
booktitle = {Proceedings of the Fifth International Joint Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
year = {2006},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Models, Learning, Planning, Action},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {},
abstract = {As agent systems are solving more and more complex tasks in increasingly challenging domains, the systems themselves are becoming more complex too, often compromising their adaptivity and robustness. A promising approach to solve this problem is to provide agents with reflective capabilities. Agents that can reflect on the effects and expected performance of their actions, are more aware and knowledgeable of their capabilities and shortcomings.
In this paper, we introduce a computational model for what we call \emph{action awareness}. To achieve this awareness, agents learn predictive action models from observed experience. This knowledge is then used to optimize, transform and coordinate plans. We apply this computational model to a number of typical scenarios from robotic soccer. Various experiments on real robots demonstrate that action awareness enables the robots to improve the performance of their plans substantially.}
}
% AAAI 2006 Cognitive Robotics workshop paper on a plan library for a
% simulated household robot; published as AAAI Technical Report WS-06-03.
@InProceedings{mueller06designing,
author = {Armin M{\"u}ller and Michael Beetz},
title = {Designing and Implementing a Plan Library for a Simulated Household Robot},
booktitle = {Cognitive Robotics: Papers from the AAAI Workshop},
editor = {Michael Beetz and Kanna Rajan and Michael Thielscher and Radu Bogdan Rusu},
pages = {119--128},
year = {2006},
series = {Technical Report WS-06-03},
isbn = {978-1-57735-285-3},
publisher = {American Association for Artificial Intelligence},
address = {Menlo Park, California},
bib2html_pubtype = {Refereed Workshop Paper},
bib2html_rescat = {Plan-based Robot Control},
bib2html_groups = {IAS},
abstract = {As we are deploying planning mechanisms in real-world
applications, such as the control of autonomous robots, it becomes
apparent that the performance of plan-based controllers critically
depends on the design and implementation of plan libraries.
Despite its importance the investigation of designs of plan
libraries and plans has been largely ignored.
In this paper we describe parts of a plan library that we are
currently developing and applying to the control of a simulated
household robot. The salient features of our plans are that they
are designed for reliable, flexible, and optimized execution, and are
grounded into sensor data and action routines. We provide
empirical evidence that design criteria that we are proposing have
considerable impact on the performance level of robots.}
}
% RoboCup 2006 symposium paper on reinforcement learning of attack policies.
% Fix: abstract grammar "a even more challenging task" -> "an even more
% challenging task". NOTE(review): the comma in the title ("...goals,
% Analysing...") may be a transcription of a colon in the published title --
% confirm against the proceedings before changing.
@InProceedings{geipel06learning,
author = {Markus Geipel and Michael Beetz},
title = {Learning to shoot goals, Analysing the Learning
Process and the Resulting Policies},
editor = {Gerhard Lakemeyer and Elizabeth Sklar and Domenico
Sorenti and Tomoichi Takahashi},
note = {to be published},
year = {2006},
booktitle = {RoboCup-2006: Robot Soccer World Cup X},
organization = {RoboCup},
publisher = {Springer Verlag, Berlin},
bib2html_pubtype ={Refereed Conference Paper},
bib2html_rescat = {Robocup},
bib2html_groups = {IAS},
abstract = {Reinforcement learning is a very general
unsupervised learning mechanism. Due to its
generality reinforcement learning does not scale
very well for tasks that involve inferring
subtasks. In particular when the subtasks are
dynamically changing and the environment is
adversarial. One of the most challenging
reinforcement learning tasks so far has been the 3
to 2 keepaway task in the RoboCup simulation
league. In this paper we apply reinforcement
learning to an even more challenging task: attacking
the opponents goal. The main contribution of this
paper is the empirical analysis of a portfolio of
mechanisms for scaling reinforcement learning
towards learning attack policies in simulated robot
soccer.}
}
% 2007 paper on GrAM (Grounded Action Models), combining knowledge
% representation and data mining; venue recorded as the volume
% "Towards Affordance-based Robot Control".
@InProceedings{hoyningen07gram,
author = {Nicolai v. Hoyningen-Huene and Bernhard Kirchlechner and Michael Beetz},
title = {{GrAM}: Reasoning with Grounded Action Models by Combining Knowledge Representation and Data Mining},
booktitle = {Towards Affordance-based Robot Control},
year = {2007},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Game analysis},
bib2html_groups = {IAS,FIPM,Aspogamo},
bib2html_funding = {FIPM},
bib2html_domain = {Soccer Analysis},
bib2html_keywords = {},
abstract = {
This paper proposes GrAM (Grounded Action Models), a novel
integration of actions and action models into the knowledge
representation and inference mechanisms of agents. In GrAM action
models accord to agent behavior and can be specified explicitly and implicitly. The
explicit representation is an action class specific set of Markov
logic rules that predict action properties. Stated implicitly an
action model defines a data mining problem that, when executed,
computes the model's explicit representation. When inferred from
an implicit representation the prediction rules predict typical
behavior and are learned from a set of training examples, or, in
other words, grounded in the respective experience of the agents.
Therefore, GrAM allows for the functional and thus adaptive specification of concepts
such as the class of situations in which a special action is typically
executed successfully or the concept of agents that tend to execute certain
kinds of actions.
GrAM represents actions and their models using an upgrading of the representation
language OWL and equips the Java Theorem Prover (JTP), a hybrid reasoner for OWL, with
additional mechanisms that allow for the automatic acquisition of
action models and solving a variety of inference tasks for actions, action models and functional descriptions.
}
}
% ICRA 2007 conference paper on seamless execution of action sequences.
% Fix: page range uses an en-dash ("3687--3692") instead of a single hyphen,
% the correct BibTeX form for ranges.
@InProceedings{stulp07seamless,
author = {Freek Stulp and Wolfram Koska and Alexis Maldonado and Michael Beetz},
title = {Seamless Execution of Action Sequences},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
year = {2007},
pages = {3687--3692},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Models, Learning, Planning, Action},
bib2html_groups = {AGILO},
bib2html_funding = {AGILO},
bib2html_keywords = {},
abstract = {
One of the most notable and recognizable features of robot motion is the abrupt
transitions between actions in action sequences. In contrast, humans and animals
perform sequences of actions efficiently, and with seamless transitions between
subsequent actions. This smoothness is not a goal in itself, but a side-effect
of the evolutionary optimization of other performance measures.
In this paper, we argue that such jagged motion is an inevitable consequence of
the way human designers and planners reason about abstract actions. We then
present subgoal refinement, a procedure that optimizes action sequences. Subgoal
refinement determines action parameters that are not relevant to why the action
was selected, and optimizes these parameters with respect to expected execution
performance. This performance is computed using action models, which are learned
from observed experience. We integrate subgoal refinement in an existing
planning system, and demonstrate how requiring optimal performance causes smooth
motion in three robotic domains.
}
}
% IJCAI 2007 conference paper on the ASPOGAMO visual tracking system for
% football broadcasts.
@InProceedings{beetz07visually,
author = {Michael Beetz and Suat Gedikli and Jan Bandouch and Bernhard Kirchlechner and Nico von Hoyningen-Huene and Alexander Perzylo},
title = {Visually Tracking Football Games Based on TV Broadcasts},
year = {2007},
booktitle = {Proceedings of the Twentieth International Joint Conference on Artificial Intelligence (IJCAI)},
bib2html_pubtype ={Refereed Conference Paper},
bib2html_rescat ={Game analysis},
bib2html_groups ={IAS, FIPM, Aspogamo},
abstract = {This paper describes ASPOGAMO, a visual tracking system that
determines the coordinates and trajectories of football players in
camera view based on TV broadcasts. To do so, ASPOGAMO solves a
complex probabilistic estimation problem that consists of three
subproblems that interact in subtle ways: the estimation of the
camera direction and zoom factor, the tracking and smoothing of
player routes, and the disambiguation of tracked players after
occlusions. The paper concentrates on system aspects that make it
suitable for operating under unconstrained conditions and in
(almost) realtime. We report on results obtained in a public
demonstration at RoboCup 2006 where we conducted extensive experiments
with real data from live coverage of World Cup 2006 games in Germany.}
}
% ICVS 2007 conference paper on the adaptive ASpoGAMo vision system for
% tracking soccer players.
@InProceedings{gedikli07adaptive,
author = {Suat Gedikli and Jan Bandouch and Nico von Hoyningen-Huene and Bernhard Kirchlechner and Michael Beetz},
title = {An Adaptive Vision System for Tracking Soccer Players from Variable Camera Settings},
booktitle = {Proceedings of the 5th International Conference on Computer Vision Systems (ICVS)},
year = {2007},
bib2html_pubtype ={Refereed Conference Paper},
bib2html_rescat ={Game analysis},
bib2html_groups ={IAS, FIPM, Aspogamo},
abstract = {In this paper we present ASpoGAMo, a vision system capable of
estimating motion trajectories of soccer players taped on video.
The system performs well in a multitude of application scenarios
because of its adaptivity to various camera setups, such as single
or multiple camera settings, static or dynamic ones. Furthermore,
ASpoGAMo can directly process image streams taken from TV broadcast,
and extract all valuable information despite scene interruptions
and cuts between different cameras. The system achieves a high level
of robustness through the use of modelbased vision algorithms for
camera estimation and player recognition and a probabilistic
multi-player tracking framework capable of dealing with occlusion
situations typical in team-sports. The continuous interplay between
these submodules is adding to both the reliability and the
efficiency of the overall system.}
}
% NOTE(review): page range corrected to use the en-dash form "25--36"
% (a single hyphen in pages is a typographical error in BibTeX).
@article{buss07cotesysb,
  author           = {Martin Buss and Michael Beetz and Dirk Wollherr},
  title            = {{CoTeSys} --- Cognition for Technical Systems},
  journal          = {International Journal of Assistive Robotics and Mechatronics},
  year             = {2007},
  volume           = {8},
  number           = {4},
  pages            = {25--36},
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {unspecified},
  bib2html_groups  = {IAS},
  abstract         = {The CoTeSys cluster of excellence investigates cognition
    for technical systems such as vehicles, robots, and factories. Cognitive
    technical systems (CTS) are information processing systems equipped with
    artificial sensors and actuators, integrated and embedded into physical
    systems, and acting in a physical world. They differ from other technical
    systems as they perform cognitive control and have cognitive capabilities.
    Cognitive control orchestrates reflexive and habitual behavior in accord
    with longterm intentions. Cognitive capabilities such as perception,
    reasoning, learning, and planning turn technical systems into systems that
    ``know what they are doing''. The cognitive capabilities will result in
    systems of higher reliability, flexibility, adaptivity, and better
    performance. They will be easier to interact and cooperate with.}
}
@inproceedings{Matthias07TEI,
  author           = {Matthias Kranz and Alexis Maldonado and Benedikt Hoernler and Radu Bogdan Rusu and Michael Beetz and Gerhard Rigoll and Albrecht Schmidt},
  title            = {{A Knife and a Cutting Board as Implicit User Interface - Towards Context-Aware Kitchen Utilities}},
  booktitle        = {Proceedings of First International Conference on Tangible and Embedded Interaction 2007, TEI 2007, February 15-17 Baton Rouge, Louisiana, USA},
  year             = {2007},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat  = {Activity Recognition},
  bib2html_groups  = {IAS},
}
@inproceedings{Matthias07INSS,
  author           = {Matthias Kranz and Alexis Maldonado and Radu Bogdan Rusu and Benedikt Hoernler and Gerhard Rigoll and Michael Beetz and Albrecht Schmidt},
  title            = {Sensing Technologies and the Player-Middleware for Context-Awareness in Kitchen Environments},
  booktitle        = {Proceedings of Fourth International Conference on Networked Sensing Systems, June 6 - 8, 2007, Braunschweig, Germany},
  year             = {2007},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat  = {Activity Recognition},
  bib2html_groups  = {IAS},
}
@inproceedings{Rusu07ICRA_NRS,
  author           = {Radu Bogdan Rusu and Alexis Maldonado and Michael Beetz and Brian Gerkey},
  title            = {{Extending Player/Stage/Gazebo towards Cognitive Robots Acting in Ubiquitous Sensor-equipped Environments}},
  booktitle        = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) Workshop for Network Robot Systems, 2007, April 14, Rome, Italy},
  year             = {2007},
  abstract         = {Standardized middleware for autonomous robot control has
    proven itself to enable faster deployment of robots, to make robot control
    code more interchangeable, and experiments easier to replicate.
    Unfortunately, the support provided by current middleware is in most cases
    limited to what current robots do: navigation. However, as we tackle more
    ambitious service robot applications, more comprehensive middleware support
    is needed. We increasingly need the middleware to support ubiquitous sensing
    infrastructures, robot manipulation tasks, and cognitive capabilities. In
    this paper we describe and discuss current extensions of the
    Player/Stage/Gazebo (P/S/G) middleware, one of the most widespread used
    robot middlewares, of which we are active developers, that satisfy these
    requirements.},
  bib2html_pubtype = {Refereed Workshop Paper},
  bib2html_rescat  = {Robotics},
  bib2html_groups  = {IAS},
}
@inproceedings{buss07cotesys,
  author           = {Martin Buss and Michael Beetz and Dirk Wollherr},
  title            = {{CoTeSys} --- Cognition for Technical Systems},
  booktitle        = {Proceedings of the 4th COE Workshop on Human Adaptive Mechatronics (HAM)},
  year             = {2007},
  bib2html_pubtype = {Refereed Workshop Paper},
  bib2html_rescat  = {unspecified},
  bib2html_groups  = {IAS},
  abstract         = {The CoTeSys cluster of excellence investigates cognition
    for technical systems such as vehicles, robots, and factories. Cognitive
    technical systems (CTS) are information processing systems equipped with
    artificial sensors and actuators, integrated and embedded into physical
    systems, and acting in a physical world. They differ from other technical
    systems as they perform cognitive control and have cognitive capabilities.
    Cognitive control orchestrates reflexive and habitual behavior in accord
    with longterm intentions. Cognitive capabilities such as perception,
    reasoning, learning, and planning turn technical systems into systems that
    ``know what they are doing''. The cognitive capabilities will result in
    systems of higher reliability, flexibility, adaptivity, and better
    performance. They will be easier to interact and cooperate with.}
}
@inproceedings{beetz07assistive,
  author           = {Michael Beetz and Jan Bandouch and Alexandra Kirsch and Alexis Maldonado and Armin M{\"u}ller and Radu Bogdan Rusu},
  title            = {The Assistive Kitchen --- A Demonstration Scenario for Cognitive Technical Systems},
  booktitle        = {Proceedings of the 4th COE Workshop on Human Adaptive Mechatronics (HAM)},
  year             = {2007},
  bib2html_pubtype = {Workshop Paper},
  bib2html_rescat  = {Planning,Learning,Action,Perception,Models,Reasoning},
  bib2html_groups  = {Cogito,Memoman,Cogman},
  abstract         = {This paper introduces the Assistive Kitchen as a
    comprehensive demonstration and challenge scenario for technical cognitive
    systems. We describe its hardware and software infrastructure. Within the
    Assistive Kitchen application, we select particular domain activities as
    research subjects and identify the cognitive capabilities needed for
    perceiving, interpreting, analyzing, and executing these activities as
    research foci. We conclude by outlining open research issues that need to
    be solved to realize the scenarios successfully.}
}
@inproceedings{kirsch07training,
  author           = {Alexandra Kirsch and Michael Beetz},
  title            = {Training on the Job --- Collecting Experience with Hierarchical Hybrid Automata},
  booktitle        = {Proceedings of the 30th German Conference on Artificial Intelligence (KI-2007)},
  editor           = {J. Hertzberg and M. Beetz and R. Englert},
  year             = {2007},
  pages            = {473--476},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Learning,Models,Representation},
  bib2html_groups  = {Cogito},
  abstract         = {We propose a novel approach to experience collection for
    autonomous service robots performing complex activities. This approach
    enables robots to collect data for many learning problems at a time,
    abstract it and transform it into information specific to the learning
    tasks and thereby speeding up the learning process. The approach is based
    on the concept of hierarchical hybrid automata, which are used as
    transparent and expressive representational mechanisms that allow for the
    specification of these experience related capabilities independent of the
    program itself. The suitability of the approach is demonstrated through
    experiments in which a robot doing household chore performs
    experience-based learning.}
}
% NOTE(review): month now uses the standard BibTeX macro "sep"
% (was the quoted string {September}, which styles cannot abbreviate).
@inproceedings{mueller07transformational,
  author           = {Armin M{\"u}ller and Alexandra Kirsch and Michael Beetz},
  title            = {Transformational Planning for Everyday Activity},
  booktitle        = {Proceedings of the 17th International Conference on Automated Planning and Scheduling (ICAPS'07)},
  year             = {2007},
  month            = sep,
  address          = {Providence, USA},
  pages            = {248--255},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Planning},
  bib2html_groups  = {Cogito},
  abstract         = {We propose an approach to transformational planning and
    learning of everyday activity. This approach is targeted at autonomous
    robots that are to perform complex activities such as household chore. Our
    approach operates on flexible and reliable plans suited for long-term
    activity and applies plan transformations that generate competent and
    high-performance robot behavior. We show as a proof of concept that general
    transformation rules can be formulated that achieve substantially and
    significantly improved performance using table setting as an example.}
}
% NOTE(review): month now uses the standard BibTeX macro "sep"
% (was the quoted string {September}).
@inproceedings{mueller07towards,
  author           = {Armin M{\"u}ller and Michael Beetz},
  title            = {Towards a Plan Library for Household Robots},
  booktitle        = {Proceedings of the ICAPS'07 Workshop on Planning and Plan Execution for Real-World Systems: Principles and Practices for Planning in Execution},
  year             = {2007},
  month            = sep,
  address          = {Providence, USA},
  bib2html_pubtype = {Workshop Paper},
  bib2html_rescat  = {Planning},
  bib2html_groups  = {Cogito},
  abstract         = {This paper describes the structure for a plan library of
    a service robot intended to perform household chores. The plans in the
    library are particularly designed to enable reliable, flexible, and
    efficient robot control, to learn control heuristics, to generalize the
    plans to cope with new objects and situations. We believe that plans with
    these characteristics are required for competent autonomous robots
    performing skilled manipulation tasks in human environments.}
}
% NOTE(review): fixed missing space in the abstract ("example.We" -> "example. We").
@inproceedings{beetz07cotesys,
  author           = {Michael Beetz and Martin Buss and Dirk Wollherr},
  title            = {Cognitive Technical Systems --- What Is the Role of Artificial Intelligence?},
  booktitle        = {Proceedings of the 30th German Conference on Artificial Intelligence (KI-2007)},
  editor           = {J. Hertzberg and M. Beetz and R. Englert},
  year             = {2007},
  pages            = {19--42},
  note             = {Invited paper},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat  = {unspecified},
  bib2html_groups  = {IAS},
  abstract         = {The newly established cluster of excellence COTESYS
    investigates the realization of cognitive capabilities such as perception,
    learning, reasoning, planning, and execution for technical systems
    including humanoid robots, flexible manufacturing systems, and autonomous
    vehicles. In this paper we describe cognitive technical systems using a
    sensor-equipped kitchen with a robotic assistant as an example. We will
    particularly consider the role of Artificial Intelligence in the research
    enterprise. Key research foci of Artificial Intelligence research in
    COTESYS include (*) symbolic representations grounded in perception and
    action, (*) first-order probabilistic representations of actions, objects,
    and situations, (*) reasoning about objects and situations in the context
    of everyday manipulation tasks, and (*) the representation and revision of
    robot plans for everyday activity.}
}
% NOTE(review): fixed abstract typo "objects of daily use such glasses"
% -> "such as glasses".
@inproceedings{rusu07towards,
  author           = {Radu Bogdan Rusu and Nico Blodow and Zoltan-Csaba Marton and Alina Soos and Michael Beetz},
  title            = {Towards 3D Object Maps for Autonomous Household Robots},
  booktitle        = {Proceedings of the 20th IEEE International Conference on Intelligent Robots and Systems (IROS)},
  year             = {2007},
  address          = {San Diego, CA, USA},
  abstract         = {This paper describes a mapping system that acquires 3D
    object models of man-made indoor environments such as kitchens. The system
    segments and geometrically reconstructs cabinets with doors, tables,
    drawers, and shelves, objects that are important for robots retrieving and
    manipulating objects in these environments. The system also acquires models
    of objects of daily use such as glasses, plates, and ingredients. The
    models enable the recognition of the objects in cluttered scenes and the
    classification of newly encountered objects. Key technical contributions
    include (1)~a robust, accurate, and efficient algorithm for constructing
    complete object models from 3D point clouds constituting partial object
    views, (2)~feature-based recognition procedures for cabinets, tables, and
    other task-relevant furniture objects, and (3)~automatic inference of
    object instance and class signatures for objects of daily use that enable
    robots to reliably recognize the objects in cluttered and real task
    contexts. We present results from the sensor-based mapping of a real
    kitchen.},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception, Models},
  bib2html_groups  = {Cop, EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household}
}
% NOTE(review): replaced the nonstandard "ee" field (a dx.doi.org resolver URL)
% with a bare "doi" field, and normalised the entry-type casing.
@inproceedings{jain07extending,
  author           = {Dominik Jain and Bernhard Kirchlechner and Michael Beetz},
  title            = {{Extending Markov Logic to Model Probability Distributions in Relational Domains}},
  booktitle        = {KI 2007: Advances in Artificial Intelligence, 30th Annual German Conference on AI},
  publisher        = {Springer},
  series           = {Lecture Notes in Computer Science},
  volume           = {4667},
  year             = {2007},
  pages            = {129--143},
  isbn             = {978-3-540-74564-8},
  doi              = {10.1007/978-3-540-74565-5_12},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat  = {unspecified},
  bib2html_groups  = {IAS, ProbCog}
}
@inproceedings{Marton08KitchenApplications,
  author           = {Zoltan Csaba Marton and Nico Blodow and Mihai Dolha and Moritz Tenorth and Radu Bogdan Rusu and Michael Beetz},
  title            = {{Autonomous Mapping of Kitchen Environments and Applications}},
  booktitle        = {Proceedings of the 1st International Workshop on Cognition for Technical Systems, Munich, Germany, 6-8 October},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Rusu08IROS-Mapping-Stereo,
  author           = {Radu Bogdan Rusu and Aravind Sundaresan and Benoit Morisset and Motilal Agrawal and Michael Beetz and Kurt Konolige},
  title            = {{Realtime Extended 3D Reconstruction from Stereo for Navigation}},
  booktitle        = {Proceedings of the 21st IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) Workshop on 3D Mapping, Nice, France, September 26},
  year             = {2008},
  note             = {Invited paper},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Rusu08IROS-Mapping-Urban,
  author           = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Michael Beetz},
  title            = {{Interpretation of Urban Scenes based on Geometric Features}},
  booktitle        = {Proceedings of the 21st IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) Workshop on 3D Mapping, Nice, France, September 26},
  year             = {2008},
  note             = {Invited paper},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception, Models},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
% NOTE(review): month normalised to the standard macro "nov"
% (was the invalid value {30 November}; the issue date was 30 November 2008).
@article{Rusu08RAS2,
  author           = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Mihai Dolha and Michael Beetz},
  title            = {{Towards 3D Point Cloud Based Object Maps for Household Environments}},
  journal          = {Robotics and Autonomous Systems Journal (Special Issue on Semantic Knowledge in Robotics)},
  volume           = {56},
  number           = {11},
  pages            = {927--941},
  month            = nov,
  year             = {2008},
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {Perception, Models},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Rusu08ICIRA,
  author           = {Radu Bogdan Rusu and Aravind Sundaresan and Benoit Morisset and Motilal Agrawal and Michael Beetz},
  title            = {{Leaving Flatland: Realtime 3D Stereo Semantic Reconstruction}},
  booktitle        = {Proceedings of the International Conference on Intelligent Robotics and Applications (ICIRA) 2008, October 15-17, Wuhan, China},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Rusu08ICARCV,
  author           = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Michael Beetz},
  title            = {{Learning Informative Point Classes for the Acquisition of Object Model Maps}},
  booktitle        = {Proceedings of the 10th International Conference on Control, Automation, Robotics and Vision (ICARCV), Hanoi, Vietnam, December 17-20},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception, Models},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
% NOTE(review): page range corrected to use "--" (was "1-8").
@inproceedings{beetz08assistive,
  author           = {Michael Beetz and Freek Stulp and Bernd Radig and Jan Bandouch and Nico Blodow and Mihai Dolha and Andreas Fedrizzi and Dominik Jain and Uli Klank and Ingo Kresse and Alexis Maldonado and Zoltan Marton and Lorenz M{\"o}senlechner and Federico Ruiz and Radu Bogdan Rusu and Moritz Tenorth},
  title            = {{The Assistive Kitchen -- A Demonstration Scenario for Cognitive Technical Systems}},
  booktitle        = {IEEE 17th International Symposium on Robot and Human Interactive Communication (RO-MAN), Muenchen, Germany},
  pages            = {1--8},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception, Planning, Learning},
  bib2html_groups  = {Memoman, Cogito, EnvMod, Cogman, K4C, ProbCog},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  note             = {Invited paper.},
}
% NOTE(review): accent wrapped as a BibTeX special character, Schub{\"o}
% (was Schub\"o, which sorts and labels incorrectly in classic BibTeX).
@inproceedings{schuboe08subsequent,
  author           = {Anna Schub{\"o} and Alexis Maldonado and Sonja Stork and Michael Beetz},
  title            = {Subsequent Actions Influence Motor Control Parameters of a Current Grasping Action},
  booktitle        = {IEEE 17th International Symposium on Robot and Human Interactive Communication (RO-MAN), Muenchen, Germany},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Action},
  bib2html_groups  = {Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
% NOTE(review): dropped the empty bib2html_keywords = {} field
% (empty fields trigger BibTeX warnings and carry no information).
@inproceedings{stulp08learning,
  author           = {Freek Stulp and Michael Beetz},
  title            = {Learning Predictive Knowledge to Optimize Robot Motor Control},
  booktitle        = {International Conference on Cognitive Systems (CogSys 2008)},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Planning, Learning},
  bib2html_groups  = {AGILO},
  bib2html_funding = {AGILO},
}
% NOTE(review): month now uses the standard macro "jun" (was {June});
% dropped the empty bib2html_keywords = {} field.
@article{stulp08refining,
  author           = {Freek Stulp and Michael Beetz},
  title            = {Refining the execution of abstract actions with learned action models},
  journal          = {Journal of Artificial Intelligence Research (JAIR)},
  year             = {2008},
  volume           = {32},
  month            = jun,
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {Planning, Learning},
  bib2html_groups  = {AGILO},
  bib2html_funding = {AGILO},
}
@inproceedings{bandouch08bmvc,
  author           = {Jan Bandouch and Florian Engstler and Michael Beetz},
  title            = {{Evaluation of Hierarchical Sampling Strategies in 3D Human Pose Estimation}},
  booktitle        = {Proceedings of the 19th British Machine Vision Conference (BMVC)},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {Memoman},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  abstract         = {A common approach to the problem of 3D human pose
    estimation from video is to recursively estimate the most likely pose via
    particle filtering. However, standard particle filtering methods fail the
    task due to the high dimensionality of the 3D articulated human pose space.
    In this paper we present a thorough evaluation of two variants of particle
    filtering, namely Annealed Particle Filtering and Partitioned Sampling
    Particle Filtering, that have been proposed to make the problem feasible by
    exploiting the hierarchical structures inside the pose space. We evaluate
    both methods in the context of markerless model-based 3D motion capture
    using silhouette shapes from multiple cameras. For that we created a
    simulation from ground truth sequences of human motions, which enables us
    to focus our evaluation on the sampling capabilities of the approaches,
    i.e. on how efficient particles are spread towards the modes of the
    distribution. We show the behaviour with respect to the amount of cameras
    used, the amount of particles used, as well as the dimensionality of the
    search space. Especially the performance when using more complex human
    models (40 DOF and above) that are able to capture human movements with
    higher precision compared to previous approaches is of interest in this
    work. In summary, we show that both methods have complementary strengths,
    and propose a combined method that is able to perform the tracking task
    with higher robustness despite reduced computational effort.}
}
@inproceedings{bandouch08amdo,
  author           = {Jan Bandouch and Florian Engstler and Michael Beetz},
  title            = {Accurate Human Motion Capture Using an Ergonomics-Based Anthropometric Human Model},
  booktitle        = {Proceedings of the Fifth International Conference on Articulated Motion and Deformable Objects (AMDO)},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {Memoman},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  abstract         = {In this paper we present our work on markerless
    model-based 3D human motion capture using multiple cameras. We use an
    industry proven anthropometric human model that was modeled taking
    ergonomic considerations into account. The outer surface consists of a
    precise yet compact 3D surface mesh that is mostly rigid on body part level
    apart from some small but important torsion deformations. Benefits are the
    ability to capture a great amount of possible human appearances with high
    accuracy while still having a simple to use and computationally efficient
    model. We have introduced special optimizations such as caching into the
    model to improve its performance in tracking applications. Available force
    and comfort measures within the model provide further opportunities for
    future research. 3D articulated pose estimation is performed in a Bayesian
    framework, using a set of hierarchically coupled local particle filters for
    tracking. This makes it possible to sample efficiently from the high
    dimensional space of articulated human poses without constraining the
    allowed movements. Sequences of tracked upper-body as well as full-body
    motions captured by three cameras show promising results. Despite the high
    dimensionality of our model (51 DOF) we succeed at tracking using only
    silhouette overlap as weighting function due to the precise outer
    appearance of our model and the hierarchical decomposition.}
}
@inproceedings{Rusu08ROMAN,
  author           = {Radu Bogdan Rusu and Jan Bandouch and Zoltan Csaba Marton and Nico Blodow and Michael Beetz},
  title            = {{Action Recognition in Intelligent Environments using Point Cloud Features Extracted from Silhouette Sequences}},
  booktitle        = {IEEE 17th International Symposium on Robot and Human Interactive Communication (RO-MAN), Muenchen, Germany},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {Memoman, EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  abstract         = {In this paper we present our work on human action
    recognition in intelligent environments. We classify actions by looking at
    a time-sequence of silhouettes extracted from various camera images. By
    treating time as the third spatial dimension we generate so-called
    space-time shapes that contain rich information about the actions. We
    propose a novel approach for recognizing actions, by representing the
    shapes as 3D point clouds and estimating feature histograms for them.
    Preliminary results show that our method robustly derives different classes
    of actions, even in the presence of large variability in the data, coming
    from different persons at different time intervals.}
}
% NOTE(review): restored the word "knowledge" dropped from the abstract
% ("declarative, procedural and predictive [knowledge] to generate"),
% matching the phrasing of the title.
@article{stulp08combining,
  author           = {Freek Stulp and Michael Beetz},
  title            = {Combining Declarative, Procedural and Predictive Knowledge to Generate and Execute Robot Plans Efficiently and Robustly},
  journal          = {Robotics and Autonomous Systems Journal (Special Issue on Semantic Knowledge)},
  year             = {2008},
  bib2html_groups  = {IAS},
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {Planning, Learning},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  abstract         = {One of the main challenges in motor control is expressing
    high-level goals in terms of low-level actions. To do so effectively, motor
    control systems must reason about actions at different levels of
    abstraction. Grounding high-level plans in low-level actions is essential
    semantic knowledge for plan-based control of real robots. We present a
    robot control system that uses declarative, procedural and predictive
    knowledge to generate, execute and optimize plans. Declarative knowledge is
    represented in PDDL, durative actions constitute procedural knowledge, and
    predictive knowledge is learned by observing action executions. We
    demonstrate how learned predictive knowledge enables robots to autonomously
    optimize plan execution with respect to execution duration and robustness
    in real-time. The approach is evaluated in two different robotic domains.}
}
@article{Rusu08RAS,
  author           = {Radu Bogdan Rusu and Brian Gerkey and Michael Beetz},
  title            = {{Robots in the kitchen: Exploiting ubiquitous sensing and actuation}},
  journal          = {Robotics and Autonomous Systems Journal (Special Issue on Network Robot Systems)},
  year             = {2008},
  abstract         = {Our goal is to develop intelligent service robots that
    operate in standard human environments, automating common tasks. In pursuit
    of this goal, we follow the ubiquitous robotics paradigm, in which
    intelligent perception and control are combined with ubiquitous computing.
    By exploiting sensors and effectors in its environment, a robot can perform
    more complex tasks without becoming overly complex itself. Following this
    insight, we have developed a service robot that operates autonomously in a
    sensor-equipped kitchen. The robot learns from demonstration and performs
    sophisticated tasks in concert with the network of devices in its
    environment. We report on the design, implementation, and usage of this
    system, which is freely available for use and improvement by others in the
    research community.},
  bib2html_groups  = {EnvMod},
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {Perception},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
% NOTE(review): removed a stray sentence break in the abstract
% ("datasets acquired. / from laser sensors" -> "datasets acquired from laser sensors").
@inproceedings{Rusu08IAS,
  author           = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Michael Beetz},
  title            = {{Persistent Point Feature Histograms for 3D Point Clouds}},
  booktitle        = {Proceedings of the 10th International Conference on Intelligent Autonomous Systems (IAS-10), Baden-Baden, Germany},
  year             = {2008},
  abstract         = {This paper proposes a novel way of characterizing the
    local geometry of 3D points, using persistent feature histograms. The
    relationships between the neighbors of a point are analyzed and the
    resulted values are stored in a 16-bin histogram. The histograms are pose
    and point cloud density invariant and cope well with noisy datasets. We
    show that geometric primitives have unique signatures in this feature
    space, preserved even in the presence of additive noise. To extract a
    compact subset of points which characterizes a point cloud dataset, we
    perform an in-depth analysis of all point feature histograms using
    different distance metrics. Preliminary results show that point clouds can
    be roughly segmented based on the uniqueness of geometric primitives
    feature histograms. We validate our approach on datasets acquired from
    laser sensors in indoor (kitchen) environments.},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Rusu08IROS-2,
  author           = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Mihai Emanuel Dolha and Michael Beetz},
  title            = {{Functional Object Mapping of Kitchen Environments}},
  booktitle        = {Proceedings of the 21st IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Nice, France, September 22-26},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception,Models},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Rusu08IROS-1,
  author           = {Radu Bogdan Rusu and Nico Blodow and Zoltan Csaba Marton and Michael Beetz},
  title            = {{Aligning Point Cloud Views using Persistent Feature Histograms}},
  booktitle        = {Proceedings of the 21st IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Nice, France, September 22-26},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
}
@inproceedings{Ruehr08ETFA,
  author           = {Thomas R{\"u}hr and Dejan Pangercic and Michael Beetz},
  title            = {{Structured Reactive Controllers and Transformational Planning for Manufacturing}},
  booktitle        = {Proceedings of the 13th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA), Hamburg, Germany, September 15-18},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Planning},
  bib2html_groups  = {Cogmash},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Cognitive Factory}
}
@inproceedings{Pangercic08ETFA,
  author           = {Dejan Pangercic and Radu Bogdan Rusu and Michael Beetz},
  title            = {{3D-Based Monocular SLAM for Mobile Agents Navigating in Indoor Environments}},
  booktitle        = {Proceedings of the 13th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA), Hamburg, Germany, September 15-18},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Perception, Action},
  bib2html_groups  = {Cop, Cogmash},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household, Cognitive Factory}
}
@inproceedings{Cogmash08Cotesys,
  author           = {M. F. Z{\"a}h and M. Beetz and K. Shea and G. Reinhart and O. Stursberg and M. Ostgathe and C. Lau and C. Ertelt and D. Pangercic and Thomas R{\"u}hr and H. Ding and T. Paschedag},
  title            = {An Integrated Approach to Realize the Cognitive Machine Shop},
  booktitle        = {Proceedings of the 1st International Workshop on Cognition for Technical Systems, M{\"u}nchen, Germany, 6-8 October},
  year             = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Planning, Perception, Action},
  bib2html_groups  = {Cogmash},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Cognitive Factory}
}
@inproceedings{tenorth08cotesys,
  author           = {Moritz Tenorth and Michael Beetz},
  title            = {Towards Practical and Grounded Knowledge Representation Systems for Autonomous Household Robots},
  booktitle        = {Proceedings of the 1st International Workshop on Cognition for Technical Systems, M{\"u}nchen, Germany, 6-8 October},
  year             = {2008},
  bib2html_pubtype = {Workshop Paper},
  bib2html_rescat  = {Representation},
  bib2html_groups  = {K4C},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  abstract         = {Mobile household robots need much knowledge about
    objects, places and actions when performing more and more complex tasks.
    They must be able to recognize objects, know what they are and how they can
    be used. This knowledge can often be specified more easily in terms of
    action-related concepts than by giving declarative descriptions of the
    appearance of objects. Defining chairs as objects to sit on, for instance,
    is much more natural than describing how chairs in general look like.
    Having grounded symbolic models of its actions and related concepts allows
    the robot to reason about its activities and improve its problem solving
    performance. In order to use action-related concepts, the robot must be
    able to find them in its environment. We present a practical approach to
    robot knowledge representation that combines description logics knowledge
    bases with data mining and (self-) observation modules. The robot collects
    experiences while executing actions and uses them to learn models and
    aspects of action-related concepts grounded in its perception and action
    system. We demonstrate our approach by learning places that are involved in
    mobile robot manipulation actions.}
}
@InProceedings{jain08cotesys,
author = {Dominik Jain and Lorenz M{\"o}senlechner and Michael Beetz},
title = {{Equipping Robot Control Programs with First-Order Probabilistic Reasoning Capabilities}},
booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical Systems},
month = {6--8 October},
address = {M{\"u}nchen, Germany},
year = {2008},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Representation},
bib2html_groups = {ProbCog},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{moesenle08cotesys,
author = {Lorenz M{\"o}senlechner and Armin M{\"u}ller and Michael Beetz},
title = {High Performance Execution of Everyday Pick-and-Place Tasks by Integrating Transformation Planning and Reactive Execution},
booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical Systems},
month = {6--8 October},
address = {M{\"u}nchen, Germany},
year = {2008},
bib2html_pubtype = {Workshop Paper},
bib2html_groups = {Cogito},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Planning, Action},
bib2html_domain = {Assistive Household},
abstract = {We investigate the plan-based control of physically and sensorically
realistic simulated autonomous mobile robots performing everyday
pick-and-place tasks in human environments, such as table setting.
Our approach applies AI planning techniques to transform default
plans that can be inferred from instructions for activities of daily
life into flexible, high-performance robot plans. To find high
performance plans the planning system applies transformations such
as carrying plates to the table by stacking them or leaving cabinet doors
open while setting the table, which require substantial changes of
the control structure of the intended activities.
We argue and demonstrate that applying AI planning techniques
directly to concurrent reactive plan languages, instead of using
layered software architectures with different languages, enables the
robot action planner to achieve substantial performance improvements
(23\% - 45\% depending on the tasks). We also argue that the
transformation of concurrent reactive plans is necessary to obtain
the results. Our claims are supported by extensive empirical
investigations in realistic simulations.}
}
@incollection{wykowska09howhumans,
author = {Wykowska, Agnieszka and Maldonado, Alexis and Beetz, Michael and Schuboe, Anna},
affiliation = {Department of Experimental Psychology, Ludwig Maximilians Universit{\"a}t, M{\"u}nchen, Germany},
title = {How Humans Optimize Their Interaction with the Environment: The Impact of Action Context on Human Perception},
booktitle = {Progress in Robotics},
series = {Communications in Computer and Information Science},
editor = {Kim, Jong-Hwan and Ge, Shuzhi Sam and Vadakkepat, Prahlad and Jesse, Norbert and Al Manum, Abdullah and Puthusserypady K, Sadasivan and R{\"u}ckert, Ulrich and Sitte, Joaquin and Witkowski, Ulf and Nakatsu, Ryohei and Braunl, Thomas and Baltes, Jacky and Anderson, John and Wong, Ching-Chang and Verner, Igor and Ahlgren, David},
publisher = {Springer Berlin Heidelberg},
isbn = {978-3-642-03986-7},
keywords = {Computer Science},
pages = {162--172},
volume = {44},
doi = {10.1007/978-3-642-03986-7_19},
url = {http://dx.doi.org/10.1007/978-3-642-03986-7_19},
year = {2009}
}
@inproceedings{stulp09compactmodels,
  author           = {Freek Stulp and Erhan Oztop and Peter Pastor and Michael Beetz and Stefan Schaal},
  title            = {Compact Models of Motor Primitive Variations for Predictable Reaching and Obstacle Avoidance},
  booktitle        = {9th IEEE-RAS International Conference on Humanoid Robots},
  year             = {2009},
  bib2html_groups  = {Cogman},
  bib2html_rescat  = {Action},
  bib2html_pubtype = {Conference Paper},
}
@inproceedings{stulp09combining,
  author           = {Freek Stulp and Andreas Fedrizzi and Franziska Zacharias and Moritz Tenorth and Jan Bandouch and Michael Beetz},
  title            = {Combining Analysis, Imitation, and Experience-based Learning to Acquire a Concept of Reachability},
  booktitle        = {9th IEEE-RAS International Conference on Humanoid Robots},
  year             = {2009},
  pages            = {161--167},
  bib2html_groups  = {Cogman},
  bib2html_rescat  = {Learning, Action},
  bib2html_pubtype = {Conference Paper},
  bib2html_domain  = {Assistive Household},
  bib2html_funding = {CoTeSys},
}
@InProceedings{klank09searchspace,
author = {Ulrich Klank and Dejan Pangercic and Radu Bogdan Rusu and Michael Beetz},
title = {{Real-time CAD Model Matching for Mobile Manipulation and Grasping}},
booktitle = {9th IEEE-RAS International Conference on Humanoid Robots},
month = {December 7--10},
year = {2009},
address = {Paris, France},
pages = {290--296},
bib2html_groups = {Cop, EnvMod},
bib2html_rescat = {Perception, Models},
bib2html_pubtype = {Conference Paper},
bib2html_domain = {Assistive Household}
}
@Article{Beetz09AR,
author = {Michael Beetz and Freek Stulp and Piotr Esden-Tempski and Andreas Fedrizzi and Ulrich Klank and Ingo Kresse and Alexis Maldonado and Federico Ruiz},
title = {Generality and Legibility in Mobile Manipulation},
journal = {Autonomous Robots Journal (Special Issue on Mobile Manipulation)},
year = {2010},
volume = {28},
number = {1},
pages = {21--44},
bib2html_groups = {IAS, Cogman},
bib2html_pubtype = {Journal},
bib2html_rescat = {Mobile Manipulation},
bib2html_domain = {Assistive Household},
}
@InProceedings{Composite09IROS,
author = {Zoltan Csaba Marton and Radu Bogdan Rusu and Dominik Jain and Ulrich Klank and Michael Beetz},
title = {{Probabilistic Categorization of Kitchen Objects in Table Settings with a Composite Sensor}},
booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {4777--4784},
month = {October 11--15},
year = {2009},
address = {St. Louis, MO, USA},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod, ProbCog},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09IROS_ClosingLoop,
author = {Radu Bogdan Rusu and Ioan Alexandru Sucan and Brian Gerkey and Sachin Chitta and Michael Beetz and Lydia E. Kavraki},
title = {{Real-time Perception-Guided Motion Planning for a Personal Robot}},
booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 11--15},
year = {2009},
address = {St. Louis, MO, USA},
pages = {4245--4252},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09IROS_SemanticMaps,
author = {Radu Bogdan Rusu and Zoltan Csaba Marton and Nico Blodow and Andreas Holzbach and Michael Beetz},
title = {{Model-based and Learned Semantic Object Labeling in 3D Point Cloud Maps of Kitchen Environments}},
booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 11--15},
year = {2009},
address = {St. Louis, MO, USA},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09IROS_CloseHybrid,
author = {Radu Bogdan Rusu and Nico Blodow and Zoltan Csaba Marton and Michael Beetz},
title = {{Close-range Scene Segmentation and Reconstruction of 3D Point Cloud Maps for Mobile Manipulation in Human Environments}},
booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 11--15},
year = {2009},
address = {St. Louis, MO, USA},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09IROS_FPFH,
author = {Radu Bogdan Rusu and Andreas Holzbach and Nico Blodow and Michael Beetz},
title = {{Fast Geometric Point Labeling using Conditional Random Fields}},
booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 11--15},
year = {2009},
address = {St. Louis, MO, USA},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{09ETFA,
author = {Christoph Ertelt and Thomas R{\"u}hr and Dejan Pangercic and Kristina Shea and Michael Beetz},
title = {Integration of Perception, Global Planning and Local Planning in the Manufacturing Domain},
booktitle = {Proceedings of Emerging Technologies and Factory Automation (ETFA)},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Planning, Action},
bib2html_groups = {Cogmash},
bib2html_funding = {CoTeSys},
bib2html_domain = {Cognitive Factory},
}
@InProceedings{stulp09actionrelated,
author = {Freek Stulp and Andreas Fedrizzi and Michael Beetz},
title = {Action-Related Place-Based Mobile Manipulation},
booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2009},
pages = {3115--3120},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Learning, Planning},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Mac09ICSR,
author = {Florian Friesdorf and Dejan Pangercic and Heiner Bubb and Michael Beetz},
title = {Mutually Augmented Cognition},
booktitle = {Proceedings of the International Conference on Social Robotics (ICSR)},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Human-Robot Interaction},
bib2html_groups = {Other},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09ICAR,
author = {Radu Bogdan Rusu and Wim Meeussen and Sachin Chitta and Michael Beetz},
title = {{Laser-based Perception for Door and Handle Identification}},
booktitle = {Proceedings of the International Conference on Advanced Robotics (ICAR)},
month = {June 22--26},
year = {2009},
address = {Munich},
note = {Best Paper Award},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{09SceneDetection,
author = {Dejan Pangercic and Rok Tavcar and Moritz Tenorth and Michael Beetz},
title = {Visual Scene Detection and Interpretation using Encyclopedic Knowledge and Formal Description Logic},
booktitle = {Proceedings of the International Conference on Advanced Robotics (ICAR)},
year = {2009},
month = {June 22--26},
address = {Munich, Germany},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Reasoning},
bib2html_groups = {Cop, K4C},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{fedrizzi09transformational,
author = {Andreas Fedrizzi and Lorenz Moesenlechner and Freek Stulp and Michael Beetz},
title = {Transformational Planning for Mobile Manipulation based on Action-related Places},
booktitle = {Proceedings of the International Conference on Advanced Robotics (ICAR)},
year = {2009},
pages = {1--8},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Learning, Planning},
bib2html_groups = {Cogman, Cogito},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{stulp09learning,
author = {Freek Stulp and Andreas Fedrizzi and Michael Beetz},
title = {Learning and Performing Place-based Mobile Manipulation},
booktitle = {Proceedings of the 8th International Conference on Development and Learning (ICDL)},
year = {2009},
pages = {1--7},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Learning, Planning},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{stulp09compact,
author = {Freek Stulp and Ingo Kresse and Alexis Maldonado and Federico Ruiz and Andreas Fedrizzi and Michael Beetz},
title = {Compact Models of Human Reaching Motions for Robotic Control in Everyday Manipulation Tasks},
booktitle = {Proceedings of the 8th International Conference on Development and Learning (ICDL)},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Learning},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{LFlatland09ICRA,
author = {Benoit Morisset and Radu Bogdan Rusu and Aravind Sundaresan and Kris Hauser and Motilal Agrawal and Jean-Claude Latombe and Michael Beetz},
title = {{Leaving Flatland: Toward Real-Time 3D Navigation}},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
address = {Kobe, Japan},
month = {May 12--17},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@article{Rusu09LFlatland,
  author           = {Radu Bogdan Rusu and Aravind Sundaresan and Benoit Morisset and Kris Hauser and Motilal Agrawal and Jean-Claude Latombe and Michael Beetz},
  title            = {{Leaving Flatland: Efficient Real-Time 3D Navigation}},
  journal          = {Journal of Field Robotics (JFR)},
  year             = {2009},
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {Perception, Models, Planning},
  bib2html_groups  = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Other},
}
@InProceedings{Rusu09ICRA,
author = {Radu Bogdan Rusu and Nico Blodow and Michael Beetz},
title = {{Fast Point Feature Histograms (FPFH) for 3D Registration}},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
address = {Kobe, Japan},
month = {May 12--17},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Marton09ICRA,
author = {Zoltan Csaba Marton and Radu Bogdan Rusu and Michael Beetz},
title = {{On Fast Surface Reconstruction Methods for Large and Noisy Point Clouds}},
booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)},
address = {Kobe, Japan},
month = {May 12--17},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Blas09SafeProcess,
author = {Morten Rufus Blas and Radu Bogdan Rusu and Mogens Blanke and Michael Beetz},
title = {{Fault-tolerant 3D Mapping with Application to an Orchard Robot}},
booktitle = {Proceedings of the 7th IFAC International Symposium on Fault Detection, Supervision and Safety of Technical Processes (SAFEPROCESS'09)},
address = {Barcelona, Spain},
month = {June 30 -- July 3},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{klank09icra,
author = {Ulrich Klank and Muhammad Zeeshan Zia and Michael Beetz},
title = {{3D Model Selection from an Internet Database for Robotic Vision}},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2009},
pages = {2406--2411},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Models},
bib2html_groups = {Cop},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {We propose a new method for automatically accessing an internet
database of 3D models that are searchable only by their
user-annotated labels, for using them for vision and robotic
manipulation purposes. Instead of having only a local database
containing already seen objects, we want to use shared databases
available over the internet. This approach while having the
potential to dramatically increase the visual recognition capability
of robots, also poses certain problems, like wrong annotation due to
the open nature of the database, or overwhelming amounts of data
(many 3D models) or the lack of relevant data (no models matching a
specified label). To solve those problems we propose the following:
First, we present an outlier/inlier classification method for
reducing the number of results and discarding invalid 3D models that
do not match our query. Second, we utilize an approach from computer
graphics, the so called 'morphing', to this application to
specialize the models, in order to describe more objects. Third, we
search for 3D models using a restricted search space, as obtained
from our knowledge of the environment. We show our classification
and matching results and finally show how we can recover the correct
scaling with the stereo setup of our robot.}
}
@InProceedings{jain09icra,
author = {Dominik Jain and Lorenz M{\"o}senlechner and Michael Beetz},
title = {{Equipping Robot Control Programs with First-Order Probabilistic Reasoning Capabilities}},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
pages = {3626--3631},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Models},
bib2html_groups = {ProbCog},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{hoyninge09visapp,
author = {Nicolai von Hoyningen-Huene and Michael Beetz},
title = {{Rao-Blackwellized Resampling Particle Filter for Real-Time Player Tracking in Sports}},
booktitle = {Fourth International Conference on Computer Vision Theory and Applications (VISAPP)},
year = {2009},
pages = {464--470},
editor = {AlpeshKumar Ranchordas and Helder Araujo},
volume = {1},
address = {Lisboa, Portugal},
month = feb,
organization = {INSTICC},
publisher = {INSTICC press},
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Aspogamo},
bib2html_funding = {ASpoGAMo},
bib2html_domain = {Soccer Analysis},
abstract = {Tracking multiple targets with similiar appearance is a common task in computer vision applications, especially in sports games. We propose a Rao-Blackwellized Resampling Particle Filter (RBRPF) as an implementable real-time continuation of a state-of-the-art multi-target tracking method. Target configurations are tracked by sampling associations and solving single-target tracking problems by Kalman filters. As an advantage of the new method the independence assumption between data associations is relaxed to increase the robustness in the sports domain. Smart resampling and memoization is introduced to equip the tracking method with real-time capabilities in the first place. The probabilistic framework allows for consideration of appearance models and the fusion of different sensors. We demonstrate its applicability to real world applications by tracking soccer players captured by multiple cameras through occlusions in real-time.
}
}
@inproceedings{andreakis09wsom,
author = {Andreas Andreakis and Nicolai von Hoyningen-Huene and Michael Beetz},
title = {Incremental Unsupervised Time Series Analysis Using Merge Growing Neural Gas},
booktitle = {WSOM},
year = {2009},
pages = {10--18},
editor = {Jos{\'e} Carlos Pr{\'\i}ncipe and Risto Miikkulainen},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {5629},
isbn = {978-3-642-02396-5},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Time Series, Machine Learning, SOM},
bib2html_groups = {IAS},
bib2html_funding = {Aspogamo},
abstract = {We propose Merge Growing Neural Gas (MGNG) as a novel unsupervised growing neural network for time series analysis. MGNG combines the state-of-the-art recursive temporal context of Merge Neural Gas (MNG) with the incremental Growing Neural Gas (GNG) and enables thereby the analysis of unbounded and possibly infinite time series in an online manner.
There is no need to define the number of neurons a priori and only constant parameters are used. In order to focus on frequent sequence patterns an entropy maximization strategy is utilized which controls the creation of new neurons.
Experimental results demonstrate reduced time complexity compared to MNG while retaining similar accuracy in time series representation.
}
}
@InProceedings{hoyninge09accv,
author = {Nicolai von Hoyningen-Huene and Michael Beetz},
title = {{Robust real-time multiple target tracking}},
booktitle = {Ninth Asian Conference on Computer Vision (ACCV)},
year = {2009},
address = {Xi'an, China},
month = sep,
bib2html_pubtype = {Refereed Conference Paper},
bib2html_rescat = {Tracking},
bib2html_groups = {Aspogamo},
bib2html_funding = {ASpoGAMo},
bib2html_domain = {Soccer Analysis},
abstract = {We propose a novel efficient algorithm for robust tracking of a fixed number of targets in real-time with low failure rate. The method is an instance of Sequential Importance Resampling filters approximating the posterior of complete target configurations as a mixture of Gaussians. Using predicted target positions by Kalman filters, data associations are sampled for each measurement sweep according to their likelihood allowing to constrain the number of associations per target. Updated target configurations are weighted for resampling pursuant to their explanatory power for former positions and measurements. Fixed-lag of the resulting positions increases the tracking quality while smart resampling and memoization decrease the computational demand. A negative information handling exploits missing measurements for a target outside the monitored area.
We present both, qualitative and quantitative experimental results on two demanding real-world applications with occluded and highly confusable targets, demonstrating the robustness and real-time performance of our approach outperforming current state-of-the-art MCMC methods.
}
}
@article{beetz09ijcss,
  author           = {Michael Beetz and Nicolai von Hoyningen-Huene and Bernhard Kirchlechner and Suat Gedikli and Francisco Siles and Murat Durus and Martin Lames},
  title            = {{ASpoGAMo: Automated Sports Game Analysis Models}},
  journal          = {International Journal of Computer Science in Sport},
  year             = {2009},
  volume           = {8},
  number           = {1},
  bib2html_pubtype = {Journal},
  bib2html_rescat  = {Perception,Models,Representation},
  bib2html_groups  = {Aspogamo},
  bib2html_funding = {ASpoGAMo},
  bib2html_domain  = {Soccer Analysis},
  abstract         = {We propose automated sport game models as a novel technical
means for the analysis of team sport games. The basic idea is that
automated sport game models are based on a conceptualization of key
notions in such games and probabilistically derived from a
set of previous games. In contrast to existing approaches, automated
sport game models provide an analysis that is sensitive to their context
and go beyond simple statistical aggregations allowing objective,
transparent and meaningful concept definitions. Based on automatically gathered spatio-temporal data
by a computer vision system, a model hierarchy is built bottom up, where
context-sensitive concepts are instantiated by the application of machine learning techniques.
We describe the current state of implementation of the
ASpoGaMo system including its computer vision subsystem
that realizes the idea of automated sport game
models. Their usage is exemplified with an analysis of
the final of the soccer World Cup 2006.
}
}
@techreport{tenorth09ehowtr,
title = {{Understanding and Executing Instructions for Everyday Manipulation Tasks from the World Wide Web}},
author = {Moritz Tenorth and Daniel Nyga and Michael Beetz},
institution = {{IAS} group, Technische Universit{\"a}t M{\"u}nchen, Fakult{\"a}t f{\"u}r Informatik},
bib2html_pubtype = {Other},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Planning},
bib2html_domain = {Assistive Household},
year = {2009}
}
@techreport{jain09blns,
title = {{Bayesian Logic Networks}},
author = {Dominik Jain and Stefan Waldherr and Michael Beetz},
institution = {{IAS} Group, Fakult{\"a}t f{\"u}r Informatik, Technische Universit{\"a}t M{\"u}nchen},
bib2html_pubtype = {Other},
bib2html_groups = {ProbCog},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Planning},
bib2html_domain = {Assistive Household},
year = {2009}
}
@inproceedings{tenorth09knowledge,
  author    = {Moritz Tenorth and Michael Beetz},
  title     = {{KnowRob -- Knowledge Processing for Autonomous Personal Robots}},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems},
  pages     = {4261--4266},
  year      = {2009},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups  = {K4C},
  bib2html_funding = {CoTeSys},
  bib2html_rescat  = {Representation,Reasoning,Learning},
  bib2html_domain  = {Assistive Household},
  abstract  = { Mobile household robots need much knowledge about objects, places and actions
when performing more and more complex tasks. They must be able to recognize
objects, know what they are and how they can be used.
We present a practical approach to robot knowledge representation that
combines description logics knowledge bases with a rich environment model,
data mining and (self-) observation modules. The robot observes itself and
humans while executing actions and uses the collected experiences to learn
models of action-related concepts grounded in its perception and action system.
We demonstrate our approach by learning places that are involved in mobile
robot manipulation actions, by locating objects based on their function
and by supplying knowledge required for understanding underspecified task
descriptions as commonly given by humans. }
}
@InProceedings{zia09icar,
author = {Muhammad Zeeshan Zia and Ulrich Klank and Michael Beetz},
title = {{Acquisition of a Dense 3D Model Database for Robotic Vision}},
booktitle = {Proceedings of the International Conference on Advanced Robotics (ICAR)},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Models},
bib2html_groups = {Cop},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {Service Robots in real world environments need to have computer vision capability
for detecting a large class of objects. We discuss how freely available 3D model
databases can be used to enable robots to know the appearance of a wide variety of
objects in human environments with special application to our Assistive Kitchen.
However, the open and free nature of such databases pose problems for example the presence of
incorrectly annotated 3D models, or objects for which very few models
exist online. We have previously proposed techniques to automatically select
the useful models from the search result, and utilizing such models to
perform simple manipulation tasks. Here, we build upon that work, to describe a technique
based on Morphing to form new 3D models if we only have a few models corresponding to a label.
However, morphing in computer graphics requires a human operator and is computationally burdensome,
due to which we present our own automatic morphing technique. We also present a simple
technique to speed the matching process of 3D models against real scenes using Visibility culling.
This technique can potentially speed-up the matching process by 2-3 times while using less memory,
if we have some prior information model and world pose.}
}
@InProceedings{moesenle09icaps,
author = {Lorenz M{\"o}senlechner and Michael Beetz},
title = {Using Physics- and Sensor-based Simulation for High-fidelity Temporal Projection of Realistic Robot Behavior},
booktitle = {19th International Conference on Automated Planning and Scheduling (ICAPS'09)},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Planning},
bib2html_groups = {Cogito},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = { Planning means deciding on the future course of action based on
predictions of what will happen when an activity is carried out in
one way or the other. As we apply action planning to autonomous,
sensor-guided mobile robots with manipulators or even to humanoid
robots we need very realistic and detailed predictions of the
behavior generated by a plan in order to improve the robot's
performance substantially.
In this paper we investigate the high-fidelity temporal projection
of realistic robot behavior based on physics- and sensor-based
simulation systems. We equip a simulator and interpreter with means
to log simulated plan executions into a database. A logic-based
query and inference mechanism then retrieves and reconstructs the
necessary information from the database and translates the
information into a first-order representation of robot plans and the
behavior they generate. The query language enables the robot
planning system to infer the intentions, the beliefs, and the world
state at any projected time. It also allows the planning system to
recognize, diagnose, and analyze various plan failures typical for
performing everyday manipulation tasks. }
}
@InProceedings{bandouch09hci,
author = {Jan Bandouch and Michael Beetz},
title = {Tracking Humans Interacting with the Environment Using Efficient Hierarchical Sampling and Layered Observation Models},
booktitle = {IEEE Int. Workshop on Human-Computer Interaction (HCI). In conjunction with ICCV2009},
year = {2009},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = { We present a markerless tracking system for unconstrained human
motions which are typical for everyday manipulation tasks. Our
system is capable of tracking a high-dimensional human model
(51 DOF) without constricting the type of motion and the need for
training sequences. The system reliably tracks humans that
frequently interact with the environment, that manipulate objects,
and that can be partially occluded by the environment.
We describe and discuss two key components that substantially
contribute to the accuracy and reliability of the system. First, a
sophisticated hierarchical sampling strategy for recursive Bayesian
estimation that combines partitioning with annealing strategies to enable
efficient search in the presence of many local maxima. Second, a simple yet
effective appearance model that allows for the combination of shape and
appearance masks to implicitly deal with two cases of environmental occlusions
by (1) subtracting dynamic non-human objects from the region of
interest and (2) modeling objects (e.g. tables) that both occlude and
can be occluded by human subjects. The appearance model is based on
bit representations that makes our algorithm well suited for
implementation on highly parallel hardware such as commodity GPUs.
Extensive evaluations on the HumanEva2 benchmarks show the potential
of our method when compared to state-of-the-art Bayesian techniques.
Besides the HumanEva2 benchmarks, we present results on more
challenging sequences, including table setting tasks in a kitchen
environment and persons getting into and out of a car mock-up.}
}
@InProceedings{tenorth09dataset,
author = {Moritz Tenorth and Jan Bandouch and Michael Beetz},
title = {{The {TUM} Kitchen Data Set of Everyday Manipulation Activities for Motion Tracking and Action Recognition}},
booktitle = {IEEE International Workshop on Tracking Humans for the Evaluation of their Motion in Image Sequences (THEMIS), in conjunction with ICCV2009},
year = {2009},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman, K4C},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {We introduce the publicly available TUM Kitchen Data Set as a comprehensive collection of activity sequences recorded in a kitchen environment equipped with multiple complementary sensors. The recorded data consists of observations of naturally performed manipulation tasks as encountered in everyday activities of human life. Several instances of a table-setting task were performed by different subjects, involving the manipulation of objects and the environment. We provide the original video sequences, fullbody motion capture data recorded by a markerless motion tracker, RFID tag readings and magnetic sensor readings from objects and the environment, as well as corresponding action labels. In this paper, we both describe how the data was computed, in particular the motion tracker and the labeling, and give examples what it can be used for. We present first results of an automatic method for segmenting the observed motions into semantic classes, and describe how the data can be integrated in a knowledge-based framework for reasoning about the observations.}
}
@InProceedings{beetz09qlts,
author = {Michael Beetz and Jan Bandouch and Dominik Jain and Moritz Tenorth},
title = {{Towards Automated Models of Activities of Daily Life}},
booktitle = {First International Symposium on Quality of Life Technology -- Intelligent Systems for Better Living},
year = {2009},
address = {Pittsburgh, Pennsylvania USA},
bib2html_pubtype ={Conference Paper},
bib2html_rescat = {Learning,Models,Planning,Perception,Knowledge,Reasoning,Representation},
bib2html_groups = {Memoman,K4C,ProbCog},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = {We propose automated probabilistic models of everyday
activities (AM-EvA) as a novel technical means for the
perception, interpretation, and analysis of everyday manipulation
tasks and activities of daily life. AM-EvAs are based on
action-related concepts in everyday activities such as
action-related places (the place where cups are taken from the
cupboard), capabilities (the objects that can be picked up
single-handedly), etc. These concepts are probabilistically derived
from a set of previous activities that are fully and automatically
observed by computer vision and additional sensor systems. AM-EvA
models enable robots and technical systems to analyze activities in
the complete situation and activity context. They render the
classification and the assessment of actions and situations objective
and can justify the probabilistic interpretation with respect to the
activities the concepts have been learned from.
In this paper, we describe the current state of implementation of the
system that realizes this idea of automated models of
everyday activities and show example results from the observation
and analysis of table setting episodes.}
}
@Article{Rusu09RSJ-AR,
author = {Radu Bogdan Rusu and Jan Bandouch and Franziska Meier and Irfan Essa and Michael Beetz},
title = {{Human Action Recognition using Global Point Feature Histograms and Action Shapes}},
journal = {Advanced Robotics journal, Robotics Society of Japan (RSJ)},
year = {2009},
bib2html_pubtype = {Journal},
bib2html_rescat = {Perception},
bib2html_groups = {Memoman, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
abstract = { This article investigates the recognition of human actions from 3D point clouds
that encode the motions of people acting in sensor-distributed indoor environments.
Data streams are time-sequences of silhouettes extracted from cameras in the environment.
From the 2D silhouette contours we generate space-time streams by continuously aligning and
stacking the contours along the time axis as third spatial dimension.
The space-time stream of an observation sequence is segmented into parts
corresponding to subactions using a pattern matching technique based
on suffix trees and interval scheduling. Then, the segmented space-time shapes
are processed by treating the shapes as 3D point clouds and estimating global
point feature histograms for them. The resultant models are clustered using
statistical analysis, and our experimental results indicate that the presented
methods robustly derive different action classes. This holds despite large
intra-class variance in the recorded datasets due to performances from different
persons at different time intervals.
}
}
@InProceedings{09ETFALearning,
  author = {Andreas Leha and Dejan Pangercic and Thomas R\"uhr and Michael Beetz},
  title = {Optimization of Simulated Production Process Performance using Machine Learning},
  booktitle = {Proceedings of Emerging Technologies and Factory Automation (ETFA)},
  year = {2009},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Learning, Planning},
  bib2html_groups = {Cogmash},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Cognitive Factory},
}
@InProceedings{sunli-2009-cvprws,
  title = {{EYEWATCHME} - {3D} Hand and object tracking for inside out activity analysis},
  author = {Li Sun and Ulrich Klank and Michael Beetz},
  booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2009. CVPR 2009.},
  year = {2009},
  month = jun,
  pages = {9--16},
  abstract = {This paper investigates the inside-out recognition of everyday manipulation tasks using a gaze-directed camera, which is a camera that actively directs at the visual attention focus of the person wearing the camera. We present EYEWATCHME, an integrated vision and state estimation system that at the same time tracks the positions and the poses of the acting hands, the pose that the manipulated object, and the pose of the observing camera. Taken together, EYEWATCHME provides comprehensive data for learning predictive models of vision-guided manipulation that include the objects people are attending, the interaction of attention and reaching/grasping, and the segmentation of reaching and grasping using visual attention as evidence. Key technical contributions of this paper include an ego view hand tracking system that estimates 27 DOF hand poses. The hand tracking system is capable of detecting hands and estimating their poses despite substantial self-occlusion caused by the hand and occlusions caused by the manipulated object. EYEWATCHME can also cope with blurred images that are caused by rapid eye movements. The second key contribution is the of the integrated activity recognition system that simultaneously tracks the attention of the person, the hand poses, and the poses of the manipulated objects in terms of a global scene coordinates. We demonstrate the operation of EYEWATCHME in the context of kitchen tasks including filling a cup with water.},
  keywords = {computer graphics, human computer interaction, image restoration, image segmentation, image sensors, object recognition, tracking3D hand tracking, 3D object tracking, EYEWATCHME, blurred images, gaze-directed camera, grasping segmentation, inside out activity analysis, integrated activity recognition system, reaching segmentation, state estimation system, substantial self-occlusion, vision-guided manipulation},
  doi = {10.1109/CVPR.2009.5204358},
  issn = {1063-6919},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {Cop},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household}
}
@InProceedings{Marton09ISRR,
author = {Zoltan Csaba Marton and Lucian Cosmin Goron and Radu Bogdan Rusu and Michael Beetz},
title = {{Reconstruction and Verification of 3D Object Models for Grasping}},
booktitle = {Proceedings of the 14th International Symposium on Robotics Research (ISRR09)},
address = {Lucerne, Switzerland},
month = {August 31 -- September 3},
year = {2009},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09ICCV-WS,
author = {Radu Bogdan Rusu and Andreas Holzbach and Gary Bradski and Michael Beetz},
title = {Detecting and Segmenting Objects for Mobile Manipulation},
booktitle = {Proceedings of IEEE Workshop on Search in 3D and Video (S3DV), held in conjunction with the 12th IEEE International Conference on Computer Vision (ICCV)},
month = {September 27},
year = {2009},
address = {Kyoto, Japan},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Blodow09Humanoids,
author = {Nico Blodow and Radu Bogdan Rusu and Zoltan Csaba Marton and Michael Beetz},
title = {{Partial View Modeling and Validation in 3D Laser Scans for Grasping}},
booktitle = {9th IEEE-RAS International Conference on Humanoid Robots (Humanoids)},
month = {December 7-10},
year = {2009},
address = {Paris, France},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Rusu09Humanoids,
author = {Radu Bogdan Rusu and Andreas Holzbach and Rosen Diankov and Gary Bradski and Michael Beetz},
title = {Perception for Mobile Manipulation and Grasping using Active Stereo},
booktitle = {9th IEEE-RAS International Conference on Humanoid Robots (Humanoids)},
month = {December 7-10},
year = {2009},
address = {Paris, France},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{IAS09CoPMan,
author = {Michael Beetz and Nico Blodow and Ulrich Klank and Zoltan Csaba Marton and Dejan Pangercic and Radu Bogdan Rusu},
title = {{CoP-Man -- Perception for Mobile Pick-and-Place in Human Living Environments}},
booktitle = {Proceedings of the 22nd IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) Workshop on Semantic Perception for Mobile Manipulation},
month = {October 11-15},
year = {2009},
address = {St. Louis, MO, USA},
note = {Invited paper.},
bib2html_pubtype = {Workshop Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Li09Robio,
author = {Jun Li and Alexis Maldonado and Michael Beetz and Anna Schuboe},
title = {Obstacle avoidance in a pick-and-place task},
booktitle = {Proceedings of the 2009 IEEE Conference on Robotics and Biomimetics},
month = {December 19-23},
year = {2009},
address = {Guilin, Guangxi, China},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
}
@InProceedings{Wykowska09Fira,
  author = {Agnieszka Wykowska and Alexis Maldonado and Michael Beetz and Anna Schuboe},
  title = {How humans optimize their interaction with the environment: The impact of action context on human perception},
  booktitle = {Progress in Robotics. Proceedings of the FIRA RoboWorld Congress},
  month = {August 16-20},
  year = {2009},
  address = {Incheon, Korea},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@incollection{zaeh09cogfac,
author = {Michael F. Z\"ah and Michael Beetz and Kristina Shea and
Gunther Reinhart and K. Bender and Christian Lau and Martin Ostgathe and
W. Vogl and Mathey Wiesbeck and Marco Engelhard and Christoph Ertelt and
Thomas R\"uhr and M. Friedrich and S. Herle},
title = {{The Cognitive Factory}},
editor = {H. A. ElMaraghy},
year = {2009},
booktitle = {Changeable and Reconfigurable Manufacturing Systems},
publisher = {Springer},
pages = {355--371}
}
@InCollection{hoyninge10ccis,
author = {Nicolai v. Hoyningen-Huene and Michael Beetz},
editor = {AlpeshKumar Ranchordas and Helder Araujo},
booktitle = {VISIGRAPP 2009},
title = {Importance Sampling as One Solution to the Data Association Problem in Multi-target Tracking},
publisher = {Springer-Verlag Berlin Heidelberg},
year = {2010},
number = {68},
series = {Communications in Computer and Information Science (CCIS)},
pages = {309--325},
bib2html_pubtype = {Selected Paper},
bib2html_rescat = {Person Tracking},
bib2html_groups = {Aspogamo},
bib2html_funding = {ASpoGAMo},
bib2html_domain = {Soccer Analysis},
abstract = {Tracking multiple targets with similar appearance is a common task in many computer vision applications as surveillance or sports analysis. We propose a Rao-Blackwellized Resampling Particle Filter (RBRPF) as a real-time multi-target tracking method that solves the data association problem by a Monte Carlo approach. Each particle containing the whole target configuration is predicted by using a process model and resampled by sampling associations and fusing of the predicted state with the assigned measurement(s) instead of the common dispersion. As each target state is modeled as a Gaussian, Rao-Blackwellization can be used to solve some of these steps analytically. The sampling of associations splits the multi-target tracking problem in multiple single target tracking problems, which can be handled by Kalman filters in an optimal way. The method is independent of the order of measurements which is mostly predetermined by the measuring process in contrast to other state-of-the-art approaches. Smart resampling and memoization is introduced to equip the tracking method with real-time capabilities in the first place exploiting the discreteness of the associations. The probabilistic framework allows for consideration of appearance models and the fusion of different sensors. A way to constrain the multiplicity of measurements associated with a single target is proposed and -- along with the ability to cope with a high number of targets in clutter -- evaluated in a simulation experiment. We demonstrate the applicability of the proposed method to real world applications by tracking soccer players captured by multiple cameras through occlusions in real-time.}
}
@article{wykowska10humansoptimize,
  author = {Wykowska, Agnieszka and Maldonado, Alexis and Beetz, Michael and Schuboe, Anna},
  affiliation = {Department of Experimental Psychology, Ludwig Maximilians Universit\"at, M\"unchen, Germany},
  title = {How Humans Optimize Their Interaction with the Environment: The Impact of Action Context on Human Perception},
  journal = {International Journal of Social Robotics},
  publisher = {Springer Netherlands},
  issn = {1875-4791},
  keywords = {Engineering},
  pages = {1--9},
  doi = {10.1007/s12369-010-0078-3},
  year = {2010}
}
@InProceedings{tenorth10webinstructions,
author = {Moritz Tenorth and Daniel Nyga and Michael Beetz},
title = {{Understanding and Executing Instructions for Everyday Manipulation Tasks from the World Wide Web}},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2010},
pages = {1486--1491},
month = {May 3--8},
address = {Anchorage, AK, USA},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Reasoning, Planning},
bib2html_domain = {Assistive Household}
}
@InProceedings{tenorth10transformations,
author = {Moritz Tenorth and Michael Beetz},
title = {{Priming Transformational Planning with Observations of Human Activities}},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2010},
pages = {1499--1504},
month = {May 3--8},
address = {Anchorage, AK, USA},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Learning, Planning},
bib2html_domain = {Assistive Household}
}
@article{beetz10ameva,
title={{Towards Automated Models of Activities of Daily Life}},
author={Michael Beetz and Moritz Tenorth and Dominik Jain and Jan Bandouch},
journal={Technology and Disability},
volume={22},
number= {1-2},
pages={27--40},
year={2010},
publisher={IOS Press},
bib2html_pubtype = {Journal},
bib2html_groups = {K4C, Memoman, ProbCog},
bib2html_funding = {CoTeSys, MeMoMan},
bib2html_rescat = {Perception, Models, Representation, Learning, Reasoning},
bib2html_domain = {Assistive Household}
}
@article{beetz10towards,
title={{Towards Performing Everyday Manipulation Activities}},
author={Michael Beetz and Dominik Jain and Lorenz M{\"o}senlechner and Moritz Tenorth},
journal={Robotics and Autonomous Systems},
year={2010},
volume={58},
number= {9},
pages={1085--1095},
publisher={Elsevier},
bib2html_pubtype = {Journal},
bib2html_groups = {K4C, Cogito, ProbCog},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Models, Representation, Learning, Reasoning, Planning},
bib2html_domain = {Assistive Household}
}
@article{beetz10cpa,
title={{Learning from Humans -- Cognition-enabled Computational Models of Everyday Activity}},
author={Michael Beetz and Martin Buss and Bernd Radig},
journal={K{\"u}nstliche Intelligenz},
year={2010},
publisher={Springer},
bib2html_pubtype = {Journal},
bib2html_funding = {CoTeSys},
bib2html_groups = {Cogito},
bib2html_domain = {Assistive Household}
}
@article{buss10cotesys,
title={{CoTeSys -- Cognition for Technical Systems}},
author={Martin Buss and Michael Beetz},
journal={K{\"u}nstliche Intelligenz},
year={2010},
publisher={Springer},
bib2html_pubtype = {Journal},
bib2html_funding = {CoTeSys}
}
@article{tenorth10kr,
title={{Knowledge Representation for Cognitive Robots}},
author={Moritz Tenorth and Dominik Jain and Michael Beetz},
journal = {K{\"u}nstliche Intelligenz},
publisher = {Springer},
year = {2010},
volume = {24},
number = {3},
pages = {233--240},
bib2html_pubtype = {Journal},
bib2html_groups = {K4C, ProbCog},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Representation, Learning, Reasoning},
bib2html_domain = {Assistive Household}
}
@InProceedings{jain10adaptive,
  author = {Dominik Jain and Andreas Barthels and Michael Beetz},
  title = {{Adaptive Markov Logic Networks: Learning Statistical Relational Models with Dynamic Parameters}},
  pages = {937--942},
  booktitle = {19th European Conference on Artificial Intelligence (ECAI)},
  year = {2010},
  bib2html_groups = {ProbCog}
}
@Article{KI10SpecialIssue,
  author = {Michael Beetz and Alexandra Kirsch},
  title = {Special Issue on Cognition for Technical Systems},
  journal = {K{\"u}nstliche Intelligenz},
  year = {2010},
  volume = {24}
}
@InProceedings{jain10soft,
  author = {Dominik Jain and Michael Beetz},
  title = {{Soft Evidential Update via Markov Chain Monte Carlo Inference}},
  booktitle = {KI 2010: Advances in Artificial Intelligence, 33rd Annual German Conference on AI},
  pages = {280--290},
  address = {Karlsruhe, Germany},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  volume = {6359},
  isbn = {978-3-642-16110-0},
  year = {2010},
  bib2html_groups = {ProbCog}
}
@InProceedings{rss10making_sense_3d_data,
author = {Nico Blodow and Zoltan-Csaba Marton and Dejan Pangercic and Michael Beetz},
title = {Making Sense of 3D Data},
booktitle = {Robotics: Science and Systems Conference (RSS), Workshop on Strategies and Evaluation for Mobile Manipulation in Household Environments},
year = {2010},
bib2html_pubtype = {Workshop Paper},
bib2html_groups = {EnvMod},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Perception, Models},
bib2html_domain = {Assistive Household}}
@InProceedings{Marton10IROS,
author = {Zoltan-Csaba Marton and Dejan Pangercic and Nico Blodow and Jonathan Kleinehellefort and Michael Beetz},
title = {{General 3D Modelling of Novel Objects from a Single View}},
booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 18-22},
year = {2010},
address = {Taipei, Taiwan},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household}
}
@InProceedings{iros10kcopman,
  author = {Dejan Pangercic and Moritz Tenorth and Dominik Jain and Michael Beetz},
  title = {{Combining Perception and Knowledge Processing for Everyday Manipulation}},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  pages = {1065--1071},
  year = {2010},
  month = {October 18-22},
  address = {Taipei, Taiwan},
  bib2html_groups = {K4C,ProbCog,Cop},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household}
}
@InProceedings{beetz10cram,
  author = {Michael Beetz and Lorenz M\"osenlechner and Moritz Tenorth},
  title = {{CRAM -- A Cognitive Robot Abstract Machine for Everyday Manipulation in Human Environments}},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems},
  year = {2010},
  pages = {1012--1017},
  month = {October 18-22},
  address = {Taipei, Taiwan},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C,Cogito},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Reasoning, Planning},
  bib2html_domain = {Assistive Household}
}
@InProceedings{moesenle10ActionAwareness,
  author = {Lorenz M\"osenlechner and Nikolaus Demmel and Michael Beetz},
  title = {{Becoming Action-aware through Reasoning about Logged Plan Execution Traces}},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems},
  pages = {2231--2236},
  year = {2010},
  month = {October 18-22},
  address = {Taipei, Taiwan},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cogito},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Reasoning, Planning},
  bib2html_domain = {Assistive Household}
}
@techreport{brscic10mujoa,
  author = {D. Br\v{s}\v{c}i\'{c} and M. Eggers and F. Rohrm\"uller and O. Kourakos and S. Sosnowski and D. Althoff and M. Lawitzky and A. M\"ortl and M. Rambow and V. Koropouli and J.R. Medina Hern\'{a}ndez and X. Zang and W. Wang and D. Wollherr and K. K\"uhnlenz and C. Mayer and T. Kruse and A. Kirsch and J. Blume and A. Bannat and T. Rehrl and F. Wallhoff and T. Lorenz and P. Basili and C. Lenz and T. R\"oder and G. Panin and W. Maier and S. Hirche and M. Buss and M. Beetz and B. Radig and A. Schub\"o and S. Glasauer and A. Knoll and E. Steinbach},
  title = {{Multi Joint Action} in {CoTeSys} --- Setup and Challenges},
  institution = {CoTeSys Cluster of Excellence: Technische Universit\"at M\"unchen \& Ludwig-Maximilians-Universit\"at M\"unchen},
  year = {2010},
  number = {CoTeSys-TR-10-01},
  address = {Munich, Germany},
  month = jun,
  bib2html_pubtype = {Other},
  bib2html_groups = {PARA, Mudis, Other},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception, Human-Robot Interaction, Planning},
  bib2html_domain = {Multi-Joint Action}
}
@InProceedings{ruizugalde10objectmodels,
author = {Federico Ruiz-Ugalde and Gordon Cheng and Michael Beetz},
title = {Prediction of action outcomes using an object model},
booktitle = {2010 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 18-22},
year = {2010},
address = {Taipei, Taiwan},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Models, Action, Learning},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household}
}
@InProceedings{maldonado10nomodelgrasping,
author = {Alexis Maldonado and Ulrich Klank and Michael Beetz},
title = {Robotic grasping of unmodeled objects using time-of-flight range data and finger torque information},
booktitle = {2010 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
month = {October 18-22},
year = {2010},
pages = {2586--2591},
address = {Taipei, Taiwan},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Action, Perception},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household}
}
@InProceedings{kunze10omics,
author = {Lars Kunze and Moritz Tenorth and Michael Beetz},
title = {{Putting People's Common Sense into Knowledge Bases of Household Robots}},
booktitle = {33rd Annual German Conference on Artificial Intelligence (KI 2010)},
month = {September 21-24},
year = {2010},
pages = {151--159},
address = {Karlsruhe, Germany},
publisher = {Springer},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Reasoning, Representation},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household}
}
@InProceedings{GRSD10Humanoids,
author = {Zoltan-Csaba Marton and Dejan Pangercic and Radu Bogdan Rusu and Andreas Holzbach and Michael Beetz},
title = {Hierarchical Object Geometric Categorization and Appearance Classification for Mobile Manipulation},
booktitle = {Proceedings of the IEEE-RAS International Conference on Humanoid Robots},
month = {December 6-8},
year = {2010},
address = {Nashville, TN, USA},
bib2html_pubtype = {Conference Paper},
bib2html_rescat = {Perception, Models},
bib2html_groups = {Cop, EnvMod},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household}
}
@InProceedings{Blodow10Humanoids,
  author = {Nico Blodow and Dominik Jain and Zoltan-Csaba Marton and Michael Beetz},
  title = {{Perception and Probabilistic Anchoring for Dynamic World State Logging}},
  booktitle = {10th IEEE-RAS International Conference on Humanoid Robots},
  pages = {160--166},
  month = {December 6-8},
  year = {2010},
  address = {Nashville, TN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {Cop, EnvMod, ProbCog},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household}
}
@InProceedings{tenorth10envmodel,
  author = {Moritz Tenorth and Lars Kunze and Dominik Jain and Michael Beetz},
  title = {{KNOWROB-MAP -- Knowledge-Linked Semantic Object Maps}},
  booktitle = {10th IEEE-RAS International Conference on Humanoid Robots},
  pages = {430--435},
  month = {December 6-8},
  year = {2010},
  address = {Nashville, TN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {K4C, EnvMod, ProbCog},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household}
}
@article{zaeh10artificial,
author = {M. F. Zaeh and W. Roesel and A. Bannat and T. Bautze and M. Beetz and J. Blume and K. Diepold and C. Ertelt and F. Geiger and T. Gmeiner and T. Gyger and A. Knoll and C. Lau and C. Lenz and M. Ostgathe and G. Reinhart and T. Ruehr and A. Schuboe and K. Shea and I. Stork genannt Wersborg and S. Stork and W. Tekouo and F. Wallhoff and M. Wiesbeck},
title = {Artificial Cognition in Production Systems},
year = {2010},
journal ={IEEE Transactions on Automation Science and Engineering},
volume = {7},
number = {3},
pages = {1--27}
}
@inproceedings{lemaignan2010oro,
author = {S\'everin Lemaignan and Raquel Ros and Lorenz M{\"o}senlechner and Rachid Alami and Michael Beetz},
title = {ORO, a knowledge management module for cognitive architectures in robotics},
booktitle = {Proceedings of the 2010 IEEE/RSJ International Conference on Intelligent Robots and Systems},
year = {2010},
month = {October 18-22},
address = {Taipei, Taiwan},
pages = {3548--3553}
}
@techreport{tenorth10roboearth,
title = {{Deliverable D5.2: The RoboEarth Language -- Language Specification}},
author = {Moritz Tenorth and Michael Beetz},
year = {2010},
institution = {FP7-ICT-248942 RoboEarth},
number = {D5.2},
bib2html_pubtype = {Other},
bib2html_groups = {K4C, ProbCog},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Representation, Learning, Reasoning},
bib2html_domain = {Assistive Household},
abstract = {This document describes the current state of implementation of the RoboEarth representation language. This language is designed for two main purposes. First, it should allow to represent all information a robot needs to perform a reasonably complex task. This includes information about (1) Plans, which consist of the actions a task is composed of, ordering constraints among them, monitoring and failure handling, as well as action parameters like objects, locations, grasp types; (2) Objects, especially types, dimensions, states, and other properties, but also locations of specific objects a robot has detected, and object models that can be used for recognition; and the (3) Environment, including maps for self-localization as well as poses of objects like pieces of furniture. The second main task of the RoboEarth language is to allow a robot to decide on its own if a certain piece of information is useful to it. That means, a robot must be able to check if an action description contains a plan for the action it would like to do, if it meets all requirements to perform this action, and if it has the sensors needed to use an object recognition model. Using the semantic descriptions in the RoboEarth language, a robot can perform the checks using logical inference.
}}
@InProceedings{goron10isr-robotik,
  author = {Lucian Cosmin Goron and Zoltan Csaba Marton and Gheorghe Lazea and Michael Beetz},
  title = {Automatic Layered {3D} Reconstruction of Simplified Object Models for Grasping},
  booktitle = {Joint 41st International Symposium on Robotics (ISR) and 6th German Conference on Robotics (ROBOTIK)},
  address = {Munich, Germany},
  year = {2010},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {Cop, EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household}
}
@article{lemaignan2011grounding,
  author = {S\'everin Lemaignan and Raquel Ros and E. Akin Sisbot and Rachid Alami and Michael Beetz},
  title = {Grounding the Interaction: Anchoring Situated Discourse in Everyday Human-Robot Interaction},
  journal = {International Journal of Social Robotics},
  publisher = {Springer Netherlands},
  issn = {1875-4791},
  pages = {1--19},
  year = {2011},
  doi = {10.1007/s12369-011-0123-x}
}
@inproceedings{pushing11humanoids,
author = {Federico Ruiz-Ugalde and Gordon Cheng and Michael Beetz},
title = {Fast adaptation for effect-aware pushing},
booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
year = {2011},
month = {October, 26--28},
address = {Bled, Slovenia},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {Cogman},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
bib2html_rescat = {Action, Control, Prediction}
}
@inproceedings{optimitation11humanoids,
author = {Sebastian Albrecht and Karinne Ramirez-Amaro and Federico Ruiz-Ugalde and David Weikersdorfer and Marion Leibold and Michael Ulbrich and Michael Beetz},
title = {Imitating human reaching motions using physically inspired optimization principles},
booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
year = {2011},
month = {October, 26--28},
address = {Bled, Slovenia},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {},
bib2html_funding = {CoTeSys},
bib2html_domain = {Assistive Household},
bib2html_rescat = {Imitation, Optimization, Perception, Segmentation}
}
@inproceedings{kunze11naivephysics,
author = {Lars Kunze and Mihai Emanuel Dolha and Emitza Guzman and Michael Beetz},
title = {Simulation-based Temporal Projection of Everyday Robot Object Manipulation},
booktitle = {Proc. of the 10th Int. Conf. on Autonomous Agents and Multiagent Systems (AAMAS 2011)},
editor = {Yolum and Tumer and Stone and Sonenberg},
publisher = {IFAAMAS},
month = {May, 2--6},
year = {2011},
address = {Taipei, Taiwan},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Reasoning},
bib2html_domain = {Assistive Household}
}
@inproceedings{kunze11srdl,
author = {Lars Kunze and Tobias Roehm and Michael Beetz},
title = {Towards Semantic Robot Description Languages},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = {May, 9--13},
year = {2011},
pages = {5589--5595},
address = {Shanghai, China},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Representation, Reasoning},
bib2html_domain = {Assistive Household}
}
@inproceedings{nyga11trajcluster,
author = {Daniel Nyga and Moritz Tenorth and Michael Beetz},
title = {How-Models of Human Reaching Movements in the Context of Everyday Manipulation Activities},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = {May, 9--13},
year = {2011},
address = {Shanghai, China},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {K4C},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Representation, Reasoning},
bib2html_domain = {Assistive Household}
}
@inproceedings{klank11transparent,
author = {Ulrich Klank and Daniel Carton and Michael Beetz},
title = {Transparent Object Detection and Reconstruction on a Mobile Platform},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
month = {May, 9--13},
year = {2011},
address = {Shanghai, China},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {CoP},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Object Recognition},
bib2html_domain = {Assistive Household}
}
@article{tenorth11www,
  author = {Moritz Tenorth and Ulrich Klank and Dejan Pangercic and Michael Beetz},
  title = {{Web}-enabled Robots -- Robots that Use the {Web} as an Information Resource},
  journal = {Robotics \& Automation Magazine},
  volume = {18},
  number = {2},
  pages = {58--68},
  year = {2011},
  publisher = {IEEE},
  bib2html_pubtype = {Journal},
  bib2html_groups = {K4C},
  bib2html_funding = {},
  bib2html_rescat = {Representation, Models, Reasoning},
  bib2html_domain = {Assistive Household},
}
@article{waibel11roboearth,
  author = {Markus Waibel and Michael Beetz and Raffaello D'Andrea and Rob Janssen and Moritz Tenorth and Javier Civera and Jos Elfring and Dorian G{\'a}lvez-L{\'o}pez and Kai H{\"a}ussermann and J. M. M. Montiel and Alexander Perzylo and Bj{\"o}rn Schie{\ss}le and Oliver Zweigle and Ren{\'e} van de Molengraft},
  title = {{RoboEarth} - A {World Wide Web} for Robots},
  journal = {Robotics \& Automation Magazine},
  volume = {18},
  number = {2},
  pages = {69--82},
  year = {2011},
  publisher = {IEEE},
  bib2html_pubtype = {Journal},
  bib2html_groups = {K4C},
  bib2html_funding = {},
  bib2html_rescat = {Perception, Models, Reasoning},
  bib2html_domain = {Assistive Household},
}
@article{mozos11furniture,
  author = {Oscar Martinez Mozos and Zoltan Csaba Marton and Michael Beetz},
  title = {Furniture Models Learned from the {WWW} -- Using Web Catalogs to Locate and Categorize Unknown Furniture Pieces in {3D} Laser Scans},
  journal = {Robotics \& Automation Magazine},
  volume = {18},
  number = {2},
  pages = {22--32},
  month = jun,
  year = {2011},
  publisher = {IEEE},
  bib2html_pubtype = {Journal},
  bib2html_groups = {EnvMod, CoP},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception, Models, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{icra11perception-manipulation,
  author = {Michael Beetz and Ulrich Klank and Alexis Maldonado and Dejan Pangercic and Thomas R{\"u}hr},
  title = {Robotic Roommates Making Pancakes - Look Into Perception-Manipulation Loop},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA), Workshop on Mobile Manipulation: Integrating Perception and Manipulation},
  month = may # ", 9--13",
  year = {2011},
  pages = {529--536},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {EnvMod, Cogman, CoP},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Action, Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{icra11semantic-perception,
  author = {Nico Blodow and Zoltan-Csaba Marton and Dejan Pangercic and Thomas R{\"u}hr and Moritz Tenorth and Michael Beetz},
  title = {Inferring Generalized Pick-and-Place Tasks from Pointing Gestures},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA), Workshop on Semantic Perception, Mapping and Exploration},
  month = may # ", 9--13",
  year = {2011},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {EnvMod, K4C},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception, Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@article{marton11ijrr,
  author = {Zoltan Csaba Marton and Dejan Pangercic and Nico Blodow and Michael Beetz},
  title = {Combined {2D-3D} Categorization and Classification for Multimodal Perception Systems},
  journal = {The International Journal of Robotics Research},
  volume = {30},
  number = {11},
  pages = {1378--1402},
  month = sep,
  year = {2011},
  publisher = {Sage Publications},
  bib2html_pubtype = {Journal},
  bib2html_groups = {EnvMod, CoP},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception, Models, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{mapping11iros,
  author = {Nico Blodow and Lucian Cosmin Goron and Zoltan-Csaba Marton and Dejan Pangercic and Thomas R{\"u}hr and Moritz Tenorth and Michael Beetz},
  title = {Autonomous Semantic Mapping for Robots Performing Everyday Manipulation Tasks in Kitchen Environments},
  booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = sep # ", 25--30",
  year = {2011},
  address = {San Francisco, CA, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@inproceedings{kunze11simlp,
  author = {Lars Kunze and Mihai Emanuel Dolha and Michael Beetz},
  title = {Logic Programming with Simulation-based Temporal Projection for Everyday Robot Object Manipulation},
  booktitle = {2011 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = sep # ", 25--30",
  year = {2011},
  address = {San Francisco, CA, USA},
  note = {Best Student Paper Finalist.},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{beetz11arso,
  author = {Zoltan-Csaba Marton and Nico Blodow and Michael Beetz},
  title = {Advantages of Spatial-temporal Object Maps for Service Robotics},
  booktitle = {IEEE Workshop on Advanced Robotics and its Social Impacts (ARSO)},
  month = oct # " 2--4",
  year = {2011},
  address = {Half-Moon Bay, CA, USA},
  bib2html_pubtype = {Workshop Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@inproceedings{jain11blns,
  author = {Dominik Jain and Klaus von Gleissenthall and Michael Beetz},
  title = {{Bayesian Logic Networks} and the Search for Samples with Backward Simulation and Abstract Constraint Learning},
  booktitle = {KI 2011: Advances in Artificial Intelligence, 34th Annual German Conference on AI},
  pages = {144--156},
  address = {Berlin, Germany},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  volume = {7006},
  isbn = {978-3-642-24454-4},
  month = oct # " 4--7",
  year = {2011},
  bib2html_groups = {ProbCog},
}
@inproceedings{moesenlechner11physics,
  author = {Lorenz M{\"o}senlechner and Michael Beetz},
  title = {Parameterizing Actions to have the Appropriate Effects},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = sep # " 25--30",
  year = {2011},
  address = {San Francisco, CA, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cogito},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@inproceedings{ccd11humanoids,
  author = {Shulei Zhu and Dejan Pangercic and Michael Beetz},
  title = {Contracting Curve Density Algorithm for Applications in Personal Robotics},
  booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
  month = oct # ", 26--28",
  year = {2011},
  address = {Bled, Slovenia},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {EnvMod, CoP},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  bib2html_rescat = {Perception},
}
@inproceedings{pancakes11humanoids,
  author = {Michael Beetz and Ulrich Klank and Ingo Kresse and Alexis Maldonado and Lorenz M{\"o}senlechner and Dejan Pangercic and Thomas R{\"u}hr and Moritz Tenorth},
  title = {Robotic Roommates Making Pancakes},
  booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
  month = oct # ", 26--28",
  year = {2011},
  address = {Bled, Slovenia},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {EnvMod, CoP, Cogito, K4C, Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  bib2html_rescat = {Action, Perception, Reasoning, Planning, Models, Representation},
}
@misc{shopping_demo11aaai,
  author = {Dejan Pangercic and Koppany Mathe and Zoltan-Csaba Marton and Lucian Cosmin Goron and Monica-Simona Opris and Martin Schuster and Moritz Tenorth and Dominik Jain and Thomas Ruehr and Michael Beetz},
  title = {A Robot that Shops for and Stores Groceries},
  howpublished = {AAAI Video Competition (AIVC 2011)},
  address = {San Francisco, CA, USA},
  month = aug # " 7--11",
  year = {2011},
  url = {http://youtu.be/x0Ybod_6ADA},
  bib2html_pubtype = {Video},
  bib2html_groups = {EnvMod, CoP, K4C},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  bib2html_rescat = {Action, Perception, Reasoning, Models, Representation},
}
@inproceedings{toolrep11humanoids,
  author = {Ingo Kresse and Ulrich Klank and Michael Beetz},
  title = {Multimodal Autonomous Tool Analyses and Appropriate Application},
  booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
  month = oct # ", 26--28",
  year = {2011},
  address = {Bled, Slovenia},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {CoP, Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  bib2html_rescat = {Perception, Action, Representation},
}
@inproceedings{iros11objsearch,
  author = {Manabu Saito and Haseru Chen and Kei Okada and Masayuki Inaba and Lars Kunze and Michael Beetz},
  title = {Semantic Object Search in Large-scale Indoor Environments},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Workshop on Active Semantic Perception and Object Search in the Real World},
  month = sep # ", 25--30",
  year = {2011},
  address = {San Francisco, CA, USA},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {EnvMod,K4C},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning, Models},
  bib2html_domain = {Assistive Household},
}
@inproceedings{irosws11germandeli,
  author = {Dejan Pangercic and Vladimir Haltakov and Michael Beetz},
  title = {Fast and Robust Object Detection in Household Environments Using Vocabulary Trees with {SIFT} Descriptors},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Workshop on Active Semantic Perception and Object Search in the Real World},
  month = sep # ", 25--30",
  year = {2011},
  address = {San Francisco, CA, USA},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {CoP},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{irosws11vosch,
  author = {Asako Kanezaki and Zoltan-Csaba Marton and Dejan Pangercic and Tatsuya Harada and Yasuo Kuniyoshi and Michael Beetz},
  title = {Voxelized Shape and Color Histograms for {RGB-D}},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), Workshop on Active Semantic Perception and Object Search in the Real World},
  month = sep # ", 25--30",
  year = {2011},
  address = {San Francisco, CA, USA},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {CoP,EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{marton11rgbd,
  author = {Zoltan-Csaba Marton and Dejan Pangercic and Michael Beetz},
  title = {Efficient Surface and Feature Estimation in {RGBD}},
  booktitle = {RGB-D Workshop on 3D Perception in Robotics at the European Robotics (euRobotics) Forum},
  month = apr # " 8",
  year = {2011},
  address = {V{\"a}ster{\aa}s, Sweden},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {CoP,EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{Kammerl2012,
  author = {Julius Kammerl and Nico Blodow and Radu Bogdan Rusu and Suat Gedikli and Michael Beetz and Eckehard Steinbach},
  title = {Real-time Compression of Point Cloud Streams},
  booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA})},
  month = may,
  year = {2012},
  address = {Minnesota, {USA}},
}
@article{Bandouch12memoman,
  author = {Jan Bandouch and Odest Chadwicke Jenkins and Michael Beetz},
  title = {A Self-Training Approach for Visual Tracking and Recognition of Complex Human Activity Patterns},
  journal = {International Journal of Computer Vision},
  volume = {99},
  number = {2},
  year = {2012},
  pages = {166--189},
}
@inproceedings{hausman12interactive,
  author = {Karol Hausman and Christian Bersch and Dejan Pangercic and Sarah Osentoski and Zoltan-Csaba Marton and Michael Beetz},
  title = {Segmentation of Cluttered Scenes through Interactive Perception},
  booktitle = {ICRA 2012 Workshop on Semantic Perception and Mapping for Knowledge-enabled Service Robotics},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  bib2html_groups = {EnvMod},
  bib2html_pubtype = {Workshop Paper},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@article{Bee12IEEE,
  title = {Cognition-Enabled Autonomous Robot Control for the Realization of Home Chore Task Intelligence},
  author = {Michael Beetz and Dominik Jain and Lorenz M\"osenlechner and Moritz Tenorth and Lars Kunze and Nico Blodow and Dejan Pangercic},
  journal = {Proceedings of the IEEE, Special Issue on Quality of Life Technology},
  volume = {100},
  number = {8},
  pages = {2454--2471},
  year = {2012},
}
@inproceedings{kidson12registration,
  author = {Ross Kidson and Darko Stanimirovic and Dejan Pangercic and Michael Beetz},
  title = {Elaborative Evaluation of {RGB-D} based Point Cloud Registration for Personal Robots},
  booktitle = {ICRA 2012 Workshop on Semantic Perception and Mapping for Knowledge-enabled Service Robotics},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  bib2html_groups = {EnvMod},
  bib2html_pubtype = {Workshop Paper},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@article{stulp12learning,
  author = {Freek Stulp and Andreas Fedrizzi and Lorenz M{\"o}senlechner and Michael Beetz},
  title = {Learning and Reasoning with Action-Related Places for Robust Mobile Manipulation},
  journal = {Journal of Artificial Intelligence Research (JAIR)},
  year = {2012},
  volume = {43},
  pages = {1--42},
}
@inproceedings{schuster12orgprinciples,
  author = {Martin Schuster and Dominik Jain and Moritz Tenorth and Michael Beetz},
  title = {Learning Organizational Principles in Human Environments},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  year = {2012},
  pages = {3867--3874},
  month = may # " 14--18",
  address = {St. Paul, MN, USA},
  bib2html_groups = {ProbCog, K4C},
}
@inproceedings{icra12doors,
  author = {Thomas R{\"u}hr and J{\"u}rgen Sturm and Dejan Pangercic and Michael Beetz and Daniel Cremers},
  title = {A Generalized Framework for Opening Doors and Drawers in Kitchen Environments},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {EnvMod, K4C, Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception, Representation, Manipulation},
  bib2html_domain = {Assistive Household},
}
@inproceedings{tenorth12roboearth,
  author = {Moritz Tenorth and Alexander Clifford Perzylo and Reinhard Lafrenz and Michael Beetz},
  title = {The {RoboEarth} language: Representing and Exchanging Knowledge about Actions, Objects, and Environments},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  note = {Best Cognitive Robotics Paper Award.},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C, Cogman},
  bib2html_funding = {RoboEarth},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{kunze12objsearch,
  author = {Lars Kunze and Michael Beetz and Manabu Saito and Haseru Azuma and Kei Okada and Masayuki Inaba},
  title = {Searching Objects in Large-scale Indoor Environments: A Decision-theoretic Approach},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{klank12validation,
  author = {Ulrich Klank and Lorenz M{\"o}senlechner and Alexis Maldonado and Michael Beetz},
  title = {Robots that Validate Learned Perceptual Models},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cop, Cogito},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{kresse12itasc,
  author = {Ingo Kresse and Michael Beetz},
  title = {Movement-aware Action Control -- Integrating Symbolic and Control-theoretic Action Execution},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 14--18",
  year = {2012},
  address = {St. Paul, MN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cogman, Cogito},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Action, Reasoning, Planning, Models},
  bib2html_domain = {Assistive Household},
}
@inproceedings{tenorth12springsymp,
  author = {Moritz Tenorth and Michael Beetz},
  title = {Knowledge Processing for Autonomous Robot Control},
  booktitle = {AAAI Spring Symposium on Designing Intelligent Robots: Reintegrating AI},
  month = mar # " 26--28",
  year = {2012},
  address = {Stanford, CA, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C},
  bib2html_funding = {RoboEarth},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{cogsys12semantic_mapping,
  title = {Semantic Object Maps for Household Tasks},
  author = {Michael Beetz and Moritz Tenorth and Dejan Pangercic and Benjamin Pitzer},
  booktitle = {5th International Conference on Cognitive Systems (CogSys 2012)},
  year = {2012},
  bib2html_pubtype = {Poster},
  bib2html_groups = {EnvMod, K4C},
  bib2html_funding = {CoTeSys, RoboEarth},
  bib2html_rescat = {Perception, Representation},
  bib2html_domain = {Assistive Household},
}
@inproceedings{cogsys12cram,
  title = {CRAM -- a Cognitive Robot Abstract Machine},
  author = {Michael Beetz and Lorenz M\"osenlechner and Moritz Tenorth and Thomas R\"uhr},
  booktitle = {5th International Conference on Cognitive Systems (CogSys 2012)},
  year = {2012},
  bib2html_pubtype = {Poster},
  bib2html_groups = {Cogito, K4C},
  bib2html_funding = {CoTeSys, RoboEarth},
  bib2html_rescat = {Perception, Representation},
  bib2html_domain = {Assistive Household},
}
@inproceedings{cogsys12parts,
  title = {Efficient Part-Graph Hashes for Object Categorization},
  author = {Ferenc Balint-Benczedi and Zoltan-Csaba Marton and Michael Beetz},
  booktitle = {5th International Conference on Cognitive Systems (CogSys)},
  year = {2012},
  bib2html_pubtype = {Poster},
  bib2html_groups = {EnvMod, Cop},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{ias12exchange,
  title = {Exchange of Action-related Information among Autonomous Robots},
  author = {Moritz Tenorth and Michael Beetz},
  booktitle = {12th International Conference on Intelligent Autonomous Systems},
  year = {2012},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C},
  bib2html_funding = {RoboEarth},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{marton12SC,
  author = {Zoltan-Csaba Marton and Ferenc Balint-Benczedi and Florian Seidel and Lucian Cosmin Goron and Michael Beetz},
  title = {Object Categorization in Clutter using Additive Features and Hashing of Part-graph Descriptors},
  booktitle = {Proceedings of Spatial Cognition (SC)},
  address = {Abbey Kloster Seeon, Germany},
  year = {2012},
  doi = {10.1007/978-3-642-32732-2_2},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {EnvMod, Cop},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
  keywords = {openease_kbe_perception_everyday},
}
@inproceedings{gossow2012daft,
  title = {Distinctive Texture Features from Perspective-Invariant Keypoints},
  author = {David Gossow and David Weikersdorfer and Michael Beetz},
  booktitle = {21st International Conference on Pattern Recognition},
  year = {2012},
  note = {Accepted for publication.},
  bib2html_pubtype = {Conference Paper},
}
@inproceedings{weikersdorfer2012dasp,
  title = {Depth-Adaptive Superpixels},
  author = {David Weikersdorfer and David Gossow and Michael Beetz},
  booktitle = {21st International Conference on Pattern Recognition},
  year = {2012},
  note = {Accepted for publication.},
  bib2html_pubtype = {Conference Paper},
}
@inproceedings{bersch12interactive,
  author = {Christian Bersch and Dejan Pangercic and Sarah Osentoski and Karol Hausman and Zoltan-Csaba Marton and Ryohei Ueda and Kei Okada and Michael Beetz},
  title = {Segmentation of Textured and Textureless Objects through Interactive Perception},
  booktitle = {RSS Workshop on Robots in Clutter: Manipulation, Perception and Navigation in Human Environments},
  month = jul # " 9--13",
  year = {2012},
  address = {Sydney, Australia},
  bib2html_groups = {EnvMod},
  bib2html_pubtype = {Workshop Paper},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{tenorth12actionreasoning,
  author = {Moritz Tenorth and Michael Beetz},
  title = {A Unified Representation for Reasoning about Robot Actions, Processes, and their Effects on Objects},
  booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = oct # ", 7--12",
  year = {2012},
  address = {Vilamoura, Portugal},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C},
  bib2html_funding = {RoboEarth,RoboHow},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{nyga12actioncore,
  author = {Daniel Nyga and Michael Beetz},
  title = {Everything Robots Always Wanted to Know about Housework (But were afraid to ask)},
  booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = oct # ", 7--12",
  year = {2012},
  address = {Vilamoura, Portugal},
  bib2html_pubtype = {Conference Paper},
  bib2html_funding = {CoTeSys, RoboHow},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{iros12semantic_mapping,
  author = {Dejan Pangercic and Moritz Tenorth and Benjamin Pitzer and Michael Beetz},
  title = {Semantic Object Maps for Robotic Housework - Representation, Acquisition and Use},
  booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = oct # ", 7--12",
  year = {2012},
  address = {Vilamoura, Portugal},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C, EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning, Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{maldonado12improving,
  author = {Alexis Maldonado and Humberto Alvarez-Heredia and Michael Beetz},
  title = {Improving robot manipulation through fingertip perception},
  booktitle = {IEEE International Conference on Intelligent Robots and Systems (IROS)},
  month = oct # " 7--11",
  year = {2012},
  address = {Vilamoura, Algarve, Portugal},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Manipulation},
  bib2html_domain = {Assistive Household},
}
@article{marton12prl,
  author = {Zoltan-Csaba Marton and Florian Seidel and Ferenc Balint-Benczedi and Michael Beetz},
  title = {Ensembles of Strong Learners for Multi-cue Classification},
  journal = {Pattern Recognition Letters (PRL), Special Issue on Scene Understandings and Behaviours Analysis},
  year = {2012},
  note = {In press.},
  bib2html_pubtype = {Journal},
  bib2html_groups = {EnvMod,K4C},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{goron12robotik,
  author = {Lucian Cosmin Goron and Zoltan Csaba Marton and Gheorghe Lazea and Michael Beetz},
  title = {Segmenting Cylindrical and Box-like Objects in Cluttered {3D} Scenes},
  booktitle = {7th German Conference on Robotics (ROBOTIK)},
  address = {Munich, Germany},
  month = may,
  year = {2012},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {Cop, EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@inproceedings{marton12robotdoc,
  author = {Zoltan-Csaba Marton and Florian Seidel and Michael Beetz},
  title = {Towards Modular Spatio-temporal Perception for Task-adapting Robots},
  booktitle = {Postgraduate Conference on Robotics and Development of Cognition (RobotDoC-PhD), a satellite event of the 22nd International Conference on Artificial Neural Networks (ICANN)},
  address = {Lausanne, Switzerland},
  year = {2012},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {EnvMod, Cop},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{klapfer12liquids,
  author = {Reinhard Klapfer and Lars Kunze and Michael Beetz},
  title = {Pouring and Mixing Liquids --- Understanding the Physical Effects of Everyday Robot Manipulation Actions},
  booktitle = {35th German Conference on Artificial Intelligence (KI-2012), Workshop on Human Reasoning and Automated Deduction},
  address = {Saarbr{\"u}cken, Germany},
  month = sep # " 24--27",
  year = {2012},
  bib2html_pubtype = {Workshop Paper},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{kunze12virtualpancakes,
  author = {Lars Kunze and Andrei Haidu and Michael Beetz},
  title = {Making Virtual Pancakes --- Acquiring and Analyzing Data of Everyday Manipulation Tasks through Interactive Physics-based Simulations},
  booktitle = {Poster and Demo Track of the 35th German Conference on Artificial Intelligence (KI-2012)},
  address = {Saarbr{\"u}cken, Germany},
  month = sep # " 24--27",
  year = {2012},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household},
}
@inproceedings{sherpa12ssrr,
  author = {L. Marconi and C. Melchiorri and M. Beetz and D. Pangercic and R. Siegwart and S. Leutenegger and R. Carloni and S. Stramigioli and H. Bruyninckx and P. Doherty and A. Kleiner and V. Lippiello and A. Finzi and B. Siciliano and A. Sala and N. Tomatis},
  title = {The {SHERPA} project: smart collaboration between humans and ground-aerial robots for improving rescuing activities in alpine environments},
  booktitle = {IEEE International Symposium on Safety, Security, and Rescue Robotics (SSRR)},
  month = nov # " 5--8",
  year = {2012},
  address = {College Station, Texas, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {K4C, EnvMod},
  bib2html_funding = {SHERPA},
  bib2html_rescat = {Reasoning, Perception},
  bib2html_domain = {Alpine},
}
@inproceedings{usenko12furniture,
  author = {Vladyslav Usenko and Florian Seidel and Zoltan-Csaba Marton and Dejan Pangercic and Michael Beetz},
  title = {Furniture Classification using {WWW} {CAD} Models},
  booktitle = {IROS'12 Workshop on Active Semantic Perception (ASP'12)},
  month = oct # " 7",
  year = {2012},
  address = {Vilamoura, Portugal},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household},
}
@inproceedings{icra13textureless,
  author = {Karol Hausman and Ferenc Balint-Benczedi and Dejan Pangercic and Zoltan-Csaba Marton and Ryohei Ueda and Kei Okada and Michael Beetz},
  title = {Tracking-based Interactive Segmentation of Textureless Objects},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 6--10",
  year = {2013},
  address = {Karlsruhe, Germany},
  bib2html_groups = {Perception},
  bib2html_funding = {},
  note = {Best Service Robotics Paper Award Finalist},
}
@article{tenorth13tase,
  author = {Moritz Tenorth and Alexander Clifford Perzylo and Reinhard Lafrenz and Michael Beetz},
  title = {Representation and Exchange of Knowledge about Actions, Objects, and Environments in the {RoboEarth} Framework},
  journal = {IEEE Transactions on Automation Science and Engineering (T-ASE)},
  year = {2013},
  note = {Accepted for publication.},
  bib2html_pubtype = {Journal},
  bib2html_groups = {K4C},
  bib2html_funding = {RoboEarth,RoboHow},
  bib2html_rescat = {Knowledge,Learning,Models},
  bib2html_domain = {Assistive Household},
}
@inproceedings{tenorth13partialorder,
  author = {Moritz Tenorth and Fernando De la Torre and Michael Beetz},
  title = {Learning Probability Distributions over Partially-Ordered Human Everyday Activities},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 6--10",
  year = {2013},
  address = {Karlsruhe, Germany},
  note = {Accepted for publication.},
  bib2html_groups = {ProbCog, K4C},
  bib2html_funding = {RoboEarth,RoboHow,CogWatch},
}
@inproceedings{moesenlechner13projection,
  author = {Lorenz M{\"o}senlechner and Michael Beetz},
  title = {Fast Temporal Projection Using Accurate Physics-Based Geometric Reasoning},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = may # " 6--10",
  year = {2013},
  address = {Karlsruhe, Germany},
  note = {Accepted for publication.},
  bib2html_groups = {CRAM},
}
@article{tenorth13knowrob,
  author = {Moritz Tenorth and Michael Beetz},
  title = {{KnowRob} -- A Knowledge Processing Infrastructure for Cognition-enabled Robots. Part 1: The {KnowRob} System},
  journal = {International Journal of Robotics Research (IJRR)},
  year = {2013},
  note = {Accepted for publication.},
}
@article{marton14JINT,
  author = {Marton, Zoltan-Csaba and Balint-Benczedi, Ferenc and Mozos, Oscar Martinez and Blodow, Nico and Kanezaki, Asako and Goron, Lucian Cosmin and Pangercic, Dejan and Beetz, Michael},
  title = {Part-Based Geometric Categorization and Object Reconstruction in Cluttered Table-Top Scenes},
  journal = {Journal of Intelligent \& Robotic Systems},
  pages = {1--22},
  year = {2014},
  publisher = {Springer Netherlands},
}
@inproceedings{haemmerle05sensorbased,
  author = {Simone H{\"a}mmerle and Matthias Wimmer and Bernd Radig and Michael Beetz},
  title = {Sensor-based Situated, Individualized, and Personalized Interaction in Smart Environments},
  booktitle = {INFORMATIK 2005 - Informatik LIVE! Band 1, Beitr{\"a}ge der 35. Jahrestagung der Gesellschaft f{\"u}r Informatik~(GI)},
  editor = {Armin B. Cremers and Rainer Manthey and Peter Martini and Volker Steinhage},
  pages = {261--265},
  month = sep,
  year = {2005},
  address = {Bonn, Germany},
  otherbooktitle = {GI~Jahrestagung~(1)},
  publisher = {GI},
  series = {LNI},
  volume = {67},
  abstract = {Smart environments are sensor equipped areas that know about their environment thus being able to adapt to the user. We present sHOME, a multiagent based platform for integrating situated, individualized, and personalized information. sHOME acquires sensor data to determine the user's identity, his location, his gesture, and natural language commands and stores it in a central knowledge base.},
  isbn = {3-88579-396-2},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Image Understanding},
  bib2html_groups = {IU},
}
@inproceedings{wimmer06aperson,
  author = {Matthias Wimmer and Bernd Radig and Michael Beetz},
  title = {A Person and Context Specific Approach for Skin Color Classification},
  booktitle = {Proceedings of the 18th~International Conference of Pattern Recognition~(ICPR 2006)},
  volume = {2},
  pages = {39--42},
  month = aug,
  year = {2006},
  publisher = {IEEE Computer Society},
  address = {Los Alamitos, CA, USA},
  abstract = {Skin color is an important feature of faces. Various applications benefit from robust skin color detection. Depending on camera settings, illumination, shadows, people's tans, and ethnic groups skin color looks differently, which is a challenging aspect for detecting it automatically. In this paper, we present an approach that uses a high level vision module to detect an image specific skin color model. This model is then used to adapt parametric skin color classifiers to the processed image. This approach is capable to distinguish skin color from extremely similar colors, such as lip color or eyebrow color. Its high speed and high accuracy make it appropriate for real time applications such as face tracking and recognition of facial expressions.},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat = {Image Understanding},
  bib2html_groups = {IU},
}
@inproceedings{riaz09icb,
title = {A Model Based Approach for Expression Invariant Face Recognition},
author = {Zahid Riaz and Christoph Mayer and Matthias Wimmer and Michael Beetz and Bernd Radig},
booktitle = {3rd International Conference on Biometrics, Alghero, Italy},
publisher = {Springer},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz09cost,
title = {Facial Expressions Recognition from Image Sequences},
author = {Zahid Riaz and Christoph Mayer and Michael Beetz and Bernd Radig},
booktitle = {2nd International Conference on Cross-Modal Analysis of Speech, Gestures, Gaze and Facial Expressions, Prague, Czech Republic},
publisher = {Springer},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz09caip,
title = {Model Based Analysis of Face Images for Facial Feature Extraction},
author = {Zahid Riaz and Christoph Mayer and Michael Beetz and Bernd Radig},
booktitle = {Computer Analysis of Images and Patterns, M{\"u}nster, Germany},
publisher = {Springer},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz08INMIC,
author = {Zahid Riaz and Michael Beetz and Bernd Radig},
title = {Shape Invariant Recognition of Segmented Human Faces using Eigenfaces},
booktitle = {Proceedings of the 12th~International Multitopic Conference},
year = {2008},
publisher = {IEEE},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz09icict,
title = {Image Normalization for Face Recognition using 3D Model},
author = {Zahid Riaz and Michael Beetz and Bernd Radig},
booktitle = {International Conference of Information and Communication Technologies, Karachi, Pakistan},
publisher = {IEEE},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz09bioId,
title = {3D Model for Face Recognition across Facial Expressions},
author = {Zahid Riaz and Christoph Mayer and Michael Beetz and Bernd Radig},
booktitle = {Biometric ID Management and Multimodal Communication, Madrid, Spain},
publisher = {Springer},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz09acii,
title = {A Unified Features Approach to Human Face Image Analysis and Interpretation},
author = {Zahid Riaz and Suat Gedikli and Michael Beetz and Bernd Radig},
booktitle = {Affective Computing and Intelligent Interaction, Amsterdam, Netherlands},
publisher = {IEEE},
year = {2009},
note = {Doctoral Consortium Paper},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}
@inproceedings{riaz09fit,
title = {Multi-Feature Fusion in Advanced Robotics Applications},
author = {Zahid Riaz and Christoph Mayer and Saquib Sarfraz and Michael Beetz and Bernd Radig},
booktitle = {International Conference on Frontier of Information Technology},
publisher = {ACM},
year = {2009},
bib2html_pubtype = {Conference Paper},
bib2html_groups = {IU},
bib2html_funding = {CoTeSys},
bib2html_rescat = {Human-Robot Interaction},
bib2html_domain = {Multi-Joint Action}
}