% people:klank
% Publications of Klank
% Encoding: utf-8
@InProceedings{beetz08assistive,
  author = {Michael Beetz and Freek Stulp and Bernd Radig and Jan Bandouch and Nico Blodow and Mihai Dolha and Andreas Fedrizzi and Dominik Jain and Uli Klank and Ingo Kresse and Alexis Maldonado and Zoltan Marton and Lorenz M{\"o}senlechner and Federico Ruiz and Radu Bogdan Rusu and Moritz Tenorth},
  title = {{The Assistive Kitchen -- A Demonstration Scenario for Cognitive Technical Systems}},
  booktitle = {IEEE 17th International Symposium on Robot and Human Interactive Communication (RO-MAN), Muenchen, Germany},
  pages = {1--8},
  year = {2008},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Planning, Learning},
  bib2html_groups = {Memoman, Cogito, EnvMod, Cogman, K4C, ProbCog},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  note = {Invited paper.},
}
@article{klank2008afg,
  title = {{Automatic feature generation in endoscopic images}},
  bib2html_pubtype = {Journal},
  bib2html_groups = {Other},
  bib2html_funding = {Other},
  author = {Ulrich Klank and Nicolas Padoy and Hubert Feussner and Nassir Navab},
  journal = {International Journal of Computer Assisted Radiology and Surgery},
  volume = {3},
  number = {3},
  pages = {331--339},
  year = {2008},
  publisher = {Springer}
}
@InProceedings{klank09searchspace,
  author = {Ulrich Klank and Dejan Pangercic and Radu Bogdan Rusu and Michael Beetz},
  title = {{Real-time CAD Model Matching for Mobile Manipulation and Grasping}},
  booktitle = {9th IEEE-RAS International Conference on Humanoid Robots},
  month = {December 7--10},
  year = {2009},
  address = {Paris, France},
  pages = {290--296},
  bib2html_groups = {Cop, EnvMod},
  bib2html_rescat = {Perception, Models},
  bib2html_pubtype = {Conference Paper},
  bib2html_domain = {Assistive Household},
}
@Article{Beetz09AR,
  author = {Michael Beetz and Freek Stulp and Piotr Esden-Tempski and Andreas Fedrizzi and Ulrich Klank and Ingo Kresse and Alexis Maldonado and Federico Ruiz},
  title = {Generality and Legibility in Mobile Manipulation},
  journal = {Autonomous Robots Journal (Special Issue on Mobile Manipulation)},
  year = {2010},
  volume = {28},
  number = {1},
  pages = {21--44},
  bib2html_groups = {IAS, Cogman},
  bib2html_pubtype = {Journal},
  bib2html_rescat = {Mobile Manipulation},
  bib2html_domain = {Assistive Household},
}
@InProceedings{Composite09IROS,
  author = {Zoltan Csaba Marton and Radu Bogdan Rusu and Dominik Jain and Ulrich Klank and Michael Beetz},
  title = {{Probabilistic Categorization of Kitchen Objects in Table Settings with a Composite Sensor}},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  pages = {4777--4784},
  month = {October 11--15},
  year = {2009},
  address = {St. Louis, MO, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {EnvMod, ProbCog},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@InProceedings{klank09icra,
  author = {Ulrich Klank and Muhammad Zeeshan Zia and Michael Beetz},
  title = {{3D Model Selection from an Internet Database for Robotic Vision}},
  booktitle = {International Conference on Robotics and Automation (ICRA)},
  year = {2009},
  pages = {2406--2411},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Models},
  bib2html_groups = {Cop},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  abstract = {We propose a new method for automatically accessing an internet
  database of 3D models that are searchable only by their
  user-annotated labels, for using them for vision and robotic
  manipulation purposes. Instead of having only a local database
  containing already seen objects, we want to use shared databases
  available over the internet. This approach while having the
  potential to dramatically increase the visual recognition capability
  of robots, also poses certain problems, like wrong annotation due to
  the open nature of the database, or overwhelming amounts of data
  (many 3D models) or the lack of relevant data (no models matching a
  specified label). To solve those problems we propose the following:
  First, we present an outlier/inlier classification method for
  reducing the number of results and discarding invalid 3D models that
  do not match our query. Second, we utilize an approach from computer
  graphics, the so called `morphing', to this application to
  specialize the models, in order to describe more objects. Third, we
  search for 3D models using a restricted search space, as obtained
  from our knowledge of the environment. We show our classification
  and matching results and finally show how we can recover the correct
  scaling with the stereo setup of our robot.}
}
@inproceedings{zia09icar,
  author           = {Muhammad Zeeshan Zia and Ulrich Klank and Michael Beetz},
  title            = {{Acquisition of a Dense 3D Model Database for Robotic Vision}},
  booktitle        = {International Conference on Advanced Robotics (ICAR)},
  year             = {2009},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat  = {Models},
  bib2html_groups  = {Cop},
  bib2html_funding = {CoTeSys},
  bib2html_domain  = {Assistive Household},
  abstract         = {Service Robots in real world environments need to have computer vision capability
    for detecting a large class of objects. We discuss how freely available 3D model
    databases can be used to enable robots to know the appearance of a wide variety of
    objects in human environments with special application to our Assistive Kitchen.
    However, the open and free nature of such databases pose problems for example the presence of
    incorrectly annotated 3D models, or objects for which very few models
    exist online. We have previously proposed techniques to automatically select
    the useful models from the search result, and utilizing such models to
    perform simple manipulation tasks. Here, we build upon that work, to describe a technique
    based on Morphing to form new 3D models if we only have a few models corresponding to a label.
    However, morphing in computer graphics requires a human operator and is computationally burdensome,
    due to which we present our own automatic morphing technique. We also present a simple
    technique to speed the matching process of 3D models against real scenes using Visibility culling.
    This technique can potentially speed-up the matching process by 2-3 times while using less memory,
    if we have some prior information model and world pose.}
}
@InProceedings{sunli-2009-cvprws,
  title = {{EYEWATCHME} -- {3D} Hand and Object Tracking for Inside Out Activity Analysis},
  author = {Li Sun and Ulrich Klank and Michael Beetz},
  booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
  year = {2009},
  month = {June},
  pages = {9--16},
  abstract = {This paper investigates the inside-out recognition of everyday manipulation tasks using a gaze-directed camera, which is a camera that actively directs at the visual attention focus of the person wearing the camera. We present EYEWATCHME, an integrated vision and state estimation system that at the same time tracks the positions and the poses of the acting hands, the pose that the manipulated object, and the pose of the observing camera. Taken together, EYEWATCHME provides comprehensive data for learning predictive models of vision-guided manipulation that include the objects people are attending, the interaction of attention and reaching/grasping, and the segmentation of reaching and grasping using visual attention as evidence. Key technical contributions of this paper include an ego view hand tracking system that estimates 27 DOF hand poses. The hand tracking system is capable of detecting hands and estimating their poses despite substantial self-occlusion caused by the hand and occlusions caused by the manipulated object. EYEWATCHME can also cope with blurred images that are caused by rapid eye movements. The second key contribution is the of the integrated activity recognition system that simultaneously tracks the attention of the person, the hand poses, and the poses of the manipulated objects in terms of a global scene coordinates. We demonstrate the operation of EYEWATCHME in the context of kitchen tasks including filling a cup with water.},
  keywords = {computer graphics, human computer interaction, image restoration, image segmentation, image sensors, object recognition, tracking, 3D hand tracking, 3D object tracking, EYEWATCHME, blurred images, gaze-directed camera, grasping segmentation, inside out activity analysis, integrated activity recognition system, reaching segmentation, state estimation system, substantial self-occlusion, vision-guided manipulation},
  doi = {10.1109/CVPR.2009.5204358},
  ISSN = {1063-6919},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {Cop},
  bib2html_rescat = {Perception},
  bib2html_domain = {Assistive Household}
}
@InProceedings{IAS09CoPMan,
  author = {Michael Beetz and Nico Blodow and Ulrich Klank and Zoltan Csaba Marton and Dejan Pangercic and Radu Bogdan Rusu},
  title = {{CoP-Man -- Perception for Mobile Pick-and-Place in Human Living Environments}},
  booktitle = {Proceedings of the 22nd IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) Workshop on Semantic Perception for Mobile Manipulation},
  month = {October 11--15},
  year = {2009},
  address = {St. Louis, MO, USA},
  note = {Invited paper.},
  bib2html_pubtype = {Workshop Paper},
  bib2html_rescat = {Perception, Models},
  bib2html_groups = {Cop, EnvMod},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
}
@InProceedings{maldonado10nomodelgrasping,
  author = {Alexis Maldonado and Ulrich Klank and Michael Beetz},
  title = {Robotic grasping of unmodeled objects using time-of-flight range data and finger torque information},
  booktitle = {2010 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  month = {October 18--22},
  year = {2010},
  pages = {2586--2591},
  address = {Taipei, Taiwan},
  bib2html_pubtype = {Conference Paper},
  bib2html_rescat = {Action, Perception},
  bib2html_groups = {Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household}
}
@inproceedings{klank11transparent,
  author = {Ulrich Klank and Daniel Carton and Michael Beetz},
  title = {Transparent Object Detection and Reconstruction on a Mobile Platform},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  month = {May 9--13},
  year = {2011},
  address = {Shanghai, China},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cop},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Object Recognition},
  bib2html_domain = {Assistive Household}
}
@article{tenorth11www,
  title = {{Web-enabled Robots -- Robots that Use the Web as an Information Resource}},
  author = {Moritz Tenorth and Ulrich Klank and Dejan Pangercic and Michael Beetz},
  journal = {IEEE Robotics \& Automation Magazine},
  volume = {18},
  number = {2},
  pages = {58--68},
  year = {2011},
  publisher = {IEEE},
  bib2html_pubtype = {Journal},
  bib2html_groups = {K4C},
  bib2html_rescat = {Representation, Models, Reasoning},
  bib2html_domain = {Assistive Household}
}
@inproceedings{icra11perception-manipulation,
  author = {Michael Beetz and Ulrich Klank and Alexis Maldonado and Dejan Pangercic and Thomas R{\"u}hr},
  title = {Robotic Roommates Making Pancakes - Look Into Perception-Manipulation Loop},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA), Workshop on Mobile Manipulation: Integrating Perception and Manipulation},
  month = {May 9--13},
  year = {2011},
  pages = {529--536},
  bib2html_pubtype = {Workshop Paper},
  bib2html_groups = {EnvMod, Cogman, Cop},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Action, Perception},
  bib2html_domain = {Assistive Household}
}
@inproceedings{pancakes11humanoids,
  author = {Michael Beetz and Ulrich Klank and Ingo Kresse and Alexis Maldonado and Lorenz M{\"o}senlechner and Dejan Pangercic and Thomas R{\"u}hr and Moritz Tenorth},
  title = {{Robotic Roommates Making Pancakes}},
  booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
  year = {2011},
  month = {October 26--28},
  address = {Bled, Slovenia},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {EnvMod, Cop, Cogito, K4C, Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  bib2html_rescat = {Action, Perception, Reasoning, Planning, Models, Representation},
}
@inproceedings{toolrep11humanoids,
  author = {Ingo Kresse and Ulrich Klank and Michael Beetz},
  title = {Multimodal Autonomous Tool Analyses and Appropriate Application},
  booktitle = {11th IEEE-RAS International Conference on Humanoid Robots},
  year = {2011},
  month = {October 26--28},
  address = {Bled, Slovenia},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cop, Cogman},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  bib2html_rescat = {Perception, Action, Representation},
}
@inproceedings{klank12validation,
  author = {Ulrich Klank and Lorenz M{\"o}senlechner and Alexis Maldonado and Michael Beetz},
  title = {Robots that Validate Learned Perceptual Models},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  year = {2012},
  month = {May 14--18},
  address = {St. Paul, MN, USA},
  bib2html_pubtype = {Conference Paper},
  bib2html_groups = {Cop, Cogito},
  bib2html_funding = {CoTeSys},
  bib2html_rescat = {Representation, Reasoning},
  bib2html_domain = {Assistive Household}
}
@phdthesis{klank2012PhD,
  author = {Ulrich Klank},
  title = {Everyday Perception for Mobile Manipulation in Human Environments},
  year = {2012},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  bib2html_pubtype = {PhD Thesis},
  bib2html_rescat = {Perception, Representation, Learning},
  bib2html_groups = {Cop},
  bib2html_funding = {CoTeSys},
  bib2html_domain = {Assistive Household},
  pdf = {http://mediatum.ub.tum.de/download/1080039/1080039.pdf},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:91-diss-20120412-1080039-1-7}
}