Initial commit
223
.gitignore
vendored
Normal file
@@ -0,0 +1,223 @@
|
||||
install/
|
||||
build/
|
||||
log/
|
||||
|
||||
.idea
|
||||
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[codz]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
# Pipfile.lock
|
||||
|
||||
# UV
|
||||
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# uv.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
# poetry.lock
|
||||
# poetry.toml
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
||||
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
||||
# pdm.lock
|
||||
# pdm.toml
|
||||
.pdm-python
|
||||
.pdm-build/
|
||||
|
||||
# pixi
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
||||
# pixi.lock
|
||||
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
||||
# in the .venv directory. It is recommended not to include this directory in version control.
|
||||
.pixi
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# Redis
|
||||
*.rdb
|
||||
*.aof
|
||||
*.pid
|
||||
|
||||
# RabbitMQ
|
||||
mnesia/
|
||||
rabbitmq/
|
||||
rabbitmq-data/
|
||||
|
||||
# ActiveMQ
|
||||
activemq-data/
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.envrc
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
# .idea/
|
||||
|
||||
# Abstra
|
||||
# Abstra is an AI-powered process automation framework.
|
||||
# Ignore directories containing user credentials, local state, and settings.
|
||||
# Learn more at https://abstra.io/docs
|
||||
.abstra/
|
||||
|
||||
# Visual Studio Code
|
||||
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
||||
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
||||
# you could uncomment the following to ignore the entire vscode folder
|
||||
# .vscode/
|
||||
|
||||
# Ruff stuff:
|
||||
.ruff_cache/
|
||||
|
||||
# PyPI configuration file
|
||||
.pypirc
|
||||
|
||||
# Marimo
|
||||
marimo/_static/
|
||||
marimo/_lsp/
|
||||
__marimo__/
|
||||
|
||||
# Streamlit
|
||||
.streamlit/secrets.toml
|
||||
BIN
PAPER/Beerbot.pdf
Normal file
438
PAPER/Beerbot.tex
Normal file
@@ -0,0 +1,438 @@
|
||||
%%
|
||||
%%
|
||||
%%
|
||||
%% Commands for TeXCount
|
||||
%TC:macro \cite [option:text,text]
|
||||
%TC:macro \citep [option:text,text]
|
||||
%TC:macro \citet [option:text,text]
|
||||
%TC:envir table 0 1
|
||||
%TC:envir table* 0 1
|
||||
%TC:envir tabular [ignore] word
|
||||
%TC:envir displaymath 0 word
|
||||
%TC:envir math 0 word
|
||||
%TC:envir comment 0 0
|
||||
%%
|
||||
%% The first command in your LaTeX source must be the \documentclass
|
||||
%% command.
|
||||
%%
|
||||
%% For submission and review of your manuscript please change the
|
||||
%% command to \documentclass[manuscript, screen, review]{acmart}.
|
||||
%%
|
||||
%% When submitting camera ready or to TAPS, please change the command
|
||||
%% to \documentclass[sigconf]{acmart} or whichever template is required
|
||||
%% for your publication.
|
||||
%%
|
||||
%%
|
||||
\documentclass[manuscript, screen, review]{acmart}
|
||||
%%
|
||||
%% \BibTeX command to typeset BibTeX logo in the docs
|
||||
\AtBeginDocument{%
|
||||
\providecommand\BibTeX{{%
|
||||
Bib\TeX}}}
|
||||
|
||||
%% Rights management information. This information is sent to you
|
||||
%% when you complete the rights form. These commands have SAMPLE
|
||||
%% values in them; it is your responsibility as an author to replace
|
||||
%% the commands and values with those provided to you when you
|
||||
%% complete the rights form.
|
||||
\setcopyright{acmlicensed}
|
||||
\copyrightyear{2026}
|
||||
\acmYear{2026}
|
||||
\acmDOI{} %% hide the doi link
|
||||
|
||||
%%
|
||||
%% Submission ID.
|
||||
%% Use this when submitting an article to a sponsored event. You'll
|
||||
%% receive a unique submission ID from the organizers
|
||||
%% of the event, and this ID should be used as the parameter to this command.
|
||||
%%\acmSubmissionID{123-A56-BU3}
|
||||
|
||||
%%
|
||||
%% For managing citations, it is recommended to use bibliography
|
||||
%% files in BibTeX format.
|
||||
%%
|
||||
%% You can then either use BibTeX with the ACM-Reference-Format style,
|
||||
%% or BibLaTeX with the acmnumeric or acmauthoryear sytles, that include
|
||||
%% support for advanced citation of software artefact from the
|
||||
%% biblatex-software package, also separately available on CTAN.
|
||||
%%
|
||||
%% Look at the sample-*-biblatex.tex files for templates showcasing
|
||||
%% the biblatex styles.
|
||||
%%
|
||||
|
||||
%%
|
||||
%% The majority of ACM publications use numbered citations and
|
||||
%% references. The command \citestyle{authoryear} switches to the
|
||||
%% "author year" style.
|
||||
%%
|
||||
%% If you are preparing content for an event
|
||||
%% sponsored by ACM SIGGRAPH, you must use the "author year" style of
|
||||
%% citations and references.
|
||||
%% Uncommenting
|
||||
%% the next command will enable that style.
|
||||
%%\citestyle{acmauthoryear}
|
||||
|
||||
\usepackage[T1]{fontenc} % richtige Schriftkodierung
|
||||
\usepackage{lmodern} % skalierbare Latin Modern Fonts
|
||||
\usepackage{microtype} % verbessert Schriftbild, erlaubt Font Expansion
|
||||
\usepackage{url}
|
||||
\usepackage{csquotes}
|
||||
\usepackage[english]{babel}
|
||||
\usepackage{subcaption}
|
||||
|
||||
|
||||
%%
|
||||
%% end of the preamble, start of the body of the document source.
|
||||
\begin{document}
|
||||
|
||||
%%
|
||||
%% The "title" command has an optional parameter,
|
||||
%% allowing the author to define a "short title" to be used in page headers.
|
||||
\title{BeerBot}
|
||||
|
||||
%%
|
||||
%% The "author" command and its associated commands are used to define
|
||||
%% the authors and their affiliations.
|
||||
%% Of note is the shared affiliation of the first two authors, and the
|
||||
%% "authornote" and "authornotemark" commands
|
||||
%% used to denote shared contribution to the research.
|
||||
\author{Erik Rausch}
|
||||
\email{erik.rausch@hof-university.de}
|
||||
\affiliation{%
|
||||
\institution{Hochschule Hof}
|
||||
\city{Hof}
|
||||
\state{Bayern}
|
||||
\country{Deutschland}
|
||||
}
|
||||
|
||||
\author{René Sinnig}
|
||||
\email{rene.sinnig@hof-university.de}
|
||||
\affiliation{%
|
||||
\institution{Hochschule Hof}
|
||||
\city{Hof}
|
||||
\state{Bayern}
|
||||
\country{Deutschland}
|
||||
}
|
||||
|
||||
\author{Jan Kemnitzer}
|
||||
\email{jan.kemnitzer.2@hof-university.de}
|
||||
\affiliation{%
|
||||
\institution{Hochschule Hof}
|
||||
\city{Hof}
|
||||
\state{Bayern}
|
||||
\country{Deutschland}
|
||||
}
|
||||
|
||||
\author{Benjamin Münster}
|
||||
\email{benjamin.muenster@hof-university.de}
|
||||
\affiliation{%
|
||||
\institution{Hochschule Hof}
|
||||
\city{Hof}
|
||||
\state{Bayern}
|
||||
\country{Deutschland}
|
||||
}
|
||||
|
||||
%%
|
||||
%% By default, the full list of authors will be used in the page
|
||||
%% headers. Often, this list is too long, and will overlap
|
||||
%% other information printed in the page headers. This command allows
|
||||
%% the author to define a more concise list
|
||||
%% of authors' names for this purpose.
|
||||
\renewcommand{\shortauthors}{Rausch et al.}
|
||||
|
||||
%%
|
||||
%% The abstract is a short summary of the work to be presented in the
|
||||
%% article.
|
||||
\begin{abstract}
|
||||
This is the documentation of the project BeerBot in the module Applied Robotics WS25/26. The project's goal was to design a robot arm that can pour beer (or other liquids) from a bottle into a glass. Implementing the beer workflow is a promising challenge, as there are more things to consider than just pouring water, especially if you want to do it properly.
|
||||
\end{abstract}
|
||||
|
||||
%%
|
||||
%% Keywords. The author(s) should pick words that accurately describe
|
||||
%% the work being presented. Separate the keywords with commas.
|
||||
\keywords{Dobot Magician, Robotics, Robot Arm, Localization, Code, Python}
|
||||
%% A "teaser" image appears between the author and affiliation
|
||||
%% information and the body of the document, and typically spans the
|
||||
%% page.
|
||||
\begin{teaserfigure}
|
||||
\includegraphics[width=\textwidth]{teaser.png}
|
||||
\caption{Beerbot pouring beer}
|
||||
\Description{A robotic arm holding a beer bottle and pouring beer into a glass placed in a wooden contraption that tilts the glass}
|
||||
\label{fig:teaser}
|
||||
\end{teaserfigure}
|
||||
|
||||
%%
|
||||
%% This command processes the author and affiliation and title
|
||||
%% information and builds the first part of the formatted document.
|
||||
\maketitle
|
||||
|
||||
\section{Task Description}
|
||||
The project's goal was to design a robot arm capable of pouring beer,
|
||||
managing the pouring process and the correct formation of foam.
|
||||
The work was defined in four steps:
|
||||
\begin{enumerate}
|
||||
\item The robot can grab a bottle/beer can
|
||||
\item The robot performs the pouring without fluids
|
||||
\item The robot pours water from the bottle into a glass (positions of both are fixed)
|
||||
\item Adjustment to pour beer (by using a tilting mechanism, if needed)
|
||||
\end{enumerate}
|
||||
As a completely working solution is not the primary goal of the study work, we have defined additional learning goals:
|
||||
\begin{itemize}
|
||||
\item Working with hardware: Getting practical experience with robotics instead of doing simulations or learning theory
|
||||
\item Robotics basics: Learning about coordinate transformations, kinematics and camera localization
|
||||
\item Scientific work: Writing a {\itshape real} scientific paper using \LaTeX\
|
||||
\end{itemize}
|
||||
In the case of proceeding as expected with the above declared core functionality, a few additional tasks were defined.
|
||||
These extensions are optional but promising to gather more experience in the field of robotics and autonomous systems:
|
||||
\begin{itemize}
|
||||
\item Bottle and glass placed at random positions, including object detection and
|
||||
localization
|
||||
\item Choose from different bottles, e.g. select the one that has been ordered.
|
||||
\end{itemize}
|
||||
|
||||
\section{Hardware}
|
||||
\label{sec:hardware}
|
||||
|
||||
The first decision was about the hardware. The lab provides two different robotic arms, Dobot Magician and Niryo.
|
||||
Considering the respective technical specifications we chose the Dobot due to its higher load capacity (500g) than Niryo (300g). Another crucial property is the gripper width, as it has to be able to grab a bottle. This makes only a small bottle possible. In this case we chose Super Bock Mini Original 0.2 L, which is able to be carried and tilted by the Dobot. At this point we had major concerns since holding a bottle at just the bottleneck requires less strength than when the bottle is tilted, which is caused by the leverage.
|
||||
|
||||
The robotic arm can perform movements in Cartesian and joint coordinates. That means direct and inverse kinematics are supported. Especially inverse kinematics is important, as it provides trajectory calculations automatically, not requiring the user to implement them on their own.
|
||||
In addition to the robot a base plate was used, where the robot could be placed on. This allows marking the positions of the bottle and the glass and ensuring consistency.
|
||||
|
||||
Important to mention is that the Dobot internally manages a queue for the movement actions, most control implementations do not wait until the movement has been executed, only until the desired movement has been registered in the queue.
|
||||
To control the Dobot we mainly used Windows laptops and for specific tests an Ubuntu system.
|
||||
For imaging and localization smartphone cameras and printed ArUco patterns were used as they are simple to handle.
|
||||
Furthermore, we equipped the bottle with a ring to improve the gripper's ability to hold the bottle securely.
|
||||
|
||||
\section{Software}
|
||||
\label{sec:software}
|
||||
|
||||
For controlling the Dobot, there were three alternatives available:
|
||||
\begin{itemize}
|
||||
\item DobotStudio
|
||||
\item ROS2
|
||||
\item Dobot RPC (Python)
|
||||
\end{itemize}
|
||||
|
||||
Not to exclude a possibility too early, we decided to try out every alternative and evaluated their abilities. This way we could ensure that we could get a reliable overview of each of the three.
|
||||
|
||||
|
||||
\subsection{DobotStudio}
|
||||
|
||||
DobotStudio is a simple and user-friendly way to control the Dobot. After connecting the laptop with DobotStudio installed to the robot by using USB, the robot's joints and the gripper can directly be controlled. This software provides a wide range of features. At first we tried out the homing procedure, manual movements in both coordinate systems and the activation of the pneumatic gripper. A very useful functionality is teach and playback. This allows a recording of robot movements by holding the arm and moving it by hand. These recordings can be displayed in DobotStudio and also manipulated. Then these movements can be executed step by step and as a sequence. The recordings can also be exported and imported in the XML-format. Another helpful feature is the integrated Python console, allowing the user to control the Dobot directly using code. However, this console's Python environment is very minimal and does not allow other libraries, making it limited to some basic movements of the Dobot without integration of other dependencies.
|
||||
|
||||
\subsection{ROS2}
|
||||
|
||||
ROS2 (Robot Operating System) is a common software framework in robotics providing the infrastructure for data processing and delivery. The Data Distribution Service ensures simple event-driven communication between the individual components (nodes) and their data. This makes the framework essential when dealing with many different data sources and formats from different devices. In this project this may not be the case, as the only data needed are robot positions and images or videos from a camera (if the additional tasks are included).
|
||||
For controlling the Dobot using ROS we found an implementation of the basic operations on Github \cite{dobot_ros}. The available features are similar to DobotStudio. This implementation provides actions for the homing procedure, for point-to-point movements in different modes (e.g. cartesian or joint coordinates) and gripper control. Flexible control programs can be written, by using this in personalized ROS nodes. Also crucial to mention are the visualization tools, allowing a detailed overview of the robot's current state and manual movements using buttons.
|
||||
|
||||
Being able to include other Python libraries, ROS is more suitable for programming with the Dobot than DobotStudio. Another benefit is that the end of movements can be awaited, as they are implemented as ROS actions.
|
||||
However, the ROS implementation causes other problems. When moving the end effector (gripper) in the y-direction, the gripper rotates although it has not been commanded to do so. Another problem occurred when an XML file exported from DobotStudio was executed by a self-written ROS node. In this case the real positions of the end effector controlled by ROS differed from the real positions controlled by DobotStudio. This offset might be correctable by measurement; however, this would be a considerable waste of time, especially as other alternatives are available. To sum this up, because of these drawbacks ROS was no longer suitable for the project.
|
||||
|
||||
\subsection{DobotRPC}
|
||||
|
||||
DobotRPC is a python library for controlling the Dobot.\footnote{DobotRPC can be installed by using \texttt{pip install DobotRPC}}
|
||||
An additional middleware program called DobotLink is required to establish the connection between a laptop and the robot. However, this program is only available on Windows. Using DobotRPC, all necessary robot operations can be executed (getPose, point-to-point movements in joint or Cartesian coordinates, gripper control). The communication with the Dobot is synchronous, but only until the movement is registered in the queue as described above. That means there has to be some waiting implemented to ensure every movement has been executed when the use case requires a calculation/operation to be executed after all movements were done. Setting everything up caused some problems which took time until they were fixed. For example, the used Python version caused some incompatibilities with DobotLink which could not directly be identified as incompatibilities. Another issue was that the COM port used for communication with the robot was not detected, or that the robot did not move although the robot and the interfaces were registered. \cite{dobotrpc}
|
||||
|
||||
\section{Implementing the pouring workflow}
|
||||
\label{sec:implementation}
|
||||
|
||||
The first experiments started while the decision on the final software was not yet made. Because of the simple setup, we worked with DobotStudio. We started with grabbing the bottle. Although the grip was not as stable as expected, the robot did not drop the bottle, even during tilting. In the next step the pouring process was tested. For this, an empty bottle was used as it could not be ensured that the bottle would not be dropped this time. The movements were recorded by the teach and playback function of DobotStudio. After the replay was successful, a bottle filled with water was used. In this experiment a problem occurred. Because of the unstable grip of the bottle, which resulted in some water being spilled while pouring, we had to figure out how to improve the setup. Therefore, the gripper was fitted with a \enquote{P-Profile Fensterband} (window sealing tape) to stabilize the bottle when grabbed. However, the window tape was not small enough, so the gripper was not able to grab the bottle anymore, even when the window tape was cut smaller. That is why a ring made of metal and cable ties was designed that can be placed directly under the spot where the gripper holds the bottleneck. This ensures that the grip is stable, even if the bottle is tilted, preventing water from getting spilled. A bottle with the ring is displayed in Figure~\ref{fig:ring}.
|
||||
|
||||
\begin{figure}[h]
|
||||
\centering
|
||||
\includegraphics[width=0.3\textwidth]{ring.png}
|
||||
\caption{Beer bottle with the metal ring}
|
||||
\label{fig:ring}
|
||||
\Description{A beer bottle equipped with a ring out of metal to prevent the bottle from shaking when gripped}
|
||||
\end{figure}
|
||||
|
||||
In the next step the workflow was further refined using the XML-file of the previous water pouring procedure. Using DobotStudio, the coordinates are adjusted manually to prepare the whole setup for the beer pouring.
|
||||
After everything was prepared, including drying papers to prevent any beer from damaging the robot or polluting the table, the pouring process was started.
|
||||
|
||||
\subsection{How is beer foam created?}
|
||||
|
||||
Pouring beer is very different from pouring water. Especially the foam, which should neither overflow nor turn out too sparse, is the largest challenge. The foam is created when the contained, dissolved $CO_2$ escapes on impact. Proteins and hops stabilize the bubbles. The higher the energy on impact is, the more foam will be created. The energy depends on the fall height, the pouring angle and the temperature. \citep{foam_bierentdecker, foam_wiki}
|
||||
|
||||
\subsection{Adaptation to pouring beer}
|
||||
|
||||
Unfortunately, the formation of foam was too fast, so that the beer spilled over. This happened because the whole pouring was done in only a few discrete movement steps (as motion can only be implemented as point-to-point movements). This caused nearly half of the bottle's content to be poured in a single step, producing too much foam.
|
||||
|
||||
To prevent this, more intermediate steps and breaks were added. This reduced the velocity of the process and the increase of the bottle angle. However, the reduced pouring speed caused a phenomenon called teapot effect, \enquote{it refers to the tendency of liquid pouring from a spout to stick to its underside} \citep{teapot_mihai}. The teapot effect is favored by a hydrophilic surface, a rounded spout, a small angle between the liquid and the spout, and a low pouring speed. In contrast, a higher velocity or less wettable surfaces reduce dripping \citep{teapot_mihai}.
|
||||
|
||||
The teapot effect would not be a problem with higher speed, but this would cause higher foam making this option unsuitable. Another possibility would be changing the beer temperature, as a lower temperature reduces the foam. However this would complicate the whole setup. As foam height also depends on the fall height and the angle, another idea was to change the angle of the glass reducing the foam.
|
||||
|
||||
This way the beer only falls a short distance and then flows down the inside of the glass instead of falling directly into the glass. To realize this, a tilting contraption to place the glass in was built, whose angle could be adjusted. The contraption is shown in Figure~\ref{fig:tilting}.
|
||||
|
||||
\begin{figure}[h]
|
||||
\centering
|
||||
\includegraphics[width=0.3\textwidth]{tilting_mechanism.jpg}
|
||||
\caption{Tilting mechanism}
|
||||
\label{fig:tilting}
|
||||
\Description{A wood contraption where the beer glass is placed in. The glass is upholstered with plastic. The tilting angle can be adjusted.}
|
||||
\end{figure}
|
||||
|
||||
In addition to the tilting of the glass, the pouring process was adjusted, so that the spout of the bottle is only a few millimeters over the bottle edge. Although the teapot effect cannot be prevented completely this way, the beer is captured by the edge of the glass, making the beer flow into the glass and not along the bottle. Because of the combination of the tilted glass, the reduced distance between bottle spout and edge of the glass and a small increase of the bottle angle per step with enough breaks between the steps, no beer was spilled and the foam is not too high. The beer is now poured correctly.
|
||||
|
||||
|
||||
\section{Localization with ArUco Patterns}
|
||||
|
||||
The localization part of the project was defined as an additional task. The goal was to enable the Dobot to find out the position of the bottle in its base coordinate system. For this at first some considerations were made. A localization is typically a chain of coordinate transformations in homogeneous coordinates. This means building a 4x4-matrix out of a 3x3-rotation matrix and a translation vector. A transformation that transforms coordinates from coordinate system B to coordinate system A is described by this:
|
||||
\[
|
||||
{}^{A}T_{B} =
|
||||
\begin{bmatrix}
|
||||
R_{3\times 3} & t_{3\times 1} \\
|
||||
0\ 0\ 0 & 1
|
||||
\end{bmatrix}
|
||||
\]
|
||||
|
||||
Usually, when an object is located, its coordinates are provided in the coordinate system of a sensor. To get its coordinates in the desired coordinate system, a chain of such homogeneous transformations is built, where the elements are combined via matrix multiplications:
|
||||
\[
|
||||
{}^{target}T_{sensor} = {}^{target}T_{1} \; {}^{1}T_{2} \; \dots \; {}^{n-1}T_{n} \; {}^{n}T_{sensor}
|
||||
\]
|
||||
|
||||
Finding these transformations is often quite challenging. For this project we decided on a localization using cameras as sensors because LiDAR or even RaDAR for a single object detection might not be worth the effort of dealing with and learning about the algorithms of point cloud data provided by these sensors. When using cameras to do localizations there are always calibrations needed.
|
||||
|
||||
\subsection{Intrinsic Calibration \cite{localization}}
|
||||
|
||||
Intrinsic calibration means the process of calculating the transformation from points in the camera coordinate system to pixel coordinates in the recorded image. For this, real world coordinates \((x, y, z)\) are projected onto the image plane using the intercept theorem. These two-dimensional points \((x', y')\) in the image plane are then transformed into pixel coordinates \((u, v)\) by multiplying them with the camera matrix K.
|
||||
|
||||
\[
|
||||
\begin{bmatrix}
|
||||
x' \\[2mm] y'
|
||||
\end{bmatrix}
|
||||
=
|
||||
\frac{1}{Z_c}
|
||||
\begin{bmatrix}
|
||||
X_c \\[2mm] Y_c
|
||||
\end{bmatrix}
|
||||
\]
|
||||
|
||||
|
||||
\[
|
||||
K =
|
||||
\begin{bmatrix}
|
||||
f_x & 0 & c_x \\
|
||||
0 & f_y & c_y \\
|
||||
0 & 0 & 1
|
||||
\end{bmatrix}
|
||||
\]
|
||||
|
||||
with \(f_x, f_y\) denoting the focal length in pixels, and \(c_x, c_y\) being the pixel coordinates of the image center.
|
||||
|
||||
\[
|
||||
\begin{bmatrix}
|
||||
u \\ v \\ 1
|
||||
\end{bmatrix}
|
||||
=
|
||||
K
|
||||
\begin{bmatrix}
|
||||
x' \\ y' \\ 1
|
||||
\end{bmatrix}
|
||||
\]
|
||||
|
||||
In addition to K, distortion parameters are needed, as the lenses in cameras distort the image.
|
||||
K and the distortion parameters can be determined by recording a grid of black and white squares, called a checkerboard, and using OpenCV to detect the corners and calculate the parameters.
|
||||
|
||||
\subsection{Extrinsic Calibration \cite{localization}}
|
||||
|
||||
Extrinsic calibration means finding the transformation of points in world coordinates to the camera coordinate system. For doing this, correspondences consisting of 3D points in world coordinates, 2D pixel coordinates and the camera intrinsics are needed. With at least three correspondences the transformation (rotation and translation) can be found (solving the Perspective-n-Point problem, PnP). OpenCV also provides functions for that. In this context, ArUco markers are often used for extrinsic calibration as they have a known size and structure. Support for ArUco is also provided by OpenCV. Such a pattern is displayed in Figure~\ref{fig:aruco}.
|
||||
|
||||
\begin{figure}[h]
|
||||
\centering
|
||||
\includegraphics[width=0.1\textwidth]{aruco.png}
|
||||
\caption{6x6 ArUco pattern}
|
||||
\label{fig:aruco}
|
||||
\end{figure}
|
||||
|
||||
\subsection{Discussion of localization setups}
|
||||
|
||||
With the intrinsic and extrinsic calibration, a pixel can be fully transformed to the world coordinate system. However, a pixel only corresponds to a ray in world coordinates as a pixel is only a projection of the real points without the distance information being preserved. To resolve this problem there are some possibilities. When there are two cameras available and placed at fixed locations, stereo vision can be performed when knowing the distance of the camera centers by using the epipolar geometry. For this a mapping between the pixels of the two images is necessary. This can be done by extracting features and finding the corresponding features on the other image or transforming the images such that each point in the image has a corresponding pixel on the same pixel row in the other image.
|
||||
The approach that was tried at first was ArUco-based. Two ArUco markers were placed, one at the end effector and the other at the bottle. Using ArUco patterns directly at the bottle results in some benefits. This way no object detection is needed as the ArUco patterns provide the transformation from the camera frame to the bottle coordinate system directly. So the bottle location is simply the origin of the bottle coordinate system. By using another ArUco pattern placed at the end effector, the camera coordinates can be transformed into robot coordinates. This transformation chain is described as the following:
|
||||
|
||||
\[
|
||||
{}^{robot\_base}T_{aruco\_bottle} = {}^{robot\_base}T_{ee} \; {}^{ee}T_{aruco\_ee} \; {}^{aruco\_ee}T_{camera} \; {}^{camera}T_{aruco\_bottle}
|
||||
\]
|
||||
|
||||
Finding \({}^{ee}T_{aruco\_ee}\) is the most challenging part of this setup, as this transformation cannot directly be derived. This transformation is necessary because the ArUco marker cannot be placed exactly at the origin of the end-effector coordinate system with the required rotation, as its real position is unknown and probably inside the gripper mechanics. It seems to be possible to estimate this transformation by providing correspondences of frames containing the ArUco pattern and the position of the end effector in robot coordinates. However, this requires a fixed camera and is prone to errors.
|
||||
|
||||
That is the reason why the setup was slightly changed. Instead of placing an ArUco pattern at the end effector, it is placed on the ground plate on which the robot stands. This results in the following chain of coordinate transformations:
|
||||
|
||||
\[
|
||||
{}^{robot\_base}T_{aruco\_bottle} = {}^{robot\_base}T_{aruco\_ground} \; {}^{aruco\_ground}T_{camera} \; {}^{camera}T_{aruco\_bottle}
|
||||
\]
|
||||
|
||||
This way the setup should allow a nearly accurate measurement of the translation from robot\_base to aruco\_ground because the robot origin can be estimated using the user manual of the Dobot. Unfortunately, the calculated coordinates of the bottle position were not correct, probably because of errors in the implementation or the manually determined coordinate transformation \({}^{robot\_base}T_{aruco\_ground}\).
|
||||
Due to time constraints the ArUco approach was not pursued further.
|
||||
|
||||
\section{Localization with YOLO}
|
||||
|
||||
As the localization based on ArUco patterns did not work as expected in an acceptable amount of time, a different approach was tried. For that YOLOv8m was used. This model is very common in object detection tasks and performs well in real-time scenarios. Instead of other object detection approaches like R-CNNs, YOLO (You Only Look Once) performs the object localization (bounding box regression) and classification for all objects in only one forward pass through the neural network \cite{yolo}.
|
||||
|
||||
To do the localization, a webcam was placed above the robot. The images were processed by the YOLO model and the results are filtered. Fortunately, the model was already able to detect the bottles properly without fine-tuning. However, because of light reflections, the bottle detection was hindered sometimes. The same problem was encountered for the detection of the beer glass, but it occurred more often presumably because of the glass being transparent, preventing a robust glass detection.
|
||||
|
||||
Using YOLO alone, localization is only possible in the image and not in the robot coordinate system. To calculate the bottle position in robot coordinates, a calibration was performed by marking four points in the image and then moving the robot end effector to these positions. From these four image-robot correspondences a 2D-to-2D transformation can be calculated using OpenCV. However, this is only an approximation, since a planar 2D mapping is used for a 3D scene. Consequently, objects that are significantly higher than the base plane cause problems \cite{localization}.
|
||||
|
||||
Nevertheless, because the camera films from above and the height of the bottle is known, this approximation has worked sufficiently well so far. However, the bottle detection was not completed due to time constraints. The problem that the robot often reached its coordinate limits when moving to the predicted position could not be resolved, though it could be fixed by defining key points inside the robot's range that the robot moves to first before moving on to the bottle.
|
||||
|
||||
\section{Summary}
|
||||
|
||||
\subsection{Results}
|
||||
|
||||
Before the implementation of the pouring process began, controlling the robot via DobotStudio, ROS2 and PythonRPC was evaluated and their specific use cases were determined. DobotStudio is best suited for the implementation of the robot movements. The ROS2 implementation is not suited at all for this use case, as a move in y-direction causes an unwanted rotation of the gripper. In addition, its real-world coordinates differ from DobotStudio. PythonRPC is best suited when integration with other libraries or sensors is needed.
|
||||
|
||||
A ring was built to prevent the bottle from wobbling during pouring. A tilting contraption with adjustable angles was created to reduce the fall height of the beer. The pouring workflow was continuously improved based on direct observations and research. Starting with water instead of beer helped to avoid wasting too much beer and provided a better understanding of the behavior of fluids.
|
||||
|
||||
Using the tilting contraption and preventing the teapot effect by reducing the distance between bottle spout and glass edge finally resulted in a successful pouring of beer.
|
||||
|
||||
\subsection{Lessons Learned}
|
||||
|
||||
During the project there were not only many problems to solve, but also many things to learn. This section yields a short summary of the most important lessons learned.
|
||||
\begin{itemize}
|
||||
\item \textbf{Grabbing the bottle}: The gripper of the robot had problems to hold the bottle steady while pouring making the ring necessary.
|
||||
\item \textbf{ArUco Patterns}: The white margin is crucial to guarantee a correct detection of the patterns, otherwise the actual border would not be recognized correctly.
|
||||
\item \textbf{Camera Intrinsics}: The camera intrinsics of a smartphone differs from video mode to photo mode. This caused confusion as the calibration was done with a video, but the ArUco detection was done only with a photo, resulting in wrong pattern axes.
|
||||
\item \textbf{Coordinate transformations}: Doing coordinate transformations practically facilitated a deeper understanding.
|
||||
\item \textbf{Tilted glass}: To better control the foam, the beer glass has to be tilted reducing the fall height.
|
||||
\item \textbf{Teapot effect}: The behavior of fluids to run down the bottle instead of falling into the glass when they are poured slowly.
|
||||
\item \textbf{Internet, AI}: Sometimes it is better to try out things on your own instead of directly searching on the internet or asking AI.
|
||||
\end{itemize}
|
||||
|
||||
\subsection{Reflection}
|
||||
|
||||
In the end, it is also essential to reflect what went well and what could be further improved. All encountered problems were directly addressed and fixed in an acceptable amount of time. In addition everyone of the team contributed their strengths making the division of the tasks easier. Furthermore, the implementation of the pouring process was done exactly like it was defined in advance. Additionally, it was very useful that the hardware equipment of the lab included two Dobot robots enabling parallel working with two robots.
|
||||
|
||||
However, there are some aspects to improve. Doing a localization using ArUco patterns is very complex. This could have been noticed in advance, so a more promising approach for the localization could have been prioritized. Also, a better sensor setup, e.g. an RGB-D camera, might have brought faster and better results.
|
||||
|
||||
Though, it's important to note that the localization task was not part of the main tasks of the project. That is why these aspects are not particularly serious. On the contrary, working with ArUco Patterns and coordinate transformations resulted in a deeper understanding of the concepts.
|
||||
|
||||
\section{Research Methods}
|
||||
|
||||
\subsection{Analyzing available resources}
|
||||
|
||||
Before starting the actual implementation of the pouring process the available hardware and software was analyzed. For that the robot control via DobotStudio, ROS2 and PythonRPC was tried out and evaluated. The decision on the hardware can be found in section \ref{sec:hardware}. The detailed evaluation of the software tools can be found in section \ref{sec:software}.
|
||||
|
||||
\subsection{Implementation}
|
||||
|
||||
The implementation was done by iteratively refining the coordinates and steps of the pouring process. After each iteration, the behavior of the robot was evaluated and it was discussed what can be improved. The detailed description of the implementation can be found in section \ref{sec:implementation}.
|
||||
|
||||
%%
|
||||
%% The acknowledgments section is defined using the "acks" environment
|
||||
%% (and NOT an unnumbered section). This ensures the proper
|
||||
%% identification of the section in the article metadata, and the
|
||||
%% consistent spelling of the heading.
|
||||
\begin{acks}
|
||||
To Meinel-Bräu GmbH for their valuable tips and expertise on beer, the provision of specialized glassware, and the opportunity to refill small bottles.
|
||||
\end{acks}
|
||||
|
||||
%%
|
||||
%% The next two lines define the bibliography style to be used, and
|
||||
%% the bibliography file.
|
||||
\bibliographystyle{ACM-Reference-Format}
|
||||
\bibliography{bibfile}
|
||||
|
||||
\end{document}
|
||||
\endinput
|
||||
%%
|
||||
%% End of file `sample-sigconf-authordraft.tex'.
|
||||
|
||||
55
PAPER/bibfile.bib
Normal file
@@ -0,0 +1,55 @@
|
||||
@misc{dobot_ros,
|
||||
author = {jkaniuka},
|
||||
title = {magician\_ros2},
|
||||
year = {2023},
|
||||
howpublished = {\url{https://github.com/jkaniuka/magician_ros2/}},
|
||||
note = {Accessed: 2026-01-10}
|
||||
}
|
||||
|
||||
@misc{foam_wiki,
|
||||
author = {Wikipedia},
|
||||
title = {Bierschaum},
|
||||
year = {2025},
|
||||
howpublished = {\url{https://de.wikipedia.org/wiki/Bierschaum}},
|
||||
note = {Accessed: 2026-01-10}
|
||||
}
|
||||
|
||||
@misc{foam_bierentdecker,
|
||||
author = {Nina Witzemann},
|
||||
title = {Bierschaum: wie er entsteht und perfekt gelingt},
|
||||
year = {n.d.},
|
||||
howpublished = {\url{https://www.bierentdecker.com/bierwissen/der-bierschaum}},
|
||||
note = {Accessed: 2026-01-10}
|
||||
}
|
||||
|
||||
@misc{teapot_mihai,
|
||||
author = {Andrei Mihai},
|
||||
title = {The maths behind the annoying teapot effect — and how to prevent it},
|
||||
year = {2021},
|
||||
howpublished = {\url{https://scilogs.spektrum.de/hlf/the-maths-behind-the-annoying-teapot-effect-and-how-to-prevent-it/}},
|
||||
note = {Accessed: 2026-01-10}
|
||||
}
|
||||
|
||||
@misc{dobotrpc,
|
||||
author = {pypi},
|
||||
title = {DobotRPC 4.8.8},
|
||||
year = {2022},
|
||||
howpublished = {\url{https://pypi.org/project/DobotRPC/}},
|
||||
note = {Accessed: 2026-01-11}
|
||||
}
|
||||
|
||||
@misc{localization,
|
||||
author = {OpenCV},
|
||||
title = {Camera Calibration and 3D Reconstruction},
|
||||
year = {2022},
|
||||
howpublished = {\url{https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html}},
|
||||
note = {Accessed: 2026-01-15}
|
||||
}
|
||||
|
||||
@misc{yolo,
|
||||
author = {Ultralytics},
|
||||
title = {Entdecken Sie Ultralytics YOLOv8},
|
||||
year = {2024},
|
||||
howpublished = {\url{https://docs.ultralytics.com/de/models/yolov8/}},
|
||||
note = {Accessed: 2026-01-15}
|
||||
}
|
||||
1
PRESENTATION/nextcloud.txt
Normal file
@@ -0,0 +1 @@
|
||||
https://nextcloud.hof-university.de/s/zaYpZszZNYyyDXP
|
||||
0
PROJECT_SOURCE_CODE/__init__.py
Normal file
3
PROJECT_SOURCE_CODE/docker/new-terminal.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash
# Open an interactive shell inside the already-running "ros2" container
# (started by ros-container.sh).

docker exec -it ros2 /bin/bash
|
||||
39
PROJECT_SOURCE_CODE/docker/ros-container.sh
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
# Start (or restart) the ROS 2 development container.
#
# If a container with the configured name already exists, its current state is
# committed back to the image (so in-container changes survive a restart) and
# the old container is removed.  X11 forwarding is enabled for GUI tools, and
# the Dobot USB serial adapter is passed through when present.

# Configuration
CONTAINER_NAME="ros2"
IMAGE_NAME="ros2_humble"
DOBOT_PATH="/home/grapfen/PycharmProjects/Dobot"

# Preserve the container as a new image if it already exists
if docker ps -a --format '{{.Names}}' | grep -Eq "^${CONTAINER_NAME}$"; then
    echo "Container '$CONTAINER_NAME' wird gespeichert als neues Image..."
    docker commit "$CONTAINER_NAME" "$IMAGE_NAME"
    docker container rm -f "$CONTAINER_NAME"
fi

# Allow X11 GUI forwarding for local Docker containers
xhost +local:docker

# USB passthrough option, depending on whether the device is attached.
# BUGFIX: this must be an array so it expands to two separate arguments
# ("--device" "/dev/ttyUSB0").  The previous quoted scalar ("$USB_OPTS")
# handed docker the whole string as ONE argument (which docker rejects),
# and an empty value was still passed as an empty argument.
USB_OPTS=()
if [[ -e /dev/ttyUSB0 ]]; then
    echo "Start mit USB"
    USB_OPTS=(--device /dev/ttyUSB0)
else
    echo "Start ohne USB"
fi

sudo docker run -it \
    --name "$CONTAINER_NAME" \
    --volume "$DOBOT_PATH:/root/dobot" \
    --network host \
    --env DISPLAY="$DISPLAY" \
    --env QT_X11_NO_MITSHM=1 \
    --volume /tmp/.X11-unix:/tmp/.X11-unix:rw \
    --workdir /root/dobot/ros-dobot \
    --device /dev/dri/card1 \
    "${USB_OPTS[@]}" \
    "$IMAGE_NAME" \
    bash
|
||||
|
||||
1
PROJECT_SOURCE_CODE/python_dobot/.python-version
Normal file
@@ -0,0 +1 @@
|
||||
3.9.0
|
||||
90
PROJECT_SOURCE_CODE/python_dobot/README.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Dobot Magician – Python Control Script
|
||||
<img src="https://img.shields.io/badge/platform-Windows%2010%2F11-blue"/>
|
||||
<img src="https://img.shields.io/badge/dobotlink-required-orange"/>
|
||||
<img src="https://img.shields.io/badge/python-3.9-green"/>
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/212770939-2d7a4389-4143-4147-a50a-3453f73a1317.png" width="250" height="250"/>
|
||||
<img src="https://user-images.githubusercontent.com/80155305/207256203-75f2607e-b40c-4a45-bf4e-de4aaffe6530.png" width="350" height="250"/>
|
||||
</p>
|
||||
|
||||
## Table of contents :clipboard:
|
||||
|
||||
* [Overview](#overview)
|
||||
* [Features](#features)
|
||||
* [Installation](#installation)
|
||||
|
||||
---
|
||||
|
||||
<a name="overview"></a>
|
||||
## Overview :bulb:
|
||||
|
||||
Dieses Repository enthält ein Python-Testskript (`dobot_test.py`), mit dem sich ein
|
||||
**Dobot Magician** über **DobotLink** (RPC-Kommunikation) ansteuern lässt.
|
||||
|
||||
Das Skript:
|
||||
|
||||
- sucht automatisch nach angeschlossenen Dobots
|
||||
- verbindet sich über den CP210x-USB-Adapter
|
||||
- fragt die Pose ab
|
||||
- führt eine einfache PTP-Bewegung aus
|
||||
- trennt sauber die Verbindung
|
||||
|
||||
⚠️ **Wichtiger Hinweis:**
|
||||
Dieses Projekt funktioniert **ausschließlich unter Windows**, da DobotLink und die CP210x-Treiber offiziell nur dort unterstützt werden.
|
||||
|
||||
---
|
||||
|
||||
<a name="features"></a>
|
||||
## Features :star:
|
||||
|
||||
- automatische Dobot-Erkennung
|
||||
- CP210x-Port-Suche
|
||||
- Verbindung über DobotLink RPC
|
||||
- Abfrage der aktuellen Pose
|
||||
- PTP-Joint-Bewegung (Beispiel)
|
||||
- Logging über DobotRPC
|
||||
- Fehlerbehandlung (z. B. NetworkError)
|
||||
|
||||
---
|
||||
|
||||
<a name="installation"></a>
|
||||
## Installation :arrow_down:
|
||||
|
||||
### 1. DobotLink installieren
|
||||
Offizielle Steuerungssoftware (erforderlich für RPC):
|
||||
|
||||
https://www.dobot.cc/downloadcenter/dobot-magician.html
|
||||
|
||||
Nach der Installation **DobotLink starten**.
|
||||
|
||||
---
|
||||
|
||||
### 2. Python 3.9 oder 3.10 installieren
|
||||
|
||||
Download:
|
||||
https://www.python.org/downloads/windows/
|
||||
|
||||
Wichtig:
|
||||
✔ **Add Python to PATH** aktivieren
|
||||
|
||||
---
|
||||
|
||||
### 3. CP210x USB-Treiber installieren
|
||||
|
||||
Download:
|
||||
https://www.silabs.com/developers/usb-to-uart-bridge-vcp-drivers
|
||||
|
||||
Ohne diesen Treiber wird der Dobot-Port *nicht erkannt*.
|
||||
|
||||
---
|
||||
|
||||
### 4. DobotRPC installieren
|
||||
|
||||
DobotLink bringt ein kompatibles Python-SDK mit.
|
||||
|
||||
Falls pip benötigt wird:
|
||||
|
||||
```bash
|
||||
pip install dobotlink
|
||||
pip install setuptools
|
||||
134
PROJECT_SOURCE_CODE/python_dobot/aruco/aruco_compex_setup.py
Normal file
@@ -0,0 +1,134 @@
|
||||
from math import pi, cos, sin
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from aruco_utils import detect_aruco, invert_transform
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# KONFIGURATION
|
||||
# ---------------------------------------------------------
|
||||
|
||||
# Input video and camera intrinsics (produced by the calibration script)
VIDEO = "videos/aruco_test_2.mp4"
CAMERA_NPZ = "erik-intrinsics.npz"

ARUCO_DICT = "DICT_5X5_50"
MARKER_LENGTH = 0.05  # marker side length in metres
EE_ID = 1       # ArUco id of the marker mounted at the end effector
BOTTLE_ID = 2   # ArUco id of the marker on the bottle

# End-effector pose in the robot base frame (translation in metres, RPY in rad)
TEE = [0.21, 0.0, 0.10]
TEE_RPY = [0.0, 0.0, 0.0]

# Static bottle position in robot coordinates, for reference: x=0.190, 0.174

# Transformation EE -> EE marker: the marker is rotated 90 degrees about the
# y axis relative to the end-effector frame (no translation).
angle = pi / 2  # 90 degrees
Ry = np.array([
    [cos(angle), 0, sin(angle), 0],
    [0, 1, 0, 0],
    [-sin(angle), 0, cos(angle), 0],
    [0, 0, 0, 1]
])

T_ee_to_eeMarker = Ry

# NOTE(review): appears unused in the code shown below — presumably a leftover
# from an earlier marker-orientation experiment; confirm before removing.
aruco_rvec = np.array([np.deg2rad(90), 0, 0], dtype=np.float64)
|
||||
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# Hilfefunktionen
|
||||
# ---------------------------------------------------------
|
||||
|
||||
|
||||
def build_T_from_tee(tee, rpy):
    """Build a 4x4 homogeneous transform from a translation and Euler angles.

    ``tee`` is the (x, y, z) translation in metres; ``rpy`` is unpacked as
    (rot_z, rot_y, rot_x) and the rotations are composed in Z-Y-X order,
    i.e. R = Rz @ Ry @ Rx.
    """
    yaw, pitch, roll = rpy

    def _rot_z(a):
        c, s = np.cos(a), np.sin(a)
        return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

    def _rot_y(a):
        c, s = np.cos(a), np.sin(a)
        return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

    def _rot_x(a):
        c, s = np.cos(a), np.sin(a)
        return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])

    transform = np.eye(4)
    transform[:3, :3] = _rot_z(yaw) @ _rot_y(pitch) @ _rot_x(roll)
    transform[:3, 3] = tee
    return transform
|
||||
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# Hauptfunktion
|
||||
# ---------------------------------------------------------
|
||||
|
||||
def main():
    """Locate the bottle marker in the video and report its pose.

    Loads the camera intrinsics, scans the video for the end-effector and
    bottle ArUco markers, chains the transformations and prints the bottle
    position and orientation.
    """
    # Load camera intrinsics
    cam = np.load(CAMERA_NPZ)
    K = cam["camera_matrix"]
    dist = cam["dist_coeffs"]

    # End-effector pose in the robot base frame
    T_base_to_ee = build_T_from_tee(TEE, TEE_RPY)

    cap = cv2.VideoCapture(VIDEO)
    if not cap.isOpened():
        print("Video konnte nicht geöffnet werden!")
        return

    T_cam_to_base = None
    T_cam_to_bottle_marker = None

    print("Suche Marker im Video…")

    while True:
        ok, frame = cap.read()
        if not ok:
            break

        # NOTE(review): detect_aruco's signature (aruco_utils.py) requires a
        # positional `dim` argument after `square_size`; this call passes only
        # MARKER_LENGTH and would raise a TypeError as written — verify.
        aruco_results = detect_aruco(frame, K, dist, ARUCO_DICT, MARKER_LENGTH, visualize=True)

        # End-effector marker
        if EE_ID in aruco_results.keys():
            T_cam_to_eeMarker = aruco_results[EE_ID]
            T_eeMarker_to_cam = invert_transform(T_cam_to_eeMarker)
            T_ee_to_cam = T_eeMarker_to_cam @ T_ee_to_eeMarker
            # NOTE(review): T_cam_to_base is never assigned here — T_ee_to_cam
            # (and T_base_to_ee above) are computed but unused, so the success
            # condition below can never be met and the function always exits via
            # "Endeffektor-Marker nie gefunden!".  Presumably something like
            # T_cam_to_base = T_base_to_ee @ T_ee_to_cam was intended; confirm
            # the frame-naming convention before fixing.

        # Bottle marker
        if BOTTLE_ID in aruco_results.keys():
            T_cam_to_bottle_marker = aruco_results[BOTTLE_ID]

        if T_cam_to_base is not None and T_cam_to_bottle_marker is not None:
            print("Marker gefunden!")
            break

    cap.release()

    if T_cam_to_base is None:
        print("Endeffektor-Marker nie gefunden!")
        return
    if T_cam_to_bottle_marker is None:
        print("Flaschen-Marker nie gefunden!")
        return

    # Final pose: bottle marker expressed in the robot base frame
    T_bottle_marker_base = T_cam_to_base @ T_cam_to_bottle_marker

    pos = T_bottle_marker_base[:3, 3]
    R = T_bottle_marker_base[:3, :3]
    rvec, _ = cv2.Rodrigues(R)

    print("\n--- Ergebnis ---")
    print("Flaschenposition (x,y,z) [m]:")
    print(pos)
    print("Rodrigues-Winkel:")
    print(rvec.flatten())


# ---------------------------------------------------------
if __name__ == "__main__":
    main()
|
||||
54
PROJECT_SOURCE_CODE/python_dobot/aruco/aruco_simple_setup.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import math
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from aruco_utils import detect_aruco
|
||||
from PROJECT_SOURCE_CODE.python_dobot.aruco.aruco_utils import invert_transform
|
||||
|
||||
# Setup:
# Goal: bottle position in robot base coordinates.
# One ArUco marker on the base plate, one ArUco marker on the bottle.
# The pose of the plate marker in base coordinates is known, so the bottle
# pose can be chained: base <- plate marker <- camera <- bottle marker.

base_plate_id = 1          # ArUco id of the plate marker
bottle_id = 2              # ArUco id of the bottle marker
aruco_dict = "DICT_6X6_50"
squares_per_marker = 6
square_size = 0.0055       # edge length of one marker square in metres
vid_path = "inputs/vid2.mp4"

# Camera intrinsics from the checkerboard calibration
cam = np.load("intrinsics/erik-intrinsics.npz")
camera_matrix = cam["camera_matrix"]
dist_coeffs = cam["dist_coeffs"]

# Pose of the plate marker in base coordinates: the marker origin lies
# 17.5 cm in x and 13.75 cm below the robot base origin (translation only)
T_base_to_plate_marker = np.eye(4)
T_base_to_plate_marker[:3, 3] = [0.175, 0.0, -0.1375]

# Detect both markers in the first video frame
cap = cv2.VideoCapture(vid_path)
success, img = cap.read()
aruco_results = detect_aruco(img, camera_matrix, dist_coeffs, aruco_dict, square_size, squares_per_marker)
print("aruco_results:")
print(aruco_results)

if base_plate_id not in aruco_results.keys() or bottle_id not in aruco_results.keys():
    raise ValueError("Marker nicht gefunden.")

# NOTE(review): the print labels below ("T_cam_to_...") contradict the variable
# names ("..._to_cam") — one of the two namings is inverted; confirm which
# direction detect_aruco's transforms actually point before relying on either.
T_plate_marker_to_cam = aruco_results[base_plate_id]
print("T_cam_to_plate_marker:\n", T_plate_marker_to_cam)

T_bottle_marker_to_cam = aruco_results[bottle_id]
print("T_cam_to_bottle_marker:\n", T_bottle_marker_to_cam)

T_cam_to_plate_marker = invert_transform(T_plate_marker_to_cam)

# Bottle marker expressed in the plate-marker frame
T_bottle_to_plate_marker = T_cam_to_plate_marker @ T_bottle_marker_to_cam

# ...and finally chained into the robot base frame
T_bottle_to_base = invert_transform(T_base_to_plate_marker) @ T_bottle_to_plate_marker

# Print the position in millimetres (robot units)
bottle_position_in_base = T_bottle_to_base[:3, 3] * 1000
print("Flaschenposition in Base:")
print(math.floor(bottle_position_in_base[0]))
print(math.floor(bottle_position_in_base[1]))
print(math.floor(bottle_position_in_base[2]))
|
||||
101
PROJECT_SOURCE_CODE/python_dobot/aruco/aruco_utils.py
Normal file
@@ -0,0 +1,101 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
|
||||
def rvec_tvec_to_homogeneous(rvec, tvec):
    """Combine a Rodrigues rotation vector and a translation vector into a
    single 4x4 homogeneous transformation matrix."""
    rotation, _jacobian = cv2.Rodrigues(rvec)
    transform = np.eye(4)
    transform[:3, :3] = rotation
    transform[:3, 3] = tvec.flatten()
    return transform
|
||||
|
||||
|
||||
def invert_transform(T):
    """Return the inverse of a rigid-body transform.

    For T = [R | t; 0 | 1] the inverse is [R^T | -R^T t; 0 | 1], which is
    cheaper and numerically better behaved than a general 4x4 inversion.
    """
    rotation, translation = T[:3, :3], T[:3, 3]
    inverse = np.eye(4)
    inverse[:3, :3] = rotation.T
    inverse[:3, 3] = rotation.T @ -translation
    return inverse
|
||||
|
||||
|
||||
def detect_aruco(
        img,
        camera_matrix,
        dist_coeffs,
        aruco_dict,
        square_size: float,
        dim: int,
        visualize=False
) -> dict:
    """Detect ArUco markers in ``img`` and estimate one pose per marker.

    Parameters
    ----------
    img : BGR image to search; detected markers and axes are drawn onto it.
    camera_matrix, dist_coeffs : camera intrinsics from calibration.
    aruco_dict : name of a ``cv2.aruco`` dictionary attribute, e.g. "DICT_5X5_50".
    square_size : edge length of a single marker square in metres.
    dim : number of squares per marker side (excluding the white border).
    visualize : if True, show the annotated image, wait for a key press and
        also save it to "ausgabe.png".

    Returns
    -------
    dict mapping marker id -> 4x4 homogeneous transform built from the
    solvePnP pose estimate for that marker.

    Raises
    ------
    ValueError : if no ArUco marker is detected at all.
    """
    aruco_dict = cv2.aruco.getPredefinedDictionary(
        getattr(cv2.aruco, aruco_dict)
    )
    params = cv2.aruco.DetectorParameters()
    detector = cv2.aruco.ArucoDetector(aruco_dict, params)

    # Physical marker edge length: dim inner squares plus a 2-square border
    marker_length = (dim + 2) * square_size
    half = marker_length / 2.0

    # Marker object points centred on the marker (OpenCV corner order:
    # top-left, top-right, bottom-right, bottom-left)
    marker_obj_pts = np.array([
        [-half, half, 0.0],
        [half, half, 0.0],
        [half, -half, 0.0],
        [-half, -half, 0.0],
    ], dtype=np.float32)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    corners, ids, _ = detector.detectMarkers(gray)
    if ids is None or len(ids) == 0:
        raise ValueError("Kein ArUco-Marker gefunden.")

    cv2.aruco.drawDetectedMarkers(img, corners, ids)
    ids = ids.flatten()

    transformations = {}

    for c, aruco_id in zip(corners, ids):
        image_points = c.reshape(-1, 2).astype(np.float64)

        # Pose of this marker from its four corner correspondences
        success, rvec, tvec = cv2.solvePnP(
            marker_obj_pts,
            image_points,
            camera_matrix,
            dist_coeffs,
            flags=cv2.SOLVEPNP_ITERATIVE
        )

        if not success:
            continue  # skip markers whose pose could not be estimated

        rvec = rvec.reshape(3, 1)
        tvec = tvec.reshape(3, 1)

        T_marker_cam = rvec_tvec_to_homogeneous(rvec, tvec)
        transformations[aruco_id] = T_marker_cam

        # Draw the estimated marker axes for visual debugging
        cv2.drawFrameAxes(
            img,
            camera_matrix,
            dist_coeffs,
            rvec,
            tvec,
            marker_length * 0.5
        )

    if visualize:
        cv2.imshow("Detected ArUco Markers", img)
        cv2.waitKey(0)

        cv2.imwrite("ausgabe.png", img)
        print("img gespeichert.")

    return transformations
|
||||
@@ -0,0 +1,94 @@
|
||||
import cv2
import numpy as np

# Camera intrinsic calibration from a checkerboard video.
# File is from the lecture Cooperative Autonomous Systems.

# Inner-corner grid of the checkerboard (columns, rows)
pattern_size = (6, 8)

input_file = "inputs/Erik_checkerboard_2.mp4"
output_file = "intrinsics/erik-intrinsics.npz"

# Size of each checkerboard square in metres
square_size = 0.0247

# Object points of the checkerboard corners in the board's own coordinate
# system (z = 0 plane); identical for every frame.
object_points_pattern = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32)
# shape: (6*8, 3)
object_points_pattern[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2) * square_size

# Per-frame correspondences collected for cv2.calibrateCamera
object_points = []  # 3D coordinates of the chessboard corners
image_points = []  # 2D coordinates of the detected corners in the image

cap = cv2.VideoCapture(input_file)

found_count = 0  # counter for frames with detected corners

# Only every (skipframes + 1)-th frame is used for calibration
skipframes = 5

while cap.isOpened():

    # Discard `skipframes` frames, then read the one actually processed
    for i in range(skipframes):
        ret, frame = cap.read()
    ret, frame = cap.read()

    if not ret:
        break

    # convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # find the corners in the image
    ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)

    # if corners were found
    if ret:
        found_count += 1
        object_points.append(object_points_pattern)
        # refine the corners to sub-pixel precision
        refined_corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                           criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
        image_points.append(refined_corners)

        # visualize the detected corners
        cv2.drawChessboardCorners(frame, pattern_size, refined_corners, ret)
        cv2.imshow("Detected Chessboard", frame)

    # when we have enough frames, stop
    if found_count >= 100:
        break

    # show the current frame even when no corners were found
    cv2.imshow("Calibration", frame)

    # Press 'q' to exit early
    if cv2.waitKey(2) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
print("Found corners in {} frames.".format(found_count))

# Calibrate only if enough usable frames were collected
if found_count > 5:
    ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(object_points, image_points, gray.shape[::-1],
                                                                        None, None)

    if ret:
        print("Calibration successful!")
        print("Camera Matrix:\n", camera_matrix)
        print("Distortion Coefficients:\n", dist_coeffs)

        # Persist intrinsics for the ArUco scripts
        np.savez(output_file, camera_matrix=camera_matrix, dist_coeffs=dist_coeffs)
    else:
        print("Calibration failed.")
else:
    print("Not enough frames with detected corners to perform calibration.")
|
||||
@@ -0,0 +1,18 @@
|
||||
import cv2
import numpy as np

from aruco_utils import detect_aruco

# Quick manual test: detect ArUco markers in a single still image and print
# the resulting marker-id -> transform dictionary.

aruco_dict = "DICT_7X7_50"
squares_per_marker = 7
square_size = 0.0077  # edge length of one marker square in metres
img_path = "inputs/test_4.jpg"

# Camera intrinsics from the checkerboard calibration
cam = np.load("intrinsics/erik-intrinsics.npz")
camera_matrix = cam["camera_matrix"]
dist_coeffs = cam["dist_coeffs"]

img = cv2.imread(img_path)
aruco_results = detect_aruco(img, camera_matrix, dist_coeffs, aruco_dict, square_size, squares_per_marker)
print("aruco_results:")
print(aruco_results)
|
||||
@@ -0,0 +1,20 @@
|
||||
import cv2
import numpy as np

from aruco_utils import detect_aruco

# Quick manual test: detect ArUco markers in the first frame of a video and
# print the resulting marker-id -> transform dictionary.

aruco_dict = "DICT_6X6_50"
squares_per_marker = 6
square_size = 0.0055  # edge length of one marker square in metres
vid_path = "inputs/vid2.mp4"

# Camera intrinsics from the checkerboard calibration
cam = np.load("intrinsics/erik-intrinsics.npz")
camera_matrix = cam["camera_matrix"]
dist_coeffs = cam["dist_coeffs"]

cap = cv2.VideoCapture(vid_path)
success, img = cap.read()

aruco_results = detect_aruco(img, camera_matrix, dist_coeffs, aruco_dict, square_size, squares_per_marker)
print("aruco_results:")
print(aruco_results)
|
||||
67
PROJECT_SOURCE_CODE/python_dobot/aruco/test_marker_to_ee.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
from aruco_utils import detect_aruco
|
||||
|
||||
marker_length = 0.05  # marker size in metres
ARUCO_DICT = "DICT_5X5_50"
marker_id = 1  # id of the marker mounted at the end effector

# Camera intrinsics from the checkerboard calibration
cam = np.load("intrinsics/erik-intrinsics.npz")
camera_matrix = cam["camera_matrix"]
dist_coeffs = cam["dist_coeffs"]
|
||||
|
||||
|
||||
# === Main function ===
def calibrate_from_frames(inputs):
    """Estimate the transform between the EE-mounted marker and the end effector.

    inputs: dict {frame_path: [x, y, z]} — for each image file, the position
        of the end effector in robot base coordinates when the frame was
        captured (rotation is assumed to be identity for every pose).
    returns: T_eeMarker_ee (4x4 matrix) from cv2.calibrateHandEye.

    Raises ValueError when the expected marker is missing in any frame.

    NOTE(review): detect_aruco (aruco_utils.py) takes
    (img, camera_matrix, dist_coeffs, aruco_dict, square_size, dim, ...);
    the call below passes marker_length in the square_size slot and omits the
    required `dim` argument entirely, so it raises TypeError as written.
    """
    ee_matrices = []
    marker_matrices = []

    for frame_path, ee_pos in inputs.items():
        img = cv2.imread(frame_path)
        aruco_results = detect_aruco(img, camera_matrix, dist_coeffs, ARUCO_DICT, marker_length)

        if not marker_id in aruco_results.keys():
            raise ValueError(f"Marker {marker_id} in {frame_path} nicht gefunden.")

        marker_matrices.append(aruco_results[marker_id])

        # EE pose matrix (rotation assumed identity)
        T_ee_to_base = np.eye(4)
        T_ee_to_base[:3, 3] = ee_pos
        ee_matrices.append(T_ee_to_base)

    # Split rotations/translations as required by cv2.calibrateHandEye
    R_gripper2base = [T[:3, :3] for T in ee_matrices]
    t_gripper2base = [T[:3, 3] for T in ee_matrices]
    R_target2cam = [T[:3, :3] for T in marker_matrices]
    t_target2cam = [T[:3, 3] for T in marker_matrices]

    # Hand-eye calibration (Tsai-Lenz)
    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base, t_gripper2base,
        R_target2cam, t_target2cam,
        method=cv2.CALIB_HAND_EYE_TSAI
    )

    # Assemble the result as a 4x4 homogeneous matrix
    T_eeMarker_ee = np.eye(4)
    T_eeMarker_ee[:3, :3] = R_cam2gripper
    T_eeMarker_ee[:3, 3] = t_cam2gripper.flatten()

    return T_eeMarker_ee
|
||||
|
||||
|
||||
# === Example ===
# Frame paths mapped to the end-effector position (in metres) recorded when
# each frame was captured.
inputs = {
    "frame1.jpg": [0.5, 0.2, 0.3],
    "frame2.jpg": [0.6, 0.25, 0.35],
    "frame3.jpg": [0.55, 0.3, 0.4],
}

# Run the calibration
T_eeMarker_ee = calibrate_from_frames(inputs)
print("T_eeMarker_ee:\n", T_eeMarker_ee)
|
||||
41
PROJECT_SOURCE_CODE/python_dobot/aruco/test_robot_to_cam.py
Normal file
@@ -0,0 +1,41 @@
|
||||
import math
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from aruco_utils import detect_aruco, invert_transform
|
||||
|
||||
# Determine the camera pose in robot base coordinates from the ArUco marker
# on the base plate, whose pose relative to the robot base is known.
base_plate_id = 1
aruco_dict = "DICT_6X6_50"
squares_per_marker = 6
square_size = 0.0055  # edge length of one marker square in metres
vid_path = "inputs/vid3.mp4"

# Camera intrinsics from the checkerboard calibration
cam = np.load("intrinsics/erik-intrinsics.npz")
camera_matrix = cam["camera_matrix"]
dist_coeffs = cam["dist_coeffs"]

# Pose of the plate marker in base coordinates: the ArUco origin lies
# 17.5 cm in x and 13.75 cm below the robot base origin
T_base_to_plate_marker = np.eye(4)
T_base_to_plate_marker[:3, 3] = [0.175, 0.0, -0.1375]

# Detect the marker in the first video frame
cap = cv2.VideoCapture(vid_path)
success, img = cap.read()
aruco_results = detect_aruco(img, camera_matrix, dist_coeffs, aruco_dict, square_size, squares_per_marker)
print("aruco_results:")
print(aruco_results)

if base_plate_id not in aruco_results.keys():
    raise ValueError("Marker nicht gefunden.")

T_plate_marker_to_cam = aruco_results[base_plate_id]
print("T_cam_to_plate_marker:\n", T_plate_marker_to_cam)

T_cam_to_plate_marker = invert_transform(T_plate_marker_to_cam)

# Camera pose chained into the robot base frame
T_cam_to_base = invert_transform(T_base_to_plate_marker) @ T_cam_to_plate_marker

# Print the camera position in millimetres (robot units).
# NOTE(review): the variable holds the camera position, not the bottle
# position — rename candidate.
bottle_position_in_base = T_cam_to_base[:3, 3] * 1000
print("Cam in Base:")
print(math.floor(bottle_position_in_base[0]))
print(math.floor(bottle_position_in_base[1]))
print(math.floor(bottle_position_in_base[2]))
|
||||
190
PROJECT_SOURCE_CODE/python_dobot/beerbot.py
Normal file
@@ -0,0 +1,190 @@
|
||||
from DobotRPC import RPCClient, DobotlinkAdapter, loggers
|
||||
from DobotRPC.NetworkError import NetworkError
|
||||
import xml.etree.ElementTree as ET
|
||||
import time
|
||||
|
||||
def parse_xml_moves(xml_path):
    """Read the XML move file and extract all movements.

    The file holds sequential ``<row0>``, ``<row1>``, ... elements whose
    ``<item_N>`` children carry the move parameters.

    Args:
        xml_path: Path to the XML file.

    Returns:
        list[dict]: One dict per move with keys ``ptpMode``, ``description``,
        ``x``, ``y``, ``z``, ``r``, ``wait`` and ``gripper``. Missing or
        empty items fall back to sensible defaults.
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()

    def _field(row, tag, convert, default):
        # One find() per tag (original looked each tag up twice) and a
        # guard for empty elements (<item_2/>), whose .text is None and
        # previously crashed int()/float().
        node = row.find(tag)
        if node is None or node.text is None:
            return default
        return convert(node.text)

    moves = []
    row_num = 0

    while True:
        row = root.find(f'row{row_num}')
        if row is None:
            break

        moves.append({
            'ptpMode': _field(row, 'item_0', int, 1),
            'description': _field(row, 'item_1', str, f'Move {row_num}'),
            'x': _field(row, 'item_2', float, 0.0),
            'y': _field(row, 'item_3', float, 0.0),
            'z': _field(row, 'item_4', float, 0.0),
            'r': _field(row, 'item_5', float, 0.0),
            'wait': _field(row, 'item_10', float, 0.0),
            'gripper': _field(row, 'item_11', int, 0),
        })
        row_num += 1

    return moves
|
||||
|
||||
def execute_moves(dobot, port, moves):
    """Execute all moves from the XML file, in order.

    Args:
        dobot: Synchronous DobotlinkAdapter instance.
        port: Serial port name of the connected robot.
        moves: List of move dicts as produced by parse_xml_moves().

    Returns:
        bool: True when every PTP command was sent; False on the first
        movement error. Gripper errors are logged but do not abort.
    """
    for i, move in enumerate(moves):
        print(f"\n🤖 Schritt {i}: {move['description']}")
        print(f" Position: x={move['x']}, y={move['y']}, z={move['z']}, r={move['r']}")

        # Mechanical gripper control: 1 = open, 2 = close, anything else = no action.
        if move['gripper'] == 1:
            print(" 🔓 Greifer öffnen")
            try:
                dobot.Magician.SetEndEffectorGripper(
                    portName=port,
                    enable=True,
                    on=False
                )
                time.sleep(0.5)  # short wait until the gripper has opened
            except NetworkError as e:
                print(f" ⚠ Fehler beim Öffnen des Greifers: {e}")
        elif move['gripper'] == 2:
            print(" 🔒 Greifer schließen")
            try:
                dobot.Magician.SetEndEffectorGripper(
                    portName=port,
                    enable=True,
                    on=True
                )
                time.sleep(0.5)  # short wait until the gripper has closed
            except NetworkError as e:
                print(f" ⚠ Fehler beim Schließen des Greifers: {e}")

        # Execute the PTP movement itself.
        try:
            dobot.Magician.SetPTPCmd(
                portName=port,
                ptpMode=move['ptpMode'],
                x=move['x'],
                y=move['y'],
                z=move['z'],
                r=move['r']
            )
            print(f" ✅ Bewegung gesendet")
        except NetworkError as e:
            print(f" ❌ Fehler bei Bewegung: {e}")
            return False

        # Optional pause after the move, if specified in the XML.
        if move['wait'] > 0:
            print(f" ⏳ Warte {move['wait']} Sekunden...")
            time.sleep(move['wait'])

    return True
|
||||
|
||||
def main(dobot):
    """Run the full beerbot sequence: load moves, connect, execute, disconnect.

    Args:
        dobot: Synchronous DobotlinkAdapter instance (DobotLink must be running).
    """
    # 1. Load the XML move file
    xml_path = "../../documentation/moves/move_bottle.xml"
    print(f"📄 Lade Bewegungen aus {xml_path}...")

    try:
        moves = parse_xml_moves(xml_path)
        print(f"✅ {len(moves)} Bewegungen geladen\n")
    except Exception as e:
        print(f"❌ Fehler beim Laden der XML-Datei: {e}")
        return

    # 2. Search for connected Dobots
    res = dobot.Magician.SearchDobot()
    print("Gefundene Dobots:", res)

    if not res:
        print("❌ Kein Dobot gefunden. Ist er eingeschaltet und DobotLink gestartet?")
        return

    # Prefer the CP210x USB-serial adapter, otherwise fall back to COM5
    cp210x_ports = [d for d in res if "Silicon Labs CP210x" in d.get("description", "")]
    if cp210x_ports:
        port = cp210x_ports[0]["portName"]
    else:
        port = "COM5"  # fallback
    print("Verwende Port:", port)

    # 3. Connect
    try:
        dobot.Magician.ConnectDobot(portName=port)
    except NetworkError as e:
        print("❌ Fehler beim Verbinden:", e)
        return

    print("✅ Verbunden.\n")

    # 4. Set the end effector type to gripper
    print("🔧 Setze End Effector Typ auf Gripper...")
    try:
        # Type 3 = Gripper (1=SuctionCup, 2=Laser, 3=Gripper, 4=Pen)
        dobot.Magician.SetEndEffectorType(
            portName=port,
            endEffectorType=3
        )
        print("✅ End Effector Typ gesetzt\n")
        time.sleep(0.5)
    except NetworkError as e:
        print(f"⚠ Fehler beim Setzen des End Effector Typs: {e}\n")

    # 5. Initialize the gripper (pump on, jaws released)
    print("🔧 Initialisiere Greifer...")
    try:
        dobot.Magician.SetEndEffectorGripper(
            portName=port,
            enable=True,
            on=False
        )
        print("✅ Greifer initialisiert (geöffnet)\n")
        time.sleep(1)
    except NetworkError as e:
        print(f"⚠ Fehler bei Greifer-Initialisierung: {e}\n")

    # 6. Query the current pose (diagnostic only)
    try:
        pose = dobot.Magician.GetPose(portName=port)
        print("Aktuelle Pose:", pose)
    except NetworkError as e:
        print("⚠ Fehler bei GetPose:", e)

    # 7. Execute the movements
    print("\n" + "="*60)
    print("🍺 Starte Beerbot-Sequenz...")
    print("="*60)

    success = execute_moves(dobot, port, moves)

    if success:
        print("\n" + "="*60)
        print("🎉 Alle Bewegungen erfolgreich ausgeführt!")
        print("="*60)

    input("\nENTER drücken, um zu trennen...")

    # 8. Disconnect cleanly
    try:
        dobot.Magician.SetEndEffectorGripper(
            portName=port,
            enable=False,  # switch the gripper/pump off
            on=False
        )
        dobot.Magician.DisconnectDobot(
            portName=port,
            queueStop=True,
            queueClear=True
        )
    except NetworkError as e:
        print("⚠ Fehler beim Disconnect:", e)

    print("🔌 Verbindung getrennt.")
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: enable INFO logging and use the synchronous adapter.
    loggers.set_level(loggers.INFO)
    dobotlink = DobotlinkAdapter(RPCClient(), is_sync=True)
    main(dobotlink)
|
||||
1169
PROJECT_SOURCE_CODE/python_dobot/beerbot_vision.py
Normal file
69
PROJECT_SOURCE_CODE/python_dobot/dobot_test.py
Normal file
@@ -0,0 +1,69 @@
|
||||
from DobotRPC import RPCClient, DobotlinkAdapter, loggers
|
||||
from DobotRPC.NetworkError import NetworkError
|
||||
|
||||
def main(dobot):
    """Smoke test: find a Dobot, connect, read the pose, send one PTP move.

    Args:
        dobot: Synchronous DobotlinkAdapter instance (DobotLink must be running).
    """
    # 1. Search for connected Dobots
    res = dobot.Magician.SearchDobot()
    print("Gefundene Dobots:", res)

    if not res:
        print("❌ Kein Dobot gefunden. Ist er eingeschaltet und DobotLink gestartet?")
        return

    # Prefer the CP210x USB-serial adapter, otherwise fall back to COM5
    cp210x_ports = [d for d in res if "Silicon Labs CP210x" in d.get("description", "")]
    if cp210x_ports:
        port = cp210x_ports[0]["portName"]
    else:
        port = "COM5"  # fallback
    print("Verwende Port:", port)

    # 2. Connect
    try:
        dobot.Magician.ConnectDobot(portName=port)
    except NetworkError as e:
        print("❌ Fehler beim Verbinden:", e)
        return

    print("✅ Verbunden.")

    # 4. Test: query the current pose
    try:
        pose = dobot.Magician.GetPose(portName=port)
        print("Aktuelle Pose:", pose)
    except NetworkError as e:
        print("⚠ Fehler bei GetPose:", e)

    # 5. Simple PTP movement
    try:
        print("🤖 Fahre zu Position (x=230, y=0, z=50, r=0)...")
        dobot.Magician.SetPTPCmd(
            portName=port,
            ptpMode=1,  # joint PTP
            x=230,
            y=-0,
            z=50,
            r=0
        )
        print("PTP-Befehl gesendet.")
    except NetworkError as e:
        print("❌ Fehler bei SetPTPCmd:", e)

    input("ENTER drücken, um zu trennen...")

    # 6. Disconnect cleanly
    try:
        dobot.Magician.DisconnectDobot(
            portName=port,
            queueStop=True,
            queueClear=True
        )
    except NetworkError as e:
        print("⚠ Fehler beim Disconnect:", e)

    print("🔌 Verbindung getrennt.")
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: enable INFO logging and use the synchronous adapter.
    loggers.set_level(loggers.INFO)
    dobotlink = DobotlinkAdapter(RPCClient(), is_sync=True)
    main(dobotlink)
|
||||
1
PROJECT_SOURCE_CODE/python_dobot/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
DobotRPC
|
||||
21
PROJECT_SOURCE_CODE/python_dobot/requirements_vision.txt
Normal file
@@ -0,0 +1,21 @@
|
||||
# Beer Bot - Requirements
|
||||
# ======================
|
||||
# Installation: pip install -r requirements.txt
|
||||
|
||||
# DOBOT Steuerung
|
||||
DobotRPC
|
||||
|
||||
# Computer Vision & AI
|
||||
ultralytics>=8.0.0 # YOLOv8 Objekterkennung
|
||||
opencv-python>=4.8.0 # Bildverarbeitung
|
||||
|
||||
# Numerische Berechnungen
|
||||
numpy>=1.24.0
|
||||
|
||||
# Optional: Für bessere Performance mit GPU
|
||||
# torch>=2.0.0 # PyTorch (wird von ultralytics installiert)
|
||||
# torchvision>=0.15.0
|
||||
|
||||
# Optional: Für Bildaugmentation beim Custom-Training
|
||||
# albumentations>=1.3.0
|
||||
# roboflow # Dataset-Management
|
||||
0
PROJECT_SOURCE_CODE/python_dobot_yolo/__init__.py
Normal file
@@ -0,0 +1,102 @@
|
||||
{
|
||||
"pixel_points": [
|
||||
[
|
||||
216,
|
||||
155
|
||||
],
|
||||
[
|
||||
157,
|
||||
39
|
||||
],
|
||||
[
|
||||
242,
|
||||
299
|
||||
],
|
||||
[
|
||||
380,
|
||||
419
|
||||
],
|
||||
[
|
||||
538,
|
||||
384
|
||||
],
|
||||
[
|
||||
562,
|
||||
571
|
||||
],
|
||||
[
|
||||
713,
|
||||
446
|
||||
],
|
||||
[
|
||||
841,
|
||||
409
|
||||
],
|
||||
[
|
||||
860,
|
||||
545
|
||||
],
|
||||
[
|
||||
1033,
|
||||
418
|
||||
],
|
||||
[
|
||||
1129,
|
||||
271
|
||||
],
|
||||
[
|
||||
1011,
|
||||
144
|
||||
]
|
||||
],
|
||||
"robot_points": [
|
||||
[
|
||||
171.8896484375,
|
||||
-117.91725158691406
|
||||
],
|
||||
[
|
||||
135.88966369628906,
|
||||
-177.917236328125
|
||||
],
|
||||
[
|
||||
219.88963317871094,
|
||||
-81.91724395751953
|
||||
],
|
||||
[
|
||||
239.36228942871094,
|
||||
-9.882417678833008
|
||||
],
|
||||
[
|
||||
189.88967895507812,
|
||||
50.08274841308594
|
||||
],
|
||||
[
|
||||
273.89019775390625,
|
||||
68.08277893066406
|
||||
],
|
||||
[
|
||||
189.89035034179688,
|
||||
116.08283233642578
|
||||
],
|
||||
[
|
||||
149.89041137695312,
|
||||
156.0828857421875
|
||||
],
|
||||
[
|
||||
221.89051818847656,
|
||||
174.0829315185547
|
||||
],
|
||||
[
|
||||
137.89059448242188,
|
||||
222.0830078125
|
||||
],
|
||||
[
|
||||
53.89064407348633,
|
||||
240.08314514160156
|
||||
],
|
||||
[
|
||||
-12.109418869018555,
|
||||
156.08338928222656
|
||||
]
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,124 @@
|
||||
"""
|
||||
Beer Bot Vision - Klasse zur Kalibrierung der Kamera-zu-Roboter Transformation
|
||||
"""
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import json
|
||||
import os
|
||||
from config import Config
|
||||
from utils import sanitize_filename
|
||||
|
||||
|
||||
class CameraCalibrator:
    """Calibrates the pixel -> robot XY transformation.

    Collects matching (pixel, robot) point pairs and fits a 2D affine
    transform via cv2.estimateAffine2D that maps camera pixel coordinates
    to robot XY coordinates.
    """

    def __init__(self):
        self.pixel_points = []        # [[px, py], ...] camera pixel coordinates
        self.robot_points = []        # [[rx, ry], ...] matching robot XY positions
        self.transform_matrix = None  # 2x3 affine matrix once fitted
        self.is_calibrated = False

    def _nearest_index(self, pixel_x, pixel_y):
        """Return the index of the stored pixel point closest to (pixel_x, pixel_y).

        Shared by upsert/delete (the search loop was duplicated before).
        Uses squared distances — no sqrt needed for ranking. Assumes
        self.pixel_points is non-empty.
        """
        best_idx = 0
        best_d2 = None
        for i, (px, py) in enumerate(self.pixel_points):
            d2 = (px - pixel_x) ** 2 + (py - pixel_y) ** 2
            if best_d2 is None or d2 < best_d2:
                best_idx = i
                best_d2 = d2
        return best_idx

    def add_calibration_point(self, pixel_x, pixel_y, robot_x, robot_y):
        """Append a new calibration point pair."""
        self.pixel_points.append([pixel_x, pixel_y])
        self.robot_points.append([robot_x, robot_y])
        print(f"✅ Punkt {len(self.pixel_points)}: Pixel({pixel_x}, {pixel_y}) -> Robot({robot_x:.1f}, {robot_y:.1f})")

    def upsert_calibration_point(self, pixel_x, pixel_y, robot_x, robot_y):
        """Replace the nearest existing point, or append a new one.

        Returns:
            bool: True when an existing point was updated, False when the
            pair was appended as a new point.
        """
        if self.pixel_points:
            best_idx = self._nearest_index(pixel_x, pixel_y)
            self.pixel_points[best_idx] = [pixel_x, pixel_y]
            self.robot_points[best_idx] = [robot_x, robot_y]
            print(
                f"! Punkt {best_idx + 1} aktualisiert: "
                f"Pixel({pixel_x}, {pixel_y}) -> Robot({robot_x:.1f}, {robot_y:.1f})"
            )
            return True

        self.add_calibration_point(pixel_x, pixel_y, robot_x, robot_y)
        return False

    def delete_nearest_point(self, pixel_x, pixel_y):
        """Delete the calibration point closest to the given pixel position.

        Returns:
            bool: True when a point was removed, False when none exist.
        """
        if not self.pixel_points:
            print("? Keine Punkte zum Loeschen")
            return False

        best_idx = self._nearest_index(pixel_x, pixel_y)
        px, py = self.pixel_points[best_idx]
        rx, ry = self.robot_points[best_idx]
        del self.pixel_points[best_idx]
        del self.robot_points[best_idx]
        print(
            f"! Punkt {best_idx + 1} geloescht: "
            f"Pixel({px}, {py}) -> Robot({rx:.1f}, {ry:.1f})"
        )
        return True

    def calculate_transformation(self):
        """Fit the affine pixel->robot transform from the collected points.

        Returns:
            bool: True on success; False with fewer than 4 points or when
            estimateAffine2D cannot fit a transform.
        """
        if len(self.pixel_points) < 4:
            print(f"❌ Mindestens 4 Punkte erforderlich (aktuell: {len(self.pixel_points)})")
            return False

        pixel_np = np.array(self.pixel_points, dtype=np.float32)
        robot_np = np.array(self.robot_points, dtype=np.float32)
        self.transform_matrix, _ = cv2.estimateAffine2D(pixel_np, robot_np)

        if self.transform_matrix is not None:
            self.is_calibrated = True
            print("✅ Kalibrierung erfolgreich!")
            return True
        print("❌ Kalibrierung fehlgeschlagen!")
        return False

    def pixel_to_robot(self, pixel_x, pixel_y):
        """Map a pixel coordinate to robot XY coordinates.

        Raises:
            ValueError: if no calibration has been computed or loaded.
        """
        if not self.is_calibrated:
            raise ValueError("Nicht kalibriert!")
        pixel = np.array([[[pixel_x, pixel_y]]], dtype=np.float32)
        robot = cv2.transform(pixel, self.transform_matrix)
        return float(robot[0][0][0]), float(robot[0][0][1])

    def save_calibration(self, matrix_file=None, points_file=None):
        """Persist the matrix (.npy) and the point pairs (.json) to disk."""
        matrix_file = matrix_file or Config.CALIBRATION_FILE
        points_file = points_file or Config.CALIBRATION_POINTS_FILE

        if self.transform_matrix is not None:
            # Fix: sanitize_filename was accidentally applied twice here.
            np.save(sanitize_filename(matrix_file), self.transform_matrix)

        with open(points_file, 'w') as f:
            json.dump({"pixel_points": self.pixel_points, "robot_points": self.robot_points}, f, indent=2)
        print(f"✅ Kalibrierung gespeichert")

    def load_calibration(self, matrix_file=None, points_file=None):
        """Load the matrix and point pairs if the files exist.

        Returns:
            bool: the resulting is_calibrated flag.
        """
        matrix_file = matrix_file or Config.CALIBRATION_FILE
        points_file = points_file or Config.CALIBRATION_POINTS_FILE

        if os.path.exists(matrix_file):
            self.transform_matrix = np.load(matrix_file)
            self.is_calibrated = True
            print(f"✅ Kalibrierung geladen")

        if os.path.exists(points_file):
            with open(points_file, 'r') as f:
                data = json.load(f)
            self.pixel_points = data.get("pixel_points", [])
            self.robot_points = data.get("robot_points", [])

        return self.is_calibrated
|
||||
@@ -0,0 +1,84 @@
|
||||
"""
|
||||
Beer Bot Vision - Zentrale Konfiguration für das Vision-System
|
||||
"""
|
||||
class Config:
|
||||
|
||||
# Kamera-Einstellungen
|
||||
CAMERA_ID = 1
|
||||
CAMERA_WIDTH = 1280
|
||||
CAMERA_HEIGHT = 720
|
||||
|
||||
# YOLO-Einstellungen
|
||||
YOLO_MODEL = "yolov8m.pt"
|
||||
CONFIDENCE_THRESHOLD = 0.5
|
||||
BOTTLE_CLASS_ID = 39
|
||||
|
||||
# Roboter-Einstellungen
|
||||
BOTTLE_GRIP_Z = 25.0
|
||||
BOTTLE_ABOVE_Z = 99.16
|
||||
SAFE_Z = 120.0
|
||||
CALIBRATION_Z = 25.0
|
||||
PICK_R_OFFSET_DEG = 0.0
|
||||
PICK_R_DEG = 90.0
|
||||
ROUTE_MAX_REACH_MM = 320.0
|
||||
TOOL_OFFSET_MM = 20.0
|
||||
ROUTE_RADIUS_MARGIN = 5.0
|
||||
ROUTE_MAX_RADIUS = ROUTE_MAX_REACH_MM - ROUTE_RADIUS_MARGIN - TOOL_OFFSET_MM
|
||||
ROUTE_MIN_RADIUS = 60.0
|
||||
ROUTE_FALLBACK_ENABLE = True
|
||||
ROUTE_FALLBACK_ANGLE_DEG = 0.0
|
||||
ROUTE_FALLBACK_RADIUS = (ROUTE_MIN_RADIUS + ROUTE_MAX_RADIUS) * 0.5
|
||||
ROUTE_THETA_MIN_DEG = -120.0
|
||||
ROUTE_THETA_MAX_DEG = 120.0
|
||||
ROUTE_Z_CLEARANCE = SAFE_Z
|
||||
ROUTE_ARC_STEP_DEG = 12.0
|
||||
ROUTE_SEGMENT_SAMPLE_MM = 10.0
|
||||
ROUTE_ENFORCE_MOVL = True
|
||||
|
||||
GLASS_POSITIONS = {
|
||||
"approach": (152.8, -246.3, 64.7, 25.8),
|
||||
"tilt_1": (150.7, -240.8, 60.0, -5.8),
|
||||
"pour": (135.6, -176.1, 44.0, -48.9),
|
||||
"pour_more": (122.9, -159.6, 49.0, -100.0),
|
||||
}
|
||||
|
||||
POUR_SEQUENCE = [
|
||||
# gedreht an Glas 2.4 – einschenk start
|
||||
{"x": 190.993, "y": -173.4277, "z": 82.0, "r": -46.599, "pause": 5},
|
||||
# 2.5
|
||||
{"x": 189.993, "y": -172.4277, "z": 82.0, "r": -49.599, "pause": 5},
|
||||
# 3
|
||||
{"x": 188.7364, "y": -171.6266, "z": 82.5, "r": -52.139, "pause": 7},
|
||||
# 3.1
|
||||
{"x": 188.7364, "y": -171.6266, "z": 83.5, "r": -55.139, "pause": 7},
|
||||
# 3.2
|
||||
{"x": 188.7364, "y": -171.6266, "z": 83.5, "r": -59.139, "pause": 5},
|
||||
# 3.3
|
||||
{"x": 187.7364, "y": -170.6266, "z": 83.5, "r": -61.139, "pause": 7},
|
||||
# 4
|
||||
{"x": 187.5676, "y": -170.2011, "z": 83.5, "r": -64.039, "pause": 5},
|
||||
# 5
|
||||
{"x": 187.1694, "y": -171.2617, "z": 86.5, "r": -75.559, "pause": 5},
|
||||
# 6
|
||||
{"x": 182.9012, "y": -167.3563, "z": 90.5, "r": -84.179, "pause": 7},
|
||||
# 7 – einschenk ende
|
||||
{"x": 179.4419, "y": -164.1910, "z": 91.5, "r": -100.279, "pause": 3},
|
||||
]
|
||||
|
||||
UI_COLORS = {
|
||||
"bg_dark": (30, 30, 30),
|
||||
"bg_panel": (45, 45, 45),
|
||||
"bg_header": (60, 60, 60),
|
||||
"accent_blue": (255, 180, 0),
|
||||
"accent_green": (0, 255, 100),
|
||||
"accent_red": (80, 80, 255),
|
||||
"accent_yellow": (0, 220, 255),
|
||||
"accent_cyan": (255, 255, 0),
|
||||
"text_white": (255, 255, 255),
|
||||
"text_gray": (180, 180, 180),
|
||||
"text_dim": (120, 120, 120),
|
||||
}
|
||||
|
||||
UI_SCALE = 1.45
|
||||
CALIBRATION_FILE = "calibration_matrix.npy"
|
||||
CALIBRATION_POINTS_FILE = "calibration_points.json"
|
||||
@@ -0,0 +1,10 @@
|
||||
"""
|
||||
Beer Bot Vision - PTP Modi Konstanten für DOBOT Magician
|
||||
"""
|
||||
# PTP Modi für DOBOT
|
||||
PTP_MODE_JUMP_XYZ = 0 # Jump-Modus (hebt erst an)
|
||||
PTP_MODE_MOVJ_XYZ = 1 # Joint-Interpolation (NICHT für manuelle Steuerung!)
|
||||
PTP_MODE_MOVL_XYZ = 2 # Linear kartesisch (FÜR MANUELLE STEUERUNG!)
|
||||
PTP_MODE_JUMP_ANGLE = 3
|
||||
PTP_MODE_MOVJ_ANGLE = 4
|
||||
PTP_MODE_MOVL_ANGLE = 5
|
||||
@@ -0,0 +1,852 @@
|
||||
"""
|
||||
Beer Bot Vision - Controller Modul
|
||||
"""
|
||||
import cv2
|
||||
import time
|
||||
import math
|
||||
from config import Config
|
||||
from constants import PTP_MODE_MOVL_XYZ, PTP_MODE_MOVJ_XYZ
|
||||
from utils import get_step_multiplier
|
||||
from vision import BeerBotVision
|
||||
from user_interface import ModernUI
|
||||
|
||||
def is_pose_xy_valid(x, y, *, min_r, max_r, theta_min_deg, theta_max_deg):
    """Check whether the XY point lies inside the allowed annular sector.

    The radius must be at least ``min_r`` and, when ``max_r`` is positive,
    at most ``max_r``; the polar angle must fall inside
    [theta_min_deg, theta_max_deg]. A wrapped sector (min > max) spans
    across the +/-180 degree seam.
    """
    radius = math.hypot(x, y)
    radius_ok = radius >= min_r and (max_r <= 0.0 or radius <= max_r)
    if not radius_ok:
        return False

    angle = math.degrees(math.atan2(y, x))
    if theta_min_deg <= theta_max_deg:
        return theta_min_deg <= angle <= theta_max_deg
    # Wrapped sector: accept either side of the seam.
    return angle >= theta_min_deg or angle <= theta_max_deg
|
||||
|
||||
def _segment_sample_count(x1, y1, x2, y2, sample_mm):
|
||||
dist = math.hypot(x2 - x1, y2 - y1)
|
||||
return max(1, int(math.ceil(dist / max(sample_mm, 1.0))))
|
||||
|
||||
def is_segment_xy_valid(x1, y1, x2, y2, *, min_r, max_r, theta_min_deg, theta_max_deg, sample_mm):
    """Validate a straight XY segment by sampling points along it.

    Samples steps+1 evenly spaced points (including both endpoints) and
    requires each to pass is_pose_xy_valid.
    """
    steps = _segment_sample_count(x1, y1, x2, y2, sample_mm)
    return all(
        is_pose_xy_valid(
            x1 + (x2 - x1) * (i / steps),
            y1 + (y2 - y1) * (i / steps),
            min_r=min_r, max_r=max_r,
            theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg,
        )
        for i in range(steps + 1)
    )
|
||||
|
||||
def _min_distance_to_segment(x1, y1, x2, y2):
|
||||
dx = x2 - x1
|
||||
dy = y2 - y1
|
||||
if abs(dx) < 1e-9 and abs(dy) < 1e-9:
|
||||
return math.hypot(x1, y1)
|
||||
t = -(x1 * dx + y1 * dy) / (dx * dx + dy * dy)
|
||||
if t <= 0.0:
|
||||
return math.hypot(x1, y1)
|
||||
if t >= 1.0:
|
||||
return math.hypot(x2, y2)
|
||||
px = x1 + t * dx
|
||||
py = y1 + t * dy
|
||||
return math.hypot(px, py)
|
||||
|
||||
def _delta_ccw(angle0, angle1):
|
||||
return (angle1 - angle0) % (2.0 * math.pi)
|
||||
|
||||
def _delta_cw(angle0, angle1):
|
||||
return -((angle0 - angle1) % (2.0 * math.pi))
|
||||
|
||||
def _build_arc_points(angle0, delta, steps, radius, z):
|
||||
points = []
|
||||
for i in range(1, steps + 1):
|
||||
ang = angle0 + delta * (i / steps)
|
||||
points.append((radius * math.cos(ang), radius * math.sin(ang), z))
|
||||
return points
|
||||
|
||||
def _route_xy_valid(start_xy, waypoints, *, min_r, max_r, theta_min_deg, theta_max_deg, sample_mm):
    """Validate a whole route: every waypoint and every connecting segment.

    Walks from start_xy through the (x, y, z) waypoints in order; only the
    XY projection is checked (Z is ignored here). Returns False on the
    first waypoint or segment that violates the annular sector limits.
    """
    px, py = start_xy
    for wx, wy, _wz in waypoints:
        if not is_pose_xy_valid(wx, wy, min_r=min_r, max_r=max_r,
                                theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg):
            return False
        if not is_segment_xy_valid(px, py, wx, wy, min_r=min_r, max_r=max_r,
                                   theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg,
                                   sample_mm=sample_mm):
            return False
        px, py = wx, wy
    return True
|
||||
|
||||
def _points_close(p1, p2, tol=1e-3):
|
||||
return (
|
||||
abs(p1[0] - p2[0]) <= tol
|
||||
and abs(p1[1] - p2[1]) <= tol
|
||||
and abs(p1[2] - p2[2]) <= tol
|
||||
)
|
||||
|
||||
def _compute_fallback_xy(config, *, min_r, max_r, theta_min_deg, theta_max_deg):
    """Compute the configured fallback waypoint as an (x, y) pair.

    Uses ROUTE_FALLBACK_ANGLE_DEG / ROUTE_FALLBACK_RADIUS from config; a
    non-positive radius is replaced by the mid-annulus radius (or min_r +
    50 mm when no outer limit exists). Returns None when the resulting
    point violates the XY limits.
    """
    angle_deg = float(getattr(config, "ROUTE_FALLBACK_ANGLE_DEG", 0.0))
    radius = float(getattr(config, "ROUTE_FALLBACK_RADIUS", 0.0))
    if radius <= 0.0:
        if max_r > 0.0:
            radius = 0.5 * (min_r + max_r)
        else:
            radius = min_r + 50.0

    ang = math.radians(angle_deg)
    x = radius * math.cos(ang)
    y = radius * math.sin(ang)
    if not is_pose_xy_valid(x, y, min_r=min_r, max_r=max_r,
                            theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg):
        print("? Fallback blocked: waypoint outside XY limits")
        return None
    return x, y
|
||||
|
||||
def build_safe_route(start, target, *, config=Config, allow_fallback=True):
    """Plan a collision-safe waypoint route from start to target pose.

    Both poses are (x, y, z, r) tuples; r is ignored for planning. The
    route keeps the XY path inside an annular sector (ROUTE_MIN_RADIUS /
    ROUTE_MAX_RADIUS / ROUTE_THETA_*), lifting to a travel Z when needed.
    A direct line is preferred; otherwise an arc at a safe radius is tried
    in both directions, and finally an optional two-leg detour via the
    configured fallback waypoint.

    Returns:
        list[tuple] | None: (x, y, z) waypoints ending at the target, or
        None when no valid route exists (a reason is printed).
    """
    sx, sy, sz, _ = start
    tx, ty, tz, _ = target

    min_r = float(getattr(config, "ROUTE_MIN_RADIUS", 0.0))
    max_r = float(getattr(config, "ROUTE_MAX_RADIUS", 0.0))
    margin = float(getattr(config, "ROUTE_RADIUS_MARGIN", 0.0))
    theta_min_deg = float(getattr(config, "ROUTE_THETA_MIN_DEG", -180.0))
    theta_max_deg = float(getattr(config, "ROUTE_THETA_MAX_DEG", 180.0))
    travel_z = float(getattr(config, "ROUTE_Z_CLEARANCE", getattr(config, "SAFE_Z", 0.0)))
    sample_mm = float(getattr(config, "ROUTE_SEGMENT_SAMPLE_MM", 10.0))

    # Optional Z envelope (not defined in the default Config).
    z_min = float(getattr(config, "ROUTE_Z_MIN", 0.0))
    z_max = getattr(config, "ROUTE_Z_MAX", None)
    if z_max is not None:
        z_max = float(z_max)

    if sz < z_min or tz < z_min:
        print("? Route blocked: Z below minimum")
        return None
    if z_max is not None and (sz > z_max or tz > z_max):
        print("? Route blocked: Z above maximum")
        return None

    r0 = math.hypot(sx, sy)
    r1 = math.hypot(tx, ty)

    # Both endpoints must be inside the allowed sector.
    if not is_pose_xy_valid(sx, sy, min_r=min_r, max_r=max_r,
                            theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg):
        print("? Route blocked: start outside XY limits")
        return None
    if not is_pose_xy_valid(tx, ty, min_r=min_r, max_r=max_r,
                            theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg):
        print("? Route blocked: target outside XY limits")
        return None

    # Never travel lower than either endpoint.
    travel_z = max(travel_z, sz, tz)

    # Pure Z move: go straight to the target.
    if abs(tx - sx) < 1e-6 and abs(ty - sy) < 1e-6:
        return [(tx, ty, tz)]

    # Try the direct line first; it must also clear the inner radius.
    min_dist = _min_distance_to_segment(sx, sy, tx, ty)
    line_ok = is_segment_xy_valid(sx, sy, tx, ty, min_r=min_r, max_r=max_r,
                                  theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg,
                                  sample_mm=sample_mm)

    if line_ok and min_dist >= min_r:
        return [(tx, ty, tz)]

    # Arc route: pick a radius that clears the inner keep-out zone.
    safe_radius = max(min_r + margin, r0, r1)
    if max_r > 0.0 and safe_radius > max_r:
        print("? Route blocked: safe radius exceeds max radius")
        return None

    angle0 = math.atan2(sy, sx)
    angle1 = math.atan2(ty, tx)

    # Limit the angular step so chord segments between arc samples
    # cannot dip inside min_r.
    max_step = math.radians(float(getattr(config, "ROUTE_ARC_STEP_DEG", 12.0)))
    if min_r > 0.0 and safe_radius > min_r:
        ratio = min(1.0, max(0.0, min_r / safe_radius))
        safe_step = 2.0 * math.acos(ratio) * 0.95
        if safe_step > 0.0:
            max_step = min(max_step, safe_step)

    # Try both sweep directions and keep the valid candidates.
    candidates = []
    for delta in (_delta_cw(angle0, angle1), _delta_ccw(angle0, angle1)):
        if abs(delta) < 1e-9:
            continue
        steps = max(1, int(math.ceil(abs(delta) / max_step))) if max_step > 0.0 else 1
        waypoints = []
        if abs(sz - travel_z) > 0.5:
            waypoints.append((sx, sy, travel_z))
        if abs(safe_radius - r0) > 0.5:
            waypoints.append((safe_radius * math.cos(angle0), safe_radius * math.sin(angle0), travel_z))

        waypoints.extend(_build_arc_points(angle0, delta, steps, safe_radius, travel_z))

        if abs(r1 - safe_radius) > 0.5:
            waypoints.append((tx, ty, travel_z))
        if abs(tz - travel_z) > 0.5:
            waypoints.append((tx, ty, tz))

        if _route_xy_valid((sx, sy), waypoints, min_r=min_r, max_r=max_r,
                           theta_min_deg=theta_min_deg, theta_max_deg=theta_max_deg,
                           sample_mm=sample_mm):
            candidates.append((abs(delta), waypoints))

    if not candidates:
        print("? Route blocked: no valid arc within sector")
        if not allow_fallback or not getattr(config, "ROUTE_FALLBACK_ENABLE", False):
            return None

        # Two-leg detour via the configured fallback waypoint; recursion
        # with allow_fallback=False prevents infinite fallback nesting.
        fallback_xy = _compute_fallback_xy(
            config,
            min_r=min_r,
            max_r=max_r,
            theta_min_deg=theta_min_deg,
            theta_max_deg=theta_max_deg,
        )
        if fallback_xy is None:
            return None

        fx, fy = fallback_xy
        print(f"? Using fallback route via ({fx:.1f}, {fy:.1f})")
        leg1 = build_safe_route(start, (fx, fy, travel_z, 0.0), config=config, allow_fallback=False)
        if leg1 is None:
            print("? Fallback failed: leg1 unreachable")
            return None
        leg2 = build_safe_route((fx, fy, travel_z, 0.0), target, config=config, allow_fallback=False)
        if leg2 is None:
            print("? Fallback failed: leg2 unreachable")
            return None

        # Drop the duplicated joint waypoint between the two legs.
        if leg1 and leg2 and _points_close(leg1[-1], leg2[0]):
            leg2 = leg2[1:]
        return leg1 + leg2

    # Prefer the shortest sweep; tie-break on waypoint count.
    candidates.sort(key=lambda item: (item[0], len(item[1])))
    return candidates[0][1]
|
||||
|
||||
# DOBOT import: optional — without DobotRPC only the vision side works.
try:
    from DobotRPC import DobotlinkAdapter, RPCClient
    DOBOT_AVAILABLE = True
except ImportError:
    DOBOT_AVAILABLE = False
    print("⚠️ DobotRPC nicht installiert.")
|
||||
|
||||
class BeerBotController:
|
||||
|
||||
    def __init__(self):
        """Set up vision, UI and the (not yet connected) robot state."""
        self.vision = BeerBotVision()
        self.dobot = None             # DobotlinkAdapter once connected
        self.port = None              # serial port name, e.g. "COM5"
        self.connected = False
        self.compressor_on = False
        self.gripper_closed = False
        self.auto_lock_r = False      # when True, move_to defaults r_world to Config.PICK_R_DEG
        self._allow_pour_r = False    # temporarily bypasses the R lock above
        self.ui = ModernUI()
        self.step_size = 10.0         # manual jog step in mm
|
||||
|
||||
def connect_dobot(self):
|
||||
if not DOBOT_AVAILABLE:
|
||||
print("❌ DobotRPC nicht verfügbar!")
|
||||
return False
|
||||
|
||||
print("🔄 Verbinde mit DOBOT...")
|
||||
self.dobot = DobotlinkAdapter(RPCClient(), is_sync=True)
|
||||
|
||||
try:
|
||||
res = self.dobot.Magician.SearchDobot()
|
||||
if not res:
|
||||
print("❌ Kein DOBOT gefunden!")
|
||||
return False
|
||||
|
||||
cp210x = [d for d in res if "CP210x" in d.get("description", "")]
|
||||
self.port = cp210x[0]["portName"] if cp210x else "COM5"
|
||||
|
||||
try:
|
||||
self.dobot.Magician.DisconnectDobot(portName=self.port)
|
||||
except:
|
||||
pass
|
||||
|
||||
self.dobot.Magician.ConnectDobot(portName=self.port)
|
||||
print(f"✅ DOBOT verbunden auf {self.port}")
|
||||
|
||||
self.dobot.Magician.SetEndEffectorType(portName=self.port, endEffectorType=3)
|
||||
self.dobot.Magician.SetEndEffectorGripper(portName=self.port, enable=False, on=False)
|
||||
self.connected = True
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"❌ Verbindungsfehler: {e}")
|
||||
return False
|
||||
|
||||
def clear_alarms(self):
|
||||
if not self.connected:
|
||||
return False
|
||||
try:
|
||||
self.dobot.Magician.ClearAllAlarmsState(portName=self.port)
|
||||
print("✅ Alarme gelöscht!")
|
||||
return True
|
||||
except:
|
||||
try:
|
||||
self.dobot.Magician.SetQueuedCmdClear(portName=self.port)
|
||||
self.dobot.Magician.SetQueuedCmdStartExec(portName=self.port)
|
||||
print("✅ Queue gelöscht")
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
def set_compressor(self, on=True):
|
||||
if not self.connected:
|
||||
return False
|
||||
self.dobot.Magician.SetEndEffectorGripper(portName=self.port, enable=on, on=False)
|
||||
self.compressor_on = on
|
||||
print(f"💨 Kompressor: {'AN' if on else 'AUS'}")
|
||||
return True
|
||||
|
||||
def get_current_position(self):
|
||||
if not self.connected:
|
||||
return None
|
||||
pose = self.dobot.Magician.GetPose(portName=self.port)
|
||||
return (pose['x'], pose['y'], pose['z'], pose['r'])
|
||||
|
||||
def _send_ptp(self, x, y, z, r, mode, wait):
|
||||
self.dobot.Magician.SetPTPCmd(
|
||||
portName=self.port,
|
||||
ptpMode=mode,
|
||||
x=x, y=y, z=z, r=r
|
||||
)
|
||||
if wait > 0:
|
||||
time.sleep(wait)
|
||||
|
||||
def _theta_deg(self, x, y):
|
||||
return math.degrees(math.atan2(y, x))
|
||||
|
||||
def _wrap_angle_deg(self, angle):
|
||||
return (angle + 180.0) % 360.0 - 180.0
|
||||
|
||||
def _nearest_angle(self, target, reference):
|
||||
return reference + self._wrap_angle_deg(target - reference)
|
||||
|
||||
def _r_for_world(self, x, y, r_world):
|
||||
offset = float(getattr(Config, "PICK_R_OFFSET_DEG", 0.0))
|
||||
return self._wrap_angle_deg(float(r_world) - self._theta_deg(x, y) + offset)
|
||||
|
||||
def _world_r_from_pose(self, pose):
|
||||
offset = float(getattr(Config, "PICK_R_OFFSET_DEG", 0.0))
|
||||
return pose[3] + self._theta_deg(pose[0], pose[1]) - offset
|
||||
|
||||
    def _build_safe_route(self, start, target):
        """Delegate to the module-level build_safe_route with default Config."""
        return build_safe_route(start, target)
|
||||
|
||||
    def move_to(self, x, y, z, r, mode=PTP_MODE_MOVL_XYZ, wait=0.3, use_route=True, r_world=None):
        """Move to pose (x, y, z, r), optionally via a planned safe route.

        Args:
            x, y, z, r: Target pose; r is the servo R unless r_world is given.
            mode: PTP mode; forced to MOVL when routing with ROUTE_ENFORCE_MOVL.
            wait: Sleep after the final command (intermediate waypoints use
                a shorter step wait).
            use_route: Plan a collision-safe route instead of a direct move.
            r_world: World-frame tool angle to hold along the route; when
                None and auto_lock_r is set (and pouring is not active),
                Config.PICK_R_DEG is used.

        Returns:
            bool: False when offline or no safe route exists, else True.
        """
        if not self.connected:
            return False

        if r_world is None and self.auto_lock_r and not self._allow_pour_r:
            r_world = float(getattr(Config, "PICK_R_DEG", 0.0))

        # Safe route assumes MOVL segments between waypoints.
        if use_route and getattr(Config, "ROUTE_ENFORCE_MOVL", True):
            if mode != PTP_MODE_MOVL_XYZ:
                print("? Route uses MOVL; overriding ptpMode to MOVL")
                mode = PTP_MODE_MOVL_XYZ
        elif use_route and mode != PTP_MODE_MOVL_XYZ:
            print("? Warning: use_route with non-MOVL mode may be unsafe")

        if not use_route:
            # Direct move; still honor a requested world-frame R.
            if r_world is not None:
                r = self._r_for_world(x, y, r_world)
                pose = self.get_current_position()
                if pose:
                    r = self._nearest_angle(r, pose[3])
            self._send_ptp(x, y, z, r, mode, wait)
            return True

        start_pos = self.get_current_position()
        if not start_pos:
            # Without a known start pose no route can be planned; move directly.
            self._send_ptp(x, y, z, r, mode, wait)
            return True

        waypoints = self._build_safe_route(start_pos, (x, y, z, r))
        if waypoints is None:
            print("? Movement canceled: no safe route")
            return False

        step_wait = min(wait, 0.08)
        last = len(waypoints) - 1
        last_r = None
        if r_world is not None and start_pos:
            last_r = start_pos[3]
        for i, (wx, wy, wz) in enumerate(waypoints):
            # Full wait only on the final waypoint.
            w = wait if i == last else step_wait
            cmd_r = r
            if r_world is not None:
                # Keep the tool's world orientation along the arc, choosing
                # the servo angle nearest to the previous one.
                cmd_r = self._r_for_world(wx, wy, r_world)
                if last_r is not None:
                    cmd_r = self._nearest_angle(cmd_r, last_r)
                last_r = cmd_r
            self._send_ptp(wx, wy, wz, cmd_r, mode, w)
        return True
|
||||
|
||||
def move_relative(self, axis, step):
    """Jog one axis ('x', 'y', 'z' or 'r') by ``step`` from the current pose.

    Horizontal steps are scaled down near the table via get_step_multiplier.
    Returns False when disconnected, the pose is unknown, or the axis name
    is invalid; True after the move command has been issued.
    """
    if not self.connected:
        return False

    pose = self.get_current_position()
    if not pose:
        return False

    coords = list(pose)
    axis_index = {"x": 0, "y": 1, "z": 2, "r": 3}
    if axis not in axis_index:
        print(f"❌ Unbekannte Achse: {axis}")
        return False

    delta = step
    if axis in ('x', 'y'):
        # Slow down horizontal jogs close to the table (z-dependent factor).
        delta = delta * get_step_multiplier(coords[2])
    coords[axis_index[axis]] += delta
    x, y, z, r = coords

    # Linear mode for Cartesian motion.
    self.dobot.Magician.SetPTPCmd(
        portName=self.port,
        ptpMode=PTP_MODE_MOVL_XYZ,
        x=x, y=y, z=z, r=r
    )
    time.sleep(0.03)
    return True
|
||||
|
||||
def grip(self, close=True):
    """Close (close=True) or open (close=False) the pneumatic gripper.

    Turns the compressor on first when it is off, then blocks ~0.8 s so
    the pneumatics can settle. Returns False when not connected.
    """
    if not self.connected:
        return False

    if not self.compressor_on:
        self.set_compressor(True)

    self.dobot.Magician.SetEndEffectorGripper(portName=self.port, enable=True, on=close)
    self.gripper_closed = close
    time.sleep(0.8)
    return True
|
||||
|
||||
def _handle_keyboard(self, key):
    """Translate one cv2.waitKey code into a robot action.

    Jog keys move an axis by the current step size, digit keys change the
    step size, and the remaining keys drive gripper/compressor/home/alarms.
    Unknown keys are ignored.
    """
    try:
        ch = chr(key).lower()
    except Exception:
        # Key codes outside the valid chr() range map to "no character".
        ch = ""

    s = self.step_size

    # Relative-motion keys: (axis, signed step) per key.
    jog = {
        'w': ("y", +s),
        's': ("y", -s),
        'a': ("x", -s),
        'd': ("x", +s),
        'r': ("z", +s),
        'f': ("z", -s),
        'q': ("r", -5),
        'e': ("r", +5),
    }
    # Step-size presets: (new size, exact status line to print).
    presets = {
        '1': (1.0, "📏 Schrittweite: 1mm"),
        '2': (5.0, "📏 Schrittweite: 5mm"),
        '3': (10.0, "📏 Schrittweite: 10mm"),
        '4': (20.0, "📏 Schrittweite: 20mm"),
    }

    if ch in jog:
        axis, delta = jog[ch]
        self.move_relative(axis, delta)
    elif ch in presets:
        self.step_size, message = presets[ch]
        print(message)
    elif ch == 'g':
        self.grip(close=True)
        print("✊ Greifer geschlossen")
    elif ch == 'o':
        self.grip(close=False)
        print("🖐️ Greifer geöffnet")
    elif ch == 'k':
        self.set_compressor(not self.compressor_on)
    elif ch == 'h':
        print("🏠 Fahre Home...")
        self.move_to(210, 0, 150, 0)
    elif ch == 'x':
        self.clear_alarms()
|
||||
|
||||
def run_manual_control(self):
    """Interactive jog mode: show a control-panel window and forward key
    presses to _handle_keyboard until '0' is pressed."""
    if not self.connected:
        print("⚠️ Bitte zuerst DOBOT verbinden!")
        return

    print("🎮 Starte interaktive Steuerung...")
    print(" W/S = Vor/Zurück (Y)")
    print(" A/D = Links/Rechts (X)")
    print(" R/F = Hoch/Runter (Z)")
    print(" Q/E = Greifer-Rotation (R)")
    print(" 0 = Beenden")

    window_name = "DOBOT Steuerung"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    # Render one panel up front only to size the window to the panel image.
    tmp_panel = self.ui.create_control_panel(
        self.get_current_position(),
        self.step_size,
        self.compressor_on,
        self.gripper_closed,
    )
    ph, pw = tmp_panel.shape[:2]
    cv2.resizeWindow(window_name, pw, ph)

    while True:
        # Redraw every tick so position/step/compressor state stay current.
        pos = self.get_current_position()
        panel = self.ui.create_control_panel(
            pos,
            self.step_size,
            self.compressor_on,
            self.gripper_closed,
        )
        cv2.imshow(window_name, panel)

        key = cv2.waitKey(50) & 0xFF

        if key == ord('0'):
            break
        elif key != 255:  # 255 = no key pressed during the 50 ms poll
            self._handle_keyboard(key)

    cv2.destroyWindow(window_name)
    print("↩️ Zurück zum Hauptmenü")
|
||||
|
||||
def run_calibration(self):
    """Collect pixel→robot calibration point pairs interactively.

    Workflow: click a point in the camera window, jog the robot over that
    spot (WASD etc. via _handle_keyboard), press ENTER to store the pair;
    'c' computes and saves the transformation, 'p' toggles the help popup,
    '0' exits.
    """
    if not self.connected:
        print("? Bitte zuerst DOBOT verbinden!")
        return

    print("?? Starte Kalibrierung...")
    # All calibration pairs are captured at one fixed height.
    calibration_z = float(getattr(Config, "CALIBRATION_Z", getattr(Config, "BOTTLE_GRIP_Z", 0.0)))
    print(f" Zielhoehe: Z={calibration_z:.1f}")

    if not self.vision.camera_initialized:
        self.vision.init_camera()

    window_cam = "Kalibrierung - Kamera"
    window_ctrl = "Kalibrierung - Steuerung"
    cv2.namedWindow(window_cam, cv2.WINDOW_NORMAL)
    cv2.namedWindow(window_ctrl, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_ctrl, self.ui.s(380), self.ui.s(580))

    click_point = None        # pending pixel clicked by the user
    show_popup = True         # help overlay toggle ('p')
    waiting_for_save = False  # True between click and ENTER confirmation

    def mouse_callback(event, x, y, flags, param):
        # Record the clicked pixel; the pair is saved later on ENTER.
        nonlocal click_point, waiting_for_save
        if event == cv2.EVENT_LBUTTONDOWN:
            click_point = (x, y)
            waiting_for_save = True
            print(f"?? Punkt markiert: ({x}, {y}) - Fahre Roboter hin und druecke ENTER")

    cv2.setMouseCallback(window_cam, mouse_callback)

    print("?? TIPP: Klicke auf das Steuerungs-Fenster fuer WASD-Steuerung!")

    while True:
        frame = self.vision.capture_frame()
        if frame is None:
            break

        # Draw the points collected so far.
        for i, (px, py) in enumerate(self.vision.calibrator.pixel_points):
            cv2.circle(frame, (int(px), int(py)), 12, (0, 255, 0), 2)
            cv2.putText(frame, str(i + 1), (int(px) + 15, int(py) + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # Highlight the click that is pending confirmation.
        if click_point and waiting_for_save:
            cv2.circle(frame, click_point, 15, (0, 255, 255), 3)
            cv2.putText(frame, "?", (click_point[0] + 18, click_point[1] + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
            cv2.putText(frame, "Klicke auf Steuerungs-Fenster fuer WASD!",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

        frame = self.ui.draw_calibration_help(frame, len(self.vision.calibrator.pixel_points), show_popup)
        cv2.imshow(window_cam, frame)

        pos = self.get_current_position()
        panel = self.ui.create_control_panel(pos, self.step_size, self.compressor_on)
        cv2.imshow(window_ctrl, panel)

        key = cv2.waitKey(50) & 0xFF

        if key == ord('0'):
            break
        elif key == ord('p'):
            show_popup = not show_popup
        elif key == 13:  # Enter: store the click/robot-pose pair
            if click_point and waiting_for_save:
                pos = self.get_current_position()
                if pos:
                    # Normalize the robot onto the calibration height first.
                    if abs(pos[2] - calibration_z) > 0.5:
                        if self.move_to(pos[0], pos[1], calibration_z, pos[3]):
                            pos = self.get_current_position()
                    if pos:
                        self.vision.calibrator.add_calibration_point(
                            click_point[0], click_point[1], pos[0], pos[1]
                        )
                        click_point = None
                        waiting_for_save = False
        elif key == ord('c'):
            if self.vision.calibrator.calculate_transformation():
                self.vision.calibrator.save_calibration()
        elif key != 255:
            self._handle_keyboard(key)

    cv2.destroyWindow(window_cam)
    cv2.destroyWindow(window_ctrl)
    print("?? Kalibrierung beendet")
|
||||
|
||||
def run_calibration_test(self):
    """Verify and refine an existing calibration by click-to-move testing.

    Clicking in the camera window drives the robot to the position the
    calibration predicts; the user can jog to the true spot and press
    ENTER to overwrite the point, 'l' to delete the nearest point, 'c' to
    recompute and save, '0' to exit.
    """
    if not self.connected:
        print("? Bitte zuerst DOBOT verbinden!")
        return

    if not self.vision.calibrator.is_calibrated:
        if not self.vision.load_calibration():
            print("? Keine Kalibrierung geladen!")
            return

    if not self.vision.camera_initialized:
        self.vision.init_camera()

    print("?? Starte Kalibrierungstest...")
    calibration_z = float(getattr(Config, "CALIBRATION_Z", getattr(Config, "BOTTLE_GRIP_Z", 0.0)))
    print(f" Zielhoehe: Z={calibration_z:.1f}")
    print(" Klick auf Kamera -> Roboter faehrt hin (oben)")
    print(" Feinjustieren mit WASD/RF/QE")
    print(" ENTER = Punkt ueberschreiben, L = Punkt loeschen")
    print(" C = Berechnen+Speichern, 0 = Ende")

    window_cam = "Kalibrierung - Test"
    window_ctrl = "Kalibrierung - Steuerung"
    cv2.namedWindow(window_cam, cv2.WINDOW_NORMAL)
    cv2.namedWindow(window_ctrl, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_ctrl, self.ui.s(380), self.ui.s(580))

    click_point = None        # last clicked pixel
    predicted_robot = None    # robot coords predicted for that pixel
    waiting_for_save = False  # True until ENTER/L resolves the click
    show_popup = True

    def mouse_callback(event, x, y, flags, param):
        # On click: predict robot coordinates and drive there immediately.
        nonlocal click_point, predicted_robot, waiting_for_save
        if event == cv2.EVENT_LBUTTONDOWN:
            if not self.vision.calibrator.is_calibrated:
                print("? Keine Kalibrierung geladen!")
                return
            try:
                rx, ry = self.vision.calibrator.pixel_to_robot(x, y)
            except Exception:
                print("? Pixel konnte nicht transformiert werden")
                return
            click_point = (x, y)
            predicted_robot = (rx, ry)
            waiting_for_save = True
            print(f"?? Testpunkt: Pixel({x}, {y}) -> Robot({rx:.1f}, {ry:.1f})")
            # Keep the current world rotation while driving to the target.
            pose = self.get_current_position()
            r_world = self._world_r_from_pose(pose) if pose else Config.PICK_R_DEG
            if not self.move_to(
                rx,
                ry,
                calibration_z,
                Config.PICK_R_DEG,
                mode=PTP_MODE_MOVJ_XYZ,
                use_route=False,
                r_world=r_world,
            ):
                print("? Bewegung nicht moeglich")

    cv2.setMouseCallback(window_cam, mouse_callback)

    while True:
        frame = self.vision.capture_frame()
        if frame is None:
            break

        # Draw the stored calibration points.
        for i, (px, py) in enumerate(self.vision.calibrator.pixel_points):
            cv2.circle(frame, (int(px), int(py)), 12, (0, 255, 0), 2)
            cv2.putText(frame, str(i + 1), (int(px) + 15, int(py) + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # Highlight the pending test click.
        if click_point and waiting_for_save:
            cv2.circle(frame, click_point, 15, (0, 255, 255), 3)
            cv2.putText(frame, "?", (click_point[0] + 18, click_point[1] + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)

        frame = self.ui.draw_calibration_help(frame, len(self.vision.calibrator.pixel_points), show_popup)
        cv2.putText(frame, "TESTMODUS: Klick -> Ziel, ENTER = ueberschreiben, L = loeschen",
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
        cv2.imshow(window_cam, frame)

        pos = self.get_current_position()
        panel = self.ui.create_control_panel(pos, self.step_size, self.compressor_on, self.gripper_closed)
        cv2.imshow(window_ctrl, panel)

        key = cv2.waitKey(50) & 0xFF

        if key == ord('0'):
            break
        elif key == ord('p'):
            show_popup = not show_popup
        elif key == 13:  # Enter: overwrite the point with the corrected pose
            if click_point and waiting_for_save:
                pos = self.get_current_position()
                if pos:
                    # Normalize to the calibration height first.
                    if abs(pos[2] - calibration_z) > 0.5:
                        if self.move_to(pos[0], pos[1], calibration_z, pos[3]):
                            pos = self.get_current_position()
                    if pos:
                        rx, ry = pos[0], pos[1]
                        if predicted_robot:
                            # Report how far the prediction was off.
                            dx = rx - predicted_robot[0]
                            dy = ry - predicted_robot[1]
                            print(f"? Korrektur: dx={dx:.1f} dy={dy:.1f}")
                        self.vision.calibrator.upsert_calibration_point(
                            click_point[0], click_point[1], rx, ry
                        )
                        click_point = None
                        predicted_robot = None
                        waiting_for_save = False
        elif key == ord('l'):
            if click_point:
                if self.vision.calibrator.delete_nearest_point(click_point[0], click_point[1]):
                    click_point = None
                    predicted_robot = None
                    waiting_for_save = False
            else:
                print("? Bitte zuerst im Bild klicken")
        elif key == ord('c'):
            if self.vision.calibrator.calculate_transformation():
                self.vision.calibrator.save_calibration()
        elif key != 255:
            self._handle_keyboard(key)

    cv2.destroyWindow(window_cam)
    cv2.destroyWindow(window_ctrl)
    print("?? Kalibrierungstest beendet")
|
||||
|
||||
def _pick_bottle_at(self, x, y):
    """Pick up a bottle at (x, y): open gripper, descend in two stages,
    close the gripper, then lift to the safe travel height.

    Returns False as soon as any move cannot be routed safely.
    """
    print(f"🍺 Greife Flasche bei ({x:.1f}, {y:.1f})")
    r = float(getattr(Config, "PICK_R_DEG", 0.0))
    self.grip(close=False)
    # Approach height first, then the grip height.
    for height in (Config.BOTTLE_ABOVE_Z, Config.BOTTLE_GRIP_Z):
        if not self.move_to(x, y, height, r):
            return False
    self.grip(close=True)
    return self.move_to(x, y, Config.SAFE_Z, r)
|
||||
|
||||
def _move_xyzr(self, x, y, z, r, ptpMode=PTP_MODE_MOVL_XYZ, wait=0.2, use_route=True):
    # Convenience adapter: forwards to move_to using the keyword names the
    # pour-sequence helpers expect; returns move_to's boolean result.
    return self.move_to(x, y, z, r, mode=ptpMode, wait=wait, use_route=use_route)
|
||||
|
||||
def pour_at_glass(self):
    """Execute the configured pour sequence (Config.POUR_SEQUENCE).

    Temporarily allows free head rotation (``_allow_pour_r``) so move_to
    does not pin r to the pick angle; the flag is always restored, even
    when a step fails. Returns False when any step has no safe route.
    """
    self._allow_pour_r = True
    try:
        for waypoint in Config.POUR_SEQUENCE:
            reached = self._move_xyzr(
                waypoint["x"],
                waypoint["y"],
                waypoint["z"],
                waypoint["r"],
            )
            if not reached:
                print("? Pour aborted: no safe route")
                return False
    finally:
        # Restore the rotation lock no matter how we leave the sequence.
        self._allow_pour_r = False

    return True
|
||||
|
||||
def _return_bottle(self, x, y):
    """Put the bottle back down at (x, y): approach at the safe height,
    lower to the grip height, release, then retreat upward.

    Returns False as soon as any move cannot be routed safely.
    """
    print(f"🏠 Abstellen bei ({x:.1f}, {y:.1f})")
    r = float(getattr(Config, "PICK_R_DEG", 0.0))
    # Travel height first, then down to the set-down height.
    for height in (Config.SAFE_Z, Config.BOTTLE_GRIP_Z):
        if not self.move_to(x, y, height, r):
            return False
    self.grip(close=False)
    return self.move_to(x, y, Config.BOTTLE_ABOVE_Z, r)
|
||||
|
||||
def find_pick_pour_and_return(self):
    """Full autonomous cycle: detect a bottle, pick it, pour, return it,
    then go to the home position.

    Locks the world rotation (``auto_lock_r``) for the whole cycle so
    move_to pins r to the pick angle; the lock is always released.
    Returns True on full success, False on any aborted stage.
    """
    print("?? Starte Ablauf...")

    self.auto_lock_r = True
    try:
        if not self.compressor_on:
            self.set_compressor(True)

        if not self.vision.calibrator.is_calibrated:
            if not self.vision.load_calibration():
                print("? Keine Kalibrierung!")
                return False

        # Show the detection overlay briefly before acting on it.
        self.vision.detect_objects(visualize=True)
        cv2.waitKey(1000)

        # Fall back to the origin when the pose is unknown so nearest-bottle
        # selection still works.
        pos = self.get_current_position() or (0, 0, 0, 0)
        bottle = self.vision.get_nearest_bottle((pos[0], pos[1]))

        if not bottle:
            print("? Keine Flasche gefunden!")
            cv2.destroyAllWindows()
            return False

        rx, ry = bottle["robot_coords"]
        print(f"? Flasche bei ({rx:.1f}, {ry:.1f})")
        cv2.destroyAllWindows()

        if not self._pick_bottle_at(rx, ry):
            print("? Ablauf abgebrochen: keine sichere Route zum Aufnehmen")
            return False
        if not self.pour_at_glass():
            print("? Ablauf abgebrochen: keine sichere Route zum Glas")
            return False
        if not self._return_bottle(rx, ry):
            print("? Ablauf abgebrochen: keine sichere Route zum Abstellen")
            return False

        # Home failure is only informational; the cycle itself succeeded.
        if not self.move_to(210, 0, 150, 0):
            print("? Hinweis: Home-Position nicht erreichbar")
        print("?? Fertig!")
        return True
    finally:
        self.auto_lock_r = False
|
||||
|
||||
def disconnect(self):
    """Release the camera and cleanly disconnect from the DOBOT.

    Switches the compressor off first when it is running; safe to call
    when already disconnected.
    """
    self.vision.release()
    if self.connected and self.dobot:
        if self.compressor_on:
            self.set_compressor(False)
        self.dobot.Magician.DisconnectDobot(portName=self.port)
        print("✅ DOBOT getrennt")
        self.connected = False
|
||||
60
PROJECT_SOURCE_CODE/python_dobot_yolo/beerbot_vision/main.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""
|
||||
Beer Bot Vision - Hauptprogramm
|
||||
"""
|
||||
from controller import BeerBotController
|
||||
|
||||
def print_menu():
    """Print the main menu to stdout."""
    separator = "=" * 50
    entries = (
        " 🍺 BEER BOT VISION 🍺",
        separator,
        " 1. DOBOT verbinden",
        " 2. Kalibrierung starten",
        " 3. Manuelle Steuerung",
        " 4. Live-Erkennung (nur Kamera)",
        " 5. Automatischer Ablauf",
        " 6. Kalibrierung laden",
        " 7. Kalibrierung testen/anpassen",
        " 0. Beenden",
        separator,
    )
    print("\n" + separator)
    for line in entries:
        print(line)
|
||||
|
||||
def main():
    """Top-level menu loop: read a choice and dispatch to the controller."""
    controller = BeerBotController()

    def load_calibration():
        # Menu item 6: load a stored calibration and report the result.
        if controller.vision.load_calibration():
            print("✅ Kalibrierung geladen!")
        else:
            print("❌ Keine Kalibrierung gefunden!")

    # Lambdas defer attribute lookup to call time, matching the original
    # if/elif chain's behavior.
    actions = {
        "1": lambda: controller.connect_dobot(),
        "2": lambda: controller.run_calibration(),
        "3": lambda: controller.run_manual_control(),
        "4": lambda: controller.vision.run_live_detection(),
        "5": lambda: controller.find_pick_pour_and_return(),
        "6": load_calibration,
        "7": lambda: controller.run_calibration_test(),
    }

    while True:
        print_menu()
        choice = input("Auswahl: ").strip()

        if choice == "0":
            print("👋 Auf Wiedersehen!")
            controller.disconnect()
            break

        action = actions.get(choice)
        if action is None:
            print("❌ Ungültige Auswahl!")
        else:
            action()


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,20 @@
|
||||
# Beer Bot - Requirements
|
||||
# Installation: pip install -r requirements.txt
|
||||
|
||||
# DOBOT Steuerung
|
||||
DobotRPC
|
||||
|
||||
# Computer Vision & AI
|
||||
ultralytics>=8.0.0 # YOLOv8 Objekterkennung
|
||||
opencv-python>=4.8.0 # Bildverarbeitung
|
||||
|
||||
# Numerische Berechnungen
|
||||
numpy>=1.24.0
|
||||
|
||||
# Optional: Für bessere Performance mit GPU
|
||||
# torch>=2.0.0 # PyTorch (wird von ultralytics installiert)
|
||||
# torchvision>=0.15.0
|
||||
|
||||
# Optional: Für Bildaugmentation beim Custom-Training
|
||||
# albumentations>=1.3.0
|
||||
# roboflow # Dataset-Management
|
||||
@@ -0,0 +1,224 @@
|
||||
"""
|
||||
Beer Bot Vision - User Interface
|
||||
"""
|
||||
import cv2
|
||||
import numpy as np
|
||||
from config import Config
|
||||
from utils import get_step_multiplier
|
||||
|
||||
class ModernUI:
    """Scalable OpenCV-drawn UI: control panel and calibration overlay.

    All pixel dimensions in the base design (380x580 panel) go through
    ``s()`` so the whole UI scales with one factor (``Config.UI_SCALE``
    by default).
    """

    def __init__(self, scale=None):
        # scale: optional override for Config.UI_SCALE.
        self.colors = Config.UI_COLORS
        self.scale = float(scale if scale is not None else getattr(Config, "UI_SCALE", 1.0))

        # Couple line thickness / font weight to the scale (minimum 1 px).
        self.thin = max(1, int(round(1 * self.scale)))
        self.norm = max(1, int(round(2 * self.scale)))

    def s(self, v: float) -> int:
        """Scale a base-design pixel value to the current UI scale."""
        return int(round(v * self.scale))

    def draw_rounded_rect(self, img, pt1, pt2, color, radius=10, thickness=-1):
        """Draw a rectangle with rounded corners.

        thickness == -1 fills the shape; any other value draws only the
        four edge lines (corner arcs are left open in outline mode).
        """
        x1, y1 = pt1
        x2, y2 = pt2
        radius = max(1, int(round(radius)))

        if thickness == -1:
            # Filled: two overlapping rectangles plus four corner circles.
            cv2.rectangle(img, (x1 + radius, y1), (x2 - radius, y2), color, -1)
            cv2.rectangle(img, (x1, y1 + radius), (x2, y2 - radius), color, -1)
            cv2.circle(img, (x1 + radius, y1 + radius), radius, color, -1)
            cv2.circle(img, (x2 - radius, y1 + radius), radius, color, -1)
            cv2.circle(img, (x1 + radius, y2 - radius), radius, color, -1)
            cv2.circle(img, (x2 - radius, y2 - radius), radius, color, -1)
        else:
            # Outline: four inset edge lines.
            cv2.line(img, (x1 + radius, y1), (x2 - radius, y1), color, thickness)
            cv2.line(img, (x1 + radius, y2), (x2 - radius, y2), color, thickness)
            cv2.line(img, (x1, y1 + radius), (x1, y2 - radius), color, thickness)
            cv2.line(img, (x2, y1 + radius), (x2, y2 - radius), color, thickness)

    def draw_key_badge(self, img, x, y, key, label, key_color=None, width=None):
        """Draw a keyboard-key badge: a colored key cap plus a text label."""
        key_color = key_color or self.colors["accent_blue"]

        key_width = self.s(42)
        key_height = self.s(34)
        radius = self.s(6)

        label_width = self.s(155) if width is None else width
        total_w = key_width + self.s(8) + label_width

        # Background strip behind key cap and label.
        bg = self.colors["bg_panel"]
        self.draw_rounded_rect(img, (x, y), (x + total_w, y + key_height), bg, radius=radius)

        # Colored key cap.
        self.draw_rounded_rect(img, (x, y), (x + key_width, y + key_height), key_color, radius=radius)

        # Center the key character inside the cap.
        key_font = 0.75 * self.scale
        key_th = self.norm
        ts = cv2.getTextSize(key, cv2.FONT_HERSHEY_SIMPLEX, key_font, key_th)[0]
        tx = x + (key_width - ts[0]) // 2
        ty = y + (key_height + ts[1]) // 2
        cv2.putText(img, key, (tx, ty), cv2.FONT_HERSHEY_SIMPLEX, key_font, self.colors["bg_dark"], key_th)

        label_font = 0.58 * self.scale
        cv2.putText(img, label, (x + key_width + self.s(10), ty),
                    cv2.FONT_HERSHEY_SIMPLEX, label_font, self.colors["text_gray"], self.thin)

    def draw_section_header(self, img, x, y, title, width=300):
        """Draw a horizontal separator line with the section title overlaid."""
        line_y = y + self.s(28)
        cv2.line(img, (x, line_y), (x + width, line_y), self.colors["text_dim"], self.thin)

        # Knock out the line behind the title, then draw the title text.
        font = 0.65 * self.scale
        ts = cv2.getTextSize(title, cv2.FONT_HERSHEY_SIMPLEX, font, self.thin)[0]
        cv2.rectangle(img, (x, y + self.s(14)), (x + ts[0] + self.s(14), y + self.s(38)),
                      self.colors["bg_dark"], -1)
        cv2.putText(img, title, (x + self.s(6), y + self.s(33)),
                    cv2.FONT_HERSHEY_SIMPLEX, font, self.colors["accent_yellow"], self.thin)

    def create_control_panel(self, position, step_size, compressor_on, gripper_closed=None):
        """Render the full control-panel image and return it as a BGR array.

        position: (x, y, z, r) tuple or None when the pose is unknown.
        gripper_closed: accepted for caller compatibility; not rendered by
        the visible implementation.
        """
        base_w, base_h = 380, 580
        width, height = self.s(base_w), self.s(base_h)
        panel = np.zeros((height, width, 3), dtype=np.uint8)
        panel[:] = self.colors["bg_dark"]

        # Header bar.
        cv2.rectangle(panel, (0, 0), (width, self.s(70)), self.colors["bg_header"], -1)
        cv2.putText(panel, "DOBOT STEUERUNG v1.7", (self.s(18), self.s(45)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.95 * self.scale, self.colors["text_white"], self.norm)

        # Position readout.
        y_offset = self.s(88)
        cv2.putText(panel, "POSITION", (self.s(18), y_offset),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.60 * self.scale, self.colors["text_dim"], self.thin)
        y_offset += self.s(28)

        if position:
            x, y, z, r = position
            radius = float(np.sqrt(x ** 2 + y ** 2))

            small = 0.58 * self.scale
            val = 0.75 * self.scale

            cv2.putText(panel, "X", (self.s(18), y_offset), cv2.FONT_HERSHEY_SIMPLEX, small, self.colors["text_dim"], self.thin)
            cv2.putText(panel, f"{x:7.1f}", (self.s(38), y_offset), cv2.FONT_HERSHEY_SIMPLEX, val, self.colors["accent_green"], self.norm)

            cv2.putText(panel, "Y", (self.s(150), y_offset), cv2.FONT_HERSHEY_SIMPLEX, small, self.colors["text_dim"], self.thin)
            cv2.putText(panel, f"{y:7.1f}", (self.s(170), y_offset), cv2.FONT_HERSHEY_SIMPLEX, val, self.colors["accent_green"], self.norm)

            cv2.putText(panel, "Z", (self.s(280), y_offset), cv2.FONT_HERSHEY_SIMPLEX, small, self.colors["text_dim"], self.thin)
            cv2.putText(panel, f"{z:7.1f}", (self.s(300), y_offset), cv2.FONT_HERSHEY_SIMPLEX, val, self.colors["accent_cyan"], self.norm)

            cv2.putText(panel, "R", (self.s(18), y_offset + self.s(28)), cv2.FONT_HERSHEY_SIMPLEX, small, self.colors["text_dim"], self.thin)
            cv2.putText(panel, f"{r:7.1f}", (self.s(38), y_offset + self.s(28)), cv2.FONT_HERSHEY_SIMPLEX, val, self.colors["accent_yellow"], self.norm)

            cv2.putText(panel, f"Radius: {radius:.1f}mm", (self.s(150), y_offset + self.s(28)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.58 * self.scale, self.colors["text_gray"], self.thin)

            # Jog speed factor depends on height (see get_step_multiplier).
            speed_mult = float(get_step_multiplier(z))
            cv2.putText(panel, f"Speed: {speed_mult * 100:.0f}%", (self.s(280), y_offset + self.s(56)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.58 * self.scale, self.colors["text_gray"], self.thin)

        # Status bar: step size and compressor state.
        y_offset += self.s(70)
        cv2.rectangle(panel, (self.s(15), y_offset), (width - self.s(15), y_offset + self.s(44)),
                      self.colors["bg_panel"], -1)

        cv2.putText(panel, f"Schritt: {step_size:g}mm", (self.s(25), y_offset + self.s(30)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.62 * self.scale, self.colors["text_white"], self.thin)

        comp_color = self.colors["accent_green"] if compressor_on else self.colors["accent_red"]
        comp_text = "AN" if compressor_on else "AUS"
        cv2.circle(panel, (self.s(210), y_offset + self.s(22)), self.s(7), comp_color, -1)
        cv2.putText(panel, f"Kompressor: {comp_text}", (self.s(228), y_offset + self.s(30)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.62 * self.scale, self.colors["text_white"], self.thin)

        # Movement key cluster (WASD + RF + QE).
        y_offset += self.s(65)
        self.draw_section_header(panel, self.s(15), y_offset, "BEWEGUNG (Kartesisch Linear)", width - self.s(30))
        y_offset += self.s(55)

        center_x = self.s(95)
        self.draw_key_badge(panel, center_x, y_offset, "W", "Vor (+Y)", self.colors["accent_blue"])
        y_offset += self.s(42)
        self.draw_key_badge(panel, center_x - self.s(70), y_offset, "A", "Links (-X)", self.colors["accent_blue"])
        self.draw_key_badge(panel, center_x, y_offset, "S", "Zurück (-Y)", self.colors["accent_blue"])
        self.draw_key_badge(panel, center_x + self.s(70), y_offset, "D", "Rechts (+X)", self.colors["accent_blue"])

        z_x = self.s(255)
        self.draw_key_badge(panel, z_x, y_offset - self.s(42), "R", "Hoch (+Z)", self.colors["accent_cyan"])
        self.draw_key_badge(panel, z_x, y_offset, "F", "Runter (-Z)", self.colors["accent_cyan"])
        self.draw_key_badge(panel, z_x + self.s(105), y_offset - self.s(42), "Q", "Rot -", self.colors["accent_yellow"])
        self.draw_key_badge(panel, z_x + self.s(105), y_offset, "E", "Rot +", self.colors["accent_yellow"])

        # Step-size presets; the active one is highlighted green.
        y_offset += self.s(70)
        self.draw_section_header(panel, self.s(15), y_offset, "SCHRITTWEITE", width - self.s(30))
        y_offset += self.s(55)

        steps = [("1", "1mm"), ("2", "5mm"), ("3", "10mm"), ("4", "20mm")]
        for i, (key, label) in enumerate(steps):
            selected = (step_size == 1 and key == "1") or (step_size == 5 and key == "2") or \
                       (step_size == 10 and key == "3") or (step_size == 20 and key == "4")
            color = self.colors["accent_green"] if selected else self.colors["bg_panel"]
            self.draw_key_badge(panel, self.s(18) + i * self.s(92), y_offset, key, label, color, width=self.s(70))

        # Gripper & system keys.
        y_offset += self.s(75)
        self.draw_section_header(panel, self.s(15), y_offset, "GREIFER & SYSTEM", width - self.s(30))
        y_offset += self.s(55)

        self.draw_key_badge(panel, self.s(18), y_offset, "G", "Greifen", self.colors["accent_green"], width=self.s(120))
        self.draw_key_badge(panel, self.s(170), y_offset, "O", "Öffnen", self.colors["accent_red"], width=self.s(120))
        self.draw_key_badge(panel, self.s(322), y_offset, "K", "Kompr.", self.colors["accent_yellow"], width=self.s(120))

        # Misc function keys.
        y_offset += self.s(75)
        self.draw_section_header(panel, self.s(15), y_offset, "FUNKTIONEN", width - self.s(30))
        y_offset += self.s(55)

        self.draw_key_badge(panel, self.s(18), y_offset, "H", "Home", self.colors["accent_blue"], width=self.s(110))
        self.draw_key_badge(panel, self.s(150), y_offset, "X", "Alarm-Reset", self.colors["accent_red"], width=self.s(150))
        self.draw_key_badge(panel, self.s(330), y_offset, "0", "Exit", self.colors["text_dim"], width=self.s(90))

        return panel

    def draw_calibration_help(self, frame, num_points, show_popup=True):
        """Overlay calibration instructions and a status bar onto a frame.

        Mutates ``frame`` in place and returns it for convenience.
        """
        h, w = frame.shape[:2]

        if show_popup:
            # Semi-transparent help box in the top-right corner.
            box_w, box_h = self.s(360), self.s(245)
            box_x, box_y = w - box_w - self.s(20), self.s(20)

            overlay = frame.copy()
            cv2.rectangle(overlay, (box_x, box_y), (box_x + box_w, box_y + box_h),
                          self.colors["bg_dark"], -1)
            cv2.addWeighted(overlay, 0.85, frame, 0.15, 0, frame)

            cv2.rectangle(frame, (box_x, box_y), (box_x + box_w, box_y + box_h),
                          self.colors["accent_blue"], self.norm)

            cv2.putText(frame, "KALIBRIERUNG", (box_x + self.s(15), box_y + self.s(30)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.85 * self.scale, self.colors["accent_yellow"], self.norm)

            steps = [
                "1. Punkt im Bild anklicken",
                "2. Roboter zum Punkt fahren",
                "3. ENTER zum Speichern",
                "4. Mind. 4 Punkte sammeln",
                "5. C = Berechnen",
                "6. 0 = Beenden"
            ]

            y = box_y + self.s(65)
            for step in steps:
                cv2.putText(frame, step, (box_x + self.s(15), y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.52 * self.scale, self.colors["text_gray"], self.thin)
                y += self.s(27)

            # Point counter turns green once 4+ points are collected.
            color = self.colors["accent_green"] if num_points >= 4 else self.colors["accent_yellow"]
            cv2.putText(frame, f"Punkte: {num_points}/4+", (box_x + self.s(15), box_y + box_h - self.s(15)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.70 * self.scale, color, self.norm)

            cv2.putText(frame, "[P] Popup ein/aus", (box_x + box_w - self.s(160), box_y + box_h - self.s(15)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45 * self.scale, self.colors["text_dim"], self.thin)

        # Bottom status bar (always shown, independent of the popup).
        cv2.rectangle(frame, (0, h - self.s(46)), (w, h), self.colors["bg_dark"], -1)
        status = "Punkte: {}/4+ | WASD=Bewegen | RF=Hoch/Runter | QE=Rotation | ENTER=Speichern | C=Berechnen | 0=Ende".format(num_points)
        cv2.putText(frame, status, (self.s(10), h - self.s(15)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.55 * self.scale, self.colors["text_gray"], self.thin)

        return frame
|
||||
@@ -0,0 +1,14 @@
|
||||
"""
|
||||
Beer Bot Vision - Hilfsfunktionen
|
||||
"""
|
||||
import re
|
||||
|
||||
def sanitize_filename(filename):
    """Strip every character that is not alphanumeric, dot, underscore or dash."""
    forbidden = re.compile(r'[^a-zA-Z0-9._-]')
    return forbidden.sub('', filename)
|
||||
|
||||
def get_step_multiplier(z):
    """Map a height z to a jog-speed factor: slower close to the table.

    z < 50 -> 0.3 (very slow near the table), z < 100 -> 0.6 (medium),
    otherwise 1.0 (full speed).
    """
    for limit, factor in ((50, 0.3), (100, 0.6)):
        if z < limit:
            return factor
    return 1.0
|
||||
156
PROJECT_SOURCE_CODE/python_dobot_yolo/beerbot_vision/vision.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""
|
||||
Beer Bot Vision - Hauptklasse für die Bildverarbeitung mit YOLOv8.
|
||||
"""
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from config import Config
|
||||
from calibrator import CameraCalibrator
|
||||
from user_interface import ModernUI
|
||||
|
||||
# YOLOv8 Import
|
||||
try:
|
||||
from ultralytics import YOLO
|
||||
YOLO_AVAILABLE = True
|
||||
except ImportError:
|
||||
YOLO_AVAILABLE = False
|
||||
print("⚠️ ultralytics nicht installiert. Installiere mit: pip install ultralytics")
|
||||
|
||||
|
||||
class BeerBotVision:
|
||||
def __init__(self, camera_id=None, model_path=None):
    """Set up the vision stack: YOLO model (if available), calibrator, UI.

    Parameters:
        camera_id: camera device index; defaults to Config.CAMERA_ID.
        model_path: path to the YOLO weights; defaults to Config.YOLO_MODEL.
    """
    # Bug fix: the previous `camera_id or Config.CAMERA_ID` treated the
    # perfectly valid camera index 0 as "unset" and silently fell back to
    # the configured default. Compare against None instead.
    self.camera_id = Config.CAMERA_ID if camera_id is None else camera_id
    self.model_path = Config.YOLO_MODEL if model_path is None else model_path

    if YOLO_AVAILABLE:
        print(f"🔄 Lade YOLO-Modell: {self.model_path}")
        self.model = YOLO(self.model_path)
        print("✅ YOLO-Modell geladen!")
    else:
        # No ultralytics installed: detect_objects degrades gracefully.
        self.model = None

    self.cap = None                   # cv2.VideoCapture, opened lazily
    self.camera_initialized = False   # set True by init_camera()
    self.calibrator = CameraCalibrator()
    self.detected_bottles = []        # last detection results
    self.ui = ModernUI()
|
||||
|
||||
def init_camera(self):
    """Open the configured camera and apply the resolution from Config.

    Returns True on success; False when the device could not be opened.
    """
    print(f"🔄 Öffne Kamera {self.camera_id}...")
    self.cap = cv2.VideoCapture(self.camera_id)
    if not self.cap.isOpened():
        print("❌ Kamera konnte nicht geöffnet werden!")
        return False
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, Config.CAMERA_WIDTH)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, Config.CAMERA_HEIGHT)
    self.camera_initialized = True
    print(f"✅ Kamera initialisiert")
    return True
|
||||
|
||||
def capture_frame(self):
|
||||
if not self.camera_initialized:
|
||||
if not self.init_camera():
|
||||
return None
|
||||
ret, frame = self.cap.read()
|
||||
return frame if ret else None
|
||||
|
||||
def detect_objects(self, frame=None, visualize=False):
|
||||
if self.model is None:
|
||||
return {"bottles": []}
|
||||
|
||||
if frame is None:
|
||||
frame = self.capture_frame()
|
||||
if frame is None:
|
||||
return {"bottles": []}
|
||||
|
||||
results = self.model(frame, verbose=False)
|
||||
self.detected_bottles = []
|
||||
|
||||
for result in results:
|
||||
for box in result.boxes:
|
||||
cls = int(box.cls[0])
|
||||
conf = float(box.conf[0])
|
||||
|
||||
if conf >= Config.CONFIDENCE_THRESHOLD and cls == Config.BOTTLE_CLASS_ID:
|
||||
x1, y1, x2, y2 = map(int, box.xyxy[0])
|
||||
center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
|
||||
|
||||
robot_coords = None
|
||||
if self.calibrator.is_calibrated:
|
||||
try:
|
||||
robot_coords = self.calibrator.pixel_to_robot(center_x, center_y)
|
||||
except:
|
||||
pass
|
||||
|
||||
self.detected_bottles.append({
|
||||
"bbox": (x1, y1, x2, y2),
|
||||
"center_pixel": (center_x, center_y),
|
||||
"robot_coords": robot_coords,
|
||||
"confidence": conf
|
||||
})
|
||||
|
||||
if visualize:
|
||||
self._visualize_detections(frame)
|
||||
|
||||
return {"bottles": self.detected_bottles, "frame": frame}
|
||||
|
||||
def _visualize_detections(self, frame):
|
||||
for bottle in self.detected_bottles:
|
||||
x1, y1, x2, y2 = bottle["bbox"]
|
||||
cx, cy = bottle["center_pixel"]
|
||||
conf = bottle["confidence"]
|
||||
|
||||
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
|
||||
cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
|
||||
|
||||
label = f"Bottle {conf:.0%}"
|
||||
if bottle["robot_coords"]:
|
||||
rx, ry = bottle["robot_coords"]
|
||||
label += f" ({rx:.0f}, {ry:.0f})"
|
||||
|
||||
cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
|
||||
|
||||
h, w = frame.shape[:2]
|
||||
cv2.rectangle(frame, (0, h - 35), (w, h), (0, 0, 0), -1)
|
||||
cv2.putText(frame, f"Flaschen: {len(self.detected_bottles)} | Druecke 0 fuer Hauptmenue",
|
||||
(10, h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
|
||||
|
||||
cv2.imshow("Beer Bot Vision", frame)
|
||||
|
||||
def get_nearest_bottle(self, reference_point=(0, 0)):
|
||||
if not self.detected_bottles:
|
||||
self.detect_objects()
|
||||
|
||||
valid = [b for b in self.detected_bottles if b["robot_coords"]]
|
||||
if not valid:
|
||||
return None
|
||||
|
||||
def dist(b):
|
||||
bx, by = b["robot_coords"]
|
||||
rx, ry = reference_point
|
||||
return np.sqrt((bx - rx) ** 2 + (by - ry) ** 2)
|
||||
|
||||
return min(valid, key=dist)
|
||||
|
||||
def run_live_detection(self):
|
||||
print("🎥 Live-Erkennung gestartet. Drücke '0' zum Beenden.")
|
||||
if not self.camera_initialized:
|
||||
self.init_camera()
|
||||
|
||||
while True:
|
||||
frame = self.capture_frame()
|
||||
if frame is None:
|
||||
break
|
||||
self.detect_objects(frame, visualize=True)
|
||||
if cv2.waitKey(1) & 0xFF == ord('0'):
|
||||
break
|
||||
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
def load_calibration(self, matrix_file=None):
|
||||
return self.calibrator.load_calibration(matrix_file)
|
||||
|
||||
def release(self):
|
||||
if self.cap:
|
||||
self.cap.release()
|
||||
cv2.destroyAllWindows()
|
||||
BIN
PROJECT_SOURCE_CODE/python_dobot_yolo/beerbot_vision/yolov8m.pt
Normal file
0
PROJECT_SOURCE_CODE/ros_dobot/__init__.py
Normal file
3
PROJECT_SOURCE_CODE/ros_dobot/build.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash

# Build the ROS 2 workspace. --symlink-install lets edits to Python sources
# take effect without rebuilding.
colcon build --symlink-install
|
||||
21
PROJECT_SOURCE_CODE/ros_dobot/src/LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023 Jan Kaniuka
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
323
PROJECT_SOURCE_CODE/ros_dobot/src/README.md
Normal file
@@ -0,0 +1,323 @@
|
||||
# ROS 2 control stack for Dobot Magician
|
||||
<img src="https://img.shields.io/badge/ros--version-humble-green"/> <img src="https://img.shields.io/badge/platform%20-Ubuntu%2022.04-orange"/> [](https://opensource.org/licenses/MIT)
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/212770939-2d7a4389-4143-4147-a50a-3453f73a1317.png" width="250" height="250"/><img src="https://user-images.githubusercontent.com/80155305/207256203-75f2607e-b40c-4a45-bf4e-de4aaffe6530.png" width="350" height="250"/><img src="https://user-images.githubusercontent.com/80155305/204082368-6eb63a16-2a22-4aed-83f8-45730a7b5d93.png" width="200" height="250"/>
|
||||
</p>
|
||||
|
||||
## Table of contents :clipboard:
|
||||
* [Packages in the repository](#packages)
|
||||
* [Installation](#installation)
|
||||
* [System startup](#running)
|
||||
* [Homing procedure](#homing)
|
||||
* [Diagnostics](#diagnostics)
|
||||
* [Published topics](#topics)
|
||||
* [Motion](#motion)
|
||||
* [Visualization in RViz](#visualization)
|
||||
* [Dobot Magician Control Panel - RQT plugin](#dmcp)
|
||||
* [End effectors](#end_effector_control)
|
||||
* [Additional tools for visualization](#additional)
|
||||
* [Examples](#examples)
|
||||
* [Sliding rail](#rail)
|
||||
* [Multi-robot system (MRS)](#mrs)
|
||||
* [Video - see how the system works](#video)
|
||||
* [FAQ](#faq)
|
||||
* [Citing](#citing)
|
||||
* [Contributing](#contributing)
|
||||
|
||||
|
||||
<a name="packages"></a>
|
||||
## Packages in the repository :open_file_folder:
|
||||
|
||||
- `dobot_bringup` - launch files and parameters configuration (in _YAML_ files)
|
||||
- `dobot_control_panel` - RQT plugin to control Dobot Magician robotic arm (as well as sliding rail)
|
||||
- `dobot_demos` - a collection of sample scripts for beginners (_minimal working examples_)
|
||||
- `dobot_description` - package containing URDF description of Dobot Magician together with meshes
|
||||
- `dobot_diagnostics` - aggregation and analysis of alarm states
|
||||
- `dobot_driver` - low-level Python interface to communicate with Dobot via serial port
|
||||
- `dobot_end_effector` - set of service servers allowing to control different kinds of end effectors
|
||||
- `dobot_homing` - tool for executing homing procedure of Dobot stepper motors
|
||||
- `dobot_kinematics` - implementation of forward and inverse kinematics to validate trajectory feasibility
|
||||
- `dobot_motion` - package containing action server to control motion of Dobot Magician (joint interpolated motion / linear motion)
|
||||
- `dobot_msgs` - package defining messages used by control stack
|
||||
- `dobot_state_updater` - package containing a node regularly retrieving information about the state of the robot (e.g. joint angles / TCP position)
|
||||
- `dobot_visualization_tools` - useful tools for visualization (e.g. trajectory / range) in form of RViZ markers
|
||||
|
||||
<a name="installation"></a>
|
||||
## Installation :arrow_down:
|
||||
|
||||
### Requirements
|
||||
|
||||
This control system requires a system setup with ROS 2. It is recommended to use Ubuntu 22.04 with [ROS 2 Humble](https://docs.ros.org/en/humble/index.html), however using Ubuntu 20.04 with [ROS 2 Galactic](https://docs.ros.org/en/galactic/index.html) should also work.
|
||||
|
||||
### Install ROS 2 Humble Hawksbill
|
||||
Follow the instructions from the [link](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html). There are 3 versions of ROS 2 Humble Hawksbill to choose from: Desktop Install, ROS-Base Install and Development tools Install. Be sure to install **Desktop Install** version (`sudo apt install ros-humble-desktop`).
|
||||
|
||||
|
||||
### Install additional modules and packages
|
||||
All necessary modules are in [requirements.txt](https://github.com/jkaniuka/magician_ros2/blob/main/requirements.txt), install using: `pip3 install -r requirements.txt`
|
||||
Packages from apt repository: `sudo apt install ros-humble-diagnostic-aggregator ros-humble-rqt-robot-monitor python3-pykdl`
|
||||
:warning: After installing new RQT plugins run `rqt --force-discover` to make plugins visible in RQT GUI. This issue is further described [here](https://answers.ros.org/question/338282/ros2-what-is-the-rqt-force-discover-option-meaning/).
|
||||
|
||||
### Create workspace for control system (build from source)
|
||||
```
|
||||
source /opt/ros/humble/setup.bash
|
||||
mkdir -p ~/magician_ros2_control_system_ws/src
|
||||
git clone https://github.com/jkaniuka/magician_ros2.git ~/magician_ros2_control_system_ws/src
|
||||
cd magician_ros2_control_system_ws
|
||||
rosdep install -i --from-path src --rosdistro humble -y
|
||||
colcon build
|
||||
```
|
||||
|
||||
### Access to serial ports
|
||||
In order to communicate with the robot, access to serial ports is required. To be able to open serial ports without using `sudo`, you need to add yourself to the **dialout** group:
|
||||
|
||||
```bash
|
||||
sudo usermod -a -G dialout <username>
|
||||
# Relogin or reboot required!
|
||||
```
|
||||
|
||||
:warning: The USB port to which Dobot Magician is connected is set by default to **/dev/ttyUSB0** - you can change it in [this file](dobot_driver/dobot_driver/dobot_handle.py).
|
||||
|
||||
<a name="running"></a>
|
||||
## System startup :robot:
|
||||
1. Connect Dobot Magician with a USB cable to the computer and then turn it on.
|
||||
2. Set the MAGICIAN_TOOL environment variable describing the robot's configuration `export MAGICIAN_TOOL=<tool_type>` (allowed values are: _none, pen, suction_cup, gripper, extended_gripper_).
|
||||
3. From inside of the **magician_ros2_control_system_ws** directory, run `. install/setup.bash` to source your workspace.
|
||||
3. Launch entire control stack with `ros2 launch dobot_bringup dobot_magician_control_system.launch.py`.
|
||||
|
||||
|
||||
<a name="homing"></a>
|
||||
## Homing procedure
|
||||
Homing should be performed as the first action after the system is started. It is necessary because an incremental encoder has been placed in the base of the manipulator, and the robot is not aware of its actual position when it is powered up. Stop all other scripts controlling the robot before starting the homing procedure.
|
||||
Homing is handled by the service server, to start it run the following command:
|
||||
```
|
||||
ros2 service call /dobot_homing_service dobot_msgs/srv/ExecuteHomingProcedure
|
||||
```
|
||||
A _homing\_position_ parameter is assigned to the server node of the homing service, which allows you to determine the position reached by the manipulator after the procedure is completed. The following are examples of the commands for reading and setting parameters value.
|
||||
```
|
||||
ros2 param get /dobot_homing_srv homing_position
|
||||
ros2 param set /dobot_homing_srv homing_position [150.0,0.0,100.0,0.0]
|
||||
```
|
||||
|
||||
<a name="diagnostics"></a>
|
||||
## Diagnostics
|
||||
The system takes care of the robot's state diagnostics. Using the _Diagnostics Viewer_ RQT plugin, you can clearly visualize information about active alarm states. To start _Diagnostics Viewer_ run the following command:
|
||||
```
|
||||
rqt -s rqt_robot_monitor
|
||||
```
|
||||
If you already have an open RQT you will find this plugin in section _Plugins -> Robot Tools -> Diagnostics Viewer_. After opening the plugin, select _Alternative view_ option at the top. After double-clicking on the alarm description, a window will appear with information about its probable cause and how to delete it. Below you will find screenshots of the _Diagnostics Viewer_ plugin.
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/220293202-d1320648-719d-4e3c-a592-52e8607d3838.png" width="400" height="400"/><img src="https://user-images.githubusercontent.com/80155305/220293214-8e07c4ef-67fa-40c1-a562-7c97e81730ff.png" width="275" height="400"/>
|
||||
</p>
|
||||
|
||||
<a name="topics"></a>
|
||||
## Published topics
|
||||
- `/joint_states` (sensor_msgs/msg/JointState) - angles values in the manipulator's joints
|
||||
- `/dobot_TCP` (geometry_msgs/msg/PoseStamped) - position of the coordinate frame associated with the end of the last robot link (orientation given as a quaternion)
|
||||
- `/dobot_pose_raw` (std_msgs/msg/Float64MultiArray) - position of the coordinate frame associated with the end of the last robot link (raw orientation received from Dobot, expressed in degrees)
|
||||
|
||||
<a name="motion"></a>
|
||||
## Motion
|
||||
The motion of the manipulator is handled using the ROS 2 action. In order for the manipulator to move to the desired position, the motion target must be sent to the action server. The following describes the structure of the message sent to the action server (part of _dobot\_msgs/action/PointToPoint_):
|
||||
* **motion_type**:
|
||||
* 1 -> joint interpolated motion, target expressed in Cartesian coordinates
|
||||
* 2 -> linear motion, target expressed in Cartesian coordinates
|
||||
* 4 -> joint interpolated motion, target expressed in joint coordinates
|
||||
* 5 -> linear motion, target expressed in joint coordinates
|
||||
* **target_pose** - desired position expressed in Cartesian coordinates [_mm_] or in joint coordinates [_degrees_]
|
||||
* **velocity_ratio** (default 1.0)
|
||||
* **acceleration_ratio** (default 1.0)
|
||||
|
||||
An example of a command that allows you to send a goal to an action server can be found below (adding `--feedback` flag will cause the terminal to display the current position of the robot while it is moving):
|
||||
```
|
||||
ros2 action send_goal /PTP_action dobot_msgs/action/PointToPoint "{motion_type: 1, target_pose: [200.0, 0.0, 100.0, 0.0], velocity_ratio: 0.5, acceleration_ratio: 0.3}" --feedback
|
||||
```
|
||||
If you want to cancel the goal, run the following command:
|
||||
```
|
||||
ros2 service call /PTP_action/_action/cancel_goal action_msgs/srv/CancelGoal
|
||||
```
|
||||
The parameters associated with the motion action server node allow you to determine the motion velocities and accelerations of the individual joints of the robot (`JT1_vel, JT2_vel, JT3_vel, JT4_vel, JT1_acc, JT2_acc, JT3_acc, JT4_acc`), as well as the parameters of the linear motion of the manipulator (`TCP_vel, end_tool_rot_vel, TCP_acc, end_tool_rot_acc`).
|
||||
|
||||
|
||||
### Goal/trajectory validation
|
||||
The motion target is checked by the trajectory validation service server before execution. The trajectory validation service server checks whether the target point is in the manipulator's workspace and whether there is a solution to the inverse kinematics task.
|
||||
|
||||
The parameters of the node implementing the trajectory validation server are: `axis_1_range`, `axis_2_range`, `axis_3_range`, `axis_4_range`.
|
||||
* The first four of these allow the manipulator's working space to be limited by setting position restrictions at joints other than those resulting from the mechanical design. If you send a motion order to a point that violates the restrictions you defined, you will receive the following response from the PointToPoint action server:
|
||||
```
|
||||
Response: [PTP_server-1] [WARN] [1668081940.281544573] [dobot_PTP_server]:
|
||||
Goal rejected: dobot_msgs.srv.EvaluatePTPTrajectory_Response(is_valid=False,
|
||||
message='Joint limits violated')
|
||||
```
|
||||
|
||||
|
||||
<a name="visualization"></a>
|
||||
## Visualization in RViz
|
||||
In Rviz, you can display one of up to 8 different robot configurations. All allowed configurations are placed in the diagram below:
|
||||

|
||||
The command that starts the visualization of the manipulator in the example configuration is as follows:
|
||||
```
|
||||
ros2 launch dobot_description display.launch.py DOF:=4 tool:=extended_gripper use_camera:=true
|
||||
```
|
||||
If the robot is disconnected from the computer, you can start the visualization by adding the `gui:=true` argument and control the robot using [**joint_state_publisher_gui**](https://index.ros.org/p/joint_state_publisher_gui/) node.
|
||||
|
||||
Below you will find 3 sample visualizations:
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/212384408-b57bfde4-b837-4513-8ea3-89a7bffd97af.png" width="260" height="260"/><img src="https://user-images.githubusercontent.com/80155305/212382237-fe2eadbf-19bc-4e6c-86f4-3a0e06edccf4.png" width="260" height="260"/><img src="https://user-images.githubusercontent.com/80155305/212383859-5afbccb4-dc07-4a80-a705-3a7af915c96e.png" width="260" height="260"/>
|
||||
</p>
|
||||
|
||||
|
||||
<a name="dmcp"></a>
|
||||
## Dobot Magician Control Panel
|
||||
Dobot Magician Control Panel is an RQT plugin that allows you to conveniently position the manipulator, visualize its state, modify motion parameters and control end effectors. In order to launch it, run the following command:
|
||||
```
|
||||
rqt -s dobot_control_panel
|
||||
```
|
||||
:warning: When you use _Dobot Magician Control Panel_, no other robot control program can be run. Either the robot is controlled in **manual mode** or in **automatic mode**.
|
||||
Below you will find screenshots of all the plugin screens:
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/220214176-0c0844c0-447b-481c-941b-aafd72b94d5a.png" width="250" height="230"/> <img src="https://user-images.githubusercontent.com/80155305/220214179-14b4493f-4dd0-4de5-89fc-d8c60e9d4d8b.png" width="250" height="230"/> <img src="https://user-images.githubusercontent.com/80155305/220214183-296eac2b-a3b0-4a84-b799-145440944eef.png" width="250" height="230"/>  
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/220214189-8eb57aca-bed8-4b55-a3fc-25244a41e721.png" width="250" height="230"/> <img src="https://user-images.githubusercontent.com/80155305/212384115-3875164a-3717-465e-a3cc-cda9b8913ea6.png" width="250" height="230"/>
|
||||
</p>
|
||||
|
||||
<a name="end_effector_control"></a>
|
||||
## End Effector control
|
||||
Control of the gripper and pneumatic suction cup was implemented using ROS 2 services.
|
||||
|
||||
### Gripper:
|
||||
Command example:
|
||||
```
|
||||
ros2 service call /dobot_gripper_service dobot_msgs/srv/GripperControl "{gripper_state: 'open', keep_compressor_running: true}"
|
||||
```
|
||||
Request fields:
|
||||
- `gripper_state` (type _string_) : _open/close_
|
||||
- `keep_compressor_running` (type _bool_) : _true/false_
|
||||
|
||||
### Suction cup:
|
||||
Command example:
|
||||
```
|
||||
ros2 service call /dobot_suction_cup_service dobot_msgs/srv/SuctionCupControl "{enable_suction: true}"
|
||||
```
|
||||
Request fields:
|
||||
- `enable_suction` (type _bool_) : _true/false_
|
||||
|
||||
|
||||
<a name="additional"></a>
|
||||
## Additional tools for visualization
|
||||
The **dobot_visualization_tools** package provides visualization tools in form of RViz markers.
|
||||
1. Camera field of view (FOV) for Intel Realsense D435i (published at `/realsense_FOV` topic)
|
||||
```
|
||||
ros2 run dobot_visualization_tools show_camera_FOV
|
||||
```
|
||||
2. Workspace visualization (published at `/reachability_range` topic) - range without end effector attached.
|
||||
```
|
||||
ros2 run dobot_visualization_tools show_dobot_range
|
||||
```
|
||||
3. TCP trajectory (published at `/TCP_trajectory` topic) - trajectory markers are removed after 2 seconds of the manipulator's stationary state, so as not to slow RViz down with too many markers it needs to display.
|
||||
```
|
||||
ros2 run dobot_visualization_tools show_trajectory
|
||||
```
|
||||
4. Interactive Markers - you can select the target position of the tool end (x, y, z, r) by moving the interactive marker in RViz. Once the target is selected, right-click on the yellow sphere and select the motion type from the menu. When running a node that allows control using interactive markers, two parameters must be specified (see example below) that define the point at the end of the tool in the coordinate system associated with the last link of the manipulator.
|
||||
```
|
||||
ros2 run dobot_visualization_tools int_marker --ros-args -p TCP_x_offset:=0.059 -p TCP_z_offset:=-0.12
|
||||
```
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/212383385-e9c2ca38-8cfa-4ac9-a106-99d2663ce629.png" width="220" height="220"/><img src="https://user-images.githubusercontent.com/80155305/212384560-198f5b65-b248-428d-baad-bf75c6909542.png" width="330" height="220"/>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/80155305/212383482-6fa7b2c5-1bc1-4f0a-a118-a93dbc788a94.png" width="250" height="250"/><img src="https://user-images.githubusercontent.com/80155305/220213781-8e1b593d-4865-424c-9631-87724acba8d6.png" width="250" height="250"/>
|
||||
</p>
|
||||
|
||||
<video controls width="250">
|
||||
<source src="https://user-images.githubusercontent.com/80155305/219739487-8edd727b-aee9-4f14-b7c3-1d6c78ce2d4d.mp4">
|
||||
</video>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<a name="examples"></a>
|
||||
## Examples
|
||||
Sample scripts have been included in **dobot_demos** package. Analysis of sample codes (_minimal working examples_) from this package will help you understand how to control Dobot Magician robot.
|
||||
Running sample scripts:
|
||||
- `ros2 run dobot_demos test_gripper`
|
||||
- `ros2 run dobot_demos test_suction_cup`
|
||||
- `ros2 run dobot_demos test_homing`
|
||||
- `ros2 run dobot_demos test_point_to_point`
|
||||
- `ros2 run dobot_demos test_pick_and_place`
|
||||
|
||||
|
||||
<a name="rail"></a>
|
||||
## Sliding rail
|
||||
If you have _Dobot Magician Sliding Rail_ and you want to get real time feedback about the position of the carriage you need to export `MAGICIAN_RAIL_IN_USE` environment variable before launching entire control stack (`export MAGICIAN_RAIL_IN_USE=true`). The current position of the carriage on the sliding rail will be published on the `/dobot_rail_pose` topic at a frequency of 20 Hz. After disconnecting the sliding rail, type `unset MAGICIAN_RAIL_IN_USE` and restart entire control system.
|
||||
|
||||
Control of the sliding rail is possible both from **Dobot Magician Control Panel** RQT plugin and by using **/move_sliding_rail** action server. To launch **/move_sliding_rail** action server and load parameters (sliding rail velocity and acceleration) use the command below:
|
||||
```
|
||||
ros2 launch dobot_motion dobot_rail.launch.py
|
||||
```
|
||||
An example of a command that allows you to send a goal to an action server can be found below:
|
||||
```
|
||||
ros2 action send_goal /move_sliding_rail dobot_msgs/action/SlidingRail "{target_pose: 500}"
|
||||
```
|
||||
|
||||
<a name="mrs"></a>
|
||||
## Multi-Robot System (MRS) :robot:
|
||||
If you want to connect several Dobot Magician robots to the same computer or to the same network, thus creating a _multi-robot system_ (MRS), check the [**magician-mrs**](https://github.com/jkaniuka/magician_ros2/tree/magician-mrs) branch.
|
||||
|
||||
<a name="video"></a>
|
||||
## Video - see how the system works :movie_camera:
|
||||
|
||||
| Overview and features | Interactive Markers |
|
||||
| ------ | ------ |
|
||||
| [<img src="https://user-images.githubusercontent.com/80155305/215295789-e6b1dadd-a819-4fe9-a633-4ce483a47964.png" width="100%">](https://vimeo.com/793400746) | <video src="https://github.com/jkaniuka/dobot_tmp/assets/80155305/7b946f88-e4c8-499f-84f4-1ecf10534b25">|
|
||||
|
||||
|
||||
|
||||
|
||||
<a name="faq"></a>
|
||||
## FAQ :question:
|
||||
**Why haven't I used MoveIt 2?**
|
||||
|
||||
MoveIt 2 is a great tool, but I have not used it in my control system for several reasons:
|
||||
* To begin with, this system was created with the idea of using it when learning ROS 2. First, you need to familiarize yourself with entities such as actions, topics, services, parameters to then understand how this is being utilized in MoveIt 2.
|
||||
* Secondly, MoveIt is used among other things to generate collision-free trajectories. The mechanical design of Dobot Magician robot and its very short range (32 cm) makes it _de facto_ unsuitable for working in spaces with obstacles.
|
||||
* Lastly, MoveIt enables grasp generation. Dobot Magician is equipped with a gripper that can be either fully open or fully closed. In addition, it is always pointed horizontally downward. Grip generation in this case does not seem to be necessary.
|
||||
|
||||
<a name="citing"></a>
|
||||
## Citing :scroll:
|
||||
If you find this work useful, please give credits to the author by citing:
|
||||
|
||||
```
|
||||
@mastersthesis{jkaniuka-bsc-23-twiki,
|
||||
author = {Kaniuka, Jan},
|
||||
school = {WEiTI},
|
||||
title = {{System sterowania robota manipulacyjnego Dobot Magician na bazie frameworka ROS2}},
|
||||
engtitle = {{Control system of DobotMagician robotic manipulator based on ROS2 framework}},
|
||||
year = {2023},
|
||||
type = {Bachelor's thesis},
|
||||
lang = {pl},
|
||||
organization = {IAiIS},
|
||||
publisher = {IAiIS},
|
||||
tutor = {Tomasz Winiarski},
|
||||
twiki = {bsc},
|
||||
url = {https://gitlab-stud.elka.pw.edu.pl/robotyka/rpmpg_pubs/-/raw/main/student-theses/jkaniuka-bsc-23-twiki.pdf}
|
||||
}
|
||||
```
|
||||
|
||||
<a name="contributing"></a>
|
||||
## Contributing
|
||||
|
||||
#### Bug Reports & Feature Requests
|
||||
|
||||
Please use the [**issue tracker**](https://github.com/jkaniuka/magician_ros2/issues) to report any bugs or feature requests.
|
||||
|
||||
|
||||
|
||||
|
||||
0
PROJECT_SOURCE_CODE/ros_dobot/src/__init__.py
Normal file
139
PROJECT_SOURCE_CODE/ros_dobot/src/beerbot/beerbot/beerbot.py
Normal file
@@ -0,0 +1,139 @@
|
||||
#!/usr/bin/env python3
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from rclpy.action import ActionClient
|
||||
from dobot_msgs.action import PointToPoint
|
||||
from dobot_msgs.srv import GripperControl
|
||||
from dobot_driver.dobot_handle import bot
|
||||
from sensor_msgs.msg import JointState
|
||||
import time
|
||||
import math
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
|
||||
class DobotMotion(Node):
    """Action/service client node that moves a Dobot Magician and its gripper.

    Talks to the /PTP_action point-to-point action server and the
    /dobot_gripper_service gripper service, and mirrors /joint_states.
    """

    def __init__(self):
        super().__init__('dobot_action_client')
        # Point-to-point motion action client; block until the server exists.
        self._client = ActionClient(self, PointToPoint, '/PTP_action')
        self._client.wait_for_server()
        # Gripper control service client; poll until the service is up.
        self._gripper_cli = self.create_client(GripperControl, '/dobot_gripper_service')
        while not self._gripper_cli.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('Greifer-Service noch nicht verfügbar, warte...')
        # Last received joint positions (radians); None until the first message.
        self._current_joint_state = None
        self._sub = self.create_subscription(
            JointState,
            '/joint_states',
            self.joint_callback,
            10
        )

    def get_joints(self):
        """Return the most recent joint positions (radians), or None if none arrived yet."""
        if self._current_joint_state is None:
            self.get_logger().warn("No joint state available!")
        return self._current_joint_state

    def joint_callback(self, msg):
        """Cache the joint positions from each /joint_states message."""
        self._current_joint_state = msg.position

    def move(self, x, y, z, r=0.0, reset=False):
        """Move to a Cartesian pose; optionally reset the wrist joint to 90° afterwards."""
        self.move_to(x, y, z, r)
        if reset:
            self.set_gripper_angle(90.0)

    def move_to(self, x, y, z, r=0.0, motion_type=1, velocity_ratio=0.5, acceleration_ratio=0.3):
        """Send one PointToPoint goal and block until the motion finishes.

        motion_type follows dobot_msgs/action/PointToPoint (1 = joint-interpolated
        motion to a Cartesian target, 4 = joint-space target).
        """
        goal = PointToPoint.Goal()
        goal.motion_type = motion_type
        goal.target_pose = [float(x), float(y), float(z), float(r)]
        goal.velocity_ratio = float(velocity_ratio)
        goal.acceleration_ratio = float(acceleration_ratio)

        future = self._client.send_goal_async(goal, feedback_callback=self.feedback_callback)
        rclpy.spin_until_future_complete(self, future)
        goal_handle = future.result()

        if not goal_handle.accepted:
            self.get_logger().warn("Goal abgelehnt!")
            return

        # Wait for the action result before returning (synchronous motion).
        result_future = goal_handle.get_result_async()
        rclpy.spin_until_future_complete(self, result_future)
        result = result_future.result().result
        self.get_logger().info(f"Bewegung abgeschlossen, erreichte Pose: {result.achieved_pose}")

    def feedback_callback(self, feedback_msg):
        # Optional: display feedback (currently ignored).
        pass

    def set_gripper_angle(self, angle_deg: float):
        """Rotate only the end-effector joint (axis 4) to ``angle_deg`` degrees."""
        joints_rad = self.get_joints()
        self.get_logger().info(str(joints_rad))
        if joints_rad is None:
            self.get_logger().warn("Keine Gelenkpositionen verfügbar!")
            return

        # radians -> degrees
        joints_deg = [math.degrees(j) for j in joints_rad]
        joints_deg[3] = angle_deg

        self.move_to(
            joints_deg[0],
            joints_deg[1],
            joints_deg[2],
            joints_deg[3],
            motion_type=4,  # joint-space target
            # NOTE(review): other calls use ratios in (0, 1]; 50 looks out of
            # range here — confirm what the driver actually accepts.
            velocity_ratio=50,
            acceleration_ratio=50
        )
        joints_rad = self.get_joints()
        self.get_logger().info(str(joints_rad))

    def set_gripper(self, state: str, keep_compressor: bool = True):
        """Open/close the gripper via the service; blocks until the call returns.

        Args:
            state: 'open' or 'close'.
            keep_compressor: leave the compressor running after the action.
        """
        req = GripperControl.Request()
        req.gripper_state = state
        req.keep_compressor_running = keep_compressor

        future = self._gripper_cli.call_async(req)
        rclpy.spin_until_future_complete(self, future)

        if future.result() is not None:
            self.get_logger().info(f"Greifer-Befehl ausgeführt: {state}")
        else:
            self.get_logger().error(f"Fehler beim Greifer: {future.exception()}")
|
||||
|
||||
|
||||
def execute_moves(xml_path: str, client: DobotMotion, reset: bool):
    """Replay a recorded motion sequence from a Dobot teaching XML file.

    Children tagged ``row<N>`` hold the waypoints; items 2-5 carry x, y, z, r.
    Each waypoint is sent as a blocking move, followed by a short pause.
    """
    sequence_root = ET.parse(xml_path).getroot()

    def is_waypoint(elem):
        # Only elements named row0, row1, ... are motion rows.
        tag = elem.tag
        return tag.startswith("row") and tag[3:].isdigit()

    for waypoint in filter(is_waypoint, sequence_root):
        x, y, z, r = (float(waypoint.find(f"item_{idx}").text) for idx in range(2, 6))
        client.move(x, y, z, r, reset=reset)
        time.sleep(0.5)
|
||||
|
||||
|
||||
|
||||
def main(args=None):
    """Entry point: bring up ROS, replay the recorded bottle move, shut down."""
    rclpy.init(args=args)
    motion_node = DobotMotion()

    # Replay the pre-recorded teaching sequence without resetting the wrist.
    execute_moves("/root/dobot/documentation/moves/move_bottle_test.xml", motion_node, reset=False)

    motion_node.destroy_node()
    rclpy.shutdown()
|
||||
|
||||
|
||||
# Allow direct execution (python3 beerbot.py) in addition to `ros2 run`.
if __name__ == "__main__":
    main()
|
||||
18
PROJECT_SOURCE_CODE/ros_dobot/src/beerbot/package.xml
Normal file
@@ -0,0 +1,18 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
|
||||
<package format="3">
|
||||
<name>beerbot</name>
|
||||
<version>0.0.0</version>
|
||||
<description>TODO: Package description</description>
|
||||
<maintainer email="root@todo.todo">root</maintainer>
|
||||
<license>TODO: License declaration</license>
|
||||
|
||||
<test_depend>ament_copyright</test_depend>
|
||||
<test_depend>ament_flake8</test_depend>
|
||||
<test_depend>ament_pep257</test_depend>
|
||||
<test_depend>python3-pytest</test_depend>
|
||||
|
||||
<export>
|
||||
<build_type>ament_python</build_type>
|
||||
</export>
|
||||
</package>
|
||||
4
PROJECT_SOURCE_CODE/ros_dobot/src/beerbot/setup.cfg
Normal file
@@ -0,0 +1,4 @@
|
||||
[develop]
|
||||
script_dir=$base/lib/beerbot
|
||||
[install]
|
||||
install_scripts=$base/lib/beerbot
|
||||
30
PROJECT_SOURCE_CODE/ros_dobot/src/beerbot/setup.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from setuptools import find_packages, setup
|
||||
|
||||
package_name = 'beerbot'

# Standard ament_python setup: register the ament resource marker and the
# manifest, and expose the node's console entry point.
setup(
    name=package_name,
    version='0.0.0',
    packages=find_packages(exclude=['test']),
    data_files=[
        # Marker file so ament can index the package.
        ('share/ament_index/resource_index/packages', ['resource/' + package_name]),
        # Install the manifest into the package share directory.
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='root',
    maintainer_email='root@todo.todo',
    description='TODO: Package description',
    license='TODO: License declaration',
    extras_require={'test': ['pytest']},
    entry_points={
        'console_scripts': [
            # `ros2 run beerbot beerbot` -> beerbot/beerbot.py:main
            'beerbot = beerbot.beerbot:main'
        ],
    },
)
@@ -0,0 +1,25 @@
|
||||
# Copyright 2015 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_copyright.main import main
|
||||
import pytest
|
||||
|
||||
|
||||
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run the ament_copyright linter over the package sources."""
    assert main(argv=['.', 'test']) == 0, 'Found errors'
@@ -0,0 +1,25 @@
|
||||
# Copyright 2017 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_flake8.main import main_with_errors
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
    """Run ament_flake8 and fail with the full list of style findings."""
    rc, errors = main_with_errors(argv=[])
    report = 'Found %d code style errors / warnings:\n' % len(errors) + '\n'.join(errors)
    assert rc == 0, report
@@ -0,0 +1,23 @@
|
||||
# Copyright 2015 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_pep257.main import main
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament_pep257 docstring linter over the package sources."""
    assert main(argv=['.', 'test']) == 0, 'Found code style errors / warnings'
@@ -0,0 +1,25 @@
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from dobot_driver.dobot_handle import bot
|
||||
|
||||
|
||||
class SetToolNullConfigurator(Node):
    """One-shot node that pushes the initial configuration into the robot.

    All work happens in the constructor; the node never spins.
    """

    def __init__(self):
        super().__init__('dobot_init_config')
        print("Loading the initial configuration into the robot.")
        # Clear any stale alarms left over from a previous session first.
        bot.clear_alarms_state()
        # Zero the end-effector offset (x, y, z).
        bot.set_end_effector_params(0, 0, 0)
def main(args=None):
    """Instantiate the configurator once, then tear the context down."""
    rclpy.init(args=args)

    # The node does its work in __init__; no spinning is required.
    configurator = SetToolNullConfigurator()

    configurator.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
@@ -0,0 +1,268 @@
|
||||
import os, sys, subprocess
|
||||
from launch import LaunchDescription
|
||||
from launch_ros.actions import Node
|
||||
from launch.actions import LogInfo, RegisterEventHandler, TimerAction, ExecuteProcess
|
||||
from launch.conditions import IfCondition
|
||||
from launch.event_handlers import OnProcessStart, OnShutdown
|
||||
from launch.substitutions import LocalSubstitution, PythonExpression
|
||||
|
||||
|
||||
def generate_launch_description():
    """Build the launch description for the Dobot Magician control stack.

    Runs two pre-flight checks (USB connection, MAGICIAN_TOOL env var), then
    starts the driver nodes/launch files on a staggered timer sequence and
    wires informational start events plus a shutdown handler.

    Calls sys.exit() (aborting launch) when a pre-flight check fails.
    """

    # ---- Pre-flight: is the robot physically connected over USB? ----------
    # lsusb output is empty when neither known USB-UART bridge chip is present.
    shell_cmd = subprocess.Popen(
        'lsusb | grep -E "Silicon Labs CP210x UART Bridge|QinHeng Electronics" ',
        shell=True, stdout=subprocess.PIPE)
    is_connected = shell_cmd.stdout.read().decode('utf-8')
    if not is_connected:
        sys.exit("Dobot is disconnected! Check if the USB cable and power adapter are plugged in.")

    # ---- Pre-flight: MAGICIAN_TOOL must name a supported end effector -----
    tool_env_var = str(os.environ.get('MAGICIAN_TOOL'))

    if tool_env_var == "None":
        sys.exit("MAGICIAN_TOOL env var is not set!")

    if tool_env_var not in ['none', 'pen', 'suction_cup', 'gripper', 'extended_gripper']:
        sys.exit("MAGICIAN_TOOL env var has an incorrect value!")

    # Substitution evaluated by IfCondition at launch time. Given the checks
    # above it always evaluates true here, but it keeps every action's
    # condition explicit.
    valid_tool = ["'", tool_env_var, "' == 'none'", 'or',
                  "'", tool_env_var, "' == 'pen'", 'or',
                  "'", tool_env_var, "' == 'suction_cup'", 'or',
                  "'", tool_env_var, "' == 'gripper'", 'or',
                  "'", tool_env_var, "' == 'extended_gripper'"]

    def _tool_condition():
        # A fresh condition object per action, matching the original wiring.
        return IfCondition(PythonExpression(valid_tool))

    def _node(package, executable):
        # Helper: a tool-guarded Node logging to the screen.
        return Node(package=package, executable=executable, output='screen',
                    condition=_tool_condition())

    def _launch(package, launch_file, output):
        # Helper: `ros2 launch <package> <launch_file>` as a shell process.
        return ExecuteProcess(
            cmd=[['ros2 ', 'launch ', package + ' ', launch_file]],
            shell=True,
            output=output,
            condition=_tool_condition())

    def _on_start(action, messages):
        # Helper: log informational messages when `action` starts.
        return RegisterEventHandler(
            OnProcessStart(
                target_action=action,
                on_start=[LogInfo(msg=m) for m in messages]))

    # ---- Nodes & launch files ---------------------------------------------
    tool_null = _node('dobot_bringup', 'set_tool_null')
    alarms = _launch('dobot_diagnostics', 'alarms_analyzer.launch.py', 'log')
    gripper = _node('dobot_end_effector', 'gripper_server')
    suction_cup = _node('dobot_end_effector', 'suction_cup_server')
    homing = _launch('dobot_homing', 'dobot_homing.launch.py', 'screen')
    trajectory_validator = _launch('dobot_kinematics', 'dobot_validate_trajectory.launch.py', 'screen')
    PTP_action = _launch('dobot_motion', 'dobot_PTP.launch.py', 'screen')
    robot_state = _node('dobot_state_updater', 'state_publisher')

    # ---- OnProcessStart events for information purposes -------------------
    tool_null_event = _on_start(tool_null, ['Loading tool parameters.'])
    alarms_event = _on_start(alarms, ['Starting the diagnostics module.'])
    gripper_event = _on_start(gripper, ['Gripper control service started.'])
    suction_cup_event = _on_start(suction_cup, ['Suction Cup control service started.'])
    homing_event = _on_start(homing, ['Starting homing service.',
                                      'Loading homing parameters.'])
    # Typo fixed: the original message read 'Strating'.
    trajectory_validator_event = _on_start(trajectory_validator,
                                           ['Starting trajectory validator service.',
                                            'Loading kinematics parameters.'])
    PTP_action_event = _on_start(PTP_action, ['Starting PointToPoint action server.',
                                              'Setting speed and acceleration values.'])
    robot_state_event = _on_start(robot_state,
                                  ['Dobot state updater node started.',
                                   'Dobot Magician control stack has been launched correctly'])

    # ---- Shutdown event handler -------------------------------------------
    on_shutdown = RegisterEventHandler(
        OnShutdown(
            on_shutdown=[LogInfo(
                msg=['Dobot Magician control system launch was asked to shutdown: ',
                     LocalSubstitution('event.reason')]
            )]
        )
    )

    # ---- Launch scheduler: stagger start-up so each stage finds its deps --
    def _after(period, action):
        # Helper: run `action` once `period` seconds after launch.
        return TimerAction(period=period, actions=[action])

    tool_null_sched = _after(1.0, tool_null)
    alarms_sched = _after(2.0, alarms)
    gripper_sched = _after(2.0, gripper)
    suction_cup_sched = _after(2.0, suction_cup)
    homing_sched = _after(3.0, homing)
    trajectory_validator_sched = _after(5.0, trajectory_validator)
    PTP_action_sched = _after(7.0, PTP_action)
    robot_state_sched = _after(18.0, robot_state)

    return LaunchDescription([
        tool_null_event,
        alarms_event,
        gripper_event,
        suction_cup_event,
        homing_event,
        trajectory_validator_event,
        PTP_action_event,
        robot_state_event,
        tool_null_sched,
        alarms_sched,
        gripper_sched,
        suction_cup_sched,
        homing_sched,
        trajectory_validator_sched,
        PTP_action_sched,
        robot_state_sched,
        on_shutdown
    ])
21
PROJECT_SOURCE_CODE/ros_dobot/src/dobot_bringup/package.xml
Normal file
@@ -0,0 +1,21 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
|
||||
<package format="3">
|
||||
<name>dobot_bringup</name>
|
||||
<version>1.0.0</version>
|
||||
<description>Bringup package for Dobot Magician</description>
|
||||
<author email="kan.jan@wp.pl">Jan Kaniuka</author>
|
||||
<maintainer email="kan.jan@wp.pl">Jan Kaniuka</maintainer>
|
||||
<license>BSD</license>
|
||||
|
||||
<exec_depend>ros2launch</exec_depend>
|
||||
|
||||
<test_depend>ament_copyright</test_depend>
|
||||
<test_depend>ament_flake8</test_depend>
|
||||
<test_depend>ament_pep257</test_depend>
|
||||
<test_depend>python3-pytest</test_depend>
|
||||
|
||||
<export>
|
||||
<build_type>ament_python</build_type>
|
||||
</export>
|
||||
</package>
|
||||
@@ -0,0 +1,4 @@
|
||||
[develop]
|
||||
script_dir=$base/lib/dobot_bringup
|
||||
[install]
|
||||
install_scripts=$base/lib/dobot_bringup
|
||||
29
PROJECT_SOURCE_CODE/ros_dobot/src/dobot_bringup/setup.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from setuptools import setup
|
||||
import os
|
||||
from glob import glob
|
||||
|
||||
package_name = 'dobot_bringup'

# ament_python setup for the bringup package: besides the usual resource
# marker and manifest, install every launch file under share/<package>.
setup(
    name=package_name,
    version='1.0.0',
    packages=[package_name],
    data_files=[
        # Marker file so ament can index the package.
        ('share/ament_index/resource_index/packages', ['resource/' + package_name]),
        # Install the manifest into the package share directory.
        ('share/' + package_name, ['package.xml']),
        # Launch files are discovered here by `ros2 launch dobot_bringup ...`.
        (os.path.join('share', package_name), glob('launch/*.launch.py')),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='Jan Kaniuka',
    maintainer_email='kan.jan@wp.pl',
    description='Bringup package for Dobot Magician',
    license='BSD',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            'set_tool_null = dobot_bringup.set_tool_null:main',
        ],
    },
)
@@ -0,0 +1,25 @@
|
||||
# Copyright 2015 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_copyright.main import main
|
||||
import pytest
|
||||
|
||||
|
||||
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run the ament_copyright linter over the package sources."""
    assert main(argv=['.', 'test']) == 0, 'Found errors'
@@ -0,0 +1,25 @@
|
||||
# Copyright 2017 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_flake8.main import main_with_errors
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
    """Run ament_flake8 and fail with the full list of style findings."""
    rc, errors = main_with_errors(argv=[])
    report = 'Found %d code style errors / warnings:\n' % len(errors) + '\n'.join(errors)
    assert rc == 0, report
@@ -0,0 +1,23 @@
|
||||
# Copyright 2015 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_pep257.main import main
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament_pep257 docstring linter over the package sources."""
    assert main(argv=['.', 'test']) == 0, 'Found code style errors / warnings'
@@ -0,0 +1,405 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import itertools
|
||||
import os
|
||||
from std_msgs.msg import String
|
||||
from dobot_driver.dobot_handle import bot
|
||||
|
||||
from ament_index_python import get_resource
|
||||
from python_qt_binding import loadUi
|
||||
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot, QProcess
|
||||
from python_qt_binding.QtGui import QIcon
|
||||
from python_qt_binding.QtWidgets import QHeaderView, QMenu, QTreeWidgetItem, QWidget, QVBoxLayout, QSizePolicy
|
||||
from rqt_py_common.message_helpers import get_message_class
|
||||
from PyQt5.QtWidgets import QMessageBox
|
||||
from PyQt5.QtGui import *
|
||||
from PyQt5.QtCore import *
|
||||
import subprocess
|
||||
import rclpy
|
||||
from geometry_msgs.msg import PoseStamped
|
||||
import math
|
||||
import tf_transformations
|
||||
from sensor_msgs.msg import JointState
|
||||
from std_msgs.msg import Float64, Float64MultiArray
|
||||
from dobot_msgs.msg import GripperStatus
|
||||
import sys
|
||||
|
||||
|
||||
|
||||
|
||||
class DobotControlPanel(QWidget):
|
||||
|
||||
    def __init__(self, node, plugin=None):
        """Build the rqt control-panel widget for the Dobot Magician.

        Args:
            node: rclpy node used for all subscriptions and the publisher.
            plugin: owning rqt plugin instance (stored, not used here).

        Calls sys.exit() (killing the process) when the robot's USB-UART
        bridge is not visible on the USB bus.
        """

        super(DobotControlPanel, self).__init__()

        # Bail out early when neither known Dobot USB-serial chip shows up
        # in lsusb (empty grep output means not connected).
        shell_cmd = subprocess.Popen('lsusb | grep -E "Silicon Labs CP210x UART Bridge|QinHeng Electronics" ', shell=True, stdout=subprocess.PIPE)
        is_connected = shell_cmd.stdout.read().decode('utf-8')
        if not is_connected:
            sys.exit("Dobot is disconnected! Check if the USB cable and power adapter are plugged in.")

        self._node = node
        self._logText = ''
        self.process = QProcess(self)
        self._plugin = plugin

        # Load the Qt layout shipped with the installed package. All widget
        # attributes used below (sliders, buttons, text fields) are created
        # on `self` by loadUi.
        _, package_path = get_resource('packages', 'dobot_control_panel')
        ui_file = os.path.join(package_path, 'share', 'dobot_control_panel', 'resource', 'DobotControlPanel_layout.ui')
        loadUi(ui_file, self)

        # Raw TCP pose [x, y, z, r]; x/y/z presumably metres (the callback
        # scales by 1000 for display) — TODO confirm with the publisher.
        self.subscription_TCP = self._node.create_subscription(
            Float64MultiArray,
            'dobot_pose_raw',
            self.tcp_position_callback,
            10)

        # Joint states in radians; converted to degrees for display.
        self.subscription_joints = self._node.create_subscription(
            JointState,
            'dobot_joint_states',
            self.joints_positions_callback,
            10)

        self.gripper_state_publ = self._node.create_publisher(GripperStatus, 'gripper_status_rviz', 10)

        # Latest joint positions in degrees; filled by joints_positions_callback.
        self.dobot_current_joint_states = []

        # Optional linear sliding-rail support, toggled via environment var.
        if os.environ.get('MAGICIAN_RAIL_IN_USE') == "true":
            self.RAIL_IN_USE = True
            bot.set_sliding_rail_status(1,1)
            bot.set_point_to_point_sliding_rail_params(100, 100)
            # NOTE(review): this call immediately overrides the previous one
            # with doubled values — flagged #BUG by the original author.
            bot.set_point_to_point_sliding_rail_params(100*2, 100*2) #BUG
            self.subscription_rail_pose = self._node.create_subscription(
                Float64,
                'dobot_rail_pose',
                self.rail_pose_callback,
                10)
        else:
            self.RAIL_IN_USE = False
            # No rail attached: show a placeholder instead of a position.
            self.CurrentPositionRail.setText(str("X"))

        # Control Tab — jog buttons: jog while pressed, stop on release.
        self.JT1Plus.pressed.connect(lambda:self.JT1_move(self.JT1Plus))
        self.JT1Plus.released.connect(self.JT_IDLE)
        self.JT1Minus.pressed.connect(lambda:self.JT1_move(self.JT1Minus))
        self.JT1Minus.released.connect(self.JT_IDLE)

        self.JT2Plus.pressed.connect(lambda:self.JT2_move(self.JT2Plus))
        self.JT2Plus.released.connect(self.JT_IDLE)
        self.JT2Minus.pressed.connect(lambda:self.JT2_move(self.JT2Minus))
        self.JT2Minus.released.connect(self.JT_IDLE)

        self.JT3Plus.pressed.connect(lambda:self.JT3_move(self.JT3Plus))
        self.JT3Plus.released.connect(self.JT_IDLE)
        self.JT3Minus.pressed.connect(lambda:self.JT3_move(self.JT3Minus))
        self.JT3Minus.released.connect(self.JT_IDLE)

        self.JT4Plus.pressed.connect(lambda:self.JT4_move(self.JT4Plus))
        self.JT4Plus.released.connect(self.JT_IDLE)
        self.JT4Minus.pressed.connect(lambda:self.JT4_move(self.JT4Minus))
        self.JT4Minus.released.connect(self.JT_IDLE)

        self.HomingButton.clicked.connect(self.button_clicked_HomingButton)
        self.EStopButton.clicked.connect(self.button_clicked_EStopButton)

        # Reference-frame radio buttons; jogging defaults to the base frame.
        self.BaseFrame.setChecked(True)
        self.frame = "base"
        self.BaseFrame.toggled.connect(lambda:self.framestate(self.BaseFrame))
        self.JointFrame.toggled.connect(lambda:self.framestate(self.JointFrame))

        # Tool Tab — end-effector controls.
        self.GripperOpen.pressed.connect(self.open_gripper)
        self.GripperClose.pressed.connect(self.close_gripper)
        self.SuctionCupTurnOn.pressed.connect(self.turn_on_suction_cup)
        self.SuctionCupTurnOff.pressed.connect(self.turn_off_suction_cup)

        # Speed Tab — mirror slider values into their text fields live...
        self.Joint1VelSlider.valueChanged.connect(lambda:self.valuechange_joints(self.JT1Vel))
        self.Joint2VelSlider.valueChanged.connect(lambda:self.valuechange_joints(self.JT2Vel))
        self.Joint3VelSlider.valueChanged.connect(lambda:self.valuechange_joints(self.JT3Vel))
        self.Joint4VelSlider.valueChanged.connect(lambda:self.valuechange_joints(self.JT4Vel))

        self.XVelSlider.valueChanged.connect(lambda:self.valuechange_cartesian(self.XVel))
        self.YVelSlider.valueChanged.connect(lambda:self.valuechange_cartesian(self.YVel))
        self.ZVelSlider.valueChanged.connect(lambda:self.valuechange_cartesian(self.ZVel))
        self.RVelSlider.valueChanged.connect(lambda:self.valuechange_cartesian(self.RVel))

        # -----------------------------------------------------------------------------

        # ...but only push new velocities to the robot on slider release.
        self.Joint1VelSlider.sliderReleased.connect(lambda:self.change_vel_joints(self.Joint1VelSlider))
        self.Joint2VelSlider.sliderReleased.connect(lambda:self.change_vel_joints(self.Joint2VelSlider))
        self.Joint3VelSlider.sliderReleased.connect(lambda:self.change_vel_joints(self.Joint3VelSlider))
        self.Joint4VelSlider.sliderReleased.connect(lambda:self.change_vel_joints(self.Joint4VelSlider))

        self.XVelSlider.sliderReleased.connect(lambda:self.change_vel_cartesian(self.XVelSlider))
        self.YVelSlider.sliderReleased.connect(lambda:self.change_vel_cartesian(self.YVelSlider))
        self.ZVelSlider.sliderReleased.connect(lambda:self.change_vel_cartesian(self.ZVelSlider))
        self.RVelSlider.sliderReleased.connect(lambda:self.change_vel_cartesian(self.RVelSlider))

        # -----------------------------------------------------------------------------

        # Sliding-rail controls (handlers defined elsewhere in this class).
        self.RailSVSlider.sliderReleased.connect(self.sliding_rail_SV_slider)
        self.RailSVSlider.valueChanged.connect(self.sliding_rail_current_pose)

        self.RailVelocitySlider.sliderReleased.connect(self.sliding_rail_params_slider)
        self.RailVelocitySlider.valueChanged.connect(self.sliding_rail_vel_display)

        self.RailAccelerationSlider.sliderReleased.connect(self.sliding_rail_params_slider)
        self.RailAccelerationSlider.valueChanged.connect(self.sliding_rail_acc_display)

        self.StopRailButton.clicked.connect(self.button_clicked_StopRail)

        # initial setup
        bot.set_jog_common_params(100, 1) # (vel_ratio, acc_ratio)
        bot.set_jog_joint_params([100, 100, 100, 100], [100, 100, 100, 100])
        bot.set_jog_coordinate_params([100, 100, 100, 100], [100, 100, 100, 100])
def tcp_position_callback(self, msg):
|
||||
self.XPoseLCD.setText(str(round((msg.data[0])*1000, 3)))
|
||||
self.YPoseLCD.setText(str(round((msg.data[1])*1000, 3)))
|
||||
self.ZPoseLCD.setText(str(round((msg.data[2])*1000, 3)))
|
||||
self.RPoseLCD.setText(str(round((msg.data[3]), 3)))
|
||||
|
||||
|
||||
def joints_positions_callback(self, msg):
|
||||
self.dobot_current_joint_states = [math.degrees(msg.position[0]), math.degrees(msg.position[1]), math.degrees(msg.position[2]), math.degrees(msg.position[3])]
|
||||
self.JT1LCD.setText(str(round((math.degrees(msg.position[0])), 3)))
|
||||
self.JT2LCD.setText(str(round((math.degrees(msg.position[1])), 3)))
|
||||
self.JT3LCD.setText(str(round((math.degrees(msg.position[2])), 3)))
|
||||
self.JT4LCD.setText(str(round((math.degrees(msg.position[3])), 3)))
|
||||
|
||||
def rail_pose_callback(self, msg):
|
||||
self.rail_pose = msg.data
|
||||
self.CurrentPositionRail.setText(str(int(self.rail_pose)))
|
||||
|
||||
|
||||
def framestate(self, b):
|
||||
|
||||
if b.text() == "Base":
|
||||
if b.isChecked() == True:
|
||||
self.frame = "base"
|
||||
|
||||
if b.text() == "Joint":
|
||||
if b.isChecked() == True:
|
||||
self.frame = "joint"
|
||||
|
||||
def JT_IDLE(self):
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 0)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 0)
|
||||
|
||||
|
||||
def JT1_move(self, sign):
|
||||
if sign.text() == "+":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 1)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 1)
|
||||
elif sign.text() == "-":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 2)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 2)
|
||||
|
||||
def JT2_move(self, sign):
|
||||
if sign.text() == "+":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 3)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 3)
|
||||
elif sign.text() == "-":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 4)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 4)
|
||||
|
||||
def JT3_move(self, sign):
|
||||
if sign.text() == "+":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 5)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 5)
|
||||
elif sign.text() == "-":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 6)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 6)
|
||||
|
||||
def JT4_move(self, sign):
|
||||
if sign.text() == "+":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 7)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 7)
|
||||
elif sign.text() == "-":
|
||||
if self.frame == "base":
|
||||
bot.set_jog_command(0, 8)
|
||||
elif self.frame == "joint":
|
||||
bot.set_jog_command(1, 8)
|
||||
|
||||
def button_clicked_HomingButton(self):
|
||||
msg = QMessageBox()
|
||||
msg.setIcon(QMessageBox.Information)
|
||||
|
||||
# msg.setText("Information")
|
||||
msg.setInformativeText("The homing procedure is about to start. Wait until the arm stops moving and the led stops flashing blue and turns green.")
|
||||
msg.setWindowTitle("Homing procedure")
|
||||
msg.setStandardButtons(QMessageBox.Ok)
|
||||
msg.exec_()
|
||||
|
||||
command = 'ros2 service call /dobot_homing_service dobot_msgs/srv/ExecuteHomingProcedure'
|
||||
subprocess.Popen(
|
||||
command, universal_newlines=True, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
|
||||
|
||||
def button_clicked_EStopButton(self):
|
||||
bot.stop_queue(force=True)
|
||||
|
||||
command = 'ros2 service call /PTP_action/_action/cancel_goal action_msgs/srv/CancelGoal'
|
||||
subprocess.Popen(
|
||||
command, universal_newlines=True, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
|
||||
|
||||
msg = QMessageBox()
|
||||
msg.setIcon(QMessageBox.Warning)
|
||||
|
||||
msg.setText("Warning")
|
||||
msg.setInformativeText("The stop button has been pressed. Make sure nothing is in the robot's workspace before resuming operation.")
|
||||
msg.setWindowTitle("Motion stop")
|
||||
msg.setStandardButtons(QMessageBox.Ok)
|
||||
msg.exec_()
|
||||
|
||||
|
||||
def open_gripper(self):
|
||||
command = 'ros2 service call /dobot_gripper_service dobot_msgs/srv/GripperControl "{gripper_state: "open", keep_compressor_running: false}"'
|
||||
subprocess.Popen(
|
||||
command, universal_newlines=True, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
|
||||
|
||||
self.send_gripper_state("opened")
|
||||
|
||||
def close_gripper(self):
|
||||
command = 'ros2 service call /dobot_gripper_service dobot_msgs/srv/GripperControl "{gripper_state: "close", keep_compressor_running: false}"'
|
||||
subprocess.Popen(
|
||||
command, universal_newlines=True, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
|
||||
|
||||
self.send_gripper_state("closed")
|
||||
|
||||
|
||||
def turn_on_suction_cup(self):
    """Enable the suction cup through the dobot_suction_cup_service."""
    service_cmd = 'ros2 service call /dobot_suction_cup_service dobot_msgs/srv/SuctionCupControl "{enable_suction: true}"'
    subprocess.run(service_cmd, shell=True, universal_newlines=True,
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
||||
def turn_off_suction_cup(self):
    """Disable the suction cup through the dobot_suction_cup_service."""
    service_cmd = 'ros2 service call /dobot_suction_cup_service dobot_msgs/srv/SuctionCupControl "{enable_suction: false}"'
    subprocess.run(service_cmd, shell=True, universal_newlines=True,
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
||||
def valuechange_joints(self, field):
    """Mirror a joint-velocity slider's value into its matching text field.

    *field* is the text widget whose objectName identifies which joint changed.
    """
    # Map each text field's objectName to (text field, source slider).
    displays = {
        "JT1Vel": (self.JT1Vel, self.Joint1VelSlider),
        "JT2Vel": (self.JT2Vel, self.Joint2VelSlider),
        "JT3Vel": (self.JT3Vel, self.Joint3VelSlider),
        "JT4Vel": (self.JT4Vel, self.Joint4VelSlider),
    }
    entry = displays.get(field.objectName())
    if entry is not None:
        text_field, slider = entry
        text_field.setText(str(slider.value()))
||||
def valuechange_cartesian(self, field):
    """Mirror a Cartesian-velocity slider's value into its matching text field.

    *field* is the text widget whose objectName identifies which axis changed.
    """
    # Map each text field's objectName to (text field, source slider).
    displays = {
        "XVel": (self.XVel, self.XVelSlider),
        "YVel": (self.YVel, self.YVelSlider),
        "ZVel": (self.ZVel, self.ZVelSlider),
        "RVel": (self.RVel, self.RVelSlider),
    }
    entry = displays.get(field.objectName())
    if entry is not None:
        text_field, slider = entry
        text_field.setText(str(slider.value()))
||||
def change_vel_joints(self, slider):
    """Send the four joint jog velocities (read from the sliders) to the robot.

    Accelerations are fixed at 100 for every joint.
    """
    joint_sliders = (self.Joint1VelSlider, self.Joint2VelSlider,
                     self.Joint3VelSlider, self.Joint4VelSlider)
    velocities = [int(s.value()) for s in joint_sliders]
    bot.set_jog_joint_params(velocities, [100] * 4)
||||
def change_vel_cartesian(self, slider):
    """Send the four Cartesian jog velocities (read from the sliders) to the robot.

    Accelerations are fixed at 100 for every axis.
    """
    axis_sliders = (self.XVelSlider, self.YVelSlider,
                    self.ZVelSlider, self.RVelSlider)
    velocities = [int(s.value()) for s in axis_sliders]
    bot.set_jog_coordinate_params(velocities, [100] * 4)
||||
def sliding_rail_disconnected(self):
    """Show a warning dialog explaining that the sliding rail is not connected."""
    notice = QMessageBox()
    notice.setIcon(QMessageBox.Warning)
    notice.setText("Sliding rail is not connected")
    notice.setInformativeText("If you have actually placed the manipulator on the sliding rail, you must additionally set MAGICIAN_RAIL_IN_USE environment variable to 'true'.")
    notice.setWindowTitle("WARNING")
    notice.setStandardButtons(QMessageBox.Close)
    notice.exec_()
||||
def sliding_rail_SV_slider(self):
    """Move the sliding rail to the slider's set value while holding the current joint pose."""
    # Guard: warn and bail out if the rail is not configured as present.
    if not self.RAIL_IN_USE:
        self.sliding_rail_disconnected()
        return

    joints = self.dobot_current_joint_states
    bot.set_point_to_point_sliding_rail_command(
        4, joints[0], joints[1], joints[2], joints[3],
        int(self.RailSVSlider.value()))
||||
def sliding_rail_current_pose(self):
    """Mirror the rail set-value slider into its text display."""
    slider_value = self.RailSVSlider.value()
    self.RailSVDisplay.setText(str(slider_value))
||||
def sliding_rail_params_slider(self):
    # Push sliding-rail velocity/acceleration from the GUI sliders to the robot.
    # Warn and bail out if the rail is not configured as present.
    if not self.RAIL_IN_USE:
        self.sliding_rail_disconnected()
        return
    bot.set_point_to_point_sliding_rail_params(int(self.RailVelocitySlider.value()) * 2, int(self.RailAccelerationSlider.value()) * 2) #BUG: the *2 scaling looks unintended — reconcile slider range vs. device units and confirm against set_point_to_point_sliding_rail_params before removing
||||
def sliding_rail_vel_display(self):
    """Mirror the rail velocity slider into its readout widget."""
    velocity = self.RailVelocitySlider.value()
    self.RailVelLCD.setText(str(velocity))
||||
def sliding_rail_acc_display(self):
    """Mirror the rail acceleration slider into its readout widget."""
    acceleration = self.RailAccelerationSlider.value()
    self.RailAccLCD.setText(str(acceleration))
||||
def button_clicked_StopRail(self):
    """Stop sliding-rail motion: halt, flush, and re-arm the command queue, then warn the operator."""
    # Guard: warn and bail out if the rail is not configured as present.
    if not self.RAIL_IN_USE:
        self.sliding_rail_disconnected()
        return

    # Hard-stop, drop queued commands, then re-arm the queue for new ones.
    bot.stop_queue(force=True)
    bot.clear_queue()
    bot.start_queue()

    warning = QMessageBox()
    warning.setIcon(QMessageBox.Warning)
    warning.setText("Warning")
    warning.setInformativeText("The stop button has been pressed. Make sure nothing is in the robot's workspace before resuming operation.")
    warning.setWindowTitle("Motion stop")
    warning.setStandardButtons(QMessageBox.Ok)
    warning.exec_()
||||
# --------------------------------------------------------
|
||||
def send_gripper_state(self, state):
    """Publish *state* (e.g. "opened"/"closed") on the gripper status topic with a current timestamp."""
    status_msg = GripperStatus()
    status_msg.header.stamp = self._node.get_clock().now().to_msg()
    status_msg.status = state
    self.gripper_state_publ.publish(status_msg)
||||
|
||||
|
||||
def start(self):
    """Plugin lifecycle hook called after construction; nothing to do on start."""
    pass
||||
def shutdown_plugin(self):
    """Plugin lifecycle hook called on unload; no resources to release here."""
    pass
||||
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import sys
|
||||
from rqt_gui.main import Main
|
||||
|
||||
def main():
    """Launch rqt standalone with the Dobot control panel plugin preloaded.

    Exits the process with rqt's return code.
    """
    # Use a distinct local name: the original bound ``main = Main()``,
    # shadowing this function's own name inside its body.
    rqt_main = Main()
    sys.exit(rqt_main.main(sys.argv, standalone='dobot_control_panel.ros2_dobot_control_panel.Ros2DobotControlPanel'))


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,21 @@
|
||||
from rqt_gui_py.plugin import Plugin
|
||||
from .dobot_control_panel_widget import DobotControlPanel
|
||||
|
||||
|
||||
class Ros2DobotControlPanel(Plugin):
    """rqt plugin wrapper that creates and registers the DobotControlPanel widget."""

    def __init__(self, context):
        # Fix: the original called super().__init__(context) twice in a row,
        # initializing the Plugin base class redundantly. Call it once.
        super(Ros2DobotControlPanel, self).__init__(context)
        self._node = context.node
        self._logger = self._node.get_logger().get_child('dobot_control_panel.ros2_dobot_control_panel.Ros2DobotControlPanel')
        self.setObjectName('Ros2DobotControlPanel')

        self._widget = DobotControlPanel(context.node, self)

        self._widget.start()
        # When several instances of the plugin run, suffix the window title
        # with the serial number so they can be told apart.
        if context.serial_number() > 1:
            self._widget.setWindowTitle(
                self._widget.windowTitle() + (' (%d)' % context.serial_number()))
        context.add_widget(self._widget)
||||
|
After Width: | Height: | Size: 335 KiB |
@@ -0,0 +1,27 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
|
||||
<package format="3">
|
||||
<name>dobot_control_panel</name>
|
||||
<version>1.0.0</version>
|
||||
<description>RQT-based GUI for Dobot Magician</description>
|
||||
<author email="kan.jan@wp.pl">Jan Kaniuka</author>
|
||||
<maintainer email="kan.jan@wp.pl">Jan Kaniuka</maintainer>
|
||||
<license>BSD</license>
|
||||
|
||||
<test_depend>ament_copyright</test_depend>
|
||||
<test_depend>ament_flake8</test_depend>
|
||||
<test_depend>ament_pep257</test_depend>
|
||||
<test_depend>python3-pytest</test_depend>
|
||||
|
||||
<exec_depend version_gte="0.2.19">python_qt_binding</exec_depend>
|
||||
<exec_depend>rqt_gui</exec_depend>
|
||||
<exec_depend>rqt_gui_py</exec_depend>
|
||||
<exec_depend>rqt_py_common</exec_depend>
|
||||
<exec_depend>rclpy</exec_depend>
|
||||
<exec_depend>std_msgs</exec_depend>
|
||||
|
||||
<export>
|
||||
<build_type>ament_python</build_type>
|
||||
<rqt_gui plugin="${prefix}/plugin.xml"/>
|
||||
</export>
|
||||
</package>
|
||||
@@ -0,0 +1,17 @@
|
||||
<library path="./">
|
||||
<class name="DobotControlPanel" type="dobot_control_panel.ros2_dobot_control_panel.Ros2DobotControlPanel" base_class_type="rqt_gui_py::Plugin">
|
||||
<description>
|
||||
Rqt plugin that allows you to control Dobot Magician robotic arm.
|
||||
</description>
|
||||
<qtgui>
|
||||
<group>
|
||||
<label>Robot Tools</label>
|
||||
<icon type="theme">folder</icon>
|
||||
<statustip>Plugins related to Robot Tools.</statustip>
|
||||
</group>
|
||||
<label>Dobot Control Panel</label>
|
||||
<icon type="theme">input-gaming</icon>
|
||||
<statustip>A Python GUI plugin for controlling Dobot Magician robotic arm.</statustip>
|
||||
</qtgui>
|
||||
</class>
|
||||
</library>
|
||||
|
After Width: | Height: | Size: 8.6 KiB |
|
After Width: | Height: | Size: 126 KiB |
|
After Width: | Height: | Size: 1.7 KiB |
|
After Width: | Height: | Size: 179 KiB |
|
After Width: | Height: | Size: 67 KiB |
|
After Width: | Height: | Size: 140 KiB |
|
After Width: | Height: | Size: 432 KiB |
|
After Width: | Height: | Size: 54 KiB |
@@ -0,0 +1,19 @@
|
||||
<RCC>
|
||||
<qresource prefix="collection">
|
||||
<file>dobot_joints_system.png</file>
|
||||
<file>cartesian_system.png</file>
|
||||
<file>velocity.png</file>
|
||||
<file>sliding_rail2.png</file>
|
||||
<file>settings.png</file>
|
||||
<file>rail_image.png</file>
|
||||
<file>info.png</file>
|
||||
<file>bg-red.svg</file>
|
||||
<file>bg-green.svg</file>
|
||||
<file>status_error.png</file>
|
||||
<file>status_ok.png</file>
|
||||
<file>blue_house.png</file>
|
||||
<file>coordinate_system.png</file>
|
||||
<file>estop.png</file>
|
||||
<file>magician_gui_image.png</file>
|
||||
</qresource>
|
||||
</RCC>
|
||||
|
After Width: | Height: | Size: 12 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 1.9 KiB |
|
After Width: | Height: | Size: 114 KiB |
|
After Width: | Height: | Size: 26 KiB |
@@ -0,0 +1,4 @@
|
||||
[develop]
|
||||
script_dir=$base/lib/dobot_control_panel
|
||||
[install]
|
||||
install_scripts=$base/lib/dobot_control_panel
|
||||
@@ -0,0 +1,33 @@
|
||||
from setuptools import setup
from glob import glob
import os  # NOTE(review): unused in this script — candidate for removal

# ROS 2 (ament_python) package name; reused for the install paths below.
package_name = 'dobot_control_panel'

setup(
    name=package_name,
    version='1.0.0',
    packages=[package_name],
    # Install non-Python assets into the package's share directory so the
    # rqt plugin loader and the UI can locate them at runtime.
    data_files=[
        # Marker file for the ament package index.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        # Qt Designer layout loaded by the control-panel widget.
        ('share/' + package_name + '/resource', ['resource/DobotControlPanel_layout.ui']),
        # rqt plugin manifest referenced from package.xml's <export> section.
        ('share/' + package_name, ['plugin.xml']),
        # GUI images and icons.
        ('share/' + package_name + '/resource/images', glob('resource/images/*.png')),
        ('share/' + package_name + '/resource/images', glob('resource/images/*.svg'))
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='Jan Kaniuka',
    maintainer_email='kan.jan@wp.pl',
    description='RQT-based GUI for Dobot Magician',
    license='BSD',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            # Invoked as: ros2 run dobot_control_panel dobot_control_panel
            'dobot_control_panel = dobot_control_panel.main:main',
        ],
    },
)
||||
@@ -0,0 +1,23 @@
|
||||
# Copyright 2015 Open Source Robotics Foundation, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ament_copyright.main import main
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run ament_copyright over the package and fail if any file lacks a valid header."""
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found errors'
|
||||