How can I implement a PID controller to convert pixels into degrees on the YARP iCubSim? #678
-
Hi there!

#include <iostream>
#include <algorithm>
#include <cmath>
#include <yarp/os/all.h>
#include <yarp/sig/all.h>
#include <yarp/dev/all.h>
using namespace yarp::os;
using namespace yarp::dev;
using namespace yarp::sig;
using namespace std;
class controller_thread: public PeriodicThread {
public:
controller_thread(double p):PeriodicThread(p){
bool ok = imagePort.open("/image-port"); // give the port a name
if (!ok) {
std::cerr << "Failed to open port" << std::endl;
return;
}
// Connect the client to the RPC Server of iCub_SIM
yarp.connect("/icubSim/cam/left", "/image-port");
options.put("device", "remote_controlboard");
options.put("local", "/local/test"); // local port name
options.put("remote", "/icubSim/head"); // where we connect to
pd.open(options);
if (!pd.isValid()) {
cerr<<"PolyDriver not instatiated"<<endl;
}
pd.view(pos); // makes the PolyDriver view the position control
pd.view(ie); // makes the PolyDriver view the encoders
encoders.resize(6);
command_position.resize(6);
}
protected:
bool threadInit(){
return true;
}
void run() {
ImageOf<PixelRgb> *image = imagePort.read(); // read an image
if (image!=NULL) { // check we actually got something
cout<< "We got an image of size " << image->width() << image->height() <<endl;
double xMean = 0;
double yMean = 0;
int ct = 0;
for (int x=0; x<image->width(); x++) {
for (int y=0; y<image->height(); y++) {
PixelRgb& pixel = image->pixel(x,y);
// verify the red level exceeds blue and green by a factor of 2 //
if (pixel.r>pixel.b*1.2+10 && pixel.r>pixel.g*1.2+10) {
// find the average location of the red pixels //
xMean += x;
yMean += y;
ct++;
}
}
}
if (ct>0) {
xMean /= ct;
yMean /= ct;
}
x_pixel = xMean;
y_pixel = yMean;
cout << "X pixel: " << x_pixel << " Y pixel: " << y_pixel << endl;
pos->getAxes(&jnts);
double eye_pos_x = (x_pixel - 160);
double eye_pos_y = (y_pixel - 120);
for (int ii= 0; ii< jnts; ii++) {
command_position[ii] = 0;
}
command_position[4] = eye_pos_x;
command_position[3] = eye_pos_y;
pos->positionMove(command_position.data()); // update joint value
ie->getEncoders(encoders.data());
for (int ii=0; ii<6; ii++)
cout<<encoders[ii]<< " "; // print encoders value
cout<<endl;
}
}
void threadRelease() {
yarp.disconnect("/icubSim/cam/left", "/image-port");
imagePort.close();
pd.close();
cout << "Done, goodbye from myThread." << endl;
}
private:
Network yarp; // initialize the network
BufferedPort<ImageOf<PixelRgb> > imagePort; // port for reading images
Property options;
PolyDriver pd; // PolyDriver
IPositionControl *pos; // position control pointer
IEncoders *ie; // encoders pointer
Vector command_position; // vector for position control
Vector encoders; // vector for storing encoders values
int jnts = 0;
double x_pixel = 0;
double y_pixel = 0;
};
|
Beta Was this translation helpful? Give feedback.
Replies: 2 comments 4 replies
-
I searched on the internet and found that to convert pixels into degrees, you need information about the field of view (FOV) of the camera or sensor. The FOV represents the angular extent of the observable world that the camera can capture. Once you have the FOV information, you can use it to convert pixel coordinates to degrees. The focal length on the x-axis is equal to the focal length on the y-axis for iCub_SIM: fx = fy = 257.34. I used this in my code; the robot follows the sphere correctly when it's on its left, but then it kind of loses it — maybe I'm wrong in some if statement. double error_x = (x_pixel - 160);
double error_y = (y_pixel - 120);
double f = 257.34;
double angle_x = error_x*atan(1/f)*180/M_PI;
double angle_y = error_y*atan(1/f)*180/M_PI;
cout<< "angle x: " << angle_x << "angle y: " << angle_y << endl;
if (angle_x>10 || angle_x< -10){
for (int ii= 0; ii< jnts; ii++) {
command_position[ii] = 0;
}
command_position[2] = 10 - angle_x;
pos->positionMove(command_position.data()); // update joint value
}
if (angle_y>10 || angle_y< -10){
for (int ii= 0; ii< jnts; ii++) {
command_position[ii] = 0;
}
command_position[0] = 10 - angle_y;
pos->positionMove(command_position.data()); // update joint
}
else{
for (int ii= 0; ii< jnts; ii++) {
command_position[ii] = 0;
}
command_position[4] = angle_x;
command_position[3] = angle_y;
pos->positionMove(command_position.data()); // update joint value
}
ie->getEncoders(encoders.data());
for (int ii=0; ii<6; ii++)
cout<<encoders[ii]<< " "; // print encoders value
cout<<endl;
}
|
Beta Was this translation helpful? Give feedback.
-
The key is controlling the robot head in velocity and not in position 💡 This way, you won't need to know any conversion factor pixels → angles, but rather you'll have to focus on mapping the sign of pixel error into the correct direction of motion for all the axes at stake. In this setting, the PID will be responsible for driving the pixel error to zero by yielding velocity commands. Why are you coding a PID?
|
Beta Was this translation helpful? Give feedback.
Hi @bionicsgirl
I'm sorry, but IPositionControl does NOT get along with a high-level PID. This interface provides the user with a control mode characterized by commands of the type send-and-forget. In other words, these are the steps the user is required to accomplish in the code, once the interface gets correctly configured:
1. Set the reference speeds with setRefSpeed. This is something you're not doing yet, apparently.
2. Command the target positions with positionMove.
3. Wait for the motion to finish with checkMotionDone.
A send-and-forget control mode can be seen from the user's per…