Returns camera frames which can be used for color recognition and victim detection
This page is under construction.
Basics
The camera sensor returns a 3D matrix/list representing a 2D image of pixels, where each pixel is composed of RGBA color channels. Each channel is in the range of 0 to 255, with (255, 255, 255) being white and (0, 0, 0) being black (RGB values shown; the alpha channel is omitted here). It is mainly used in the following ways:
As a RGB color sensor to detect environment colors, like the color of the floor (In this case, it may be implemented as a 1px by 1px camera).
As a regular camera, which returns frames that can be used with image processing libraries for victim or hazard map detection.
There are 4 basic steps to using this sensor:
Start by importing or including the appropriate class for the camera.
Retrieve the sensor by name from the robot.
Enable the sensor by passing in the update rate of the sensor. (Usually the timestep)
Retrieve the image frame from the sensor on each simulation step.
from controller import Robot, Camera  # Step 1: Import the Camera class

robot = Robot()

# Step 2: Retrieve the sensor, named "colour_sensor", from the robot.
# Note that the sensor name may differ between robots.
colorSensor = robot.getDevice("colour_sensor")

timestep = int(robot.getBasicTimeStep())

colorSensor.enable(timestep)  # Step 3: Enable the sensor, using the timestep as the update rate

while robot.step(timestep) != -1:
    image = colorSensor.getImage()  # Step 4: Retrieve the image frame.

    # Get the individual RGB color channels from the pixel (0, 0).
    # Note that these functions require you to pass the width of the overall image in pixels.
    # Since this is a 1px by 1px color sensor, the width of the image is just 1.
    r = colorSensor.imageGetRed(image, 1, 0, 0)
    g = colorSensor.imageGetGreen(image, 1, 0, 0)
    b = colorSensor.imageGetBlue(image, 1, 0, 0)

    print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
#include<iostream>#include<webots/Robot.hpp>#include<webots/Camera.hpp> // Step 1: Include Camera headerusingnamespace webots;
usingnamespace std;
intmain(int argc, char**argv) {
Robot *robot =new Robot();
Camera* colorSensor = robot->getCamera("colour_sensor"); // Step 2: Retrieve the sensor, named "colour_sensor", from the robot. Note that the sensor name may differ between robots.
int timeStep = (int)robot->getBasicTimeStep();
colorSensor->enable(timeStep); // Step 3: Enable the sensor, using the timestep as the update rate
robot->step(timeStep);
while (robot->step(timeStep) !=-1) {
constunsignedchar* image = colorSensor->getImage(); // Step 4: Retrieve the image frame.
// Get the individual RGB color channels from the pixel (0,0)
// Note that these functions require you to pass the width of the overall image in pixels.
// Since this is a 1px by 1px color sensor, the width of the image is just 1.
int r = colorSensor->imageGetRed(image, 1, 0, 0);
int g = colorSensor->imageGetGreen(image, 1, 0, 0);
int b = colorSensor->imageGetBlue(image, 1, 0, 0);
cout <<"r: "<< r <<" g: "<< g <<" b: "<< b << endl;
}
delete robot;
return0;
}
Note: The dimensions of the frame, in pixels, can be found with the getWidth() and getHeight() functions:
// Query the camera's frame dimensions, in pixels.
int height = colorSensor->getHeight();
int width = colorSensor->getWidth();
Example
This example demonstrates techniques to interface the camera frames from Erebus with third-party libraries to perform advanced image processing. The frame is fed into a data format usable by the OpenCV library, which then grayscales and displays the modified image.
from controller import Robot, Camera
import cv2          # Include the OpenCV library
import numpy as np  # For Python, include numpy as well

robot = Robot()

camera = robot.getDevice("cameraName")
timestep = int(robot.getBasicTimeStep())
camera.enable(timestep)

while robot.step(timestep) != -1:
    # Convert the camera image into a format usable by OpenCV. The raw frame is a
    # flat byte buffer with 4 channels per pixel (RGBA format), so reshape it using
    # the camera's height and width in pixels.
    image = camera.getImage()
    image = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))

    frame = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)  # Drop the alpha channel
    cv2.imshow("frame", frame)

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Grayscale
    cv2.imshow("grayScale", frame)

    # cv2.threshold returns a (retval, dst) tuple — keep the thresholded image,
    # otherwise the "thresh" window would just show the grayscale frame again.
    _, frame = cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY)  # Threshold
    cv2.imshow("thresh", frame)

    cv2.waitKey(1)  # Render imshows on screen
#include <opencv2/opencv.hpp>  // Include the OpenCV library
#include <webots/Robot.hpp>
#include <webots/Camera.hpp>

using namespace cv;
using namespace webots;

int main(int argc, char **argv) {
  Robot *robot = new Robot();

  Camera *cam = robot->getCamera("cameraName");
  int timeStep = (int)robot->getBasicTimeStep();
  cam->enable(timeStep);

  while (robot->step(timeStep) != -1) {
    /*
     * Convert the camera image into an OpenCV Mat. You must pass in the height
     * and width of the camera in pixels, as well as specify CV_8UC4, meaning
     * that it is a 4-channel (RGBA) image.
     */
    Mat frame(cam->getHeight(), cam->getWidth(), CV_8UC4, (void *)cam->getImage());
    imshow("frame", frame);

    // The Mat is 4-channel (CV_8UC4), so use the BGRA conversion code.
    cvtColor(frame, frame, COLOR_BGRA2GRAY);  // grayscale
    imshow("grayScale", frame);

    threshold(frame, frame, 80, 255, THRESH_BINARY);  // threshold
    imshow("thresh", frame);

    waitKey(1);  // Render imshows on screen
  }

  delete robot;
  return 0;
}