Image and Video filters
The markdown of the above sketch looks like:
```
> :P5 sketch=/docs/sketches/original_imaging.js, width=512, height=256
```
And the p5 sketch that loads the original image and video is the following:
```js
// Handles for the image and video shown side by side.
let img;
let video;

function preload() {
  img = loadImage('/vc/docs/sketches/lenna.png');
  video = createVideo(['/vc/docs/sketches/fingers.mov', '/vc/docs/sketches/fingers.webm']);
  video.hide(); // the video is drawn onto the canvas, not shown as a DOM element
}

function setup() {
  createCanvas(512, 256);
  video.loop();
}

function draw() {
  image(img, 0, 0, 256, 256);     // original image on the left half
  image(video, 256, 0, 256, 256); // video frame on the right half
}
```
The primary objective of color inversion is to transform light areas into dark ones, and dark areas into light ones. The mathematics behind this process is as follows: suppose you have an image in normalized RGB (each channel ranges from 0 to 1); if the channel values are v_red, v_green and v_blue, the inverted values are v_inv_red = 1 - v_red, v_inv_green = 1 - v_green and v_inv_blue = 1 - v_blue. For example, a normalized pixel (0.2, 0.5, 1.0) becomes (0.8, 0.5, 0.0). In p5 we can directly use the filter() function with the INVERT argument to obtain the color-inverted result.
The markdown of the above sketch looks like:
```
> :P5 sketch=/docs/sketches/inverse_color_imaging.js, width=512, height=256
```
And the p5 sketch that loads the image and video is the following:
```js
let img;
let video;

function preload() {
  img = loadImage('/vc/docs/sketches/lenna.png');
  video = createVideo(['/vc/docs/sketches/fingers.mov', '/vc/docs/sketches/fingers.webm']);
  video.hide();
}

function setup() {
  createCanvas(512, 256);
  video.loop();
}

function draw() {
  image(img, 0, 0, 256, 256);
  image(video, 256, 0, 256, 256);
  filter(INVERT); // invert every pixel currently on the canvas
}
```
The same inversion can also be implemented manually, by reading and rewriting the pixels array directly, as in the sketch above. The markdown of the above sketch looks like:
```
> :P5 sketch=/docs/sketches/inverse_color_manual_imaging.js, width=512, height=256
```
And the p5 sketch that loads the image and video is the following:
```js
let img;
let video;

function preload() {
  img = loadImage('/vc/docs/sketches/lenna.png');
  video = createVideo(['/vc/docs/sketches/fingers.mov', '/vc/docs/sketches/fingers.webm']);
  video.hide();
}

function setup() {
  createCanvas(512, 256);
  video.loop();
}

function draw() {
  image(img, 0, 0, 256, 256);
  image(video, 256, 0, 256, 256);

  // Read the canvas into the pixels array (RGBA, 4 entries per pixel).
  loadPixels();
  let d = pixelDensity();
  let fullImage = (width * d) * (height * d); // total pixel count, accounting for high-density displays
  for (let i = 0; i < fullImage; i++) {
    let r = pixels[i * 4];
    let g = pixels[i * 4 + 1];
    let b = pixels[i * 4 + 2];

    // Invert each channel: v_inv = 255 - v (the alpha channel is left untouched).
    pixels[i * 4] = 255 - r;
    pixels[i * 4 + 1] = 255 - g;
    pixels[i * 4 + 2] = 255 - b;
  }
  updatePixels(); // write the modified pixels back to the canvas
}
```
The primary objective of grayscale conversion is to collapse the color channels of the image into a single channel that captures the brightness of the picture. The mathematics behind this process is as follows: given an image in RGB, we can use the average of the channels as the intensity value, or we can use the Luma formula, which is based on how our eyes react to each channel: intensity = 0.2126 * red + 0.7152 * green + 0.0722 * blue. That intensity determines the luminance of each pixel. For example, for a pure green pixel (0, 255, 0) the average gives 85, while Luma gives about 182, reflecting the eye's greater sensitivity to green. In p5 we can directly use the filter() function with the GRAY argument.
The markdown of the above sketch looks like:
```
> :P5 sketch=/docs/sketches/gray_color_imaging.js, width=512, height=256
```
And the p5 sketch that loads the image and video is the following:
```js
let img;
let video;

function preload() {
  img = loadImage('/vc/docs/sketches/lenna.png');
  video = createVideo(['/vc/docs/sketches/fingers.mov', '/vc/docs/sketches/fingers.webm']);
  video.hide();
}

function setup() {
  createCanvas(512, 256);
  video.loop();
}

function draw() {
  image(img, 0, 0, 256, 256);
  image(video, 256, 0, 256, 256);
  filter(GRAY); // convert everything currently on the canvas to grayscale
}
```
As before, the conversion can also be done manually through the pixels array, this time applying the Luma weights, as in the sketch above. The markdown of the above sketch looks like:
```
> :P5 sketch=/docs/sketches/gray_color_manual_imaging.js, width=512, height=256
```
And the p5 sketch that loads the image and video is the following:
```js
let img;
let video;

function preload() {
  img = loadImage('/vc/docs/sketches/lenna.png');
  video = createVideo(['/vc/docs/sketches/fingers.mov', '/vc/docs/sketches/fingers.webm']);
  video.hide();
}

function setup() {
  createCanvas(512, 256);
  video.loop();
}

function draw() {
  image(img, 0, 0, 256, 256);
  image(video, 256, 0, 256, 256);

  // Read the canvas into the pixels array (RGBA, 4 entries per pixel).
  loadPixels();
  let d = pixelDensity();
  let fullImage = (width * d) * (height * d);
  for (let i = 0; i < fullImage; i++) {
    let r = pixels[i * 4];
    let g = pixels[i * 4 + 1];
    let b = pixels[i * 4 + 2];

    // Luma conversion: weight each channel by how sensitive the eye is to it.
    let gray = r * 0.2126 + g * 0.7152 + b * 0.0722;

    pixels[i * 4] = gray;
    pixels[i * 4 + 1] = gray;
    pixels[i * 4 + 2] = gray;
  }
  updatePixels(); // write the modified pixels back to the canvas
}
```
We will use the following image, which has large differences in illuminance, to compare the average grayscale method with Luma.
We have the two processed images: on the upper side the one processed using Luma, and on the other the one using the average.
We can see that the average method turns many bright spots in the flame into dark zones, while Luma captures the differences in brightness more accurately, which is why Luma is preferable to the plain average.
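To reproduce a comparison like this yourself, the following minimal sketch (our own addition, not one of the original sketches) applies the average conversion to the left half of the canvas and the Luma conversion to the right half. It reuses lenna.png from the sketches above; any image with strong differences in illuminance can be substituted by changing the path.

```js
// Side-by-side comparison: average grayscale (left) vs. Luma grayscale (right).
let img;

function preload() {
  img = loadImage('/vc/docs/sketches/lenna.png'); // swap in any high-contrast image
}

function setup() {
  createCanvas(512, 256);
  noLoop(); // the image is static, so one pass is enough
}

function draw() {
  image(img, 0, 0, 256, 256);   // left half: will receive the average conversion
  image(img, 256, 0, 256, 256); // right half: will receive the Luma conversion
  loadPixels();
  let d = pixelDensity();
  let w = width * d;
  let h = height * d;
  for (let y = 0; y < h; y++) {
    for (let x = 0; x < w; x++) {
      let i = 4 * (y * w + x);
      let r = pixels[i];
      let g = pixels[i + 1];
      let b = pixels[i + 2];
      // Left half: plain average. Right half: Luma weights.
      let gray = x < w / 2
        ? (r + g + b) / 3
        : 0.2126 * r + 0.7152 * g + 0.0722 * b;
      pixels[i] = gray;
      pixels[i + 1] = gray;
      pixels[i + 2] = gray;
    }
  }
  updatePixels();
}
```

With an image that contains bright highlights, the left (average) half should show the flattening of bright spots described above, while the right (Luma) half preserves them.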
Now you can test the inverse and grayscale filters with your own camera.
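A minimal sketch for that experiment, assuming the browser can access a webcam through createCapture (the key-press toggle is our own addition, not part of the original sketches), could look like this:

```js
// Webcam filters: press any key to switch between INVERT and GRAY.
let capture;
let useInvert = true;

function setup() {
  createCanvas(256, 256);
  capture = createCapture(VIDEO); // asks the browser for camera access
  capture.hide(); // draw the feed onto the canvas instead of the DOM element
}

function draw() {
  image(capture, 0, 0, 256, 256);
  filter(useInvert ? INVERT : GRAY); // apply the selected filter to the canvas
}

function keyPressed() {
  useInvert = !useInvert;
}
```

Pressing any key toggles the live feed between the inverted and grayscale views.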