Description | Link | Code | Test_Case | Merge
---|---|---|---|---|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
import cv2
|
#Output : 23
|
White and black dot detection
import cv2
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
# path ="C:/Users/Personal/Downloads/black dot.jpg"
path = "black dot.jpg"
|
#Output : 23
|
White and black dot detection
# path ="C:/Users/Personal/Downloads/black dot.jpg"
path = "black dot.jpg"
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
gray = cv2.imread(path, 0)
|
#Output : 23
|
White and black dot detection
gray = cv2.imread(path, 0)
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
# threshold
th, threshed = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
|
#Output : 23
|
White and black dot detection
# threshold
th, threshed = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
# findcontours
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
|
#Output : 23
|
White and black dot detection
# findcontours
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
# filter by area
s1 = 3
s2 = 20
xcnts = []
for cnt in cnts:
if s1 < cv2.contourArea(cnt) < s2:
xcnts.append(cnt)
|
#Output : 23
|
White and black dot detection
# filter by area
s1 = 3
s2 = 20
xcnts = []
for cnt in cnts:
if s1 < cv2.contourArea(cnt) < s2:
xcnts.append(cnt)
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
print("\nDots number: {}".format(len(xcnts)))
|
#Output : 23
|
White and black dot detection
print("\nDots number: {}".format(len(xcnts)))
#Output : 23
[END]
|
White and black dot detection
|
https://www.geeksforgeeks.org/white-and-black-dot-detection-using-opencv-python/
|
import cv2
path = "white dot.png"
# reading the image in grayscale mode
gray = cv2.imread(path, 0)
# threshold
th, threshed = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# findcontours
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
# filter by area
s1 = 3
s2 = 20
xcnts = []
for cnt in cnts:
if s1 < cv2.contourArea(cnt) < s2:
xcnts.append(cnt)
# printing output
print("\nDots number: {}".format(len(xcnts)))
|
#Output : 23
|
White and black dot detection
import cv2
path = "white dot.png"
# reading the image in grayscale mode
gray = cv2.imread(path, 0)
# threshold
th, threshed = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# findcontours
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
# filter by area
s1 = 3
s2 = 20
xcnts = []
for cnt in cnts:
if s1 < cv2.contourArea(cnt) < s2:
xcnts.append(cnt)
# printing output
print("\nDots number: {}".format(len(xcnts)))
#Output : 23
[END]
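A quick way to sanity-check the count above is to draw the accepted contours back onto the image. A minimal sketch, assuming the gray and xcnts variables from the snippet above ("dots_marked.jpg" is a hypothetical output path):
import cv2
# draw the accepted contours on a colour copy of the grayscale image
vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
cv2.drawContours(vis, xcnts, -1, (0, 0, 255), 1)
cv2.imwrite("dots_marked.jpg", vis)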
|
OpenCV BGR color palette with trackbars
|
https://www.geeksforgeeks.org/python-opencv-bgr-color-palette-with-trackbars/
|
# Python program to create BGR color
# palette with trackbars
# importing libraries
import cv2
import numpy as np
# empty function called when
# any trackbar moves
def emptyFunction(pos):
    # OpenCV passes the new trackbar position to this callback
    pass
def main():
# black image with 3 color channels
image = np.zeros((512, 512, 3), np.uint8)
windowName = "Open CV Color Palette"
# window name
cv2.namedWindow(windowName)
# three trackbars, each with a name
# and a min/max value range
cv2.createTrackbar("Blue", windowName, 0, 255, emptyFunction)
cv2.createTrackbar("Green", windowName, 0, 255, emptyFunction)
cv2.createTrackbar("Red", windowName, 0, 255, emptyFunction)
# Keep the window open
# until the ESC key is pressed
while True:
cv2.imshow(windowName, image)
if cv2.waitKey(1) == 27:
break
# values of blue, green, red
blue = cv2.getTrackbarPos("Blue", windowName)
green = cv2.getTrackbarPos("Green", windowName)
red = cv2.getTrackbarPos("Red", windowName)
# fill the image with the selected
# blue, green and red values
image[:] = [blue, green, red]
print(blue, green, red)
cv2.destroyAllWindows()
# Calling main()
if __name__ == "__main__":
main()
|
#Output : Libraries needed:
|
OpenCV BGR color palette with trackbars
# Python program to create BGR color
# palette with trackbars
# importing libraries
import cv2
import numpy as np
# empty function called when
# any trackbar moves
def emptyFunction(pos):
    # OpenCV passes the new trackbar position to this callback
    pass
def main():
# black image with 3 color channels
image = np.zeros((512, 512, 3), np.uint8)
windowName = "Open CV Color Palette"
# window name
cv2.namedWindow(windowName)
# three trackbars, each with a name
# and a min/max value range
cv2.createTrackbar("Blue", windowName, 0, 255, emptyFunction)
cv2.createTrackbar("Green", windowName, 0, 255, emptyFunction)
cv2.createTrackbar("Red", windowName, 0, 255, emptyFunction)
# Keep the window open
# until the ESC key is pressed
while True:
cv2.imshow(windowName, image)
if cv2.waitKey(1) == 27:
break
# values of blue, green, red
blue = cv2.getTrackbarPos("Blue", windowName)
green = cv2.getTrackbarPos("Green", windowName)
red = cv2.getTrackbarPos("Red", windowName)
# fill the image with the selected
# blue, green and red values
image[:] = [blue, green, red]
print(blue, green, red)
cv2.destroyAllWindows()
# Calling main()
if __name__ == "__main__":
main()
#Output : Libraries needed:
[END]
|
Draw rectangular shape and extract objects
|
https://www.geeksforgeeks.org/python-draw-rectangular-shape-and-extract-objects-using-opencv/
|
# Python program to extract rectangular
# Shape using OpenCV in Python3
import cv2
import numpy as np
drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle.
ix, iy = -1, -1
# mouse callback function
def draw_circle(event, x, y, flags, param):
global ix, iy, drawing, mode
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix, iy = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 3)
                a = x
                b = y
                # 'or', not '|': '|' binds tighter than '!=',
                # so 'a != x | b != y' parses as 'a != (x | b) != y'
                if a != x or b != y:
                    cv2.rectangle(img, (ix, iy), (x, y), (0, 0, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 2)
else:
cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow("image")
cv2.setMouseCallback("image", draw_circle)
while 1:
cv2.imshow("image", img)
k = cv2.waitKey(1) & 0xFF
if k == ord("m"):
mode = not mode
elif k == 27:
break
cv2.destroyAllWindows()
|
#Output : python capture_events.py --image demo.jpg
|
Draw rectangular shape and extract objects
# Python program to extract rectangular
# Shape using OpenCV in Python3
import cv2
import numpy as np
drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle.
ix, iy = -1, -1
# mouse callback function
def draw_circle(event, x, y, flags, param):
global ix, iy, drawing, mode
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix, iy = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 3)
                a = x
                b = y
                # 'or', not '|': '|' binds tighter than '!=',
                # so 'a != x | b != y' parses as 'a != (x | b) != y'
                if a != x or b != y:
                    cv2.rectangle(img, (ix, iy), (x, y), (0, 0, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 2)
else:
cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow("image")
cv2.setMouseCallback("image", draw_circle)
while 1:
cv2.imshow("image", img)
k = cv2.waitKey(1) & 0xFF
if k == ord("m"):
mode = not mode
elif k == 27:
break
cv2.destroyAllWindows()
#Output : python capture_events.py --image demo.jpg
[END]
|
Draw rectangular shape and extract objects
|
https://www.geeksforgeeks.org/python-draw-rectangular-shape-and-extract-objects-using-opencv/
|
# Write Python code here
# import the necessary packages
import cv2
import argparse
# now let's initialize the list of reference point
ref_point = []
crop = False
def shape_selection(event, x, y, flags, param):
# grab references to the global variables
global ref_point, crop
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being performed
if event == cv2.EVENT_LBUTTONDOWN:
ref_point = [(x, y)]
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# the cropping operation is finished
ref_point.append((x, y))
# draw a rectangle around the region of interest
cv2.rectangle(image, ref_point[0], ref_point[1], (0, 255, 0), 2)
cv2.imshow("image", image)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image, clone it, and setup the mouse callback function
image = cv2.imread(args["image"])
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", shape_selection)
# keep looping until the 'c' key is pressed
while True:
# display the image and wait for a keypress
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
# press 'r' to reset the window
if key == ord("r"):
image = clone.copy()
# if the 'c' key is pressed, break from the loop
elif key == ord("c"):
break
if len(ref_point) == 2:
crop_img = clone[
ref_point[0][1] : ref_point[1][1], ref_point[0][0] : ref_point[1][0]
]
cv2.imshow("crop_img", crop_img)
cv2.waitKey(0)
# close all open windows
cv2.destroyAllWindows()
|
#Output : python capture_events.py --image demo.jpg
|
Draw rectangular shape and extract objects
# Write Python code here
# import the necessary packages
import cv2
import argparse
# now let's initialize the list of reference point
ref_point = []
crop = False
def shape_selection(event, x, y, flags, param):
# grab references to the global variables
global ref_point, crop
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being performed
if event == cv2.EVENT_LBUTTONDOWN:
ref_point = [(x, y)]
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# the cropping operation is finished
ref_point.append((x, y))
# draw a rectangle around the region of interest
cv2.rectangle(image, ref_point[0], ref_point[1], (0, 255, 0), 2)
cv2.imshow("image", image)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image, clone it, and setup the mouse callback function
image = cv2.imread(args["image"])
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", shape_selection)
# keep looping until the 'c' key is pressed
while True:
# display the image and wait for a keypress
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
# press 'r' to reset the window
if key == ord("r"):
image = clone.copy()
# if the 'c' key is pressed, break from the loop
elif key == ord("c"):
break
if len(ref_point) == 2:
crop_img = clone[
ref_point[0][1] : ref_point[1][1], ref_point[0][0] : ref_point[1][0]
]
cv2.imshow("crop_img", crop_img)
cv2.waitKey(0)
# close all open windows
cv2.destroyAllWindows()
#Output : python capture_events.py --image demo.jpg
[END]
|
Drawing with Mouse on Images using Python-OpenCV
|
https://www.geeksforgeeks.org/drawing-with-mouse-on-images-using-python-opencv/
|
import cv2
img = cv2.imread("flower.jpg")
def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print("hello")
cv2.circle(img, (x, y), 100, (0, 255, 0), -1)
cv2.namedWindow(winname="Title of Popup Window")
cv2.setMouseCallback("Title of Popup Window", draw_circle)
while True:
cv2.imshow("Title of Popup Window", img)
if cv2.waitKey(10) & 0xFF == 27:
break
cv2.destroyAllWindows()
|
#Output : cv2.namedWindow("Title of Popup Window")
|
Drawing with Mouse on Images using Python-OpenCV
import cv2
img = cv2.imread("flower.jpg")
def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print("hello")
cv2.circle(img, (x, y), 100, (0, 255, 0), -1)
cv2.namedWindow(winname="Title of Popup Window")
cv2.setMouseCallback("Title of Popup Window", draw_circle)
while True:
cv2.imshow("Title of Popup Window", img)
if cv2.waitKey(10) & 0xFF == 27:
break
cv2.destroyAllWindows()
#Output : cv2.namedWindow("Title of Popup Window")
[END]
|
Drawing with Mouse on Images using Python-OpenCV
|
https://www.geeksforgeeks.org/drawing-with-mouse-on-images-using-python-opencv/
|
import cv2
img = cv2.imread("flower.jpg")
# variables
ix = -1
iy = -1
drawing = False
def draw_rectangle_with_drag(event, x, y, flags, param):
global ix, iy, drawing, img
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix = x
iy = y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
cv2.rectangle(
img, pt1=(ix, iy), pt2=(x, y), color=(0, 255, 255), thickness=-1
)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
cv2.rectangle(img, pt1=(ix, iy), pt2=(x, y), color=(0, 255, 255), thickness=-1)
cv2.namedWindow(winname="Title of Popup Window")
cv2.setMouseCallback("Title of Popup Window", draw_rectangle_with_drag)
while True:
cv2.imshow("Title of Popup Window", img)
if cv2.waitKey(10) == 27:
break
cv2.destroyAllWindows()
|
#Output : cv2.namedWindow("Title of Popup Window")
|
Drawing with Mouse on Images using Python-OpenCV
import cv2
img = cv2.imread("flower.jpg")
# variables
ix = -1
iy = -1
drawing = False
def draw_rectangle_with_drag(event, x, y, flags, param):
global ix, iy, drawing, img
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix = x
iy = y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
cv2.rectangle(
img, pt1=(ix, iy), pt2=(x, y), color=(0, 255, 255), thickness=-1
)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
cv2.rectangle(img, pt1=(ix, iy), pt2=(x, y), color=(0, 255, 255), thickness=-1)
cv2.namedWindow(winname="Title of Popup Window")
cv2.setMouseCallback("Title of Popup Window", draw_rectangle_with_drag)
while True:
cv2.imshow("Title of Popup Window", img)
if cv2.waitKey(10) == 27:
break
cv2.destroyAllWindows()
#Output : cv2.namedWindow("Title of Popup Window")
[END]
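Because the callback above draws straight onto img, dragging leaves a trail of stacked rectangles. A common fix, sketched here under the assumption that a pristine copy of the image is kept in clone, is to redraw each intermediate rectangle on a fresh copy:
import cv2
img = cv2.imread("flower.jpg")
clone = img.copy()  # pristine copy to redraw from
ix, iy = -1, -1
drawing = False
def draw_rectangle_with_drag(event, x, y, flags, param):
    global ix, iy, drawing, img
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        # redraw on a fresh copy so only the current rectangle shows
        img = clone.copy()
        cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 255), 2)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 255), 2)
cv2.namedWindow("Title of Popup Window")
cv2.setMouseCallback("Title of Popup Window", draw_rectangle_with_drag)
while True:
    cv2.imshow("Title of Popup Window", img)
    if cv2.waitKey(10) == 27:
        break
cv2.destroyAllWindows()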
|
Text Detection and Extraction using OpenCV and OCR
|
https://www.geeksforgeeks.org/text-detection-and-extraction-using-opencv-and-ocr/
|
# Import required packages
import cv2
import pytesseract
# Mention the installed location of Tesseract-OCR in your system
pytesseract.pytesseract.tesseract_cmd = "System_path_to_tesseract.exe"
# Read image from which text needs to be extracted
img = cv2.imread("sample.jpg")
# Preprocessing the image starts
# Convert the image to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Performing OTSU threshold
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# Specify structure shape and kernel size.
# Kernel size increases or decreases the area
# of the rectangle to be detected.
# A smaller value like (10, 10) will detect
# each word instead of a sentence.
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))
# Applying dilation on the threshold image
dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)
# Finding contours
contours, hierarchy = cv2.findContours(
dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
)
# Creating a copy of image
im2 = img.copy()
# A text file is created and flushed
file = open("recognized.txt", "w+")
file.write("")
file.close()
# Looping through the identified contours
# Then rectangular part is cropped and passed on
# to pytesseract for extracting text from it
# Extracted text is then written into the text file
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
# Drawing a rectangle on copied image
rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Cropping the text block for giving input to OCR
cropped = im2[y : y + h, x : x + w]
# Open the file in append mode
file = open("recognized.txt", "a")
# Apply OCR on the cropped image
text = pytesseract.image_to_string(cropped)
# Appending the text into file
file.write(text)
file.write("\n")
# Close the file
    file.close()
|
#Output : pip install opencv-python
|
Text Detection and Extraction using OpenCV and OCR
# Import required packages
import cv2
import pytesseract
# Mention the installed location of Tesseract-OCR in your system
pytesseract.pytesseract.tesseract_cmd = "System_path_to_tesseract.exe"
# Read image from which text needs to be extracted
img = cv2.imread("sample.jpg")
# Preprocessing the image starts
# Convert the image to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Performing OTSU threshold
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# Specify structure shape and kernel size.
# Kernel size increases or decreases the area
# of the rectangle to be detected.
# A smaller value like (10, 10) will detect
# each word instead of a sentence.
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))
# Applying dilation on the threshold image
dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)
# Finding contours
contours, hierarchy = cv2.findContours(
dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
)
# Creating a copy of image
im2 = img.copy()
# A text file is created and flushed
file = open("recognized.txt", "w+")
file.write("")
file.close()
# Looping through the identified contours
# Then rectangular part is cropped and passed on
# to pytesseract for extracting text from it
# Extracted text is then written into the text file
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
# Drawing a rectangle on copied image
rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Cropping the text block for giving input to OCR
cropped = im2[y : y + h, x : x + w]
# Open the file in append mode
file = open("recognized.txt", "a")
# Apply OCR on the cropped image
text = pytesseract.image_to_string(cropped)
# Appending the text into file
file.write(text)
file.write("\n")
# Close the file
    file.close()
#Output : pip install opencv-python
[END]
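As the kernel-size comment in the snippet notes, a smaller structuring element keeps neighbouring words from merging into one contour, so each word is detected separately. A sketch of just the changed step, assuming the same thresh1 image from above:
# a smaller kernel detects individual words instead of whole lines
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)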
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
"""
The ResizeUtils provides resizing function
to keep the aspect ratio intact
Credits: AndyP at StackOverflow"""
class ResizeUtils:
# Given a target height, adjust the image
# by calculating the width and resize
def rescale_by_height(self, image, target_height, method=cv2.INTER_LANCZOS4):
# Rescale `image` to `target_height`
# (preserving aspect ratio)
w = int(round(target_height * image.shape[1] / image.shape[0]))
return cv2.resize(image, (w, target_height), interpolation=method)
# Given a target width, adjust the image
# by calculating the height and resize
def rescale_by_width(self, image, target_width, method=cv2.INTER_LANCZOS4):
# Rescale `image` to `target_width`
# (preserving aspect ratio)
h = int(round(target_width * image.shape[0] / image.shape[1]))
return cv2.resize(image, (target_width, h), interpolation=method)
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
"""
The ResizeUtils provides resizing function
to keep the aspect ratio intact
Credits: AndyP at StackOverflow"""
class ResizeUtils:
# Given a target height, adjust the image
# by calculating the width and resize
def rescale_by_height(self, image, target_height, method=cv2.INTER_LANCZOS4):
# Rescale `image` to `target_height`
# (preserving aspect ratio)
w = int(round(target_height * image.shape[1] / image.shape[0]))
return cv2.resize(image, (w, target_height), interpolation=method)
# Given a target width, adjust the image
# by calculating the height and resize
def rescale_by_width(self, image, target_width, method=cv2.INTER_LANCZOS4):
# Rescale `image` to `target_width`
# (preserving aspect ratio)
h = int(round(target_width * image.shape[0] / image.shape[1]))
return cv2.resize(image, (target_width, h), interpolation=method)
Input: Footage.mp4
Output:
[END]
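A quick usage sketch for ResizeUtils ("photo.jpg" is a hypothetical input file):
import cv2
image = cv2.imread("photo.jpg")
resized = ResizeUtils().rescale_by_height(image, 500)
print(resized.shape)  # height is 500, width scaled to preserve aspect ratio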
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# The FramesGenerator extracts image
# frames from the given video file
# The image frames are resized for
# face_recognition / dlib processing
class FramesGenerator:
def __init__(self, VideoFootageSource):
self.VideoFootageSource = VideoFootageSource
# Resize the given input to fit in a specified
# size for face embeddings extraction
def AutoResize(self, frame):
resizeUtils = ResizeUtils()
height, width, _ = frame.shape
        if height > 500:
            frame = resizeUtils.rescale_by_height(frame, 500)
            # recurse on the resized frame and return its result
            return self.AutoResize(frame)
        if width > 700:
            frame = resizeUtils.rescale_by_width(frame, 700)
            return self.AutoResize(frame)
        return frame
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# The FramesGenerator extracts image
# frames from the given video file
# The image frames are resized for
# face_recognition / dlib processing
class FramesGenerator:
def __init__(self, VideoFootageSource):
self.VideoFootageSource = VideoFootageSource
# Resize the given input to fit in a specified
# size for face embeddings extraction
def AutoResize(self, frame):
resizeUtils = ResizeUtils()
height, width, _ = frame.shape
        if height > 500:
            frame = resizeUtils.rescale_by_height(frame, 500)
            # recurse on the resized frame and return its result
            return self.AutoResize(frame)
        if width > 700:
            frame = resizeUtils.rescale_by_width(frame, 700)
            return self.AutoResize(frame)
        return frame
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# Extract 1 frame from each second from video footage
# and save the frames to a specific folder
def GenerateFrames(self, OutputDirectoryName):
cap = cv2.VideoCapture(self.VideoFootageSource)
_, frame = cap.read()
fps = cap.get(cv2.CAP_PROP_FPS)
TotalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print("[INFO] Total Frames ", TotalFrames, " @ ", fps, " fps")
print("[INFO] Calculating number of frames per second")
CurrentDirectory = os.path.curdir
OutputDirectoryPath = os.path.join(CurrentDirectory, OutputDirectoryName)
if os.path.exists(OutputDirectoryPath):
shutil.rmtree(OutputDirectoryPath)
time.sleep(0.5)
os.mkdir(OutputDirectoryPath)
CurrentFrame = 1
fpsCounter = 0
FrameWrittenCount = 1
while CurrentFrame < TotalFrames:
_, frame = cap.read()
            if frame is None:
                # stop once no more frames can be read ('continue' would
                # spin forever, since the frame counter never advances)
                break
if fpsCounter > fps:
fpsCounter = 0
frame = self.AutoResize(frame)
filename = "frame_" + str(FrameWrittenCount) + ".jpg"
cv2.imwrite(os.path.join(OutputDirectoryPath, filename), frame)
FrameWrittenCount += 1
fpsCounter += 1
CurrentFrame += 1
print("[INFO] Frames extracted")
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# Extract 1 frame from each second from video footage
# and save the frames to a specific folder
def GenerateFrames(self, OutputDirectoryName):
cap = cv2.VideoCapture(self.VideoFootageSource)
_, frame = cap.read()
fps = cap.get(cv2.CAP_PROP_FPS)
TotalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print("[INFO] Total Frames ", TotalFrames, " @ ", fps, " fps")
print("[INFO] Calculating number of frames per second")
CurrentDirectory = os.path.curdir
OutputDirectoryPath = os.path.join(CurrentDirectory, OutputDirectoryName)
if os.path.exists(OutputDirectoryPath):
shutil.rmtree(OutputDirectoryPath)
time.sleep(0.5)
os.mkdir(OutputDirectoryPath)
CurrentFrame = 1
fpsCounter = 0
FrameWrittenCount = 1
while CurrentFrame < TotalFrames:
_, frame = cap.read()
            if frame is None:
                # stop once no more frames can be read ('continue' would
                # spin forever, since the frame counter never advances)
                break
if fpsCounter > fps:
fpsCounter = 0
frame = self.AutoResize(frame)
filename = "frame_" + str(FrameWrittenCount) + ".jpg"
cv2.imwrite(os.path.join(OutputDirectoryPath, filename), frame)
FrameWrittenCount += 1
fpsCounter += 1
CurrentFrame += 1
print("[INFO] Frames extracted")
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# Following are nodes for pipeline construction.
# They create and asynchronously execute threads
# for reading images, extracting facial features and
# storing them independently in different threads
# Keep emitting the filenames into
# the pipeline for processing
class FramesProvider(Node):
def setup(self, sourcePath):
self.sourcePath = sourcePath
self.filesList = []
for item in os.listdir(self.sourcePath):
_, fileExt = os.path.splitext(item)
if fileExt == ".jpg":
self.filesList.append(os.path.join(item))
self.TotalFilesCount = self.size = len(self.filesList)
self.ProcessedFilesCount = self.pos = 0
# Emit each filename in the pipeline for parallel processing
def run(self, data):
if self.ProcessedFilesCount < self.TotalFilesCount:
self.emit(
{
"id": self.ProcessedFilesCount,
"imagePath": os.path.join(
self.sourcePath, self.filesList[self.ProcessedFilesCount]
),
}
)
self.ProcessedFilesCount += 1
self.pos = self.ProcessedFilesCount
else:
self.close()
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# Following are nodes for pipeline construction.
# They create and asynchronously execute threads
# for reading images, extracting facial features and
# storing them independently in different threads
# Keep emitting the filenames into
# the pipeline for processing
class FramesProvider(Node):
def setup(self, sourcePath):
self.sourcePath = sourcePath
self.filesList = []
for item in os.listdir(self.sourcePath):
_, fileExt = os.path.splitext(item)
if fileExt == ".jpg":
self.filesList.append(os.path.join(item))
self.TotalFilesCount = self.size = len(self.filesList)
self.ProcessedFilesCount = self.pos = 0
# Emit each filename in the pipeline for parallel processing
def run(self, data):
if self.ProcessedFilesCount < self.TotalFilesCount:
self.emit(
{
"id": self.ProcessedFilesCount,
"imagePath": os.path.join(
self.sourcePath, self.filesList[self.ProcessedFilesCount]
),
}
)
self.ProcessedFilesCount += 1
self.pos = self.ProcessedFilesCount
else:
self.close()
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# Encode the face embedding, reference path
# and location and emit to pipeline
class FaceEncoder(Node):
def setup(self, detection_method="cnn"):
self.detection_method = detection_method
# detection_method can be cnn or hog
def run(self, data):
id = data["id"]
imagePath = data["imagePath"]
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(rgb, model=self.detection_method)
encodings = face_recognition.face_encodings(rgb, boxes)
d = [
{"imagePath": imagePath, "loc": box, "encoding": enc}
for (box, enc) in zip(boxes, encodings)
]
self.emit({"id": id, "encodings": d})
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# Encode the face embedding, reference path
# and location and emit to pipeline
class FaceEncoder(Node):
def setup(self, detection_method="cnn"):
self.detection_method = detection_method
# detection_method can be cnn or hog
def run(self, data):
id = data["id"]
imagePath = data["imagePath"]
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(rgb, model=self.detection_method)
encodings = face_recognition.face_encodings(rgb, boxes)
d = [
{"imagePath": imagePath, "loc": box, "encoding": enc}
for (box, enc) in zip(boxes, encodings)
]
self.emit({"id": id, "encodings": d})
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# Receive the face embeddings for clustering and
# id for naming the distinct filename
class DatastoreManager(Node):
def setup(self, encodingsOutputPath):
self.encodingsOutputPath = encodingsOutputPath
def run(self, data):
encodings = data["encodings"]
id = data["id"]
with open(
os.path.join(self.encodingsOutputPath, "encodings_" + str(id) + ".pickle"),
"wb",
) as f:
f.write(pickle.dumps(encodings))
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# Receive the face embeddings for clustering and
# id for naming the distinct filename
class DatastoreManager(Node):
def setup(self, encodingsOutputPath):
self.encodingsOutputPath = encodingsOutputPath
def run(self, data):
encodings = data["encodings"]
id = data["id"]
with open(
os.path.join(self.encodingsOutputPath, "encodings_" + str(id) + ".pickle"),
"wb",
) as f:
f.write(pickle.dumps(encodings))
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# PicklesListCollator takes multiple pickle
# files as input and merges them together
# It is made specifically to support use-case
# of merging distinct pickle files into one
class PicklesListCollator:
def __init__(self, picklesInputDirectory):
self.picklesInputDirectory = picklesInputDirectory
    # Here we list all the pickle
    # files generated by multiple threads,
    # read each list of results, append it to a
    # common list, and create another pickle
    # with the combined list as content
def GeneratePickle(self, outputFilepath):
datastore = []
ListOfPickleFiles = []
for item in os.listdir(self.picklesInputDirectory):
_, fileExt = os.path.splitext(item)
if fileExt == ".pickle":
ListOfPickleFiles.append(os.path.join(self.picklesInputDirectory, item))
for picklePath in ListOfPickleFiles:
with open(picklePath, "rb") as f:
data = pickle.loads(f.read())
datastore.extend(data)
with open(outputFilepath, "wb") as f:
f.write(pickle.dumps(datastore))
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# PicklesListCollator takes multiple pickle
# files as input and merges them together
# It is made specifically to support use-case
# of merging distinct pickle files into one
class PicklesListCollator:
def __init__(self, picklesInputDirectory):
self.picklesInputDirectory = picklesInputDirectory
    # Here we list all the pickle
    # files generated by multiple threads,
    # read each list of results, append it to a
    # common list, and create another pickle
    # with the combined list as content
def GeneratePickle(self, outputFilepath):
datastore = []
ListOfPickleFiles = []
for item in os.listdir(self.picklesInputDirectory):
_, fileExt = os.path.splitext(item)
if fileExt == ".pickle":
ListOfPickleFiles.append(os.path.join(self.picklesInputDirectory, item))
for picklePath in ListOfPickleFiles:
with open(picklePath, "rb") as f:
data = pickle.loads(f.read())
datastore.extend(data)
with open(outputFilepath, "wb") as f:
f.write(pickle.dumps(datastore))
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# Face clustering functionality
class FaceClusterUtility:
def __init__(self, EncodingFilePath):
self.EncodingFilePath = EncodingFilePath
    # Credits: Adrian's pyimagesearch for the clustering code
# Here we are using the sklearn.DBSCAN functionality
# cluster all the facial embeddings to get clusters
# representing distinct people
def Cluster(self):
InputEncodingFile = self.EncodingFilePath
if not (
os.path.isfile(InputEncodingFile) and os.access(InputEncodingFile, os.R_OK)
):
print(
"The input encoding file, "
+ str(InputEncodingFile)
+ " does not exists or unreadable"
)
exit()
NumberOfParallelJobs = -1
# load the serialized face encodings
# + bounding box locations from disk,
        # then extract the set of encodings
        # so we can cluster them
print("[INFO] Loading encodings")
data = pickle.loads(open(InputEncodingFile, "rb").read())
data = np.array(data)
encodings = [d["encoding"] for d in data]
# cluster the embeddings
print("[INFO] Clustering")
clt = DBSCAN(eps=0.5, metric="euclidean", n_jobs=NumberOfParallelJobs)
clt.fit(encodings)
# determine the total number of
# unique faces found in the dataset
labelIDs = np.unique(clt.labels_)
numUniqueFaces = len(np.where(labelIDs > -1)[0])
print("[INFO] # unique faces: {}".format(numUniqueFaces))
return clt.labels_
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# Face clustering functionality
class FaceClusterUtility:
def __init__(self, EncodingFilePath):
self.EncodingFilePath = EncodingFilePath
    # Credits: Adrian's pyimagesearch for the clustering code
# Here we are using the sklearn.DBSCAN functionality
# cluster all the facial embeddings to get clusters
# representing distinct people
def Cluster(self):
InputEncodingFile = self.EncodingFilePath
if not (
os.path.isfile(InputEncodingFile) and os.access(InputEncodingFile, os.R_OK)
):
print(
"The input encoding file, "
+ str(InputEncodingFile)
+ " does not exists or unreadable"
)
exit()
NumberOfParallelJobs = -1
# load the serialized face encodings
# + bounding box locations from disk,
        # then extract the set of encodings
        # so we can cluster them
print("[INFO] Loading encodings")
data = pickle.loads(open(InputEncodingFile, "rb").read())
data = np.array(data)
encodings = [d["encoding"] for d in data]
# cluster the embeddings
print("[INFO] Clustering")
clt = DBSCAN(eps=0.5, metric="euclidean", n_jobs=NumberOfParallelJobs)
clt.fit(encodings)
# determine the total number of
# unique faces found in the dataset
labelIDs = np.unique(clt.labels_)
numUniqueFaces = len(np.where(labelIDs > -1)[0])
print("[INFO] # unique faces: {}".format(numUniqueFaces))
return clt.labels_
Input: Footage.mp4
Output:
[END]
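The eps=0.5 above is a common starting point for 128-d dlib face embeddings, but the right value varies per dataset. One usual heuristic, sketched below assuming the encodings list built inside Cluster, is to sort each embedding's distance to its nearest neighbour and place eps near the knee of that curve:
import numpy as np
from sklearn.neighbors import NearestNeighbors
# distance from each embedding to its nearest neighbour
nn = NearestNeighbors(n_neighbors=2).fit(encodings)
distances, _ = nn.kneighbors(encodings)
print(np.sort(distances[:, 1]))  # choose eps near the "knee" of this curve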
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# Inherit class tqdm for visualization of progress
class TqdmUpdate(tqdm):
# This function will be passed as progress
# callback function. Setting the predefined
# variables for auto-updates in visualization
def update(self, done, total_size=None):
if total_size is not None:
self.total = total_size
self.n = done
super().refresh()
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# Inherit class tqdm for visualization of progress
class TqdmUpdate(tqdm):
# This function will be passed as progress
# callback function. Setting the predefined
# variables for auto-updates in visualization
def update(self, done, total_size=None):
if total_size is not None:
self.total = total_size
self.n = done
super().refresh()
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
class FaceImageGenerator:
def __init__(self, EncodingFilePath):
self.EncodingFilePath = EncodingFilePath
    # Here we are creating montages from the
    # first 25 faces of each distinct person.
# We will also generate images for all
# the distinct faces by using the labels
# from clusters and image url from the
# encodings pickle file.
# The face bounding box is increased a
# little more for training purposes and
# we also created the exact annotation for
# each face image (similar to darknet YOLO)
# to easily adapt the annotation for future
# use in supervised training
def GenerateImages(
self, labels, OutputFolderName="ClusteredFaces", MontageOutputFolder="Montage"
):
output_directory = os.getcwd()
OutputFolder = os.path.join(output_directory, OutputFolderName)
if not os.path.exists(OutputFolder):
os.makedirs(OutputFolder)
else:
shutil.rmtree(OutputFolder)
time.sleep(0.5)
os.makedirs(OutputFolder)
MontageFolderPath = os.path.join(OutputFolder, MontageOutputFolder)
os.makedirs(MontageFolderPath)
data = pickle.loads(open(self.EncodingFilePath, "rb").read())
data = np.array(data)
labelIDs = np.unique(labels)
# loop over the unique face integers
for labelID in labelIDs:
# find all indexes into the `data` array
# that belong to the current label ID, then
# randomly sample a maximum of 25 indexes
# from the set
print("[INFO] faces for face ID: {}".format(labelID))
FaceFolder = os.path.join(OutputFolder, "Face_" + str(labelID))
os.makedirs(FaceFolder)
idxs = np.where(labels == labelID)[0]
# initialize the list of faces to
# include in the montage
portraits = []
# loop over the sampled indexes
counter = 1
for i in idxs:
# load the input image and extract the face ROI
image = cv2.imread(data[i]["imagePath"])
(o_top, o_right, o_bottom, o_left) = data[i]["loc"]
height, width, channel = image.shape
widthMargin = 100
heightMargin = 150
top = o_top - heightMargin
if top < 0:
top = 0
bottom = o_bottom + heightMargin
if bottom > height:
bottom = height
left = o_left - widthMargin
if left < 0:
left = 0
right = o_right + widthMargin
if right > width:
right = width
portrait = image[top:bottom, left:right]
if len(portraits) < 25:
portraits.append(portrait)
resizeUtils = ResizeUtils()
portrait = resizeUtils.rescale_by_width(portrait, 400)
FaceFilename = "face_" + str(counter) + ".jpg"
FaceImagePath = os.path.join(FaceFolder, FaceFilename)
cv2.imwrite(FaceImagePath, portrait)
widthMargin = 20
heightMargin = 20
top = o_top - heightMargin
if top < 0:
top = 0
bottom = o_bottom + heightMargin
if bottom > height:
bottom = height
left = o_left - widthMargin
if left < 0:
left = 0
right = o_right + widthMargin
if right > width:
right = width
AnnotationFilename = "face_" + str(counter) + ".txt"
AnnotationFilePath = os.path.join(FaceFolder, AnnotationFilename)
f = open(AnnotationFilePath, "w")
f.write(
str(labelID)
+ " "
+ str(left)
+ " "
+ str(top)
+ " "
+ str(right)
+ " "
+ str(bottom)
+ "\n"
)
f.close()
counter += 1
montage = build_montages(portraits, (96, 120), (5, 5))[0]
MontageFilenamePath = os.path.join(
MontageFolderPath, "Face_" + str(labelID) + ".jpg"
)
cv2.imwrite(MontageFilenamePath, montage)
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
class FaceImageGenerator:
def __init__(self, EncodingFilePath):
self.EncodingFilePath = EncodingFilePath
    # Here we are creating montages from the
    # first 25 faces of each distinct person.
# We will also generate images for all
# the distinct faces by using the labels
# from clusters and image url from the
# encodings pickle file.
# The face bounding box is increased a
# little more for training purposes and
# we also created the exact annotation for
# each face image (similar to darknet YOLO)
# to easily adapt the annotation for future
# use in supervised training
def GenerateImages(
self, labels, OutputFolderName="ClusteredFaces", MontageOutputFolder="Montage"
):
output_directory = os.getcwd()
OutputFolder = os.path.join(output_directory, OutputFolderName)
if not os.path.exists(OutputFolder):
os.makedirs(OutputFolder)
else:
shutil.rmtree(OutputFolder)
time.sleep(0.5)
os.makedirs(OutputFolder)
MontageFolderPath = os.path.join(OutputFolder, MontageOutputFolder)
os.makedirs(MontageFolderPath)
data = pickle.loads(open(self.EncodingFilePath, "rb").read())
data = np.array(data)
labelIDs = np.unique(labels)
# loop over the unique face integers
for labelID in labelIDs:
# find all indexes into the `data` array
# that belong to the current label ID, then
# randomly sample a maximum of 25 indexes
# from the set
print("[INFO] faces for face ID: {}".format(labelID))
FaceFolder = os.path.join(OutputFolder, "Face_" + str(labelID))
os.makedirs(FaceFolder)
idxs = np.where(labels == labelID)[0]
# initialize the list of faces to
# include in the montage
portraits = []
# loop over the sampled indexes
counter = 1
for i in idxs:
# load the input image and extract the face ROI
image = cv2.imread(data[i]["imagePath"])
(o_top, o_right, o_bottom, o_left) = data[i]["loc"]
height, width, channel = image.shape
widthMargin = 100
heightMargin = 150
top = o_top - heightMargin
if top < 0:
top = 0
bottom = o_bottom + heightMargin
if bottom > height:
bottom = height
left = o_left - widthMargin
if left < 0:
left = 0
right = o_right + widthMargin
if right > width:
right = width
portrait = image[top:bottom, left:right]
if len(portraits) < 25:
portraits.append(portrait)
resizeUtils = ResizeUtils()
portrait = resizeUtils.rescale_by_width(portrait, 400)
FaceFilename = "face_" + str(counter) + ".jpg"
FaceImagePath = os.path.join(FaceFolder, FaceFilename)
cv2.imwrite(FaceImagePath, portrait)
widthMargin = 20
heightMargin = 20
top = o_top - heightMargin
if top < 0:
top = 0
bottom = o_bottom + heightMargin
if bottom > height:
bottom = height
left = o_left - widthMargin
if left < 0:
left = 0
right = o_right + widthMargin
if right > width:
right = width
AnnotationFilename = "face_" + str(counter) + ".txt"
AnnotationFilePath = os.path.join(FaceFolder, AnnotationFilename)
f = open(AnnotationFilePath, "w")
f.write(
str(labelID)
+ " "
+ str(left)
+ " "
+ str(top)
+ " "
+ str(right)
+ " "
+ str(bottom)
+ "\n"
)
f.close()
counter += 1
montage = build_montages(portraits, (96, 120), (5, 5))[0]
MontageFilenamePath = os.path.join(
MontageFolderPath, "Face_" + str(labelID) + ".jpg"
)
cv2.imwrite(MontageFilenamePath, montage)
Input: Footage.mp4
Output:
[END]
|
Unsupervised Face Clustering Pipeline
|
https://www.geeksforgeeks.org/ml-unsupervised-face-clustering-pipeline/
|
# importing all classes from above Python file
from FaceClusteringLibrary import *
if __name__ == "__main__":
# Generate the frames from given video footage
framesGenerator = FramesGenerator("Footage.mp4")
framesGenerator.GenerateFrames("Frames")
# Design and run the face clustering pipeline
CurrentPath = os.getcwd()
FramesDirectory = "Frames"
FramesDirectoryPath = os.path.join(CurrentPath, FramesDirectory)
EncodingsFolder = "Encodings"
EncodingsFolderPath = os.path.join(CurrentPath, EncodingsFolder)
if os.path.exists(EncodingsFolderPath):
shutil.rmtree(EncodingsFolderPath, ignore_errors=True)
time.sleep(0.5)
os.makedirs(EncodingsFolderPath)
pipeline = Pipeline(
FramesProvider("Files source", sourcePath=FramesDirectoryPath)
| FaceEncoder("Encode faces")
| DatastoreManager("Store encoding", encodingsOutputPath=EncodingsFolderPath),
n_threads=3,
quiet=True,
)
pbar = TqdmUpdate()
pipeline.run(update_callback=pbar.update)
print()
print("[INFO] Encodings extracted")
# Merge all the encodings pickle files into one
CurrentPath = os.getcwd()
EncodingsInputDirectory = "Encodings"
EncodingsInputDirectoryPath = os.path.join(CurrentPath, EncodingsInputDirectory)
OutputEncodingPickleFilename = "encodings.pickle"
if os.path.exists(OutputEncodingPickleFilename):
os.remove(OutputEncodingPickleFilename)
picklesListCollator = PicklesListCollator(EncodingsInputDirectoryPath)
picklesListCollator.GeneratePickle(OutputEncodingPickleFilename)
# To manage any delay in file writing
time.sleep(0.5)
# Start clustering process and generate
# output images with annotations
EncodingPickleFilePath = "encodings.pickle"
faceClusterUtility = FaceClusterUtility(EncodingPickleFilePath)
faceImageGenerator = FaceImageGenerator(EncodingPickleFilePath)
labelIDs = faceClusterUtility.Cluster()
faceImageGenerator.GenerateImages(labelIDs, "ClusteredFaces", "Montage")
|
Input: Footage.mp4
Output:
|
Unsupervised Face Clustering Pipeline
# importing all classes from above Python file
from FaceClusteringLibrary import *
if __name__ == "__main__":
# Generate the frames from given video footage
framesGenerator = FramesGenerator("Footage.mp4")
framesGenerator.GenerateFrames("Frames")
# Design and run the face clustering pipeline
CurrentPath = os.getcwd()
FramesDirectory = "Frames"
FramesDirectoryPath = os.path.join(CurrentPath, FramesDirectory)
EncodingsFolder = "Encodings"
EncodingsFolderPath = os.path.join(CurrentPath, EncodingsFolder)
if os.path.exists(EncodingsFolderPath):
shutil.rmtree(EncodingsFolderPath, ignore_errors=True)
time.sleep(0.5)
os.makedirs(EncodingsFolderPath)
pipeline = Pipeline(
FramesProvider("Files source", sourcePath=FramesDirectoryPath)
| FaceEncoder("Encode faces")
| DatastoreManager("Store encoding", encodingsOutputPath=EncodingsFolderPath),
n_threads=3,
quiet=True,
)
pbar = TqdmUpdate()
pipeline.run(update_callback=pbar.update)
print()
print("[INFO] Encodings extracted")
# Merge all the encodings pickle files into one
CurrentPath = os.getcwd()
EncodingsInputDirectory = "Encodings"
EncodingsInputDirectoryPath = os.path.join(CurrentPath, EncodingsInputDirectory)
OutputEncodingPickleFilename = "encodings.pickle"
if os.path.exists(OutputEncodingPickleFilename):
os.remove(OutputEncodingPickleFilename)
picklesListCollator = PicklesListCollator(EncodingsInputDirectoryPath)
picklesListCollator.GeneratePickle(OutputEncodingPickleFilename)
# To manage any delay in file writing
time.sleep(0.5)
# Start clustering process and generate
# output images with annotations
EncodingPickleFilePath = "encodings.pickle"
faceClusterUtility = FaceClusterUtility(EncodingPickleFilePath)
faceImageGenerator = FaceImageGenerator(EncodingPickleFilePath)
labelIDs = faceClusterUtility.Cluster()
faceImageGenerator.GenerateImages(labelIDs, "ClusteredFaces", "Montage")
Input: Footage.mp4
Output:
[END]
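The driver above assumes the earlier snippets are collected into FaceClusteringLibrary.py together with the module-level imports they rely on. A sketch of those imports follows; the Node/Pipeline classes are assumed to come from the pyPiper package, so treat the exact package names as assumptions to verify:
# FaceClusteringLibrary.py - module-level imports (sketch)
import os
import shutil
import time
import pickle
import numpy as np
import cv2
import face_recognition
from sklearn.cluster import DBSCAN
from imutils import build_montages
from tqdm import tqdm
from pyPiper import Node, Pipeline  # assumption: the pipeline framework used here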
|
Pedestrian Detection using OpenCV-Python
|
https://www.geeksforgeeks.org/pedestrian-detection-using-opencv-python/
|
import cv2
import imutils
# Initializing the HOG person
# detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# Reading the Image
image = cv2.imread("img.png")
# Resizing the Image
image = imutils.resize(image, width=min(400, image.shape[1]))
# Detecting all the regions in the
# image that have pedestrians in them
(regions, _) = hog.detectMultiScale(image, winStride=(4, 4), padding=(4, 4), scale=1.05)
# Drawing the regions in the Image
for x, y, w, h in regions:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Showing the output Image
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
#Output :
|
Pedestrian Detection using OpenCV-Python
import cv2
import imutils
# Initializing the HOG person
# detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# Reading the Image
image = cv2.imread("img.png")
# Resizing the Image
image = imutils.resize(image, width=min(400, image.shape[1]))
# Detecting all the regions in the
# image that have pedestrians in them
(regions, _) = hog.detectMultiScale(image, winStride=(4, 4), padding=(4, 4), scale=1.05)
# Drawing the regions in the Image
for x, y, w, h in regions:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Showing the output Image
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#Output :
[END]
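The raw HOG detections usually overlap heavily. A common refinement, sketched here with imutils' non-max suppression and assuming the hog and image variables from the snippet above:
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression
(regions, _) = hog.detectMultiScale(image, winStride=(4, 4), padding=(4, 4), scale=1.05)
# convert (x, y, w, h) boxes to corner form and suppress overlaps
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in regions])
picks = non_max_suppression(rects, probs=None, overlapThresh=0.65)
for x1, y1, x2, y2 in picks:
    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 2)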
|
Pedestrian Detection using OpenCV-Python
|
https://www.geeksforgeeks.org/pedestrian-detection-using-opencv-python/
|
import cv2
import imutils
# Initializing the HOG person
# detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
cap = cv2.VideoCapture("vid.mp4")
while cap.isOpened():
# Reading the video stream
ret, image = cap.read()
if ret:
image = imutils.resize(image, width=min(400, image.shape[1]))
        # Detecting all the regions
        # in the image that have
        # pedestrians in them
(regions, _) = hog.detectMultiScale(
image, winStride=(4, 4), padding=(4, 4), scale=1.05
)
# Drawing the regions in the
# Image
for x, y, w, h in regions:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Showing the output Image
cv2.imshow("Image", image)
if cv2.waitKey(25) & 0xFF == ord("q"):
break
else:
break
cap.release()
cv2.destroyAllWindows()
|
#Output :
|
Pedestrian Detection using OpenCV-Python
import cv2
import imutils
# Initializing the HOG person
# detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
cap = cv2.VideoCapture("vid.mp4")
while cap.isOpened():
# Reading the video stream
ret, image = cap.read()
if ret:
image = imutils.resize(image, width=min(400, image.shape[1]))
        # Detecting all the regions
        # in the image that have
        # pedestrians in them
(regions, _) = hog.detectMultiScale(
image, winStride=(4, 4), padding=(4, 4), scale=1.05
)
# Drawing the regions in the
# Image
for x, y, w, h in regions:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Showing the output Image
cv2.imshow("Image", image)
if cv2.waitKey(25) & 0xFF == ord("q"):
break
else:
break
cap.release()
cv2.destroyAllWindows()
#Output :
[END]
|
Saving Operated Video from a webcam
|
https://www.geeksforgeeks.org/saving-operated-video-from-a-webcam-using-opencv/
|
# Python program to illustrate
# saving an operated video
# organize imports
import numpy as np
import cv2
# This will return video from the first webcam on your computer.
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (640, 480))
# loop runs if capturing has been initialized.
while True:
# reads frames from a camera
# ret checks return at each frame
ret, frame = cap.read()
# Converts to HSV color space, OCV reads colors as BGR
# frame is converted to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# output the frame
out.write(hsv)
# The original input frame is shown in the window
cv2.imshow("Original", frame)
# The window showing the operated video stream
cv2.imshow("frame", hsv)
# Wait for 'a' key to stop the program
if cv2.waitKey(1) & 0xFF == ord("a"):
break
# Close the window / Release webcam
cap.release()
# After we release our webcam, we also release the output
out.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
|
#Output :
|
Saving Operated Video from a webcam
# Python program to illustrate
# saving an operated video
# organize imports
import numpy as np
import cv2
# This will return video from the first webcam on your computer.
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (640, 480))
# loop runs if capturing has been initialized.
while True:
# reads frames from a camera
# ret checks return at each frame
ret, frame = cap.read()
# Converts to HSV color space, OCV reads colors as BGR
# frame is converted to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# output the frame
out.write(hsv)
# The original input frame is shown in the window
cv2.imshow("Original", frame)
# The window showing the operated video stream
cv2.imshow("frame", hsv)
# Wait for 'a' key to stop the program
if cv2.waitKey(1) & 0xFF == ord("a"):
break
# Close the window / Release webcam
cap.release()
# After we release our webcam, we also release the output
out.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
#Output :
[END]
|
Saving Operated Video from a webcam
|
https://www.geeksforgeeks.org/saving-operated-video-from-a-webcam-using-opencv/
|
# Python program to illustrate
# saving an operated video
# organize imports
import numpy as np
import cv2
# This will return video from the first webcam on your computer.
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (640, 480))
# loop runs if capturing has been initialized.
while True:
# reads frames from a camera
# ret checks return at each frame
ret, frame = cap.read()
# Converts to grayscale space, OCV reads colors as BGR
# frame is converted to gray
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# output the frame
out.write(gray)
# The original input frame is shown in the window
cv2.imshow("Original", frame)
# The window showing the operated video stream
cv2.imshow("frame", gray)
# Wait for 'a' key to stop the program
if cv2.waitKey(1) & 0xFF == ord("a"):
break
# Close the window / Release webcam
cap.release()
# After we release our webcam, we also release the output
out.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
|
#Output :
|
Saving Operated Video from a webcam
# Python program to illustrate
# saving an operated video
# organize imports
import numpy as np
import cv2
# This will return video from the first webcam on your computer.
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (640, 480))
# loop runs if capturing has been initialized.
while True:
# reads frames from a camera
# ret checks return at each frame
ret, frame = cap.read()
# Converts to grayscale space, OCV reads colors as BGR
# frame is converted to gray
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# output the frame
out.write(gray)
# The original input frame is shown in the window
cv2.imshow("Original", frame)
# The window showing the operated video stream
cv2.imshow("frame", gray)
# Wait for 'a' key to stop the program
if cv2.waitKey(1) & 0xFF == ord("a"):
break
# Close the window / Release webcam
cap.release()
# After we release our webcam, we also release the output
out.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
#Output :
[END]
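Note that out.write(gray) hands a single-channel frame to a VideoWriter opened in its default colour mode, which typically produces an empty or corrupt file. The fix is to open the writer with isColor=False; a sketch of just the changed line:
# open the writer for single-channel (grayscale) frames
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (640, 480), isColor=False)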
|
Detecting objects of similar color in Python using OpenCV
|
https://www.geeksforgeeks.org/detecting-obects-of-similar-color-in-python-using-opencv/
|
# import required library
import cv2
import numpy as np
import matplotlib.pyplot as plt
# create a video capture object;
# pass 0 for the default webcam
cap = cv2.VideoCapture(0)
# define an empty trackbar callback
def nothing(x):
pass
# set window name
cv2.namedWindow("Tracking")
# Creates a trackbar and attaches
# it to the specified window
# with nothing function
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("HH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("HS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("HV", "Tracking", 0, 255, nothing)
# This drives the program
# into an infinite loop.
while True:
# Captures the live stream frame-by-frame
_, frame = cap.read()
# Converts images from BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# find LH trackbar position
l_h = cv2.getTrackbarPos("LH", "Tracking")
# find LS trackbar position
l_s = cv2.getTrackbarPos("LS", "Tracking")
# find LV trackbar position
l_v = cv2.getTrackbarPos("LV", "Tracking")
# find HH trackbar position
h_h = cv2.getTrackbarPos("HH", "Tracking")
# find HS trackbar position
h_s = cv2.getTrackbarPos("HS", "Tracking")
# find HV trackbar position
h_v = cv2.getTrackbarPos("HV", "Tracking")
# create a given numpy array
l_b = np.array([l_h, l_s, l_v])
# create a given numpy array
u_b = np.array([h_h, h_s, h_v])
# create a mask
mask = cv2.inRange(hsv, l_b, u_b)
# applying bitwise_and operation
res = cv2.bitwise_and(frame, frame, mask=mask)
# display frame, mask
# and res window
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
    # wait 1 ms for a keypress
    k = cv2.waitKey(1)
    # break out of the while loop
    # if ESC (key code 27) is pressed
    if k == 27:
break
# release the captured frames
cap.release()
# Destroys all windows.
cv2.destroyAllWindows()
|
#Output : OpenCV
|
Detecting objects of similar color in Python using OpenCV
# import required library
import cv2
import numpy as np
import matplotlib.pyplot as plt
# create a video capture object;
# pass 0 for the default webcam
cap = cv2.VideoCapture(0)
# define an empty trackbar callback
def nothing(x):
pass
# set window name
cv2.namedWindow("Tracking")
# Creates a trackbar and attaches
# it to the specified window
# with nothing function
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("HH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("HS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("HV", "Tracking", 0, 255, nothing)
# This drives the program
# into an infinite loop.
while True:
# Captures the live stream frame-by-frame
_, frame = cap.read()
# Converts images from BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# find LH trackbar position
l_h = cv2.getTrackbarPos("LH", "Tracking")
# find LS trackbar position
l_s = cv2.getTrackbarPos("LS", "Tracking")
# find LV trackbar position
l_v = cv2.getTrackbarPos("LV", "Tracking")
# find HH trackbar position
h_h = cv2.getTrackbarPos("HH", "Tracking")
# find HS trackbar position
h_s = cv2.getTrackbarPos("HS", "Tracking")
# find HV trackbar position
h_v = cv2.getTrackbarPos("HV", "Tracking")
    # create the lower-bound numpy array
    l_b = np.array([l_h, l_s, l_v])
    # create the upper-bound numpy array
    u_b = np.array([h_h, h_s, h_v])
# create a mask
mask = cv2.inRange(hsv, l_b, u_b)
# applying bitwise_and operation
res = cv2.bitwise_and(frame, frame, mask=mask)
# display frame, mask
# and res window
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
    # wait 1 ms for a key press
    k = cv2.waitKey(1)
    # break out of the while loop
    # if the Esc key (code 27) is pressed
if k == 27:
break
# release the captured frames
cap.release()
# Destroys all windows.
cv2.destroyAllWindows()
#Output : OpenCV
[END]
|
Play a video in reverse mode
|
https://www.geeksforgeeks.org/python-play-video-reverse-mode-using-opencv/
|
# Python program to play a video
# in reverse mode using opencv
# import cv2 library
import cv2
# VideoCapture method of cv2 returns a video object
# Pass absolute address of video file
cap = cv2.VideoCapture("video_file_location")
# read method of the video object returns a tuple:
# the 1st element denotes whether the frame was
# read successfully or not, the 2nd element is
# the actual frame.
# Grab the current frame.
check, vid = cap.read()
# counter variable for
# counting frames
counter = 0
# Initialize the value
# of check variable
check = True
frame_list = []
# When the end of the video is reached,
# check becomes False.
# keep looping until check
# becomes False.
while check == True:
# imwrite method of cv2 saves the
# image to the specified format.
cv2.imwrite("frame%d.jpg" % counter, vid)
check, vid = cap.read()
    # Add each frame to the list by
    # using the append method of the List
frame_list.append(vid)
# increment the counter by 1
counter += 1
# the last value in frame_list is None because,
# when the video reaches the end, False is stored
# in the check variable and None is stored in
# the vid variable.
# removing the last value from the
# frame_list by using pop method of List
frame_list.pop()
# looping in the List of frames.
for frame in frame_list:
# show the frame.
cv2.imshow("Frame", frame)
    # waitKey method pauses each frame
    # for some time; if the q key is pressed,
    # stop the loop
    if cv2.waitKey(25) & 0xFF == ord("q"):
break
# release method of the video
# object releases the input video
cap.release()
# close any open windows
cv2.destroyAllWindows()
# reverse the order of the elements
# present in the list by using the
# reverse method of the List.
frame_list.reverse()
for frame in frame_list:
cv2.imshow("Frame", frame)
    if cv2.waitKey(25) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
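# Aside (an addition): the cv2.imwrite call above only dumps each frame to disk
# for inspection; the reverse playback itself needs nothing but frame_list.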
|
#Output : OpenCV's application areas include :
|
Play a video in reverse mode
# Python program to play a video
# in reverse mode using opencv
# import cv2 library
import cv2
# VideoCapture method of cv2 returns a video object
# Pass absolute address of video file
cap = cv2.VideoCapture("video_file_location")
# read method of the video object returns a tuple:
# the 1st element denotes whether the frame was
# read successfully or not, the 2nd element is
# the actual frame.
# Grab the current frame.
check, vid = cap.read()
# counter variable for
# counting frames
counter = 0
# Initialize the value
# of check variable
check = True
frame_list = []
# When the end of the video is reached,
# check becomes False.
# keep looping until check
# becomes False.
while check == True:
# imwrite method of cv2 saves the
# image to the specified format.
cv2.imwrite("frame%d.jpg" % counter, vid)
check, vid = cap.read()
    # Add each frame to the list by
    # using the append method of the List
frame_list.append(vid)
# increment the counter by 1
counter += 1
# the last value in frame_list is None because,
# when the video reaches the end, False is stored
# in the check variable and None is stored in
# the vid variable.
# removing the last value from the
# frame_list by using pop method of List
frame_list.pop()
# looping in the List of frames.
for frame in frame_list:
# show the frame.
cv2.imshow("Frame", frame)
    # waitKey method pauses each frame
    # for some time; if the q key is pressed,
    # stop the loop
    if cv2.waitKey(25) & 0xFF == ord("q"):
break
# release method of the video
# object releases the input video
cap.release()
# close any open windows
cv2.destroyAllWindows()
# reverse the order of the elements
# present in the list by using the
# reverse method of the List.
frame_list.reverse()
for frame in frame_list:
cv2.imshow("Frame", frame)
    if cv2.waitKey(25) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
#Output : OpenCV's application areas include :
[END]
|
OpenCV Python program for Vehicle detection in a Video frame
|
https://www.geeksforgeeks.org/opencv-python-program-vehicle-detection-video-frame/
|
# OpenCV Python program to detect cars in video frame
# import libraries of python OpenCV
import cv2
# capture frames from a video
cap = cv2.VideoCapture('video.avi')
# Trained XML classifiers describe some features of the object we want to detect
car_cascade = cv2.CascadeClassifier('cars.xml')
# loop runs if capturing has been initialized.
while True:
    # reads frames from a video
    ret, frames = cap.read()
    # convert each frame to gray scale
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    # Detects cars of different sizes in the input image
    cars = car_cascade.detectMultiScale(gray, 1.1, 1)
    # To draw a rectangle around each car
    for (x, y, w, h) in cars:
        cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # Display the frames in a window
    cv2.imshow('video', frames)
    # Wait for the Esc key to stop
    if cv2.waitKey(33) == 27:
        break
# De-allocate any associated memory usage
cv2.destroyAllWindows()
|
#Output : sudo apt-get install python
|
OpenCV Python program for Vehicle detection in a Video frame
# OpenCV Python program to detect cars in video frame
# import libraries of python OpenCV
import cv2
# capture frames from a video
cap = cv2.VideoCapture('video.avi')
# Trained XML classifiers describe some features of the object we want to detect
car_cascade = cv2.CascadeClassifier('cars.xml')
# loop runs if capturing has been initialized.
while True:
    # reads frames from a video
    ret, frames = cap.read()
    # convert each frame to gray scale
    gray = cv2.cvtColor(frames, cv2.COLOR_BGR2GRAY)
    # Detects cars of different sizes in the input image
    cars = car_cascade.detectMultiScale(gray, 1.1, 1)
    # To draw a rectangle around each car
    for (x, y, w, h) in cars:
        cv2.rectangle(frames, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # Display the frames in a window
    cv2.imshow('video', frames)
    # Wait for the Esc key to stop
    if cv2.waitKey(33) == 27:
        break
# De-allocate any associated memory usage
cv2.destroyAllWindows()
#Output : sudo apt-get install python
[END]
|
Python - Eye blink detection project
|
https://www.geeksforgeeks.org/python-eye-blink-detection-project/
|
# All the imports go here
import numpy as np
import cv2
# Initializing the face and eye cascade classifiers from xml files
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("haarcascade_eye_tree_eyeglasses.xml")
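# Note (an addition): if the XML files are not in the working directory,
# opencv-python ships them under cv2.data.haarcascades, e.g.
# cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")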
# Variable to store execution state
first_read = True
# Starting the video capture
cap = cv2.VideoCapture(0)
ret, img = cap.read()
while ret:
ret, img = cap.read()
# Converting the recorded image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Applying filter to remove impurities
gray = cv2.bilateralFilter(gray, 5, 1, 1)
# Detecting the face for region of image to be fed to eye classifier
faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(200, 200))
if len(faces) > 0:
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# roi_face is face which is input to eye classifier
roi_face = gray[y : y + h, x : x + w]
roi_face_clr = img[y : y + h, x : x + w]
eyes = eye_cascade.detectMultiScale(roi_face, 1.3, 5, minSize=(50, 50))
# Examining the length of eyes object for eyes
if len(eyes) >= 2:
# Check if program is running for detection
if first_read:
cv2.putText(
img,
"Eye detected press s to begin",
(70, 70),
cv2.FONT_HERSHEY_PLAIN,
3,
(0, 255, 0),
2,
)
else:
cv2.putText(
img,
"Eyes open!",
(70, 70),
cv2.FONT_HERSHEY_PLAIN,
2,
(255, 255, 255),
2,
)
else:
if first_read:
                    # To ensure the eyes are present before starting
cv2.putText(
img,
"No eyes detected",
(70, 70),
cv2.FONT_HERSHEY_PLAIN,
3,
(0, 0, 255),
2,
)
else:
# This will print on console and restart the algorithm
print("Blink detected--------------")
cv2.waitKey(3000)
first_read = True
else:
cv2.putText(
img,
"No face detected",
(100, 100),
cv2.FONT_HERSHEY_PLAIN,
3,
(0, 255, 0),
2,
)
# Controlling the algorithm with keys
cv2.imshow("img", img)
a = cv2.waitKey(1)
if a == ord("q"):
break
elif a == ord("s") and first_read:
# This will start the detection
first_read = False
cap.release()
cv2.destroyAllWindows()
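# Detection logic in brief (an added summary): pressing 's' arms detection
# (first_read = False); while armed, a frame where both eyes disappear is
# reported as a blink, after which the detector re-arms and waits for the
# eyes to be found again.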
|
#Output : The frame is captured and converted to grayscale.
|
Python - Eye blink detection project
# All the imports go here
import numpy as np
import cv2
# Initializing the face and eye cascade classifiers from xml files
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("haarcascade_eye_tree_eyeglasses.xml")
# Variable to store execution state
first_read = True
# Starting the video capture
cap = cv2.VideoCapture(0)
ret, img = cap.read()
while ret:
ret, img = cap.read()
# Converting the recorded image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Applying filter to remove impurities
gray = cv2.bilateralFilter(gray, 5, 1, 1)
# Detecting the face for region of image to be fed to eye classifier
faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(200, 200))
if len(faces) > 0:
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# roi_face is face which is input to eye classifier
roi_face = gray[y : y + h, x : x + w]
roi_face_clr = img[y : y + h, x : x + w]
eyes = eye_cascade.detectMultiScale(roi_face, 1.3, 5, minSize=(50, 50))
# Examining the length of eyes object for eyes
if len(eyes) >= 2:
# Check if program is running for detection
if first_read:
cv2.putText(
img,
"Eye detected press s to begin",
(70, 70),
cv2.FONT_HERSHEY_PLAIN,
3,
(0, 255, 0),
2,
)
else:
cv2.putText(
img,
"Eyes open!",
(70, 70),
cv2.FONT_HERSHEY_PLAIN,
2,
(255, 255, 255),
2,
)
else:
if first_read:
                    # To ensure the eyes are present before starting
cv2.putText(
img,
"No eyes detected",
(70, 70),
cv2.FONT_HERSHEY_PLAIN,
3,
(0, 0, 255),
2,
)
else:
# This will print on console and restart the algorithm
print("Blink detected--------------")
cv2.waitKey(3000)
first_read = True
else:
cv2.putText(
img,
"No face detected",
(100, 100),
cv2.FONT_HERSHEY_PLAIN,
3,
(0, 255, 0),
2,
)
# Controlling the algorithm with keys
cv2.imshow("img", img)
a = cv2.waitKey(1)
if a == ord("q"):
break
elif a == ord("s") and first_read:
# This will start the detection
first_read = False
cap.release()
cv2.destroyAllWindows()
#Output : The frame is captured and converted to grayscale.
[END]
|
Right and Left Hand Detection Using Python
|
https://www.geeksforgeeks.org/right-and-left-hand-detection-using-python/
|
# Importing Libraries
import cv2
import mediapipe as mp
# Used to convert protobuf message
# to a dictionary.
from google.protobuf.json_format import MessageToDict
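# Note (an addition): MessageToDict turns a handedness protobuf message into a
# plain dict shaped roughly like
# {'classification': [{'index': 0, 'score': 0.97, 'label': 'Left'}]},
# so the hand's label can be read as ["classification"][0]["label"].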
|
#Output : pip install mediapipe
|
Right and Left Hand Detection Using Python
# Importing Libraries
import cv2
import mediapipe as mp
# Used to convert protobuf message
# to a dictionary.
from google.protobuf.json_format import MessageToDict
#Output : pip install mediapipe
[END]
|
Right and Left Hand Detection Using Python
|
https://www.geeksforgeeks.org/right-and-left-hand-detection-using-python/
|
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
    static_image_mode=False,
    model_complexity=1,
    min_detection_confidence=0.75,
    min_tracking_confidence=0.75,
    max_num_hands=2,
)
|
#Output : pip install mediapipe
|
Right and Left Hand Detection Using Python
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
    static_image_mode=False,
    model_complexity=1,
    min_detection_confidence=0.75,
    min_tracking_confidence=0.75,
    max_num_hands=2,
)
#Output : pip install mediapipe
[END]
|
Right and Left Hand Detection Using Python
|
https://www.geeksforgeeks.org/right-and-left-hand-detection-using-python/
|
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
# Read video frame by frame
success, img = cap.read()
# Flip the image(frame)
img = cv2.flip(img, 1)
# Convert BGR image to RGB image
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Process the RGB image
results = hands.process(imgRGB)
# If hands are present in image(frame)
if results.multi_hand_landmarks:
# Both Hands are present in image(frame)
if len(results.multi_handedness) == 2:
# Display 'Both Hands' on the image
cv2.putText(
img,
"Both Hands",
(250, 50),
cv2.FONT_HERSHEY_COMPLEX,
0.9,
(0, 255, 0),
2,
)
        # If only one hand is present
else:
for i in results.multi_handedness:
# Return whether it is Right or Left Hand
label = MessageToDict(i)["classification"][0]["label"]
if label == "Left":
# Display 'Left Hand' on left side of window
cv2.putText(
img,
label + " Hand",
(20, 50),
cv2.FONT_HERSHEY_COMPLEX,
0.9,
(0, 255, 0),
2,
)
if label == "Right":
                    # Display 'Right Hand' on right side of window
cv2.putText(
img,
label + " Hand",
(460, 50),
cv2.FONT_HERSHEY_COMPLEX,
0.9,
(0, 255, 0),
2,
)
# Display Video and when 'q' is entered, destroy the window
cv2.imshow("Image", img)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
|
#Output : pip install mediapipe
|
Right and Left Hand Detection Using Python
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
# Read video frame by frame
success, img = cap.read()
# Flip the image(frame)
img = cv2.flip(img, 1)
# Convert BGR image to RGB image
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Process the RGB image
results = hands.process(imgRGB)
# If hands are present in image(frame)
if results.multi_hand_landmarks:
# Both Hands are present in image(frame)
if len(results.multi_handedness) == 2:
# Display 'Both Hands' on the image
cv2.putText(
img,
"Both Hands",
(250, 50),
cv2.FONT_HERSHEY_COMPLEX,
0.9,
(0, 255, 0),
2,
)
        # If only one hand is present
else:
for i in results.multi_handedness:
# Return whether it is Right or Left Hand
label = MessageToDict(i)["classification"][0]["label"]
if label == "Left":
# Display 'Left Hand' on left side of window
cv2.putText(
img,
label + " Hand",
(20, 50),
cv2.FONT_HERSHEY_COMPLEX,
0.9,
(0, 255, 0),
2,
)
if label == "Right":
                    # Display 'Right Hand' on right side of window
cv2.putText(
img,
label + " Hand",
(460, 50),
cv2.FONT_HERSHEY_COMPLEX,
0.9,
(0, 255, 0),
2,
)
# Display Video and when 'q' is entered, destroy the window
cv2.imshow("Image", img)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
#Output : pip install mediapipe
[END]
|
Right and Left Hand Detection Using Python
|
https://www.geeksforgeeks.org/right-and-left-hand-detection-using-python/
|
# Importing Libraries
import cv2
import mediapipe as mp
# Used to convert protobuf message to a dictionary.
from google.protobuf.json_format import MessageToDict
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
    static_image_mode=False,
    model_complexity=1,
    min_detection_confidence=0.75,
    min_tracking_confidence=0.75,
    max_num_hands=2,
)
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
    # Read video frame by frame
    success, img = cap.read()
    # Flip the image(frame)
    img = cv2.flip(img, 1)
    # Convert BGR image to RGB image
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Process the RGB image
    results = hands.process(imgRGB)
    # If hands are present in image(frame)
    if results.multi_hand_landmarks:
        # Both Hands are present in image(frame)
        if len(results.multi_handedness) == 2:
            # Display 'Both Hands' on the image
            cv2.putText(img, "Both Hands", (250, 50),
                        cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 255, 0), 2)
        # If only one hand is present
        else:
            for i in results.multi_handedness:
                # Return whether it is Right or Left Hand
                label = MessageToDict(i)["classification"][0]["label"]
                if label == "Left":
                    # Display 'Left Hand' on left side of window
                    cv2.putText(img, label + " Hand", (20, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 255, 0), 2)
                if label == "Right":
                    # Display 'Right Hand' on right side of window
                    cv2.putText(img, label + " Hand", (460, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 255, 0), 2)
    # Display Video and when 'q' is entered, destroy the window
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
|
#Output : pip install mediapipe
|
Right and Left Hand Detection Using Python
# Importing Libraries
import cv2
import mediapipe as mp
# Used to convert protobuf message to a dictionary.
from google.protobuf.json_format import MessageToDict
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
    static_image_mode=False,
    model_complexity=1,
    min_detection_confidence=0.75,
    min_tracking_confidence=0.75,
    max_num_hands=2,
)
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
    # Read video frame by frame
    success, img = cap.read()
    # Flip the image(frame)
    img = cv2.flip(img, 1)
    # Convert BGR image to RGB image
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Process the RGB image
    results = hands.process(imgRGB)
    # If hands are present in image(frame)
    if results.multi_hand_landmarks:
        # Both Hands are present in image(frame)
        if len(results.multi_handedness) == 2:
            # Display 'Both Hands' on the image
            cv2.putText(img, "Both Hands", (250, 50),
                        cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 255, 0), 2)
        # If only one hand is present
        else:
            for i in results.multi_handedness:
                # Return whether it is Right or Left Hand
                label = MessageToDict(i)["classification"][0]["label"]
                if label == "Left":
                    # Display 'Left Hand' on left side of window
                    cv2.putText(img, label + " Hand", (20, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 255, 0), 2)
                if label == "Right":
                    # Display 'Right Hand' on right side of window
                    cv2.putText(img, label + " Hand", (460, 50),
                                cv2.FONT_HERSHEY_COMPLEX, 0.9, (0, 255, 0), 2)
    # Display Video and when 'q' is entered, destroy the window
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
#Output : pip install mediapipe
[END]
|
Brightness Control With Hand Detection using OpenCV in Python
|
https://www.geeksforgeeks.org/brightness-control-with-hand-detection-using-opencv-in-python/
|
# Importing Libraries
import cv2
import mediapipe as mp
from math import hypot
import screen_brightness_control as sbc
import numpy as np
|
#Output : pip install mediapipe
|
Brightness Control With Hand Detection using OpenCV in Python
# Importing Libraries
import cv2
import mediapipe as mp
from math import hypot
import screen_brightness_control as sbc
import numpy as np
#Output : pip install mediapipe
[END]
|
Brightness Control With Hand Detection using OpenCV in Python
|
https://www.geeksforgeeks.org/brightness-control-with-hand-detection-using-opencv-in-python/
|
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
static_image_mode=False,
model_complexity=1,
min_detection_confidence=0.75,
min_tracking_confidence=0.75,
max_num_hands=2,
)
Draw = mp.solutions.drawing_utils
|
#Output : pip install mediapipe
|
Brightness Control With Hand Detection using OpenCV in Python
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
static_image_mode=False,
model_complexity=1,
min_detection_confidence=0.75,
min_tracking_confidence=0.75,
max_num_hands=2,
)
Draw = mp.solutions.drawing_utils
#Output : pip install mediapipe
[END]
|
Brightness Control With Hand Detection using OpenCV in Python
|
https://www.geeksforgeeks.org/brightness-control-with-hand-detection-using-opencv-in-python/
|
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
# Read video frame by frame
_, frame = cap.read()
# Flip image
frame = cv2.flip(frame, 1)
# Convert BGR image to RGB image
frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the RGB image
Process = hands.process(frameRGB)
landmarkList = []
# if hands are present in image(frame)
if Process.multi_hand_landmarks:
        # detect hand landmarks
for handlm in Process.multi_hand_landmarks:
for _id, landmarks in enumerate(handlm.landmark):
# store height and width of image
height, width, color_channels = frame.shape
                # calculate and append x, y coordinates
                # of hand landmarks from image(frame) to landmarkList
x, y = int(landmarks.x * width), int(landmarks.y * height)
landmarkList.append([_id, x, y])
# draw Landmarks
Draw.draw_landmarks(frame, handlm, mpHands.HAND_CONNECTIONS)
# If landmarks list is not empty
if landmarkList != []:
# store x,y coordinates of (tip of) thumb
x_1, y_1 = landmarkList[4][1], landmarkList[4][2]
# store x,y coordinates of (tip of) index finger
x_2, y_2 = landmarkList[8][1], landmarkList[8][2]
# draw circle on thumb and index finger tip
cv2.circle(frame, (x_1, y_1), 7, (0, 255, 0), cv2.FILLED)
cv2.circle(frame, (x_2, y_2), 7, (0, 255, 0), cv2.FILLED)
# draw line from tip of thumb to tip of index finger
cv2.line(frame, (x_1, y_1), (x_2, y_2), (0, 255, 0), 3)
# calculate square root of the sum
# of squares of the specified arguments.
L = hypot(x_2 - x_1, y_2 - y_1)
# 1-D linear interpolant to a function
# with given discrete data points
# (Hand range 15 - 220, Brightness range 0 - 100),
# evaluated at length.
b_level = np.interp(L, [15, 220], [0, 100])
# set brightness
sbc.set_brightness(int(b_level))
# Display Video and when 'q' is entered,
# destroy the window
cv2.imshow("Image", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
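# Standalone sketch of the interpolation step above (an addition, not from the
# article): a pinch distance halfway through the 15-220 px range maps to 50%,
# e.g. np.interp(117.5, [15, 220], [0, 100]) evaluates to 50.0.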
|
#Output : pip install mediapipe
|
Brightness Control With Hand Detection using OpenCV in Python
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
# Read video frame by frame
_, frame = cap.read()
# Flip image
frame = cv2.flip(frame, 1)
# Convert BGR image to RGB image
frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the RGB image
Process = hands.process(frameRGB)
landmarkList = []
# if hands are present in image(frame)
if Process.multi_hand_landmarks:
        # detect hand landmarks
for handlm in Process.multi_hand_landmarks:
for _id, landmarks in enumerate(handlm.landmark):
# store height and width of image
height, width, color_channels = frame.shape
                # calculate and append x, y coordinates
                # of hand landmarks from image(frame) to landmarkList
x, y = int(landmarks.x * width), int(landmarks.y * height)
landmarkList.append([_id, x, y])
# draw Landmarks
Draw.draw_landmarks(frame, handlm, mpHands.HAND_CONNECTIONS)
# If landmarks list is not empty
if landmarkList != []:
# store x,y coordinates of (tip of) thumb
x_1, y_1 = landmarkList[4][1], landmarkList[4][2]
# store x,y coordinates of (tip of) index finger
x_2, y_2 = landmarkList[8][1], landmarkList[8][2]
# draw circle on thumb and index finger tip
cv2.circle(frame, (x_1, y_1), 7, (0, 255, 0), cv2.FILLED)
cv2.circle(frame, (x_2, y_2), 7, (0, 255, 0), cv2.FILLED)
# draw line from tip of thumb to tip of index finger
cv2.line(frame, (x_1, y_1), (x_2, y_2), (0, 255, 0), 3)
# calculate square root of the sum
# of squares of the specified arguments.
L = hypot(x_2 - x_1, y_2 - y_1)
# 1-D linear interpolant to a function
# with given discrete data points
# (Hand range 15 - 220, Brightness range 0 - 100),
# evaluated at length.
b_level = np.interp(L, [15, 220], [0, 100])
# set brightness
sbc.set_brightness(int(b_level))
# Display Video and when 'q' is entered,
# destroy the window
cv2.imshow("Image", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
#Output : pip install mediapipe
[END]
|
Brightness Control With Hand Detection using OpenCV in Python
|
https://www.geeksforgeeks.org/brightness-control-with-hand-detection-using-opencv-in-python/
|
# Importing Libraries
import cv2
import mediapipe as mp
from math import hypot
import screen_brightness_control as sbc
import numpy as np
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
static_image_mode=False,
model_complexity=1,
min_detection_confidence=0.75,
min_tracking_confidence=0.75,
max_num_hands=2,
)
Draw = mp.solutions.drawing_utils
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
# Read video frame by frame
_, frame = cap.read()
# Flip image
frame = cv2.flip(frame, 1)
# Convert BGR image to RGB image
frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the RGB image
Process = hands.process(frameRGB)
landmarkList = []
# if hands are present in image(frame)
if Process.multi_hand_landmarks:
        # detect hand landmarks
for handlm in Process.multi_hand_landmarks:
for _id, landmarks in enumerate(handlm.landmark):
# store height and width of image
height, width, color_channels = frame.shape
                # calculate and append x, y coordinates
                # of hand landmarks from image(frame) to landmarkList
x, y = int(landmarks.x * width), int(landmarks.y * height)
landmarkList.append([_id, x, y])
# draw Landmarks
Draw.draw_landmarks(frame, handlm, mpHands.HAND_CONNECTIONS)
# If landmarks list is not empty
if landmarkList != []:
# store x,y coordinates of (tip of) thumb
x_1, y_1 = landmarkList[4][1], landmarkList[4][2]
# store x,y coordinates of (tip of) index finger
x_2, y_2 = landmarkList[8][1], landmarkList[8][2]
# draw circle on thumb and index finger tip
cv2.circle(frame, (x_1, y_1), 7, (0, 255, 0), cv2.FILLED)
cv2.circle(frame, (x_2, y_2), 7, (0, 255, 0), cv2.FILLED)
# draw line from tip of thumb to tip of index finger
cv2.line(frame, (x_1, y_1), (x_2, y_2), (0, 255, 0), 3)
# calculate square root of the sum of
# squares of the specified arguments.
L = hypot(x_2 - x_1, y_2 - y_1)
# 1-D linear interpolant to a function
# with given discrete data points
# (Hand range 15 - 220, Brightness
# range 0 - 100), evaluated at length.
b_level = np.interp(L, [15, 220], [0, 100])
# set brightness
sbc.set_brightness(int(b_level))
# Display Video and when 'q' is entered, destroy
# the window
cv2.imshow("Image", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
|
#Output : pip install mediapipe
|
Brightness Control With Hand Detection using OpenCV in Python
# Importing Libraries
import cv2
import mediapipe as mp
from math import hypot
import screen_brightness_control as sbc
import numpy as np
# Initializing the Model
mpHands = mp.solutions.hands
hands = mpHands.Hands(
static_image_mode=False,
model_complexity=1,
min_detection_confidence=0.75,
min_tracking_confidence=0.75,
max_num_hands=2,
)
Draw = mp.solutions.drawing_utils
# Start capturing video from webcam
cap = cv2.VideoCapture(0)
while True:
# Read video frame by frame
_, frame = cap.read()
# Flip image
frame = cv2.flip(frame, 1)
# Convert BGR image to RGB image
frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the RGB image
Process = hands.process(frameRGB)
landmarkList = []
# if hands are present in image(frame)
if Process.multi_hand_landmarks:
        # detect hand landmarks
for handlm in Process.multi_hand_landmarks:
for _id, landmarks in enumerate(handlm.landmark):
# store height and width of image
height, width, color_channels = frame.shape
                # calculate and append x, y coordinates
                # of hand landmarks from image(frame) to landmarkList
x, y = int(landmarks.x * width), int(landmarks.y * height)
landmarkList.append([_id, x, y])
# draw Landmarks
Draw.draw_landmarks(frame, handlm, mpHands.HAND_CONNECTIONS)
# If landmarks list is not empty
if landmarkList != []:
# store x,y coordinates of (tip of) thumb
x_1, y_1 = landmarkList[4][1], landmarkList[4][2]
# store x,y coordinates of (tip of) index finger
x_2, y_2 = landmarkList[8][1], landmarkList[8][2]
# draw circle on thumb and index finger tip
cv2.circle(frame, (x_1, y_1), 7, (0, 255, 0), cv2.FILLED)
cv2.circle(frame, (x_2, y_2), 7, (0, 255, 0), cv2.FILLED)
# draw line from tip of thumb to tip of index finger
cv2.line(frame, (x_1, y_1), (x_2, y_2), (0, 255, 0), 3)
# calculate square root of the sum of
# squares of the specified arguments.
L = hypot(x_2 - x_1, y_2 - y_1)
# 1-D linear interpolant to a function
# with given discrete data points
# (Hand range 15 - 220, Brightness
# range 0 - 100), evaluated at length.
b_level = np.interp(L, [15, 220], [0, 100])
# set brightness
sbc.set_brightness(int(b_level))
# Display Video and when 'q' is entered, destroy
# the window
cv2.imshow("Image", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
#Output : pip install mediapipe
[END]
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
|
https://www.geeksforgeeks.org/creating-a-finger-counter-using-computer-vision-and-opencv-in-python/
|
# import libraries and required classes
import cv2
from cvzone.HandTrackingModule import HandDetector
|
#Output : pip install cvzone
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
# import libraries and required classes
import cv2
from cvzone.HandTrackingModule import HandDetector
#Output : pip install cvzone
[END]
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
|
https://www.geeksforgeeks.org/creating-a-finger-counter-using-computer-vision-and-opencv-in-python/
|
# declaring HandDetector with
# some basic requirements
detector = HandDetector(maxHands=1, detectionCon=0.8)
# it detects only one hand from the
# video with 0.8 detection confidence
video = cv2.VideoCapture(0)
|
#Output : pip install cvzone
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
# declaring HandDetector with
# some basic requirements
detector = HandDetector(maxHands=1, detectionCon=0.8)
# it detects only one hand from the
# video with 0.8 detection confidence
video = cv2.VideoCapture(0)
#Output : pip install cvzone
[END]
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
|
https://www.geeksforgeeks.org/creating-a-finger-counter-using-computer-vision-and-opencv-in-python/
|
while True:
# Capture frame-by-frame
_, img = video.read()
img = cv2.flip(img, 1)
# Find the hand with help of detector
hand = detector.findHands(img, draw=False)
    # Default image (0 fingers up) is used when no hand is found
    fing = cv2.imread("Put image path with 0 fingers up")
    if hand:
        # Taking the landmarks of hand
        lmlist = hand[0]
        if lmlist:
            # Find how many fingers are up;
            # this function returns a list
            fingerup = detector.fingersUp(lmlist)
            # Change image based on different conditions
            if fingerup == [0, 1, 0, 0, 0]:
                fing = cv2.imread("Put image path of 1 finger up")
            if fingerup == [0, 1, 1, 0, 0]:
                fing = cv2.imread("Put image path of 2 fingers up")
            if fingerup == [0, 1, 1, 1, 0]:
                fing = cv2.imread("Put image path of 3 fingers up")
            if fingerup == [0, 1, 1, 1, 1]:
                fing = cv2.imread("Put image path of 4 fingers up")
            if fingerup == [1, 1, 1, 1, 1]:
                fing = cv2.imread("Put image path of 4 fingers and thumb up")
# Resize the image
fing = cv2.resize(fing, (220, 280))
# Place the image in main frame
img[50:330, 20:240] = fing
# Display the resulting frame
cv2.imshow("Video", img)
|
#Output : pip install cvzone
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
while True:
# Capture frame-by-frame
_, img = video.read()
img = cv2.flip(img, 1)
# Find the hand with help of detector
hand = detector.findHands(img, draw=False)
    # Default image (0 fingers up) is used when no hand is found
    fing = cv2.imread("Put image path with 0 fingers up")
    if hand:
        # Taking the landmarks of hand
        lmlist = hand[0]
        if lmlist:
            # Find how many fingers are up;
            # this function returns a list
            fingerup = detector.fingersUp(lmlist)
            # Change image based on different conditions
            if fingerup == [0, 1, 0, 0, 0]:
                fing = cv2.imread("Put image path of 1 finger up")
            if fingerup == [0, 1, 1, 0, 0]:
                fing = cv2.imread("Put image path of 2 fingers up")
            if fingerup == [0, 1, 1, 1, 0]:
                fing = cv2.imread("Put image path of 3 fingers up")
            if fingerup == [0, 1, 1, 1, 1]:
                fing = cv2.imread("Put image path of 4 fingers up")
            if fingerup == [1, 1, 1, 1, 1]:
                fing = cv2.imread("Put image path of 4 fingers and thumb up")
# Resize the image
fing = cv2.resize(fing, (220, 280))
# Place the image in main frame
img[50:330, 20:240] = fing
# Display the resulting frame
cv2.imshow("Video", img)
#Output : pip install cvzone
[END]
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
|
https://www.geeksforgeeks.org/creating-a-finger-counter-using-computer-vision-and-opencv-in-python/
|
# Enter key 'q' to break the loop
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# When everything done, release
# the capture and destroy the windows
video.release()
cv2.destroyAllWindows()
|
#Output : pip install cvzone
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
# Enter key 'q' to break the loop
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# When everything done, release
# the capture and destroy the windows
video.release()
cv2.destroyAllWindows()
#Output : pip install cvzone
[END]
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
|
https://www.geeksforgeeks.org/creating-a-finger-counter-using-computer-vision-and-opencv-in-python/
|
import cv2
from cvzone.HandTrackingModule import HandDetector
detector = HandDetector(maxHands=1, detectionCon=0.8)
video = cv2.VideoCapture(1)  # camera index; use 0 for the default webcam
while True:
_, img = video.read()
img = cv2.flip(img, 1)
hand = detector.findHands(img, draw=False)
fing = cv2.imread("Put image path with 0 fingures up")
if hand:
lmlist = hand[0]
if lmlist:
fingerup = detector.fingersUp(lmlist)
if fingerup == [0, 1, 0, 0, 0]:
fing = cv2.imread(
"Put image \
path of 1 fingures up"
)
if fingerup == [0, 1, 1, 0, 0]:
fing = cv2.imread(
"Put image \
path of 2 fingures up"
)
if fingerup == [0, 1, 1, 1, 0]:
fing = cv2.imread(
"Put image \
path of 3 fingures up"
)
if fingerup == [0, 1, 1, 1, 1]:
fing = cv2.imread(
"Put image \
path of 4 fingures up"
)
if fingerup == [1, 1, 1, 1, 1]:
fing = cv2.imread(
"Put image \
path of 4 fingures and thumbs up"
)
fing = cv2.resize(fing, (220, 280))
img[50:330, 20:240] = fing
cv2.imshow("Video", img)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
video.release()
cv2.destroyAllWindows()
|
#Output : pip install cvzone
|
Creating a Finger Counter Using Computer Vision and OpenCV in Python
import cv2
from cvzone.HandTrackingModule import HandDetector
detector = HandDetector(maxHands=1, detectionCon=0.8)
video = cv2.VideoCapture(1)  # camera index; use 0 for the default webcam
while True:
_, img = video.read()
img = cv2.flip(img, 1)
hand = detector.findHands(img, draw=False)
fing = cv2.imread("Put image path with 0 fingures up")
if hand:
lmlist = hand[0]
if lmlist:
fingerup = detector.fingersUp(lmlist)
if fingerup == [0, 1, 0, 0, 0]:
fing = cv2.imread(
"Put image \
path of 1 fingures up"
)
if fingerup == [0, 1, 1, 0, 0]:
fing = cv2.imread(
"Put image \
path of 2 fingures up"
)
if fingerup == [0, 1, 1, 1, 0]:
fing = cv2.imread(
"Put image \
path of 3 fingures up"
)
if fingerup == [0, 1, 1, 1, 1]:
fing = cv2.imread(
"Put image \
path of 4 fingures up"
)
if fingerup == [1, 1, 1, 1, 1]:
fing = cv2.imread(
"Put image \
path of 4 fingures and thumbs up"
)
fing = cv2.resize(fing, (220, 280))
img[50:330, 20:240] = fing
cv2.imshow("Video", img)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
video.release()
cv2.destroyAllWindows()
#Output : pip install cvzone
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"gfg_site_app.apps.GfgSiteAppConfig",
]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"gfg_site_app.apps.GfgSiteAppConfig",
]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.http import HttpResponse
# create a function
def geeks_view(request):
return HttpResponse("<h1>Welcome to GeeksforGeeks</h1>")
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.http import HttpResponse
# create a function
def geeks_view(request):
return HttpResponse("<h1>Welcome to GeeksforGeeks</h1>")
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.geeks_view, name="geeks_view"),
]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.urls import path
from . import views
urlpatterns = [
path("", views.geeks_view, name="geeks_view"),
]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [path("admin/", admin.site.urls), path("", include("gfg_site_app.urls"))]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.contrib import admin
from django.urls import path, include
urlpatterns = [path("admin/", admin.site.urls), path("", include("gfg_site_app.urls"))]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
# import the standard Django Model
# from built-in library
from django.db import models
from datetime import datetime
class GeeksModel(models.Model):
# Field Names
title = models.CharField(max_length=200)
description = models.TextField()
created_on = models.DateTimeField(default=datetime.now)
image = models.ImageField(upload_to="images/%Y/%m/%d")
# rename the instances of the model
# with their title name
def __str__(self) -> str:
return self.title
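# Note (an addition): the upload_to pattern "images/%Y/%m/%d" expands to
# date-based subfolders under MEDIA_ROOT, e.g. media/images/2024/01/31/.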
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
# import the standard Django Model
# from built-in library
from django.db import models
from datetime import datetime
class GeeksModel(models.Model):
# Field Names
title = models.CharField(max_length=200)
description = models.TextField()
created_on = models.DateTimeField(default=datetime.now)
image = models.ImageField(upload_to="images/%Y/%m/%d")
# rename the instances of the model
# with their title name
def __str__(self) -> str:
return self.title
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from gfg_site_app.models import GeeksModel
obj = GeeksModel(
title="GeeksforGeeks", description="GFG is a portal for computer science students"
)
obj.save()
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from gfg_site_app.models import GeeksModel
obj = GeeksModel(
title="GeeksforGeeks", description="GFG is a portal for computer science students"
)
obj.save()
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
GeeksModel.objects.all()
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
GeeksModel.objects.all()
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
obj = GeeksModel.objects.get(id=1)
obj.title = "GFG"
obj.save()
GeeksModel.objects.all()
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
obj = GeeksModel.objects.get(id=1)
obj.title = "GFG"
obj.save()
GeeksModel.objects.all()
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
obj = GeeksModel.objects.get(id=1)
obj.delete()
GeeksModel.objects.all()
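# Other common QuerySet lookups follow the same pattern (an added aside, not
# from the tutorial):
GeeksModel.objects.filter(title__startswith="G")  # rows whose title starts with "G"
GeeksModel.objects.exclude(id=1)  # all rows except the one with id=1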
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
obj = GeeksModel.objects.get(id=1)
obj.delete()
GeeksModel.objects.all()
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
MEDIA_ROOT = BASE_DIR / "media"
MEDIA_URL = "/media/"
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
MEDIA_ROOT = BASE_DIR / "media"
MEDIA_URL = "/media/"
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.contrib import admin
from .models import GeeksModel
# Register your models here.
admin.site.register(
GeeksModel,
)
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.contrib import admin
from .models import GeeksModel
# Register your models here.
admin.site.register(
GeeksModel,
)
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': '<database_name>',
        'USER': '<database_username>',
        'PASSWORD': '<database_password>',
        'HOST': '<database_hostname>',
        'PORT': '<database_port>',
    }
}
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': '<database_name>',
        'USER': '<database_username>',
        'PASSWORD': '<database_password>',
        'HOST': '<database_hostname>',
        'PORT': '<database_port>',
    }
}
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
TEMPLATES = [
{
        # Template backend to be used, for example Jinja2
"BACKEND": "django.template.backends.django.DjangoTemplates",
# directories for templates
"DIRS": [],
"APP_DIRS": True,
# options to configure
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
TEMPLATES = [
{
        # Template backend to be used, for example Jinja2
"BACKEND": "django.template.backends.django.DjangoTemplates",
# directories for templates
"DIRS": [],
"APP_DIRS": True,
# options to configure
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
# adding the location of our templates directory
"DIRS": [BASE_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
# adding the location of our templates directory
"DIRS": [BASE_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="X-UA-Compatible" content="ie=edge">
    <title>Homepage</title>
</head>
<body>
    <h1>Welcome to GeeksforGeeks</h1>
</body>
</html>
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="X-UA-Compatible" content="ie=edge">
    <title>Homepage</title>
</head>
<body>
    <h1>Welcome to GeeksforGeeks</h1>
</body>
</html>
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.shortcuts import render
# create a function
def geeks_view(request):
return render(request, "index.html")
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.shortcuts import render
# create a function
def geeks_view(request):
return render(request, "index.html")
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.shortcuts import render
from .models import GeeksModel
# create a function
def geeks_view(request):
content = GeeksModel.objects.all()
context = {"content": content}
return render(request, "index.html", context=context)
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.shortcuts import render
from .models import GeeksModel
# create a function
def geeks_view(request):
content = GeeksModel.objects.all()
context = {"content": content}
return render(request, "index.html", context=context)
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="X-UA-Compatible" content="ie=edge">
    <title>Homepage</title>
</head>
<body>
    {% for data in content %}
    <img src="{{ data.image.url }}" alt="">
    <p><strong>Description:</strong> {{ data.description }}</p>
    <p><strong>Created On:</strong> {{ data.created_on }}</p>
    {% endfor %}
</body>
</html>
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="X-UA-Compatible" content="ie=edge">
    <title>Homepage</title>
</head>
<body>
    {% for data in content %}
    <img src="{{ data.image.url }}" alt="">
    <p><strong>Description:</strong> {{ data.description }}</p>
    <p><strong>Created On:</strong> {{ data.created_on }}</p>
    {% endfor %}
</body>
</html>
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
{% extends "./base2.html" %}
{% extends "../base1.html" %}
{% extends "./my/base3.html" %}
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
{% extends "./base2.html" %}
{% extends "../base1.html" %}
{% extends "./my/base3.html" %}
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django import forms
class GeeksForm(forms.Form):
title = forms.CharField(max_length=200)
description = forms.CharField(widget=forms.Textarea)
image = forms.ImageField()
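# Note (an addition): in a template, an instance of this form renders with
# {{ form }} or, for paragraph-wrapped fields, {{ form.as_p }}.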
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django import forms
class GeeksForm(forms.Form):
title = forms.CharField(max_length=200)
description = forms.CharField(widget=forms.Textarea)
image = forms.ImageField()
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.shortcuts import render
from .forms import GeeksForm
def geeks_form(request):
context = {}
context["form"] = GeeksForm
return render(request, "form.html", context=context)
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.shortcuts import render
from .forms import GeeksForm
def geeks_form(request):
context = {}
context["form"] = GeeksForm
return render(request, "form.html", context=context)
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.geeks_view, name="geeks_view"),
path("add/", views.geeks_form, name="geeks_form"),
]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.urls import path
from . import views
urlpatterns = [
path("", views.geeks_view, name="geeks_view"),
path("add/", views.geeks_form, name="geeks_form"),
]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
<form action="" method="POST">
    {% csrf_token %}
    {{ form }}
    <input type="submit" value="submit">
</form>
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
<form action="" method="POST">
    {% csrf_token %}
    {{ form }}
    <input type="submit" value="submit">
</form>
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django import forms
from .models import GeeksModel
class GeeksForm(forms.ModelForm):
class Meta:
model = GeeksModel
fields = ["title", "description", "image"]
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django import forms
from .models import GeeksModel
class GeeksForm(forms.ModelForm):
class Meta:
model = GeeksModel
fields = ["title", "description", "image"]
#Output : python3 -m venv ./name
[END]
|
Python Web Development - Django Tutorial
|
https://www.geeksforgeeks.org/python-web-development-django-tutorial/
|
from django.shortcuts import render, redirect
from .forms import GeeksForm
def geeks_form(request):
if request.method == "POST":
form = GeeksForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect("geeks_view")
else:
# uncomment the below line to see errors
# in the form (if any)
# print(form.errors)
return redirect("geeks_form")
else:
context = {}
context["form"] = GeeksForm
return render(request, "form.html", context=context)
|
#Output : python3 -m venv ./name
|
Python Web Development - Django Tutorial
from django.shortcuts import render, redirect
from .forms import GeeksForm
def geeks_form(request):
if request.method == "POST":
form = GeeksForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect("geeks_view")
else:
# uncomment the below line to see errors
# in the form (if any)
# print(form.errors)
return redirect("geeks_form")
else:
context = {}
context["form"] = GeeksForm
return render(request, "form.html", context=context)
#Output : python3 -m venv ./name
[END]
|
How to Create an App in Django?
|
https://www.geeksforgeeks.org/how-to-create-an-app-in-django/
|
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"projectApp",
]
|
#Output : python manage.py startapp projectApp
|
How to Create an App in Django?
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"projectApp",
]
#Output : python manage.py startapp projectApp
[END]
|
How to Create an App in Django?
|
https://www.geeksforgeeks.org/how-to-create-an-app-in-django/
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
    # Enter the app name in the following
    # syntax for this to work
path("", include("projectApp.urls")),
]
|
#Output : python manage.py startapp projectApp
|
How to Create an App in Django?
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
    # Enter the app name in the following
    # syntax for this to work
path("", include("projectApp.urls")),
]
#Output : python manage.py startapp projectApp
[END]
|
How to Create an App in Django?
|
https://www.geeksforgeeks.org/how-to-create-an-app-in-django/
|
from django.urls import path
# now import the views.py file into this code
from . import views
urlpatterns = [path("", views.index)]
|
#Output : python manage.py startapp projectApp
|
How to Create an App in Django?
from django.urls import path
# now import the views.py file into this code
from . import views
urlpatterns = [path("", views.index)]
#Output : python manage.py startapp projectApp
[END]
|
How to Create an App in Django?
|
https://www.geeksforgeeks.org/how-to-create-an-app-in-django/
|
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello Geeks")
|
#Output : python manage.py startapp projectApp
|
How to Create an App in Django?
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello Geeks")
#Output : python manage.py startapp projectApp
[END]
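Once the route and view are wired up, the response can be checked with Django's test client; a quick sketch, assuming the app's URLconf is included at the project root:

# Run inside `python manage.py shell` so settings are loaded.
from django.test import Client

client = Client()
response = client.get("/")
print(response.status_code)  # 200
print(response.content)      # b'Hello Geeks'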
|
Weather app using Django
|
https://www.geeksforgeeks.org/weather-app-using-django-python/
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("main.urls")),
]
|
#Output : cd weather
|
Weather app using Django
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("main.urls")),
]
#Output : cd weather
[END]
|
Weather app using Django
|
https://www.geeksforgeeks.org/weather-app-using-django-python/
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index),
]
|
#Output : cd weather
|
Weather app using Django
from django.urls import path
from . import views
urlpatterns = [
path("", views.index),
]
#Output : cd weather
[END]
|
Weather app using Django
|
https://www.geeksforgeeks.org/weather-app-using-django-python/
|
from django.shortcuts import render
# import json to load json data to python dictionary
import json
# urllib.request to make a request to api
import urllib.request
def index(request):
if request.method == "POST":
city = request.POST["city"]
""" api key might be expired use your own api_key
place api_key in place of appid ="your_api_key_here " """
# source contain JSON data from API
source = urllib.request.urlopen(
"http://api.openweathermap.org/data/2.5/weather?q ="
+ city
+ "&appid = your_api_key_here"
).read()
# converting JSON data to a dictionary
list_of_data = json.loads(source)
# data for variable list_of_data
data = {
"country_code": str(list_of_data["sys"]["country"]),
"coordinate": str(list_of_data["coord"]["lon"])
+ " "
+ str(list_of_data["coord"]["lat"]),
"temp": str(list_of_data["main"]["temp"]) + "k",
"pressure": str(list_of_data["main"]["pressure"]),
"humidity": str(list_of_data["main"]["humidity"]),
}
print(data)
else:
data = {}
return render(request, "main/index.html", data)
|
#Output : cd weather
|
Weather app using Django
from django.shortcuts import render
# import json to load json data to python dictionary
import json
# urllib.request to make a request to api
import urllib.request
def index(request):
if request.method == "POST":
city = request.POST["city"]
""" api key might be expired use your own api_key
place api_key in place of appid ="your_api_key_here " """
# source contain JSON data from API
source = urllib.request.urlopen(
"http://api.openweathermap.org/data/2.5/weather?q ="
+ city
+ "&appid = your_api_key_here"
).read()
# converting JSON data to a dictionary
list_of_data = json.loads(source)
# data for variable list_of_data
data = {
"country_code": str(list_of_data["sys"]["country"]),
"coordinate": str(list_of_data["coord"]["lon"])
+ " "
+ str(list_of_data["coord"]["lat"]),
"temp": str(list_of_data["main"]["temp"]) + "k",
"pressure": str(list_of_data["main"]["pressure"]),
"humidity": str(list_of_data["main"]["humidity"]),
}
print(data)
else:
data = {}
return render(request, "main/index.html", data)
#Output : cd weather
[END]
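The query string in the view is easy to get wrong by hand (the original snippet had stray spaces around the "=" signs, which breaks the request); a sketch of the same call built with urllib.parse.urlencode instead, assuming a valid api_key:

import json
import urllib.parse
import urllib.request

def fetch_weather(city, api_key):
    # urlencode builds "q=<city>&appid=<key>" and escapes the city name
    query = urllib.parse.urlencode({"q": city, "appid": api_key})
    url = "http://api.openweathermap.org/data/2.5/weather?" + query
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read())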
|
Django Sign Up and login with confirmation Email
|
https://www.geeksforgeeks.org/django-sign-up-and-login-with-confirmation-email-python/
|
from django.contrib import admin
from django.urls import path, include
from user import views as user_view
from django.contrib.auth import views as auth
urlpatterns = [
path("admin/", admin.site.urls),
##### user related path##########################
path("", include("user.urls")),
path("login/", user_view.Login, name="login"),
path(
"logout/",
auth.LogoutView.as_view(template_name="user/index.html"),
name="logout",
),
path("register/", user_view.register, name="register"),
]
|
#Output : pip install --upgrade django-crispy-forms
|
Django Sign Up and login with confirmation Email
from django.contrib import admin
from django.urls import path, include
from user import views as user_view
from django.contrib.auth import views as auth
urlpatterns = [
path("admin/", admin.site.urls),
##### user related path##########################
path("", include("user.urls")),
path("login/", user_view.Login, name="login"),
path(
"logout/",
auth.LogoutView.as_view(template_name="user/index.html"),
name="logout",
),
path("register/", user_view.register, name="register"),
]
#Output : pip install --upgrade django-crispy-forms
[END]
|
Django Sign Up and login with confirmation Email
|
https://www.geeksforgeeks.org/django-sign-up-and-login-with-confirmation-email-python/
|
from django.urls import path, include
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
path("", views.index, name="index"),
]
|
#Output : pip install --upgrade django-crispy-forms
|
Django Sign Up and login with confirmation Email
from django.urls import path, include
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
path("", views.index, name="index"),
]
#Output : pip install --upgrade django-crispy-forms
[END]
|
Django Sign Up and login with confirmation Email
|
https://www.geeksforgeeks.org/django-sign-up-and-login-with-confirmation-email-python/
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from .forms import UserRegisterForm
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
#################### index#######################################
def index(request):
return render(request, "user/index.html", {"title": "index"})
########### register here #####################################
def register(request):
if request.method == "POST":
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
email = form.cleaned_data.get("email")
######################### mail system ####################################
htmly = get_template("user/Email.html")
d = {"username": username}
subject, from_email, to = "welcome", "[email protected]", email
html_content = htmly.render(d)
msg = EmailMultiAlternatives(subject, html_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
##################################################################
messages.success(
request, f"Your account has been created ! You are now able to log in"
)
return redirect("login")
else:
form = UserRegisterForm()
return render(
request, "user/register.html", {"form": form, "title": "register here"}
)
################ login forms###################################################
def Login(request):
if request.method == "POST":
# AuthenticationForm_can_also_be_used__
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None:
            login(request, user)
            messages.success(request, f"Welcome {username}!")
            return redirect("index")
        else:
            messages.info(request, "Account does not exist; please sign up.")
form = AuthenticationForm()
return render(request, "user/login.html", {"form": form, "title": "log in"})
|
#Output : pip install --upgrade django-crispy-forms
|
Django Sign Up and login with confirmation Email
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from .forms import UserRegisterForm
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
#################### index#######################################
def index(request):
return render(request, "user/index.html", {"title": "index"})
########### register here #####################################
def register(request):
if request.method == "POST":
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
email = form.cleaned_data.get("email")
######################### mail system ####################################
htmly = get_template("user/Email.html")
d = {"username": username}
subject, from_email, to = "welcome", "[email protected]", email
html_content = htmly.render(d)
msg = EmailMultiAlternatives(subject, html_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
##################################################################
messages.success(
request, f"Your account has been created ! You are now able to log in"
)
return redirect("login")
else:
form = UserRegisterForm()
return render(
request, "user/register.html", {"form": form, "title": "register here"}
)
################ login forms###################################################
def Login(request):
if request.method == "POST":
# AuthenticationForm_can_also_be_used__
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None:
            login(request, user)
            messages.success(request, f"Welcome {username}!")
            return redirect("index")
        else:
            messages.info(request, "Account does not exist; please sign up.")
form = AuthenticationForm()
return render(request, "user/login.html", {"form": form, "title": "log in"})
#Output : pip install --upgrade django-crispy-forms
[END]
|
Django Sign Up and login with confirmation Email
|
https://www.geeksforgeeks.org/django-sign-up-and-login-with-confirmation-email-python/
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
phone_no = forms.CharField(max_length=20)
first_name = forms.CharField(max_length=20)
last_name = forms.CharField(max_length=20)
class Meta:
model = User
fields = ["username", "email", "phone_no", "password1", "password2"]
|
#Output : pip install --upgrade django-crispy-forms
|
Django Sign Up and login with confirmation Email
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
phone_no = forms.CharField(max_length=20)
first_name = forms.CharField(max_length=20)
last_name = forms.CharField(max_length=20)
class Meta:
model = User
fields = ["username", "email", "phone_no", "password1", "password2"]
#Output : pip install --upgrade django-crispy-forms
[END]
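EmailMultiAlternatives(...).send() in the register view only works once an email backend is configured; a settings.py sketch with placeholder values (every host and credential below is a stand-in, not from the source):

# settings.py (sketch): SMTP backend needed by the confirmation mail.
# All values are placeholders; substitute your provider's details.
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.example.com"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = "[email protected]"
EMAIL_HOST_PASSWORD = "your-app-password"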
|
ToDo webapp using Django
|
https://www.geeksforgeeks.org/python-todo-webapp-using-django/
|
from django.contrib import admin
from django.urls import path
from todo import views
urlpatterns = [
#####################home_page###########################################
path("", views.index, name="todo"),
    #################### item_id comes from the URL (item_id=i.id) ##########
    # pass item_id as the primary key to remove the todo with the given id
path("del/<str:item_id>", views.remove, name="del"),
########################################################################
path("admin/", admin.site.urls),
]
|
#Output : django-admin startproject todo_site
|
ToDo webapp using Django
from django.contrib import admin
from django.urls import path
from todo import views
urlpatterns = [
#####################home_page###########################################
path("", views.index, name="todo"),
    #################### item_id comes from the URL (item_id=i.id) ##########
    # pass item_id as the primary key to remove the todo with the given id
path("del/<str:item_id>", views.remove, name="del"),
########################################################################
path("admin/", admin.site.urls),
]
#Output : django-admin startproject todo_site
[END]
|
ToDo webapp using Django
|
https://www.geeksforgeeks.org/python-todo-webapp-using-django/
|
from django.db import models
from django.utils import timezone
class Todo(models.Model):
title = models.CharField(max_length=100)
details = models.TextField()
date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
|
#Output : django-admin startproject todo_site
|
ToDo webapp using Django
from django.db import models
from django.utils import timezone
class Todo(models.Model):
title = models.CharField(max_length=100)
details = models.TextField()
date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
#Output : django-admin startproject todo_site
[END]
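After running `python manage.py makemigrations` and `migrate`, the model can be exercised from the shell; a quick sketch (the title and details strings are illustrative):

# Run inside `python manage.py shell` after migrating the todo app.
from todo.models import Todo

item = Todo.objects.create(title="Write report", details="Due Friday")
print(item)  # prints "Write report" via __str__
print(Todo.objects.order_by("-date").count())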
|
ToDo webapp using Django
|
https://www.geeksforgeeks.org/python-todo-webapp-using-django/
|
from django.shortcuts import render, redirect
from django.contrib import messages
# import todo form and models
from .forms import TodoForm
from .models import Todo
###############################################
def index(request):
item_list = Todo.objects.order_by("-date")
if request.method == "POST":
form = TodoForm(request.POST)
if form.is_valid():
form.save()
return redirect("todo")
form = TodoForm()
page = {
"forms": form,
"list": item_list,
"title": "TODO LIST",
}
return render(request, "todo/index.html", page)
### function to remove item, it receive todo item_id as primary key from url ##
def remove(request, item_id):
item = Todo.objects.get(id=item_id)
item.delete()
messages.info(request, "item removed !!!")
return redirect("todo")
|
#Output : django-admin startproject todo_site
|
ToDo webapp using Django
from django.shortcuts import render, redirect
from django.contrib import messages
# import todo form and models
from .forms import TodoForm
from .models import Todo
###############################################
def index(request):
item_list = Todo.objects.order_by("-date")
if request.method == "POST":
form = TodoForm(request.POST)
if form.is_valid():
form.save()
return redirect("todo")
form = TodoForm()
page = {
"forms": form,
"list": item_list,
"title": "TODO LIST",
}
return render(request, "todo/index.html", page)
### function to remove item, it receive todo item_id as primary key from url ##
def remove(request, item_id):
item = Todo.objects.get(id=item_id)
item.delete()
messages.info(request, "item removed !!!")
return redirect("todo")
#Output : django-admin startproject todo_site
[END]
|
ToDo webapp using Django
|
https://www.geeksforgeeks.org/python-todo-webapp-using-django/
|
from django import forms
from .models import Todo
class TodoForm(forms.ModelForm):
class Meta:
model = Todo
fields = "__all__"
|
#Output : django-admin startproject todo_site
|
ToDo webapp using Django
from django import forms
from .models import Todo
class TodoForm(forms.ModelForm):
class Meta:
model = Todo
fields = "__all__"
#Output : django-admin startproject todo_site
[END]
|
ToDo webapp using Django
|
https://www.geeksforgeeks.org/python-todo-webapp-using-django/
|
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
    <meta charset="utf-8">
    <title>{{title}}</title>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
    <!--style-->
    <style>
    .card {
        box-shadow: 0 4px 8px 0 rgba(0,0,0,0.5),
                    0 6px 20px 0 rgba(0,0,0,0.39);
        background: lightpink;
        margin-bottom: 5%;
        border-radius: 25px;
        padding: 2%;
        overflow: auto;
        resize: both;
        text-overflow: ellipsis;
    }
    .card:hover {
        background: lightblue;
    }
    .submit_form {}
    </style>
</head>
<body>
<div class="container-fluid">
    {% if messages %}
    {% for message in messages %}
    <div class="alert alert-info">
        <strong>{{message}}</strong>
    </div>
    {% endfor %}
    {% endif %}
    <div class="row">
        <h1><i>__TODO LIST__</i></h1>
        <hr/>
    </div>
    <div class="row">
        <div class="col-md-8">
            {% for i in list %}
            <div class="card">
                <center><b>{{i.title}}</b></center>
                <hr/>
                {{i.date}}
                <form action="/del/{{i.id}}" method="POST" style="padding-right: 4%; padding-bottom: 3%;">
                    {% csrf_token %}
                    <button name="remove" type="submit" class="btn btn-primary" style="float: right;">
                        <span class="glyphicon glyphicon-trash"></span> remove
                    </button>
                </form>
            </div>
            {% endfor %}
        </div>
        <div class="col-md-1"></div>
        <div class="col-md-3">
            <div class="submit_form">
                <form method="POST">
                    {% csrf_token %}
                    {{forms}}
                    <center>
                        <input type="submit" class="btn btn-default" value="submit"/>
                    </center>
                </form>
            </div>
        </div>
    </div>
</div>
</body>
</html>
|
#Output : django-admin startproject todo_site
|
ToDo webapp using Django
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
    <meta charset="utf-8">
    <title>{{title}}</title>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
    <!--style-->
    <style>
    .card {
        box-shadow: 0 4px 8px 0 rgba(0,0,0,0.5),
                    0 6px 20px 0 rgba(0,0,0,0.39);
        background: lightpink;
        margin-bottom: 5%;
        border-radius: 25px;
        padding: 2%;
        overflow: auto;
        resize: both;
        text-overflow: ellipsis;
    }
    .card:hover {
        background: lightblue;
    }
    .submit_form {}
    </style>
</head>
<body>
<div class="container-fluid">
    {% if messages %}
    {% for message in messages %}
    <div class="alert alert-info">
        <strong>{{message}}</strong>
    </div>
    {% endfor %}
    {% endif %}
    <div class="row">
        <h1><i>__TODO LIST__</i></h1>
        <hr/>
    </div>
    <div class="row">
        <div class="col-md-8">
            {% for i in list %}
            <div class="card">
                <center><b>{{i.title}}</b></center>
                <hr/>
                {{i.date}}
                <form action="/del/{{i.id}}" method="POST" style="padding-right: 4%; padding-bottom: 3%;">
                    {% csrf_token %}
                    <button name="remove" type="submit" class="btn btn-primary" style="float: right;">
                        <span class="glyphicon glyphicon-trash"></span> remove
                    </button>
                </form>
            </div>
            {% endfor %}
        </div>
        <div class="col-md-1"></div>
        <div class="col-md-3">
            <div class="submit_form">
                <form method="POST">
                    {% csrf_token %}
                    {{forms}}
                    <center>
                        <input type="submit" class="btn btn-default" value="submit"/>
                    </center>
                </form>
            </div>
        </div>
    </div>
</div>
</body>
</html>
#Output : django-admin startproject todo_site
[END]
|
Django project to create a Comments System
|
https://www.geeksforgeeks.org/django-project-to-create-a-comments-system/
|
from PIL import Image
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Post(models.Model):
    image = models.ImageField(default="default_foo.png", upload_to="post_picture")
    caption = models.TextField()
    date_posted = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        # the model has no title field, so identify the post by its caption
        return f"{self.author.username}'s Post - {self.caption[:20]}"
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 400 or img.width > 400:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
|
#Output : django-admin startproject my_project
|
Django project to create a Comments System
from PIL import Image
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Post(models.Model):
    image = models.ImageField(default="default_foo.png", upload_to="post_picture")
    caption = models.TextField()
    date_posted = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        # the model has no title field, so identify the post by its caption
        return f"{self.author.username}'s Post - {self.caption[:20]}"
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 400 or img.width > 400:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
#Output : django-admin startproject my_project
[END]
|
Django project to create a Comments System
|
https://www.geeksforgeeks.org/django-project-to-create-a-comments-system/
|
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
content = forms.CharField(
label="",
widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder": "Comment here !",
"rows": 4,
"cols": 50,
}
),
)
class Meta:
model = Comment
fields = ["content"]
|
#Output : django-admin startproject my_project
|
Django project to create a Comments System
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
content = forms.CharField(
label="",
widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder": "Comment here !",
"rows": 4,
"cols": 50,
}
),
)
class Meta:
model = Comment
fields = ["content"]
#Output : django-admin startproject my_project
[END]
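The form's Meta.model points at a Comment model that this dump never shows; a plausible sketch inferred from the view below (the field names post, user, and content come from the Comment.objects.create(...) call there; everything else, including the timestamp field, is an assumption):

# models.py (sketch): a Comment model consistent with the form and view.
from django.db import models
from django.contrib.auth.models import User

class Comment(models.Model):
    post = models.ForeignKey("Post", on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField()
    created = models.DateTimeField(auto_now_add=True)  # assumed timestamp
    def __str__(self):
        return f"Comment by {self.user.username}"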
|
Django project to create a Comments System
|
https://www.geeksforgeeks.org/django-project-to-create-a-comments-system/
|
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Comment
from .forms import CommentForm
def post_detailview(request, id):
    # fetch the post being commented on; the original snippet never defined it
    post = get_object_or_404(Post, id=id)
    if request.method == "POST":
        cf = CommentForm(request.POST or None)
        if cf.is_valid():
            content = request.POST.get("content")
            comment = Comment.objects.create(
                post=post, user=request.user, content=content
            )
            comment.save()
            return redirect(post.get_absolute_url())
    else:
        cf = CommentForm()
    context = {
        "comment_form": cf,
    }
    return render(request, "socio/post_detail.html", context)
|
#Output : django-admin startproject my_project
|
Django project to create a Comments System
from django.shortcuts import render, redirect, get_object_or_404
from .models import Post, Comment
from .forms import CommentForm
def post_detailview(request, id):
    # fetch the post being commented on; the original snippet never defined it
    post = get_object_or_404(Post, id=id)
    if request.method == "POST":
        cf = CommentForm(request.POST or None)
        if cf.is_valid():
            content = request.POST.get("content")
            comment = Comment.objects.create(
                post=post, user=request.user, content=content
            )
            comment.save()
            return redirect(post.get_absolute_url())
    else:
        cf = CommentForm()
    context = {
        "comment_form": cf,
    }
    return render(request, "socio/post_detail.html", context)
#Output : django-admin startproject my_project
[END]
|
Django project to create a Comments System
|
https://www.geeksforgeeks.org/django-project-to-create-a-comments-system/
|
{% load crispy_forms_tags %}
<html>
<head>
    <title></title>
</head>
<body>
    <form method="POST">
        {% csrf_token %}
        {{comment_form|crispy}}
        <input type="submit" value="submit">
    </form>
</body>
</html>
|
#Output : django-admin startproject my_project
|
Django project to create a Comments System
{% load crispy_forms_tags %}
<html>
<head>
    <title></title>
</head>
<body>
    <form method="POST">
        {% csrf_token %}
        {{comment_form|crispy}}
        <input type="submit" value="submit">
    </form>
</body>
</html>
#Output : django-admin startproject my_project
[END]
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
from django.db import models
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField("date published")
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
from django.db import models
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField("date published")
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
#Output : pip install pipenv
[END]
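With the models migrated, questions and choices can be created from the shell; choice_set is the reverse accessor Django generates for the ForeignKey. A quick sketch (question and choice texts are illustrative):

# Run inside `python manage.py shell` after migrating the polls app.
from django.utils import timezone
from polls.models import Question

q = Question.objects.create(
    question_text="What's your favourite language?",
    pub_date=timezone.now(),
)
q.choice_set.create(choice_text="Python", votes=0)
q.choice_set.create(choice_text="Go", votes=0)
print(q.choice_set.count())  # 2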
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
INSTALLED_APPS = [
"polls.apps.PollsConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
INSTALLED_APPS = [
"polls.apps.PollsConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
#Output : pip install pipenv
[END]
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
from django.contrib import admin
# Register your models here.
from .models import Question, Choice
# admin.site.register(Question)
# admin.site.register(Choice)
admin.site.site_header = "Pollster Admin"
admin.site.site_title = "Pollster Admin Area"
admin.site.index_title = "Welcome to the Pollster Admin Area"
class ChoiceInLine(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ["question_text"]}),
("Date Information", {"fields": ["pub_date"], "classes": ["collapse"]}),
]
inlines = [ChoiceInLine]
admin.site.register(Question, QuestionAdmin)
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
from django.contrib import admin
# Register your models here.
from .models import Question, Choice
# admin.site.register(Question)
# admin.site.register(Choice)
admin.site.site_header = "Pollster Admin"
admin.site.site_title = "Pollster Admin Area"
admin.site.index_title = "Welcome to the Pollster Admin Area"
class ChoiceInLine(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ["question_text"]}),
("Date Information", {"fields": ["pub_date"], "classes": ["collapse"]}),
]
inlines = [ChoiceInLine]
admin.site.register(Question, QuestionAdmin)
#Output : pip install pipenv
[END]
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
from django.template import loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from .models import Question, Choice
# Get questions and display them
def index(request):
latest_question_list = Question.objects.order_by("-pub_date")[:5]
context = {"latest_question_list": latest_question_list}
return render(request, "polls / index.html", context)
# Show specific question and choices
def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Question does not exist")
return render(request, "polls / detail.html", {"question": question})
# Get question and display results
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, "polls / results.html", {"question": question})
# Vote for a question choice
def vote(request, question_id):
# print(request.POST['choice'])
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST["choice"])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(
request,
"polls / detail.html",
{
"question": question,
"error_message": "You didn't select a choice.",
},
)
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse("polls:results", args=(question.id,)))
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
from django.template import loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from .models import Question, Choice
# Get questions and display them
def index(request):
latest_question_list = Question.objects.order_by("-pub_date")[:5]
context = {"latest_question_list": latest_question_list}
return render(request, "polls / index.html", context)
# Show specific question and choices
def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Question does not exist")
return render(request, "polls / detail.html", {"question": question})
# Get question and display results
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, "polls / results.html", {"question": question})
# Vote for a question choice
def vote(request, question_id):
# print(request.POST['choice'])
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST["choice"])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(
request,
"polls / detail.html",
{
"question": question,
"error_message": "You didn't select a choice.",
},
)
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse("polls:results", args=(question.id,)))
#Output : pip install pipenv
[END]
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
from django.urls import path
from . import views
app_name = "polls"
urlpatterns = [
path("", views.index, name="index"),
path("<int:question_id>/", views.detail, name="detail"),
path("<int:question_id>/results/", views.results, name="results"),
path("<int:question_id>/vote/", views.vote, name="vote"),
]
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
from django.urls import path
from . import views
app_name = "polls"
urlpatterns = [
path("", views.index, name="index"),
path("<int:question_id>/", views.detail, name="detail"),
path("<int:question_id>/results/", views.results, name="results"),
path("<int:question_id>/vote/", views.vote, name="vote"),
]
#Output : pip install pipenv
[END]
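Because the URLconf sets app_name = "polls", views can resolve these routes by namespaced name, which is exactly what reverse("polls:results", ...) in the vote view relies on. A sketch (the printed paths assume the app is included at the project root):

# Run inside `python manage.py shell`.
from django.urls import reverse

print(reverse("polls:detail", args=(1,)))  # "/1/" if mounted at the root
print(reverse("polls:vote", args=(1,)))    # "/1/vote/"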
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
TEMPLATES = [
{
# make changes in DIRS[].
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
TEMPLATES = [
{
# make changes in DIRS[].
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
#Output : pip install pipenv
[END]
|
Voting System Project Using Django Framework
|
https://www.geeksforgeeks.org/voting-system-project-using-django-framework/
|
{% extends 'base.html' %}{% block content %}<h1 class ="text-center mb-3">Poll Questions</h1>{% if latest_question_list %}{% for question in latest_question_list %}<div class ="card-mb-3">????????????????????"card-body">??????????????????????????"lead">{{ question.question_text }}</p>???????????????????????????"{% url 'polls:detail' question.id %}" class ="btn btn-primary btn-sm">Vote Now</a>?????????????????????????"{% url 'polls:results' question.id %}" class ="btn btn-secondary btn-sm">Results</a>????????????????????????</div></div>{% endfor %}{% else %}??????<p>No polls available</p>??????{
|
#Output : pip install pipenv
|
Voting System Project Using Django Framework
{% extends 'base.html' %}{% block content %}<h1 class ="text-center mb-3">Poll Questions</h1>{% if latest_question_list %}{% for question in latest_question_list %}<div class ="card-mb-3">????????????????????"card-body">??????????????????????????"lead">{{ question.question_text }}</p>???????????????????????????"{% url 'polls:detail' question.id %}" class ="btn btn-primary btn-sm">Vote Now</a>?????????????????????????"{% url 'polls:results' question.id %}" class ="btn btn-secondary btn-sm">Results</a>????????????????????????</div></div>{% endfor %}{% else %}??????<p>No polls available</p>??????{
#Output : pip install pipenv
[END]
|