2020 Sept 5, installation: in Anaconda (see https://sites.google.com/site/hongslinks/tensor_windows for how to install Anaconda3).
Reference: http://web.cecs.pdx.edu/~fliu/courses/cs410/python-opencv.html
In the Win10 "Type here to search" box, type Anaconda Prompt (right-click, run as administrator). At the prompt, type: pip install opencv-python, and you will see the following:
Collecting opencv-python
Downloading opencv_python-4.4.0.42-cp37-cp37m-win_amd64.whl (33.5 MB)
|████████████████████████████████| 33.5 MB 101 kB/s
Requirement already satisfied: numpy>=1.14.5 in c:\users\khwong2\anaconda3\envs\tf-cpu\lib\site-packages (from opencv-python) (1.19.1)
Installing collected packages: opencv-python
Successfully installed opencv-python-4.4.0.42
import cv2        # import the opencv library
cv2.__version__   # this will show the version of your opencv
2020 Feb 18, camera calibration using Python demo/tutorial (a minimal sketch of the idea is given after the link):
https://sites.google.com/site/hongslinks/opencv-1/camera-calibration-opencv-python
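The following is a minimal sketch (not the full tutorial code) of the chessboard calibration flow behind the link above; the 9x6 inner-corner pattern size and the c:\images\calib\*.jpg photo location are assumptions, adjust them to your own board and folder.
import glob
import numpy as np
import cv2

pattern = (9, 6)   # inner corners per chessboard row and column (assumption; match your printed board)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)   # 3D points on the board plane (z = 0)

objpoints, imgpoints = [], []   # 3D world points and the matching 2D image points
for fname in glob.glob('c:\\images\\calib\\*.jpg'):   # assumed location of your calibration photos
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, pattern, None)
    if found:
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                   (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
        objpoints.append(objp)
        imgpoints.append(corners)

# solve for the camera matrix and lens distortion from all detected boards
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print("camera matrix:\n", mtx)
print("distortion coefficients:", dist.ravel())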
2020 Jan 31 -- using Python 3.6.7 (under Anaconda Navigator 1.9.7), Spyder 4.0.0, OpenCV 4.1.2.
Installation guide: my recommendation is to install Anaconda, then install Python and OpenCV under Anaconda, and use Spyder for editing/running your programs; see
https://problemsolvingwithpython.com/01-Orientation/01.03-Installing-Anaconda-on-Windows/
or
http://web.cecs.pdx.edu/~fliu/courses/cs410/python-opencv.html
https://docs.anaconda.com/anaconda/install/windows/
You may also need to install tools such as numpy using "conda install" or "pip install" inside the Anaconda Prompt (run as administrator); a quick check of the environment is shown below.
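A quick sanity check of the environment described above (run it inside Spyder); it simply prints the Python, numpy and OpenCV versions actually in use:
import sys
import numpy as np
import cv2
print("python:", sys.version)
print("numpy:", np.__version__)
print("opencv:", cv2.__version__)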
#--------------2020 Jan 31 tested ok-------------- test 1 : corner detection -------------------------------
########### start of file test1.py #############################################
#You need to have chessboard.jpg at c:\\images\\chessboard.jpg (a chessboard image can be found at https://www.sanarias.com/blog/115BuildingachessboardimageinGo),
#or put chessboard.jpg in the same directory as your test1.py file and change the code to point to the local directory.
import cv2
import numpy as np
filename = 'c:\\images\\chessboard.jpg'
#filename = 'c:\\images\\right05.jpg'
img = cv2.imread(filename)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
# Threshold for an optimal value, it may vary depending on the image.
img[dst>0.01*dst.max()]=[0,0,255]
cv2.imshow('dst',img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
########### end of file test1.py #############################################
#----------2020 Jan 31 tested ok------- test 2: video capture test -------------------------------------------
# ########### start of file test2.py (video test) #############################################
#reference: http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
# ########### end of file test2.py #############################################
#--------------------------2020 Jan 31 tested ok--------- test 3: aruco marker tracker --------------
# ########### start of file test3.py, using opencv 4.1.2, python #########################
#single 6x6 aruco marker tracking; see also https://longervision.github.io/page/2/
#step 1: download the code from https://github.com/njanirudh/Aruco_Tracker
#        https://github.com/njanirudh/Aruco_Tracker/blob/master/aruco_tracker.py tested ok
#step 2: modify line 7: cap = cv2.VideoCapture(0) #if you are using the webcam of a laptop
#step 3: print this marker sheet:
#        https://raw.githubusercontent.com/LongerVision/OpenCV_Examples/master/markers/board_aruco_57.png
#        cut one marker out and stick it on a piece of cardboard for easy holding
#step 4: run aruco_tracker.py; using one 6x6 marker gives the best result for testing. Tracking multiple markers is possible but needs a faster computer to process.
#remark: the code is designed for 6x6 markers (search for 6x6 inside the code); if you change the code, you may use other marker formats. A minimal detection-only sketch is given after the end-of-file marker below.
# ########### end of file test3.py #############################################
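Below is a minimal detection-only sketch of the idea behind aruco_tracker.py, not the full pose-estimation tracker; note that the cv2.aruco module ships with the opencv-contrib-python package (pip install opencv-contrib-python) rather than plain opencv-python, and the DICT_6X6_250 dictionary is an assumption chosen to match the printed 6x6 board.
import cv2

cap = cv2.VideoCapture(0)   # laptop webcam, as in step 2
aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)   # 6x6 marker dictionary (assumption)
parameters = cv2.aruco.DetectorParameters_create()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    corners, ids, rejected = cv2.aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if ids is not None:
        cv2.aruco.drawDetectedMarkers(frame, corners, ids)   # draw the marker outlines and ids on the frame
    cv2.imshow('aruco', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()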
#----2020 Jan 31 tested ok----test 4: line detection using houghline-------------------------------------
# ########### start of file test4.py #############################################
#from https://stackoverflow.com/questions/28091984/hough-lines-in-video
import numpy as np
import cv2

cam = cv2.VideoCapture(0)
winName = "Movement Indicator"
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
while True:
    s, img = cam.read()
    edges = cv2.Canny(img, 100, 200)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 4, 2, None, 10, 1)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 3)
    cv2.imshow('edges', edges)
    cv2.imshow('original', img)
    if cv2.waitKey(10) & 0xff == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
# ########### end of file test4.py #############################################
#-------2020 Jan 31 tested ok----------- test5 : face detection -------------------------------------------
# ########### start of file test5.py ##############################################
# from https://blog.gtwang.org/programming/python-opencv-dlib-face-detection-implementation-tutorial/
## In the Anaconda Prompt (right-click, run as administrator): conda install -c conda-forge dlib   #if you are using python 3.7
#older method: conda install -c menpo dlib   #to install dlib for face tracking
import dlib
import cv2
import imutils   #>>pip install imutils   #if you do not have this library
# Open the video source (webcam)
cap = cv2.VideoCapture(0)
# Dlib's face detector
detector = dlib.get_frontal_face_detector()
# Loop: read frames from the video source and display them
while cap.isOpened():
    ret, frame = cap.read()
    # Detect faces
    face_rects, scores, idx = detector.run(frame, 0)
    # Go through all detection results
    for i, d in enumerate(face_rects):
        x1 = d.left()
        y1 = d.top()
        x2 = d.right()
        y2 = d.bottom()
        text = "%2.2f(%d)" % (scores[i], idx[i])
        # Mark the detected face with a rectangle
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4, cv2.LINE_AA)
        # Label the detection score
        cv2.putText(frame, text, (x1, y1), cv2.FONT_HERSHEY_DUPLEX,
                    0.7, (255, 255, 255), 1, cv2.LINE_AA)
    # Show the result
    cv2.imshow("Face Detection", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# ########### end of file test5.py ##############################################
#---2020 Jan 31 tested ok---------- test 6: dlib test ------------------------------------------------------------
# ########### start of file test6.py ##############################################
## In the Anaconda Prompt (right-click, run as administrator): conda install -c conda-forge dlib   #if you are using python 3.7
#older method: conda install -c menpo dlib   #to install dlib for face tracking
# from https://www.pyimagesearch.com/2018/01/22/install-dlib-easy-complete-guide/
#make sure you have the file lena.jpg in c:\\images\\ (see https://en.wikipedia.org/wiki/Lenna)
# and shape_predictor_68_face_landmarks.dat in c:\\images\\ (or the current directory; adjust p below); get it from
# https://github.com/AKSHAYUBHAT/TensorFace/blob/master/openface/models/dlib/shape_predictor_68_face_landmarks.dat
# import the necessary packages
from imutils import face_utils
import dlib
import cv2
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
#p = "shape_predictor_68_face_landmarks.dat"
p = "c:\images\shape_predictor_68_face_landmarks.dat" #depends on where you store it
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)
# load the input image and convert it to grayscale
image = cv2.imread("c:\\images\\lena.jpg") #make sure you have the file lena.jpg in c:\\images\\
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
rects = detector(gray, 0)
# loop over the face detections
for (i, rect) in enumerate(rects):
    # determine the facial landmarks for the face region, then
    # convert the facial landmark (x, y)-coordinates to a NumPy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    # loop over the (x, y)-coordinates for the facial landmarks
    # and draw them on the image
    for (x, y) in shape:
        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
# show the output image with the face detections + facial landmarks
cv2.imshow("Output", image)
cv2.waitKey(0)
# ########### end of file test6, dlib test from image file ############################
#-------------2020 Jan 31 tested ok----------------- test 7: dlib face tracking from the webcam -------------------------------
# ########### start of file test7.py ##############################################
## In the Anaconda Prompt (right-click, run as administrator): conda install -c conda-forge dlib   #if you are using python 3.7
#older method: conda install -c menpo dlib   #to install dlib for face tracking
#https://stackoverflow.com/questions/38782191/whats-wrong-with-this-webcam-face-detection
# in a Windows cmd window (run as administrator), type: pip install scikit-image
# the face-count print of the original script was changed for Python 3 to: print("your faces: %d" % len(dets))
#
from __future__ import division
import sys
import dlib
from skimage import io
detector = dlib.get_frontal_face_detector()
win = dlib.image_window()
if len(sys.argv[1:]) == 0:
    from cv2 import VideoCapture
    from time import time

    cam = VideoCapture(0)   #set the port of the camera as before
    while True:
        start = time()
        retval, image = cam.read()   #returns a boolean and the image if all goes right
        for row in image:
            for px in row:
                #rgb expected... but the array is bgr, so swap the red and blue channels
                r = px[2]
                px[2] = px[0]
                px[0] = r
        #import matplotlib.pyplot as plt
        #plt.imshow(image)
        #plt.show()
        print("readimage: " + str(time() - start))

        start = time()
        dets = detector(image, 1)
        print("your faces: %d" % len(dets))
        for i, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                i, d.left(), d.top(), d.right(), d.bottom()))
            print("from left: {}".format(((d.left() + d.right()) / 2) / len(image[0])))
            print("from top: {}".format(((d.top() + d.bottom()) / 2) / len(image)))
        print("process: " + str(time() - start))

        start = time()
        win.clear_overlay()
        win.set_image(image)
        win.add_overlay(dets)
        print("show: " + str(time() - start))
        #dlib.hit_enter_to_continue()

for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time. This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))
    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()

# Finally, if you really want to you can ask the detector to tell you the score
# for each detection. The score is bigger for more confident detections.
# Also, the idx tells you which of the face sub-detectors matched. This can be
# used to broadly identify faces in different orientations.
if len(sys.argv[1:]) > 0:
    img = io.imread(sys.argv[1])
    dets, scores, idx = detector.run(img, 1)
    for i, d in enumerate(dets):
        print("Detection {}, score: {}, face_type:{}".format(
            d, scores[i], idx[i]))
#
# ########### end of file test7.py ##############################################
####################### test 8 start: face landmark detection from the webcam ###################
## In the Anaconda Prompt (right-click, run as administrator): conda install -c conda-forge dlib   #if you are using python 3.7
#older method: conda install -c menpo dlib   #to install dlib for face tracking
#Download the .py source file from
#https://pysource.com/2019/03/12/face-landmarks-detection-opencv-with-python/
#Also download shape_predictor_68_face_landmarks.dat from https://github.com/AKSHAYUBHAT/TensorFace/blob/master/openface/models/dlib/shape_predictor_68_face_landmarks.dat
#and place it in a directory, e.g. c:\\images\\
#Then change line 8 of the .py source file to
#dlib.shape_predictor("c:\\images\\shape_predictor_68_face_landmarks.dat")
#and run it; you will get the code below.
import cv2
import numpy as np
import dlib
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("c:\\images\\shape_predictor_68_face_landmarks.dat")
while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        #cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
        landmarks = predictor(gray, face)
        for n in range(0, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            cv2.circle(frame, (x, y), 4, (255, 0, 0), -1)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1)
    if key == 27:   # press ESC to quit
        break
cap.release()
cv2.destroyAllWindows()
####################### test8 end #############################
#----------------------------------------------- test 9: change the video source to the webcam -------------------------------------------------------------------
# ########### start of file test9.py ##############################################
# Modify the video-capture line of the script as follows:
#cap = cv2.VideoCapture('slow.flv') #original: read from a video file
cap = cv2.VideoCapture(0) # use the laptop webcam instead
# ########### end of file test9.py ##############################################
#------------runs ok 2020 Feb 2 ------- new LK (Lucas-Kanade optical flow) demo ------ test 10: -----------------------------------------------
# ########### start of file test10.py ##############################################
# source: https://github.com/simondlevy/OpenCV-Python-Hacks/blob/master/lkdemo.py (a minimal sketch of the same idea follows the end-of-file marker)
# ########### end of file test10.py ##############################################
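Below is a minimal sparse Lucas-Kanade tracking sketch in the spirit of that lkdemo; the feature count, quality level and window size are illustrative choices, not values taken from the linked script.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, maxCorners=100, qualityLevel=0.3, minDistance=7)   # seed points
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if p0 is None or len(p0) == 0:
        # all points lost: re-seed fresh corners from the current frame
        p0 = cv2.goodFeaturesToTrack(gray, maxCorners=100, qualityLevel=0.3, minDistance=7)
        old_gray = gray
    else:
        # track the seed points from the previous frame into the current one
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, gray, p0, None,
                                               winSize=(15, 15), maxLevel=2)
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        for new, old in zip(good_new, good_old):
            a, b = new.ravel()
            c, d = old.ravel()
            cv2.line(frame, (int(a), int(b)), (int(c), int(d)), (0, 255, 0), 2)   # motion vector
            cv2.circle(frame, (int(a), int(b)), 3, (0, 0, 255), -1)               # current position
        old_gray = gray
        p0 = good_new.reshape(-1, 1, 2)
    cv2.imshow('lk', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()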
#----------- --- test 11: -----------------------------------------------
# empty, to be filled
# ########### end of file test11.py ##############################################