A blob is a region of an image in which some properties are constant or vary within a prescribed range of values. The blue M&Ms in the image below are examples of blobs.
m_and_ms = Image('m&ms.jpg')
blue_dist = m_and_ms.colorDistance(Color.BLUE)
blue_dist.show()
blue_dist = blue_dist.invert()
blobs = blue_dist.findBlobs()
print len(blobs)
>> 122
blobs.draw(Color.RED, width=-1)
blue_dist.show()
blue_dist.findBlobs(minsize=200)
blobs = blobs.filter(blobs.area() > 200)
len(blobs)
>> 36
average_area = np.average(blobs.area())
>> 37792.77
blue_dist = blue_dist.scale(0.35)
blobs = blue_dist.findBlobs(threshval=177, minsize=100)
len(blobs)
>> 25
blue_dist.binarize(blocksize=501).show()
Detector
Descriptor
Matcher
Filtering or Pruning best matches
Find ROIs
# Draw the raw keypoint matches between the sample image and the template.
result_image = sample.drawKeypointMatches(template)
# skp: matched keypoints in the sample; tkp: matched keypoints in the template.
skp, tkp = sample.findKeypointMatches(template)
# Locate a single coupon template inside an image containing many coupons.
coupons = Image("coupons.jpg")
coupon = Image("coupon.jpg")
match = coupons.findKeypointMatch(coupon)
match.draw(width=10, color=Color.GREEN)
# NOTE(review): `uno` is not defined in this snippet — presumably the image
# that was drawn on should be saved here; confirm against surrounding text.
uno.save("result.jpg")
def find_clusters(keypoints, separator=None):
    """Group keypoints into spatial clusters.

    Keypoints whose area is at or below ``separator`` are discarded
    first; when ``separator`` is None, the mean keypoint area is used
    as the cutoff. Returns the hierarchical clusters of the surviving
    keypoints, grouped by their position.
    """
    feature_set = FeatureSet(keypoints)
    if separator is None:
        # Default cutoff: the average area of all supplied keypoints.
        separator = np.average(feature_set.area())
    big_enough = feature_set.filter(feature_set.area() > separator)
    return big_enough.cluster(method="hierarchical", properties="position")
def find_biggest_cluster(clusters):
    """Return the cluster (sequence of keypoints) with the most members.

    Ties are broken in favor of the earliest cluster, matching the
    original strict-greater-than scan. Returns None for an empty input
    (the original raised UnboundLocalError in that case).
    """
    if not clusters:
        return None
    # max() with key=len returns the first maximal element, preserving
    # the original tie-breaking behavior.
    return max(clusters, key=len)
# Lightweight 2-D point with named x/y access.
Point = namedtuple('Point', 'x y')


def distance_between_points(point_one, point_two):
    """Return the Euclidean distance between two Points."""
    dx = point_one.x - point_two.x
    dy = point_one.y - point_two.y
    return sqrt(dx ** 2 + dy ** 2)
# Centroid of the densest keypoint cluster found earlier.
skp_set = FeatureSet(biggest_cluster)
x_avg, y_avg = find_centroid(skp_set)
centroid = Point(x_avg, y_avg)
# Mark the centroid on the image.
uno.drawRectangle(x_avg, y_avg, 20, 20, width=30, color=Color.RED)

# Distance of every clustered keypoint from the centroid.
distances = [distance_between_points(kp, centroid) for kp in biggest_cluster]
mu, sigma = cv2.meanStdDev(np.array(distances))
mu = mu[0][0]
sigma = sigma[0][0]

# Highlight every sample keypoint lying within two standard deviations
# of the cluster centroid.
for kp in skp:
    if distance_between_points(kp, centroid) < mu + 2 * sigma:
        uno.drawRectangle(kp.x, kp.y, 20, 20, width=30, color=Color.GREEN)
friends.listHaarFeatures()
['right_ear.xml', 'right_eye.xml', 'nose.xml', 'face4.xml', 'glasses.xml', ...]
faces = friends.findHaarFeatures("face.xml")
faces.draw(width=10, color=Color.RED)
faces.save('result.jpg')
from SimpleCV import *

# Read frames from a video file and write the tracked result to a new one.
video = VirtualCamera("jack.mp4", 'video')
video_stream = VideoStream("jack_tracking.mp4", framefill=False, codec="mp4v")

track_set = []
current = video.getImage()
# NOTE(review): `disp` is not created in this snippet — presumably a
# Display() instance from the surrounding text.
while disp.isNotDone():
    frame = video.getImage()
    # Track the region starting at the fixed box [x, y, w, h].
    track_set = frame.track('camshift', track_set, current, [100, 100, 50, 50])
    track_set.drawBB()
    current = frame
    frame.save(video_stream)
# Track faces with camshift, re-running detection after scene changes or
# when a tracked region grows implausibly large.
video_stream = VideoStream("jack_tracking.avi", framefill=False, codec="mp4v")
video = VirtualCamera("jack.mp4", 'video')
disp = Display()

detected = False
current = video.getImage().scale(0.6)
tracked_objects = []
last_diff = None

while disp.isNotDone():
    frame = video.getImage().scale(0.6)

    # Scene-change heuristic: a large jump in frame-to-frame difference
    # invalidates the current detections.
    diff = cv2.absdiff(frame.getNumpyCv2(), current.getNumpyCv2())
    if last_diff and diff.sum() > last_diff * 6:
        detected = False
    last_diff = diff.sum()

    # Detect faces and restart tracking when nothing is being tracked.
    faces = frame.findHaarFeatures('face2.xml')
    if faces and not detected:
        tracked_objects = []
        final_faces = []
        for face in faces:
            if face.area() > 65:
                tracked_objects.append([])
                final_faces.append(face)
        detected = True

    if detected:
        for i, track_set in enumerate(tracked_objects):
            track_set = frame.track(
                'camshift', track_set, current,
                final_faces[i].boundingBox()
            )
            # Restart detection when tracking balloons past 3x the face.
            # NOTE(review): `not detected` is always False inside this
            # branch, so it never triggers the restart on its own.
            if track_set[-1].area > final_faces[i].area() * 3 \
                    or not detected:
                detected = False
                break
            # Keep the updated track and draw its bounding box.
            tracked_objects[i] = track_set
            track_set.drawBB()

    current = frame
    frame.save(video_stream)
# Background subtraction with a mixture-of-Gaussians model; moving blobs
# are filled in and the annotated frames written to a video stream.
mog = MOGSegmentation(
    history=200, nMixtures=5, backgroundRatio=0.3, noiseSigma=16,
    learningRate=0.3
)
video = VirtualCamera('semaforo.mp4', 'video')
video_stream = VideoStream("mog.mp4", framefill=False, codec="mp4v")

while disp.isNotDone():
    frame = video.getImage().scale(0.5)
    mog.addImage(frame)
    # segmentedImage = mog.getSegmentedImage()
    blobs = mog.getSegmentedBlobs()
    if blobs:
        # width=-1 fills each blob solid.
        blobs.draw(width=-1)
    frame.save(video_stream)
# Bounding box [x, y, w, h] of the traffic light's red lamp in the full frame.
red_light_bb = [432, 212, 13, 13]
# Virtual line across the road in the half-scale frame; cars crossing it
# while the light is red are flagged as red-light runners.
cross_line = Line(
    frame.scale(0.5), ((329, 230), (10, 360))
)
# Debounced state of the light: True while it is considered red.
RED = False
# Consecutive frames observed in the state opposite to RED.
number_of_opposite = 0
# Frames of agreement required before flipping RED (name keeps the
# original's spelling; the usual spelling is "hysteresis").
HISTERESIS_FRAMES = 5
def is_traffic_light_red(frame):
    """Return True when the traffic light's red lamp appears lit.

    Crops the lamp region (module-level ``red_light_bb``) out of *frame*
    and compares the mean of channel index 2 against an empirically
    chosen threshold. Reference mean colors measured from the video:
    lamp off ~ (30, 28, 35), lamp red ~ (21, 17, 51) — so index 2 is
    presumably the red channel (BGR order); confirm against the library.
    """
    red_light = frame.crop(*red_light_bb)
    # 42 splits the observed "off" (~35) and "red" (~51) means.
    return red_light.meanColor()[2] > 42
def hysteresis(red_detected=False, green_detected=False):
    """Debounce the global RED state of the traffic light.

    The state only flips after HISTERESIS_FRAMES consecutive frames
    report the opposite color; any frame that agrees with the current
    state resets the counter.
    """
    global RED, number_of_opposite
    # A frame "votes to flip" when it reports the color opposite to the
    # current debounced state.
    votes_to_flip = (RED and green_detected) or (not RED and red_detected)
    if not votes_to_flip:
        number_of_opposite = 0
        return
    number_of_opposite += 1
    if number_of_opposite == HISTERESIS_FRAMES:
        RED = not RED
        number_of_opposite = 0
# Main loop: debounce the light's color, and while it is red flag any
# large moving blob that crosses the stop line as a red-light runner.
while disp.isNotDone():
    frame = video.getImage()
    small_frame = frame.scale(0.5)
    mog.addImage(small_frame)
    if is_traffic_light_red(frame):
        hysteresis(red_detected=True)
        if RED:
            blobs = mog.getSegmentedBlobs()
            if blobs:
                # Ignore small noise blobs; keep car-sized ones.
                big_blobs = blobs.filter(blobs.area() > 1000)
                for car in big_blobs:
                    if cross_line.intersects(car.getFullMask()):
                        # RED LIGHT RUNNER
                        small_frame.drawRectangle(
                            *car.boundingBox(), color=Color.RED, width=3
                        )
    else:
        hysteresis(green_detected=True)
    small_frame.save(disp)