opencv-python | Automated CI toolchain to produce precompiled opencv-python, opencv-python-headless, opencv-contrib- | Computer Vision library
kandi X-RAY | opencv-python Summary
opencv-python Key Features
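The project provides prebuilt CPU-only binary wheels for all major platforms in four variants: opencv-python (main modules), opencv-contrib-python (main plus contrib/extra modules), and headless builds of both (opencv-python-headless, opencv-contrib-python-headless) for server environments without GUI dependencies.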
opencv-python Examples and Code Snippets
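The snippets below are collected from community Q&A and demonstrate common OpenCV image-processing patterns. They assume the desktop wheel is installed, since the cv2.imshow calls need GUI support:
pip install opencv-python
The first snippet splits a scanned page into horizontal text blocks: Otsu thresholding, morphology that turns each text region into a solid horizontal band, and cropping of each band found by connectedComponentsWithStats.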
import cv2
import numpy as np
img = cv2.imread('scanned_image.png', cv2.IMREAD_GRAYSCALE) # Read image as grayscale
thresh = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)[1] # Apply automatic (Otsu) thresholding with inversion
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, np.ones((1, 3), np.uint8)) # Apply opening morphological operation for removing small artifacts
thresh = cv2.dilate(thresh, np.ones((1, img.shape[1]), np.uint8)) # Dilate horizontally - turn the text into horizontal lines
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, np.ones((50, 1), np.uint8)) # Apply closing vertically - merge the lines into two large clusters
nlabel, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 4) # Find connected components with statistics
parts_list = []
# Iterate over the connected components (label 0 is the background):
for i in range(1, nlabel):
    top = int(stats[i, cv2.CC_STAT_TOP]) # Topmost y coordinate of the connected component
    height = int(stats[i, cv2.CC_STAT_HEIGHT]) # Height of the connected component
    roi = img[max(top - 5, 0):top + height + 5, :] # Crop the relevant part of the image (pad 5 extra rows top and bottom, clamped at the image border)
    parts_list.append(roi.copy()) # Add the cropped area to a list
    cv2.imwrite(f'part{i}.png', roi) # Save the image part for testing
    cv2.imshow(f'part{i}', roi) # Show part for testing
# Show image and thresh for testing
cv2.imshow('img', img)
cv2.imshow('thresh', thresh)
cv2.waitKey()
cv2.destroyAllWindows()
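The next snippet solves a similar segmentation task: it extracts each question block from a scanned form by thresholding, opening, and dilating, then sorting the external contours from top to bottom (the contour sorting comes from the imutils package).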
import cv2
from imutils import contours
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove small artifacts and noise with morph open
open_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, open_kernel, iterations=1)
# Create rectangular structuring element and dilate
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,9))
dilate = cv2.dilate(opening, kernel, iterations=4)
# Find contours, sort from top to bottom, and extract each question
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
(cnts, _) = contours.sort_contours(cnts, method="top-to-bottom")
# Get bounding box of each question, crop ROI, and save
question_number = 0
for c in cnts:
    # Filter by area to ensure it's not noise
    area = cv2.contourArea(c)
    if area > 150:
        x,y,w,h = cv2.boundingRect(c)
        cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
        question = original[y:y+h, x:x+w]
        cv2.imwrite('question_{}.png'.format(question_number), question)
        question_number += 1
cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('image', image)
cv2.waitKey()
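The following example detects square shapes: after thresholding and inverting, it keeps only contours whose polygonal approximation has four vertices and whose area falls within a plausible range.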
import cv2
import numpy as np
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread("1.png")
mask = np.zeros(image.shape, dtype=np.uint8)
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Remove noise with morph operations
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
invert = 255 - opening
# Find contours and find squares with contour area filtering + shape approximation
cnts = cv2.findContours(invert, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4 and area > 100 and area < 10000:
        x,y,w,h = cv2.boundingRect(c)
        cv2.drawContours(original, [c], -1, (36,255,12), 2)
        cv2.drawContours(mask, [c], -1, (255,255,255), -1)
cv2.imshow("original", original)
cv2.imshow("mask", mask)
cv2.waitKey()
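This snippet removes a yellow label from a scanned form by thresholding on a yellow BGR range, dilating the mask, and painting the largest contour white.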
import cv2
import numpy as np
# read image
img = cv2.imread('form_with_label.jpg')
# threshold on yellow
lower=(0,200,200)
upper=(100,255,255)
thresh = cv2.inRange(img, lower, upper)
# apply dilate morphology
kernel = np.ones((9,9), np.uint8)
mask = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)
# get largest contour
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
x,y,w,h = cv2.boundingRect(big_contour)
# draw filled white contour on input
result = img.copy()
cv2.drawContours(result,[big_contour],0,(255,255,255),-1)
# save the threshold, mask, and result images
cv2.imwrite('form_with_label_thresh.png',thresh)
cv2.imwrite('form_with_label_mask.png',mask)
cv2.imwrite('form_with_label_removed.png',result)
# show the images
cv2.imshow("THRESH", thresh)
cv2.imshow("MASK", mask)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
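The next example is a benchmark script that compares three ways of rendering a halftone-style output (one circle per input pixel): np.nditer iteration, plain nested Python loops, and copying precomputed circle tiles from a lookup cache. The console transcript after the code shows the cached variant running roughly twice as fast as the original.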
#!/usr/bin/env python
import itertools as its
import sys
import time
import cv2
import numpy as np
def draw_img_orig(arr_in, arr_out, *args):
    factor = round(arr_out.shape[0] / arr_in.shape[0])
    factor_2 = factor // 2
    it = np.nditer(arr_in, flags=["multi_index"])
    while not it.finished:
        y, x = it.multi_index
        color = it[0]
        it.iternext()
        center = (x * factor + factor_2, y * factor + factor_2)  # corresponding circle center
        cv2.circle(arr_out, center, int(8 * color / 255), 255, -1)

def draw_img_regular_iter(arr_in, arr_out, *args):
    factor = round(arr_out.shape[0] / arr_in.shape[0])
    factor_2 = factor // 2
    for row_idx, row in enumerate(arr_in):
        for col_idx, col in enumerate(row):
            cv2.circle(arr_out, (col_idx * factor + factor_2, row_idx * factor + factor_2), int(8 * col / 255), 255, -1)

def draw_img_cache(arr_in, arr_out, *args):
    factor = round(arr_out.shape[0] / arr_in.shape[0])
    it = np.nditer(arr_in, flags=["multi_index"])
    while not it.finished:
        y, x = it.multi_index
        yf = y * factor
        xf = x * factor
        arr_out[yf: yf + factor, xf: xf + factor] = args[0][it[0]]
        it.iternext()

def generate_input_images(shape, count, dtype=np.uint8):
    return np.random.randint(256, size=(count,) + shape, dtype=dtype)

def generate_circles(shape, dtype=np.uint8, func=lambda x: int(8 * x / 255), color=255):
    ret = np.zeros((256,) + shape, dtype=dtype)
    cy = shape[0] // 2
    cx = shape[1] // 2
    for idx, arr in enumerate(ret):
        cv2.circle(arr, (cx, cy), func(idx), color, -1)
    return ret

def test_draw(imgs_in, img_out, count, draw_func, *draw_func_args):
    print("\nTesting {:s}".format(draw_func.__name__))
    start = time.time()
    for i, e in enumerate(its.cycle(range(imgs_in.shape[0]))):
        draw_func(imgs_in[e], img_out, *draw_func_args)
        if i >= count:
            break
    print("Took {:.3f} seconds ({:d} images)".format(time.time() - start, count))

def test_speed(shape_in, shape_out, dtype=np.uint8):
    imgs_in = generate_input_images(shape_in, 50, dtype=dtype)
    #print(imgs_in.shape, imgs_in)
    img_out = np.zeros(shape_out, dtype=dtype)
    circles = generate_circles((shape_out[0] // shape_in[0], shape_out[1] // shape_in[1]))
    count = 250
    test_draw(imgs_in, img_out, count, draw_img_orig)
    test_draw(imgs_in, img_out, count, draw_img_regular_iter)
    test_draw(imgs_in, img_out, count, draw_img_cache, circles)

def test_accuracy(shape_in, shape_out, dtype=np.uint8):
    img_in = np.arange(np.prod(shape_in), dtype=dtype).reshape(shape_in)
    circles = generate_circles((shape_out[0] // shape_in[0], shape_out[1] // shape_in[1]))
    data = (
        (draw_img_orig, "orig.png", None),
        (draw_img_regular_iter, "regit.png", None),
        (draw_img_cache, "cache.png", circles),
    )
    imgs_out = [np.zeros(shape_out, dtype=dtype) for _ in range(len(data))]
    for idx, (draw_func, out_name, other_arg) in enumerate(data):
        draw_func(img_in, imgs_out[idx], other_arg)
        cv2.imwrite(out_name, imgs_out[idx])
    for idx, img in enumerate(imgs_out[1:], start=1):
        if not np.array_equal(img, imgs_out[0]):
            print("Image index different: {:d}".format(idx))

def main(*argv):
    dt = np.uint8
    shape_in = (32, 32)
    factor_io = 20
    shape_out = tuple(i * factor_io for i in shape_in)
    test_speed(shape_in, shape_out, dtype=dt)
    test_accuracy(shape_in, shape_out, dtype=dt)

if __name__ == "__main__":
    print("Python {:s} {:03d}bit on {:s}\n".format(" ".join(elem.strip() for elem in sys.version.split("\n")),
                                                   64 if sys.maxsize > 0x100000000 else 32, sys.platform))
    rc = main(*sys.argv[1:])
    print("\nDone.")
    sys.exit(rc)
[cfati@CFATI-5510-0:e:\Work\Dev\StackOverflow\q071818080]> sopr.bat
### Set shorter prompt to better fit when pasted in StackOverflow (or other) pages ###
[prompt]> dir /b
code00.py
[prompt]> "e:\Work\Dev\VEnvs\py_pc064_03.09_test0\Scripts\python.exe" code00.py
Python 3.9.9 (tags/v3.9.9:ccb0e6a, Nov 15 2021, 18:08:50) [MSC v.1929 64 bit (AMD64)] 064bit on win32
Testing draw_img_orig
Took 0.908 seconds (250 images)
Testing draw_img_regular_iter
Took 1.061 seconds (250 images)
Testing draw_img_cache
Took 0.426 seconds (250 images)
Done.
[prompt]>
[prompt]> dir /b
cache.png
code00.py
orig.png
regit.png
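This snippet applies a Gaussian blur with a large kernel, reading the input image path from a command-line argument.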
# import the necessary packages
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, default="pca8e.png",
help="path to input image")
args = vars(ap.parse_args())
# load the image, display it to our screen, and initialize a list of
# kernel sizes (so we can evaluate the relationship between kernel
# size and amount of blurring)
image = cv2.imread(args["image"])
cv2.imshow("Original", image)
kernelSizes = [(41,41)]
# loop over the kernel sizes
for (kX, kY) in kernelSizes:
    # apply a "Gaussian" blur to the image
    blurred = cv2.GaussianBlur(image, (kX, kY), 0)
    cv2.imshow("Gaussian ({}, {})".format(kX, kY), blurred)
    cv2.waitKey(0)
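The following example recolors the dark background of a photo red: it thresholds the HSV saturation channel (the background is nearly unsaturated), cleans the mask with an opening, flood-fills the background region with a marker value, and recolors the matching pixels.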
import cv2
import numpy as np
image = cv2.imread('tulips.jpg')
# Fill the black background with white color
#cv2.floodFill(image, None, seedPoint=(0, 0), newVal=(0, 0, 255), loDiff=(2, 2, 2), upDiff=(2, 2, 2)) # Not working!
hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # Convert from BGR to HSV color space
s_ch = hsv_img[:, :, 1] # Get the saturation channel
thresh = cv2.threshold(s_ch, 5, 255, cv2.THRESH_BINARY)[1] # Apply threshold - pixels above 5 become 255, others become zero
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))) # Apply opening morphological operation for removing artifacts
cv2.floodFill(thresh, None, seedPoint=(0, 0), newVal=128, loDiff=1, upDiff=1) # Fill the background in thresh with the value 128 (foreground pixels stay 0)
image[thresh == 128] = (0, 0, 255) # Set all the pixels where thresh == 128 to red
cv2.imwrite('tulips_red_bg.jpg', image) # Save the output image.
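This snippet crops a fixed region from a screenshot, binarizes it with adaptive thresholding, and reads the digits with pytesseract restricted to a numeric whitelist.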
import cv2
import pytesseract
img = cv2.imread('gamepictures/text.png') # Load the image
img = img[98:190,6:149,:]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
img = cv2.GaussianBlur(img, (5, 5), 3)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, -2)
txt = pytesseract.image_to_string(img, config='--psm 10 -c tessedit_char_whitelist=0123456789')
print(img.shape)
print(txt)
cv2.imshow("", img)
cv2.waitKey(0)
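The last two snippets both cut a subject out onto a transparent background by building a binary mask and stacking it onto the image as an alpha channel with np.dstack. The first masks a retina photograph: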
import cv2
import numpy as np
# load image and convert to grayscale
img = cv2.imread('Diabetic-Retinopathy_G_RM_151064169.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold input image
mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)[1]
# optional morphology to clean up small spots
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# put mask into alpha channel of image
result = np.dstack((img, mask))
# save resulting masked image
cv2.imwrite('Diabetic-Retinopathy_G_RM_151064169_masked.png', result)
# display result, though it won't show transparency
cv2.imshow("mask", mask)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
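The second applies the same idea to a black circle, inverting the threshold so the circle becomes white in the mask: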
import cv2
import numpy as np
# load image
img = cv2.imread('black_circle.png')
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
threshold = cv2.threshold(gray,128,255,cv2.THRESH_BINARY)[1]
# invert so circle is white on black
mask = 255 - threshold
# put mask into alpha channel of image
result = np.dstack((img, mask))
# save resulting masked image
cv2.imwrite('black_circle_masked.png', result)
# display result, though it won't show transparency
cv2.imshow("MASK", mask)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Trending Discussions on opencv-python
QUESTION
Hello stackoverflow people :) I'm trying to mask many images from two different paths, but I don't have an idea of how to do that. This is an example for just two images and what I've done so far:
image = cv.imread('Dataset/IDRiD_02.jpg', cv.IMREAD_COLOR)
od = cv.imread('od/IDRiD_02_OD.jpg', cv.IMREAD_GRAYSCALE)
mask = od
other = cv.bitwise_not(mask)
masking = cv.bitwise_and(image, image, mask=other)
cv.imwrite('Output/masking/' + 'masking.jpg', masking)
The input is IDRiD_02.jpg and IDRiD_02_OD.jpg, and the output is masking.jpg.
Now I want to do the same but with many images:
import cv2 as cv
import numpy as np
import os
import glob
import os.path
od_images = []
for directory_path in glob.glob("od/"):
for mask_path in glob.glob(os.path.join(directory_path, "*.jpg")):
mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE)
od_images.append(mask)
od_images = np.array(od_images)
path = "Dataset/*.jpg"
for file in glob.glob(path):
#read image
image = cv.imread(file, cv.IMREAD_COLOR)
# e.g. MyPhoto.jpg
basename = os.path.basename(file)
# e.g. MyPhoto
name = os.path.splitext(basename)[0]
mask = cv.bitwise_not(od_images)
masking = cv.bitwise_and(image, image, mask = mask)
cv.imwrite('Output/masking/' + name + '_masking.jpg', masking)
but after I run the code, I get the following error message:
masking = cv.bitwise_and(image, image, mask = mask)
error: OpenCV(4.5.5) D:\a\opencv-python\opencv-python\opencv\modules\core\src\arithm.cpp:230: error: (-215:Assertion failed) (mtype == CV_8U || mtype == CV_8S) && _mask.sameSize(*psrc1) in function 'cv::binary_op'
Can anyone help me understand this? Thanks in advance :)
ANSWER
Answered 2022-Mar-31 at 04:06
Hope this works for you!
import cv2 as cv
import os
img_path = r"image_folder_path"
od_images = r"od_img_folder_path"
for img, od in zip(os.listdir(img_path), os.listdir(od_images)):
    image = cv.imread(img_path + "\\" + img, cv.IMREAD_COLOR)
    od = cv.imread(od_images + "\\" + od, cv.IMREAD_GRAYSCALE)
    other = cv.bitwise_not(od)
    res = cv.bitwise_and(image, image, mask=other)
    cv.imwrite('Output/masking/' + img + '_masking.jpg', res)
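The assertion in the question fails because the mask argument of cv.bitwise_and must be a single-channel 8-bit image of the same size as the source, whereas the original code stacks every OD mask into one 3-D NumPy array and inverts the whole stack at once. Applying one mask per image avoids this; a minimal sketch, assuming each mask shares its image's basename with an _OD suffix (as in IDRiD_02.jpg / IDRiD_02_OD.jpg):
import os
import glob
import cv2 as cv

for file in glob.glob('Dataset/*.jpg'):
    name = os.path.splitext(os.path.basename(file))[0]  # e.g. IDRiD_02
    mask_path = os.path.join('od', name + '_OD.jpg')    # assumed naming convention
    image = cv.imread(file, cv.IMREAD_COLOR)
    mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE)    # single-channel 8-bit mask
    if image is None or mask is None:
        continue                                        # skip unmatched pairs
    masking = cv.bitwise_and(image, image, mask=cv.bitwise_not(mask))
    cv.imwrite('Output/masking/' + name + '_masking.jpg', masking)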
QUESTION
I have a pretrained model for object detection (Google Colab + TensorFlow) inside Google Colab, and I run it two or three times per week for new images. Everything was fine for the last year until this week. Now when I try to run the model I get this message:
Graph execution error:
2 root error(s) found.
(0) UNIMPLEMENTED: DNN library is not found.
[[{{node functional_1/conv1_conv/Conv2D}}]]
[[StatefulPartitionedCall/SecondStagePostprocessor/BatchMultiClassNonMaxSuppression/MultiClassNonMaxSuppression/Reshape_5/_126]]
(1) UNIMPLEMENTED: DNN library is not found.
[[{{node functional_1/conv1_conv/Conv2D}}]]
0 successful operations.
0 derived errors ignored. [Op:__inference_restored_function_body_27380] ***
This never happened before.
Before I can run my model, I have to install the TensorFlow Object Detection API with this command:
import os
os.chdir('/project/models/research')
!protoc object_detection/protos/*.proto --python_out=.
!cp object_detection/packages/tf2/setup.py .
!python -m pip install .
This is the output of command:
Processing /content/gdrive/MyDrive/models/research
DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.
pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.
Collecting avro-python3
Downloading avro-python3-1.10.2.tar.gz (38 kB)
Collecting apache-beam
Downloading apache_beam-2.35.0-cp37-cp37m-manylinux2010_x86_64.whl (9.9 MB)
|████████████████████████████████| 9.9 MB 1.6 MB/s
Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (7.1.2)
Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (4.2.6)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (3.2.2)
Requirement already satisfied: Cython in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.29.27)
Requirement already satisfied: contextlib2 in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.5.5)
Collecting tf-slim
Downloading tf_slim-1.1.0-py2.py3-none-any.whl (352 kB)
|████████████████████████████████| 352 kB 50.5 MB/s
Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.15.0)
Requirement already satisfied: pycocotools in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (2.0.4)
Collecting lvis
Downloading lvis-0.5.3-py3-none-any.whl (14 kB)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.4.1)
Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.3.5)
Collecting tf-models-official>=2.5.1
Downloading tf_models_official-2.8.0-py2.py3-none-any.whl (2.2 MB)
|████████████████████████████████| 2.2 MB 38.3 MB/s
Collecting tensorflow_io
Downloading tensorflow_io-0.24.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (23.4 MB)
|████████████████████████████████| 23.4 MB 1.7 MB/s
Requirement already satisfied: keras in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (2.7.0)
Collecting opencv-python-headless
Downloading opencv_python_headless-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (47.7 MB)
|████████████████████████████████| 47.7 MB 74 kB/s
Collecting sacrebleu
Downloading sacrebleu-2.0.0-py3-none-any.whl (90 kB)
|████████████████████████████████| 90 kB 10.4 MB/s
Requirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.5.12)
Requirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.4.8)
Requirement already satisfied: oauth2client in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.1.3)
Collecting tensorflow-addons
Downloading tensorflow_addons-0.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)
|████████████████████████████████| 1.1 MB 37.8 MB/s
Requirement already satisfied: gin-config in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)
Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.0.1)
Collecting sentencepiece
Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)
|████████████████████████████████| 1.2 MB 37.5 MB/s
Collecting tensorflow-model-optimization>=0.4.1
Downloading tensorflow_model_optimization-0.7.0-py2.py3-none-any.whl (213 kB)
|████████████████████████████████| 213 kB 42.7 MB/s
Collecting pyyaml<6.0,>=5.1
Downloading PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl (636 kB)
|████████████████████████████████| 636 kB 53.3 MB/s
Collecting tensorflow-text~=2.8.0
Downloading tensorflow_text-2.8.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (4.9 MB)
|████████████████████████████████| 4.9 MB 46.1 MB/s
Requirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.12.10)
Requirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.19.5)
Requirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.12.0)
Collecting seqeval
Downloading seqeval-1.2.2.tar.gz (43 kB)
|████████████████████████████████| 43 kB 2.1 MB/s
Collecting tensorflow~=2.8.0
Downloading tensorflow-2.8.0-cp37-cp37m-manylinux2010_x86_64.whl (497.5 MB)
|████████████████████████████████| 497.5 MB 28 kB/s
Collecting py-cpuinfo>=3.3.0
Downloading py-cpuinfo-8.0.0.tar.gz (99 kB)
|████████████████████████████████| 99 kB 10.1 MB/s
Requirement already satisfied: google-auth<3dev,>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.35.0)
Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.1)
Requirement already satisfied: httplib2<1dev,>=0.15.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.17.4)
Requirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.0.4)
Requirement already satisfied: google-api-core<3dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.26.3)
Requirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (57.4.0)
Requirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2018.9)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.54.0)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.23.0)
Requirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (21.3)
Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.17.3)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3dev,>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.2.8)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3dev,>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.8)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<3dev,>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.2.4)
Requirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2021.10.8)
Requirement already satisfied: urllib3 in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.24.3)
Requirement already satisfied: python-dateutil in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2.8.2)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (4.62.3)
Requirement already satisfied: python-slugify in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (5.0.2)
Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3dev,>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.4.8)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.10)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.4)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.1.0)
Requirement already satisfied: libclang>=9.0.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (13.0.0)
Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (3.1.0)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.6.3)
Requirement already satisfied: gast>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (0.4.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (0.2.0)
Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (3.10.0.2)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.13.3)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (0.23.1)
Collecting tf-estimator-nightly==2.8.0.dev2021122109
Downloading tf_estimator_nightly-2.8.0.dev2021122109-py2.py3-none-any.whl (462 kB)
|████████████████████████████████| 462 kB 49.5 MB/s
Requirement already satisfied: keras-preprocessing>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.1.2)
Collecting tensorboard<2.9,>=2.8
Downloading tensorboard-2.8.0-py3-none-any.whl (5.8 MB)
|████████████████████████████████| 5.8 MB 41.2 MB/s
Requirement already satisfied: flatbuffers>=1.12 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (2.0)
Collecting keras
Downloading keras-2.8.0-py2.py3-none-any.whl (1.4 MB)
|████████████████████████████████| 1.4 MB 41.2 MB/s
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (3.3.0)
Collecting numpy>=1.15.4
Downloading numpy-1.21.5-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.7 MB)
|████████████████████████████████| 15.7 MB 41.4 MB/s
Requirement already satisfied: absl-py>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.0.0)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.43.0)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.7/dist-packages (from astunparse>=1.6.0->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (0.37.1)
Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.9.0->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.5.2)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (0.6.1)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.0.1)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (0.4.6)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.8.1)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (3.3.6)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (1.3.1)
Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (4.10.1)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (3.7.0)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.9,>=2.8->tensorflow~=2.8.0->tf-models-official>=2.5.1->object-detection==0.1) (3.2.0)
Requirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow-model-optimization>=0.4.1->tf-models-official>=2.5.1->object-detection==0.1) (0.1.6)
Requirement already satisfied: crcmod<2.0,>=1.7 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.7)
Collecting fastavro<2,>=0.21.4
Downloading fastavro-1.4.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.3 MB)
|████████████████████████████████| 2.3 MB 38.1 MB/s
Requirement already satisfied: pyarrow<7.0.0,>=0.15.1 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (6.0.1)
Requirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from apache-beam->object-detection==0.1) (1.3.0)
Collecting proto-plus<2,>=1.7.1
Downloading proto_plus-1.19.9-py3-none-any.whl (45 kB)
|████████████████████████████████| 45 kB 3.2 MB/s
Collecting requests<3.0.0dev,>=2.18.0
Downloading requests-2.27.1-py2.py3-none-any.whl (63 kB)
|████████████████████████████████| 63 kB 1.8 MB/s
Collecting dill<0.3.2,>=0.3.1.1
Downloading dill-0.3.1.1.tar.gz (151 kB)
|████████████████████████████████| 151 kB 44.4 MB/s
Collecting numpy>=1.15.4
Downloading numpy-1.20.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.3 MB)
|████████████████████████████████| 15.3 MB 21.1 MB/s
Collecting orjson<4.0
Downloading orjson-3.6.6-cp37-cp37m-manylinux_2_24_x86_64.whl (245 kB)
|████████████████████████████████| 245 kB 53.2 MB/s
Collecting hdfs<3.0.0,>=2.1.0
Downloading hdfs-2.6.0-py3-none-any.whl (33 kB)
Collecting pymongo<4.0.0,>=3.8.0
Downloading pymongo-3.12.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (508 kB)
|████████████████████████████████| 508 kB 44.3 MB/s
Requirement already satisfied: docopt in /usr/local/lib/python3.7/dist-packages (from hdfs<3.0.0,>=2.1.0->apache-beam->object-detection==0.1) (0.6.2)
Collecting protobuf>=3.12.0
Downloading protobuf-3.19.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.1 MB)
|████████████████████████████████| 1.1 MB 47.3 MB/s
Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<3dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.0.11)
Requirement already satisfied: opencv-python>=4.1.0.25 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (4.1.2.30)
Requirement already satisfied: cycler>=0.10.0 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (0.11.0)
Requirement already satisfied: kiwisolver>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from lvis->object-detection==0.1) (1.3.2)
Requirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.7/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.3)
Requirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (from sacrebleu->tf-models-official>=2.5.1->object-detection==0.1) (2019.12.20)
Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.7/dist-packages (from sacrebleu->tf-models-official>=2.5.1->object-detection==0.1) (0.8.9)
Collecting portalocker
Downloading portalocker-2.3.2-py2.py3-none-any.whl (15 kB)
Collecting colorama
Downloading colorama-0.4.4-py2.py3-none-any.whl (16 kB)
Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.7/dist-packages (from seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.0.2)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.1.0)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (3.1.0)
Requirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.7/dist-packages (from tensorflow-addons->tf-models-official>=2.5.1->object-detection==0.1) (2.7.1)
Requirement already satisfied: promise in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (2.3)
Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (0.16.0)
Requirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (21.4.0)
Requirement already satisfied: importlib-resources in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (5.4.0)
Requirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.7/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (1.6.0)
Collecting tensorflow-io-gcs-filesystem>=0.23.1
Downloading tensorflow_io_gcs_filesystem-0.24.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (2.1 MB)
|████████████████████████████████| 2.1 MB 40.9 MB/s
Building wheels for collected packages: object-detection, py-cpuinfo, dill, avro-python3, seqeval
Building wheel for object-detection (setup.py) ... done
Created wheel for object-detection: filename=object_detection-0.1-py3-none-any.whl size=1686316 sha256=775b8c34c800b3b3139d1067abd686af9ce9158011fccfb5450ccfd9bf424a5a
Stored in directory: /tmp/pip-ephem-wheel-cache-rmw0fvil/wheels/d0/e3/e9/b9ffe85019ec441e90d8ff9eddee9950c4c23b7598204390b9
Building wheel for py-cpuinfo (setup.py) ... done
Created wheel for py-cpuinfo: filename=py_cpuinfo-8.0.0-py3-none-any.whl size=22257 sha256=ac956c4c039868fdba78645bea056754e667e8840bea783ad2ca75e4d3e682c6
Stored in directory: /root/.cache/pip/wheels/d2/f1/1f/041add21dc9c4220157f1bd2bd6afe1f1a49524c3396b94401
Building wheel for dill (setup.py) ... done
Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78544 sha256=d9c6cdfd69aea2b4d78e6afbbe2bc530394e4081eb186eb4f4cd02373ca739fd
Stored in directory: /root/.cache/pip/wheels/a4/61/fd/c57e374e580aa78a45ed78d5859b3a44436af17e22ca53284f
Building wheel for avro-python3 (setup.py) ... done
Created wheel for avro-python3: filename=avro_python3-1.10.2-py3-none-any.whl size=44010 sha256=4eca8b4f30e4850d5dabccee36c40c8dda8a6c7e7058cfb7f0258eea5ce7b2b3
Stored in directory: /root/.cache/pip/wheels/d6/e5/b1/6b151d9b535ee50aaa6ab27d145a0104b6df02e5636f0376da
Building wheel for seqeval (setup.py) ... done
Created wheel for seqeval: filename=seqeval-1.2.2-py3-none-any.whl size=16180 sha256=0ddfa46d0e36e9be346a90833ef11cc0d38cc7e744be34c5a0d321f997a30cba
Stored in directory: /root/.cache/pip/wheels/05/96/ee/7cac4e74f3b19e3158dce26a20a1c86b3533c43ec72a549fd7
Successfully built object-detection py-cpuinfo dill avro-python3 seqeval
Installing collected packages: requests, protobuf, numpy, tf-estimator-nightly, tensorflow-io-gcs-filesystem, tensorboard, keras, tensorflow, portalocker, dill, colorama, tf-slim, tensorflow-text, tensorflow-model-optimization, tensorflow-addons, seqeval, sentencepiece, sacrebleu, pyyaml, pymongo, py-cpuinfo, proto-plus, orjson, opencv-python-headless, hdfs, fastavro, tf-models-official, tensorflow-io, lvis, avro-python3, apache-beam, object-detection
Attempting uninstall: requests
Found existing installation: requests 2.23.0
Uninstalling requests-2.23.0:
Successfully uninstalled requests-2.23.0
Attempting uninstall: protobuf
Found existing installation: protobuf 3.17.3
Uninstalling protobuf-3.17.3:
Successfully uninstalled protobuf-3.17.3
Attempting uninstall: numpy
Found existing installation: numpy 1.19.5
Uninstalling numpy-1.19.5:
Successfully uninstalled numpy-1.19.5
Attempting uninstall: tensorflow-io-gcs-filesystem
Found existing installation: tensorflow-io-gcs-filesystem 0.23.1
Uninstalling tensorflow-io-gcs-filesystem-0.23.1:
Successfully uninstalled tensorflow-io-gcs-filesystem-0.23.1
Attempting uninstall: tensorboard
Found existing installation: tensorboard 2.7.0
Uninstalling tensorboard-2.7.0:
Successfully uninstalled tensorboard-2.7.0
Attempting uninstall: keras
Found existing installation: keras 2.7.0
Uninstalling keras-2.7.0:
Successfully uninstalled keras-2.7.0
Attempting uninstall: tensorflow
Found existing installation: tensorflow 2.7.0
Uninstalling tensorflow-2.7.0:
Successfully uninstalled tensorflow-2.7.0
Attempting uninstall: dill
Found existing installation: dill 0.3.4
Uninstalling dill-0.3.4:
Successfully uninstalled dill-0.3.4
Attempting uninstall: pyyaml
Found existing installation: PyYAML 3.13
Uninstalling PyYAML-3.13:
Successfully uninstalled PyYAML-3.13
Attempting uninstall: pymongo
Found existing installation: pymongo 4.0.1
Uninstalling pymongo-4.0.1:
Successfully uninstalled pymongo-4.0.1
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
yellowbrick 1.3.post1 requires numpy<1.20,>=1.16.0, but you have numpy 1.20.3 which is incompatible.
multiprocess 0.70.12.2 requires dill>=0.3.4, but you have dill 0.3.1.1 which is incompatible.
google-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.27.1 which is incompatible.
datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.
albumentations 0.1.12 requires imgaug<0.2.7,>=0.2.5, but you have imgaug 0.2.9 which is incompatible.
Successfully installed apache-beam-2.35.0 avro-python3-1.10.2 colorama-0.4.4 dill-0.3.1.1 fastavro-1.4.9 hdfs-2.6.0 keras-2.8.0 lvis-0.5.3 numpy-1.20.3 object-detection-0.1 opencv-python-headless-4.5.5.62 orjson-3.6.6 portalocker-2.3.2 proto-plus-1.19.9 protobuf-3.19.4 py-cpuinfo-8.0.0 pymongo-3.12.3 pyyaml-5.4.1 requests-2.27.1 sacrebleu-2.0.0 sentencepiece-0.1.96 seqeval-1.2.2 tensorboard-2.8.0 tensorflow-2.8.0 tensorflow-addons-0.15.0 tensorflow-io-0.24.0 tensorflow-io-gcs-filesystem-0.24.0 tensorflow-model-optimization-0.7.0 tensorflow-text-2.8.1 tf-estimator-nightly-2.8.0.dev2021122109 tf-models-official-2.8.0 tf-slim-1.1.0
I noticed that this command uninstalls tensorflow 2.7 and installs tensorflow 2.8. I am not sure this was happening before. Maybe that's the reason the DNN library link is missing or something?
I can confirm these:
- Nothing was changed inside the pretrained model or the object_detection source files I downloaded a year ago.
- I tried running the command !pip install dnn - not working.
- I tried restarting the runtime (without disconnecting) - not working.
Can somebody help? Thanks.
ANSWER
Answered 2022-Feb-07 at 09:19
The same thing happened to me last Friday. I think it has something to do with the CUDA installation in Google Colab, but I don't know the exact reason.
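One possible explanation, judging from the install log above: the object detection setup upgrades TensorFlow from 2.7 to 2.8, while Colab's preinstalled CUDA/cuDNN libraries at the time still matched TensorFlow 2.7, which can produce exactly this "DNN library is not found" error. A possible workaround (an assumption, not a confirmed fix) is to re-pin TensorFlow after the install and restart the runtime:
!pip install tensorflow==2.7.0  # re-pin to the version matching Colab's bundled cuDNN (assumption)
# then restart the runtime (Runtime -> Restart runtime) and re-run the model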
QUESTION
I have tried the solutions to similar problems on here, but none seem to work. I get a memory error when installing tensorflow from requirements.txt. Does anyone know of a workaround? I believe that installing with --no-cache-dir would fix it, but I can't figure out how to get EB to do that. Thank you.
Logs:
----------------------------------------
Collecting tensorflow==2.8.0
Downloading tensorflow-2.8.0-cp38-cp38-manylinux2010_x86_64.whl (497.6 MB)
2022/02/05 22:08:17.264961 [ERROR] An error occurred during execution of command [app-deploy] - [InstallDependency]. Stop running the command. Error: fail to install dependencies with requirements.txt file with error Command /bin/sh -c /var/app/venv/staging-LQM1lest/bin/pip install -r requirements.txt failed with error exit status 2. Stderr:ERROR: Exception:
Traceback (most recent call last):
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/cli/base_command.py", line 164, in exc_logging_wrapper
status = run_func(*args)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/cli/req_command.py", line 205, in wrapper
return func(self, options, args)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/commands/install.py", line 338, in run
requirement_set = resolver.resolve(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/resolver.py", line 92, in resolve
result = self._result = resolver.resolve(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/resolvelib/resolvers.py", line 482, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/resolvelib/resolvers.py", line 349, in resolve
self._add_to_criteria(self.state.criteria, r, parent=None)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/resolvelib/resolvers.py", line 173, in _add_to_criteria
if not criterion.candidates:
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/resolvelib/structs.py", line 151, in __bool__
return bool(self._sequence)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", line 155, in __bool__
return any(self)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", line 143, in
return (c for c in iterator if id(c) not in self._incompatible_ids)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", line 47, in _iter_built
candidate = func()
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/factory.py", line 201, in _make_candidate_from_link
self._link_candidate_cache[link] = LinkCandidate(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 281, in __init__
super().__init__(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 156, in __init__
self.dist = self._prepare()
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 225, in _prepare
dist = self._prepare_distribution()
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py", line 292, in _prepare_distribution
return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/operations/prepare.py", line 482, in prepare_linked_requirement
return self._prepare_linked_requirement(req, parallel_builds)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/operations/prepare.py", line 527, in _prepare_linked_requirement
local_file = unpack_url(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/operations/prepare.py", line 213, in unpack_url
file = get_http_url(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/operations/prepare.py", line 94, in get_http_url
from_path, content_type = download(link, temp_dir.path)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/network/download.py", line 145, in __call__
for chunk in chunks:
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/cli/progress_bars.py", line 144, in iter
for x in it:
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_internal/network/utils.py", line 63, in response_chunks
for chunk in response.raw.stream(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/urllib3/response.py", line 576, in stream
data = self.read(amt=amt, decode_content=decode_content)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/urllib3/response.py", line 519, in read
data = self._fp.read(amt) if not fp_closed else b""
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/cachecontrol/filewrapper.py", line 65, in read
self._close()
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/cachecontrol/filewrapper.py", line 52, in _close
self.__callback(self.__buf.getvalue())
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/cachecontrol/controller.py", line 309, in cache_response
cache_url, self.serializer.dumps(request, response, body=body)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/cachecontrol/serialize.py", line 72, in dumps
return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/__init__.py", line 35, in packb
return Packer(**kwargs).pack(o)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py", line 960, in pack
self._pack(obj)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py", line 943, in _pack
return self._pack_map_pairs(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py", line 1045, in _pack_map_pairs
self._pack(v, nest_limit - 1)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py", line 943, in _pack
return self._pack_map_pairs(
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py", line 1045, in _pack_map_pairs
self._pack(v, nest_limit - 1)
File "/var/app/venv/staging-LQM1lest/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py", line 889, in _pack
return self._buffer.write(obj)
MemoryError
2022/02/05 22:08:17.264976 [INFO] Executing cleanup logic
2022/02/05 22:08:17.265065 [INFO] CommandService Response: {"status":"FAILURE","api_version":"1.0","results":[{"status":"FAILURE","msg":"Engine execution has encountered an error.","returncode":1,"events":[{"msg":"Instance deployment failed to install application dependencies. The deployment failed.","timestamp":1644098897,"severity":"ERROR"},{"msg":"Instance deployment failed. For details, see 'eb-engine.log'.","timestamp":1644098897,"severity":"ERROR"}]}]}
Requirements.txt:
absl-py==1.0.0
asgiref==3.5.0
astunparse==1.6.3
awsebcli==3.20.3
backports.zoneinfo==0.2.1
botocore==1.23.49
cachetools==5.0.0
cement==2.8.2
certifi==2021.10.8
charset-normalizer==2.0.11
colorama==0.4.3
cycler==0.11.0
Django==4.0.2
django-crispy-forms==1.14.0
django-environ==0.8.1
flatbuffers==2.0
fonttools==4.29.1
future==0.16.0
gast==0.5.3
google-auth==2.6.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.43.0
h5py==3.6.0
idna==3.3
importlib-metadata==4.10.1
imutils==0.5.4
jmespath==0.10.0
keras==2.8.0
Keras-Preprocessing==1.1.2
kiwisolver==1.3.2
libclang==13.0.0
Markdown==3.3.6
matplotlib==3.5.1
numpy==1.22.2
oauthlib==3.2.0
opencv-python==4.5.5.62
opt-einsum==3.3.0
packaging==21.3
pathspec==0.9.0
Pillow==9.0.1
protobuf==3.19.4
psycopg2-binary==2.9.3
pyasn1==0.4.8
pyasn1-modules==0.2.8
pyparsing==3.0.7
python-dateutil==2.8.2
PyYAML==5.4.1
requests==2.26.0
requests-oauthlib==1.3.1
rsa==4.8
semantic-version==2.8.5
six==1.14.0
sqlparse==0.4.2
tensorboard==2.8.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.8.0
tensorflow-io-gcs-filesystem==0.24.0
termcolor==1.1.0
tf-estimator-nightly==2.8.0.dev2021122109
typing_extensions==4.0.1
tzdata==2021.5
urllib3==1.26.8
wcwidth==0.1.9
Werkzeug==2.0.2
wrapt==1.13.3
zipp==3.7.0
ANSWER
Answered 2022-Feb-05 at 22:37
The error says MemoryError: pip runs out of memory while downloading the ~500 MB tensorflow wheel (the traceback ends inside pip's cache serializer, which buffers the whole file - this is also why installing with --no-cache-dir can help). You must upgrade your EC2 instance to something with more memory; tensorflow is a very memory-hungry application.
QUESTION
Error while installing manimce: I have been trying to install the manimce library on Windows Subsystem for Linux, and after running
pip install manimce
Collecting manimce
Downloading manimce-0.1.1.post2-py3-none-any.whl (249 kB)
|████████████████████████████████| 249 kB 257 kB/s
Collecting Pillow
Using cached Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.1 MB)
Collecting scipy
Using cached scipy-1.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (39.3 MB)
Collecting colour
Using cached colour-0.1.5-py2.py3-none-any.whl (23 kB)
Collecting pangocairocffi<0.5.0,>=0.4.0
Downloading pangocairocffi-0.4.0.tar.gz (17 kB)
Preparing metadata (setup.py) ... done
Collecting numpy
Using cached numpy-1.21.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.7 MB)
Collecting pydub
Using cached pydub-0.25.1-py2.py3-none-any.whl (32 kB)
Collecting pygments
Using cached Pygments-2.10.0-py3-none-any.whl (1.0 MB)
Collecting cairocffi<2.0.0,>=1.1.0
Downloading cairocffi-1.3.0.tar.gz (88 kB)
|████████████████████████████████| 88 kB 160 kB/s
Preparing metadata (setup.py) ... done
Collecting tqdm
Using cached tqdm-4.62.3-py2.py3-none-any.whl (76 kB)
Collecting pangocffi<0.9.0,>=0.8.0
Downloading pangocffi-0.8.0.tar.gz (33 kB)
Preparing metadata (setup.py) ... done
Collecting pycairo<2.0,>=1.19
Using cached pycairo-1.20.1.tar.gz (344 kB)
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing metadata (pyproject.toml) ... done
Collecting progressbar
Downloading progressbar-2.5.tar.gz (10 kB)
Preparing metadata (setup.py) ... done
Collecting rich<7.0,>=6.0
Using cached rich-6.2.0-py3-none-any.whl (150 kB)
Collecting cffi>=1.1.0
Using cached cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (446 kB)
Collecting commonmark<0.10.0,>=0.9.0
Using cached commonmark-0.9.1-py2.py3-none-any.whl (51 kB)
Collecting typing-extensions<4.0.0,>=3.7.4
Using cached typing_extensions-3.10.0.2-py3-none-any.whl (26 kB)
Collecting colorama<0.5.0,>=0.4.0
Using cached colorama-0.4.4-py2.py3-none-any.whl (16 kB)
Collecting pycparser
Using cached pycparser-2.21-py2.py3-none-any.whl (118 kB)
Building wheels for collected packages: cairocffi, pangocairocffi, pangocffi, pycairo, progressbar
Building wheel for cairocffi (setup.py) ... done
Created wheel for cairocffi: filename=cairocffi-1.3.0-py3-none-any.whl size=89650 sha256=afc73218cc9fa1d844d7165f598e2be0428598166b4c3ed9de5bbdc94a0a6977
Stored in directory: /home/yusifer_zendric/.cache/pip/wheels/f3/97/83/8022b9237866102e18d1b7ac0a269769e6fccba0f63dceb9b7
Building wheel for pangocairocffi (setup.py) ... done
Created wheel for pangocairocffi: filename=pangocairocffi-0.4.0-py3-none-any.whl size=19283 sha256=54399796259c6e24f9ab56c5747ab273dcf97fb6fed3e7b54935f9ac49351d50
Stored in directory: /home/yusifer_zendric/.cache/pip/wheels/60/58/92/507a12a5044f7fcda6f4dfd8e0a607cc1fe957bc0dea885906
Building wheel for pangocffi (setup.py) ... done
Created wheel for pangocffi: filename=pangocffi-0.8.0-py3-none-any.whl size=37899 sha256=bea348af93696816b046dd901aa60d29a464460c5faac67628eb7e1ea7d1807d
Stored in directory: /home/yusifer_zendric/.cache/pip/wheels/c4/df/6d/e9d0f79b1545f6e902cc22773b1429de7a5efc240b891ee009
Building wheel for pycairo (pyproject.toml) ... error
ERROR: Command errored out with exit status 1:
command: /home/yusifer_zendric/manim_ce/venv/bin/python /home/yusifer_zendric/manim_ce/venv/lib/python3.8/site-packages/pip/_vendor/pep517/in_process/_in_process.py build_wheel /tmp/tmpuguwzu3u
cwd: /tmp/pip-install-l4hqdegr/pycairo_f4d80b8f3e4840a3802342825adcdff5
Complete output (12 lines):
running bdist_wheel
running build
running build_py
creating build
creating build/lib.linux-x86_64-3.8
creating build/lib.linux-x86_64-3.8/cairo
copying cairo/__init__.py -> build/lib.linux-x86_64-3.8/cairo
copying cairo/__init__.pyi -> build/lib.linux-x86_64-3.8/cairo
copying cairo/py.typed -> build/lib.linux-x86_64-3.8/cairo
running build_ext
'pkg-config' not found.
Command ['pkg-config', '--print-errors', '--exists', 'cairo >= 1.15.10']
----------------------------------------
ERROR: Failed building wheel for pycairo
Building wheel for progressbar (setup.py) ... done
Created wheel for progressbar: filename=progressbar-2.5-py3-none-any.whl size=12074 sha256=7290ef8de5dd955bf756b90130f400dd19c2cc9ea050a5a1dce2803440f581e2
Stored in directory: /home/yusifer_zendric/.cache/pip/wheels/2c/67/ed/d84123843c937d7e7f5ba88a270d11036473144143355e2747
Successfully built cairocffi pangocairocffi pangocffi progressbar
Failed to build pycairo
ERROR: Could not build wheels for pycairo, which is required to install pyproject.toml-based projects
(venv) yusifer_zendric@Laptop-Yusifer:~/manim_ce$
(venv) yusifer_zendric@Laptop-Yusifer:~/manim_ce$ pip install manim_ce
ERROR: Could not find a version that satisfies the requirement manim_ce (from versions: none)
ERROR: No matching distribution found for manim_ce
(venv) yusifer_zendric@Laptop-Yusifer:~/manim_ce$ manim example_scenes/basic.py -pql
Command 'manim' not found, did you mean:
command 'maim' from deb maim (5.5.3-1build1)
Try: sudo apt install
(venv) yusifer_zendric@Laptop-Yusifer:~/manim_ce$ sudo apt-get install manim
[sudo] password for yusifer_zendric:
Reading package lists... Done
Building dependency tree
Reading state information... Done
E: Unable to locate package manim
(venv) yusifer_zendric@Laptop-Yusifer:~/manim_ce$ pip3 install manimlib
Collecting manimlib
Downloading manimlib-0.2.0.tar.gz (4.8 MB)
|████████████████████████████████| 4.8 MB 498 kB/s
Preparing metadata (setup.py) ... done
Collecting Pillow
Using cached Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.1 MB)
Collecting argparse
Downloading argparse-1.4.0-py2.py3-none-any.whl (23 kB)
Collecting colour
Using cached colour-0.1.5-py2.py3-none-any.whl (23 kB)
Collecting numpy
Using cached numpy-1.21.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.7 MB)
Collecting opencv-python
Downloading opencv_python-4.5.4.60-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (60.3 MB)
|████████████████████████████████| 60.3 MB 520 kB/s
Collecting progressbar
Using cached progressbar-2.5-py3-none-any.whl
Collecting pycairo
Using cached pycairo-1.20.1.tar.gz (344 kB)
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing metadata (pyproject.toml) ... done
Collecting pydub
Using cached pydub-0.25.1-py2.py3-none-any.whl (32 kB)
Collecting pygments
Using cached Pygments-2.10.0-py3-none-any.whl (1.0 MB)
Collecting scipy
Using cached scipy-1.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (39.3 MB)
Collecting tqdm
Using cached tqdm-4.62.3-py2.py3-none-any.whl (76 kB)
Building wheels for collected packages: manimlib, pycairo
Building wheel for manimlib (setup.py) ... done
Created wheel for manimlib: filename=manimlib-0.2.0-py3-none-any.whl size=212737 sha256=27efe2c226d80cfe5663928e980d3e5f5a164d8e9d0aacea5014d37ffdedb76a
Stored in directory: /home/yusifer_zendric/.cache/pip/wheels/87/36/c1/2db5ed5de9908034108f3c39538cd3367445d9cec01e7c8c23
Building wheel for pycairo (pyproject.toml) ... error
ERROR: Command errored out with exit status 1:
command: /home/yusifer_zendric/manim_ce/venv/bin/python /home/yusifer_zendric/manim_ce/venv/lib/python3.8/site-packages/pip/_vendor/pep517/in_process/_in_process.py build_wheel /tmp/tmp5o2970su
cwd: /tmp/pip-install-sxxp3lw2/pycairo_d372a62d0c6b4c4484391402d21485e1
Complete output (12 lines):
running bdist_wheel
running build
running build_py
creating build
creating build/lib.linux-x86_64-3.8
creating build/lib.linux-x86_64-3.8/cairo
copying cairo/__init__.py -> build/lib.linux-x86_64-3.8/cairo
copying cairo/__init__.pyi -> build/lib.linux-x86_64-3.8/cairo
copying cairo/py.typed -> build/lib.linux-x86_64-3.8/cairo
running build_ext
'pkg-config' not found.
Command ['pkg-config', '--print-errors', '--exists', 'cairo >= 1.15.10']
----------------------------------------
ERROR: Failed building wheel for pycairo
Successfully built manimlib
Failed to build pycairo
ERROR: Could not build wheels for pycairo, which is required to install pyproject.toml-based projects
All the libraries are installed except pycairo, which keeps failing with this pyproject.toml error. In fact, I have already run pip install pyproject.toml and it is installed, yet the same error appears.
ANSWER
Answered 2022-Jan-28 at 02:24
apt-get install sox ffmpeg libcairo2 libcairo2-dev
apt-get install texlive-full
pip3 install manimlib # or pip install manimlib
Then:
pip3 install manimce # or pip install manimce
And everything works.
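As a quick sanity check after installing the system packages above, this small sketch (assuming pycairo now builds successfully) verifies that the cairo bindings import and work:
import cairo  # provided by the pycairo wheel

print(cairo.version)  # pycairo's reported version string
# Allocate a trivial surface to confirm the native library is usable.
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10)
print("pycairo OK:", surface.get_width(), "x", surface.get_height())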
QUESTION
I am trying to write an object detection + text-to-speech program that detects objects and produces a voice output on the Raspberry Pi 4. Right now, I am trying to write a simple Python script that incorporates both elements into a single .py file, preferably as a function, which I will then run on the Raspberry Pi. I want to give credit to Murtaza's Workshop "Object Detection OpenCV Python | Easy and Fast (2020)" and https://pypi.org/project/pyttsx3/ for the text-to-speech documentation for pyttsx3. I have attached the code below. I keep getting errors with the text-to-speech code (commented lines 33-36 for reference). I believe it is some looping error, but I just can't get the program to run continuously. For instance, if I run the code without the TTS part, it works fine; otherwise, it runs for perhaps 3-5 seconds and suddenly stops. I am a beginner but highly passionate about computer vision, and any help is appreciated!
import cv2
#import pyttsx3

cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)

classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    classNames = [line.rstrip() for line in f]

configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'

net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

while True:
    success, img = cap.read()
    classIds, confs, bbox = net.detect(img, confThreshold=0.45)
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            className = classNames[classId - 1]
            #engine = pyttsx3.init()
            #str1 = str(className)
            #engine.say(str1 + "detected")
            #engine.runAndWait()
            cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
            cv2.putText(img, classNames[classId - 1].upper(), (box[0] + 10, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow('Output', img)
    cv2.waitKey(1)
Here is a screenshot of my code.
Here is a link to the download files needed to run the code, in case they are needed.
Here is the error:
/Users/venuchannarayappa/PycharmProjects/ObjectDetector/venv/bin/python /Users/venuchannarayappa/PycharmProjects/ObjectDetector/main.py
Traceback (most recent call last):
  File "/Users/venuchannarayappa/PycharmProjects/ObjectDetector/main.py", line 24, in <module>
    classIds, confs, bbox = net.detect(img, confThreshold=0.45)
cv2.error: OpenCV(4.5.4) /Users/runner/work/opencv-python/opencv-python/opencv/modules/imgproc/src/resize.cpp:4051: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
Process finished with exit code 1
Link to video output recorded through iphone: https://www.icloud.com/iclouddrive/03jGfqy7-A9DKfekcu3wjk0rA#IMG_4932
Sorry for such a long post! I was debugging my code for the past few hours and I think I got it to work. I changed only the main while loop; the rest of the code is the same. The program now seems to run continuously for me. I would appreciate any comments if there are difficulties in running it.
engine = pyttsx3.init()
while True:
    success, img = cap.read()
    #print(success)
    #print(img)
    #print(img.shape)
    classIds, confs, bbox = net.detect(img, confThreshold=0.45)
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            className = classNames[classId - 1]
            #print(len(classIds))
            str1 = str(className)
            #print(str1)
            engine.say(str1 + "detected")
            engine.runAndWait()
            cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
            cv2.putText(img, classNames[classId - 1].upper(), (box[0] + 10, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            continue
    cv2.imshow('Output', img)
    cv2.waitKey(1)
I am planning to run this code on the Raspberry Pi and to install OpenCV with pip3 install opencv-python. However, I am not sure how to install pyttsx3, since I think I may need to install it from source. Please let me know if there is a simple method to install pyttsx3.
Update: As of December 27th, I have installed all necessary packages and my code is now functional.
ANSWER
Answered 2021-Dec-28 at 16:46
I installed pyttsx3 using the two commands in the terminal on the Raspberry Pi:
- sudo apt update && sudo apt install espeak ffmpeg libespeak1
- pip install pyttsx3
I followed the video youtube.com/watch?v=AWhDDl-7Iis&ab_channel=AiPhile to install pyttsx3. My working code is listed above. My question is resolved, but hopefully it will be useful to anyone looking to write a similar program. I have made minor tweaks to my code.
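The decisive change in the working loop above is that pyttsx3.init() is called once, before the loop, instead of once per detection. A minimal hedged sketch of that pattern (the announce helper and the "person" label are illustrative, not from the original code):
import pyttsx3

engine = pyttsx3.init()  # initialize the TTS engine once, outside any loop

def announce(label):
    # Speak a detected class name; runAndWait() blocks until speech finishes,
    # so calling it on every frame will slow the detection loop down.
    engine.say(str(label) + " detected")
    engine.runAndWait()

announce("person")  # hypothetical example label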
QUESTION
I am trying to run the training of stylegan2-pytorch on a remote system. The remote system has gcc (9.3.0) installed. I'm using a conda env that has the following installed (cudatoolkit=10.2, torch=1.5.0+, ninja=1.8.2, gcc_linux-64=7.5.0). I encounter the following error:
RuntimeError: Error building extension 'fused': [1/2]
/home/envs/segmentation_base/bin/nvcc -DTORCH_EXTENSION_NAME=fused -DTORCH_API_INCLUDE_EXTENSION_H -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include/torch/csrc/api/include -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include/TH -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include/THC -isystem /home/envs/segmentation_base/include -isystem /home/envs/segmentation_base/include/python3.6m -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -std=c++14 -c /home/code/semanticGAN_code/models/op/fused_bias_act_kernel.cu -o fused_bias_act_kernel.cuda.o
FAILED: fused_bias_act_kernel.cuda.o
/home/envs/segmentation_base/bin/nvcc -DTORCH_EXTENSION_NAME=fused -DTORCH_API_INCLUDE_EXTENSION_H -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include/torch/csrc/api/include -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include/TH -isystem /home/envs/segmentation_base/lib/python3.6/site-packages/torch/include/THC -isystem /home/envs/segmentation_base/include -isystem /home/envs/segmentation_base/include/python3.6m -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -std=c++14 -c /home/code/semanticGAN_code/models/op/fused_bias_act_kernel.cu -o fused_bias_act_kernel.cuda.o
In file included from /home/envs/segmentation_base/include/cuda_runtime.h:83,
from :
/home/envs/segmentation_base/include/crt/host_config.h:138:2: error: #error -- unsupported GNU version! gcc versions later than 8 are not supported!
138 | #error -- unsupported GNU version! gcc versions later than 8 are not supported!
| ^~~~~
ninja: build stopped: subcommand failed.
I would like to use the gcc of my conda env (gcc_linux-64=7.5.0) to build CUDA. When I run gcc --version in my conda env, I get the system's gcc:
gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
Running which gcc while my conda env is active returns:
/usr/bin/gcc
I'd expect it to return gcc version 7.5.0 (the one installed in the environment). I understand that conda has different names for gcc, but the environment variables should point to the installed gcc.
Running echo $CC returns:
/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-cc
Following the suggested solution here, I get the following upon activating my environment, but the same issue stands:
INFO: activate-binutils_linux-64.sh made the following environmental changes:
+ADDR2LINE=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-addr2line
+AR=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-ar
+AS=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-as
+CXXFILT=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-c++filt
+ELFEDIT=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-elfedit
+GPROF=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-gprof
+LD_GOLD=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-ld.gold
+LD=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-ld
+NM=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-nm
+OBJCOPY=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-objcopy
+OBJDUMP=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-objdump
+RANLIB=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-ranlib
+READELF=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-readelf
+SIZE=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-size
+STRINGS=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-strings
+STRIP=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-strip
INFO: activate-gcc_linux-64.sh made the following environmental changes:
+build_alias=x86_64-conda-linux-gnu
+BUILD=x86_64-conda-linux-gnu
+CC_FOR_BUILD=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-cc
+CC=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-cc
+CFLAGS=-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -isystem /include -fdebug-prefix-map==/usr/local/src/conda/- -fdebug-prefix-map==/usr/local/src/conda-prefix
+CMAKE_ARGS=-DCMAKE_LINKER=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-ld -DCMAKE_STRIP=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-strip -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY -DCMAKE_FIND_ROOT_PATH=;/x86_64-conda-linux-gnu/sysroot -DCMAKE_INSTALL_PREFIX= -DCMAKE_INSTALL_LIBDIR=lib
+CMAKE_PREFIX_PATH=:/home/envs/segmentation_base/x86_64-conda-linux-gnu/sysroot/usr
+CONDA_BUILD_SYSROOT=/home/envs/segmentation_base/x86_64-conda-linux-gnu/sysroot
+_CONDA_PYTHON_SYSCONFIGDATA_NAME=_sysconfigdata_x86_64_conda_linux_gnu
+CPPFLAGS=-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem /include
+CPP=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-cpp
+DEBUG_CFLAGS=-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-all -fno-plt -Og -g -Wall -Wextra -fvar-tracking-assignments -ffunction-sections -pipe -isystem /include -fdebug-prefix-map==/usr/local/src/conda/- -fdebug-prefix-map==/usr/local/src/conda-prefix
+DEBUG_CPPFLAGS=-D_DEBUG -D_FORTIFY_SOURCE=2 -Og -isystem /include
+GCC_AR=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-gcc-ar
+GCC_NM=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-gcc-nm
+GCC=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-gcc
+GCC_RANLIB=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-gcc-ranlib
+host_alias=x86_64-conda-linux-gnu
+HOST=x86_64-conda-linux-gnu
+LDFLAGS=-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections -Wl,-rpath,/lib -Wl,-rpath-link,/lib -L/lib
INFO: activate-gxx_linux-64.sh made the following environmental changes:
+CXXFLAGS=-fvisibility-inlines-hidden -std=c++17 -fmessage-length=0 -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -isystem /include -fdebug-prefix-map==/usr/local/src/conda/- -fdebug-prefix-map==/usr/local/src/conda-prefix
+CXX_FOR_BUILD=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-c++
+CXX=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-c++
+DEBUG_CXXFLAGS=-fvisibility-inlines-hidden -std=c++17 -fmessage-length=0 -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-all -fno-plt -Og -g -Wall -Wextra -fvar-tracking-assignments -ffunction-sections -pipe -isystem /include -fdebug-prefix-map==/usr/local/src/conda/- -fdebug-prefix-map==/usr/local/src/conda-prefix
+GXX=/home/envs/segmentation_base/bin/x86_64-conda-linux-gnu-g++
How can one set gcc to the conda gcc instead of the system gcc? I understand that this should be done automatically when activating the environment, through the bash scripts in activate.d.
Most of the open issues (regarding "unsupported GNU version!") either require sudo permission to adjust the gcc version (which I don't have) or aren't applicable to conda environments. I have yet to find a clear solution to this :/
TLDR: How do I force conda to use its own installed gcc version instead of the host system gcc?
Edit 1: Added conda list output
# Name Version Build Channel
_libgcc_mutex 0.1 main
_openmp_mutex 4.5 1_gnu
_sysroot_linux-64_curr_repodata_hack 3 haa98f57_10
absl-py 1.0.0 pypi_0 pypi
albumentations 0.5.2 pypi_0 pypi
binutils_impl_linux-64 2.35.1 h27ae35d_9
binutils_linux-64 2.35.1 h454624a_30
blas 1.0 mkl
ca-certificates 2021.10.26 h06a4308_2
cachetools 4.2.4 pypi_0 pypi
certifi 2021.5.30 py36h06a4308_0
charset-normalizer 2.0.9 pypi_0 pypi
cudatoolkit 10.2.89 3 hcc
cycler 0.11.0 pypi_0 pypi
decorator 4.4.2 pypi_0 pypi
freetype 2.11.0 h70c0345_0
gcc_impl_linux-64 7.5.0 h7105cf2_17
gcc_linux-64 7.5.0 h8f34230_30
google-auth 2.3.3 pypi_0 pypi
google-auth-oauthlib 0.4.6 pypi_0 pypi
grpcio 1.42.0 pypi_0 pypi
gxx_impl_linux-64 7.5.0 h0a5bf11_17
gxx_linux-64 7.5.0 hffc177d_30
idna 3.3 pypi_0 pypi
imageio 2.8.0 pypi_0 pypi
imageio-ffmpeg 0.4.2 pypi_0 pypi
imgaug 0.4.0 pypi_0 pypi
importlib-metadata 4.8.2 pypi_0 pypi
intel-openmp 2021.4.0 h06a4308_3561
jpeg 9d h7f8727e_0
kernel-headers_linux-64 3.10.0 h57e8cba_10
kiwisolver 1.3.1 pypi_0 pypi
lcms2 2.12 h3be6417_0
ld_impl_linux-64 2.35.1 h7274673_9
libffi 3.3 he6710b0_2
libgcc-devel_linux-64 7.5.0 hbbeae57_17
libgcc-ng 9.3.0 h5101ec6_17
libgomp 9.3.0 h5101ec6_17
libpng 1.6.37 hbc83047_0
libstdcxx-devel_linux-64 7.5.0 hf0c5c8d_17
libstdcxx-ng 9.3.0 hd4cf53a_17
libtiff 4.2.0 h85742a9_0
libwebp-base 1.2.0 h27cfd23_0
lmdb 0.98 pypi_0 pypi
lz4-c 1.9.3 h295c915_1
markdown 3.3.6 pypi_0 pypi
matplotlib 3.3.4 pypi_0 pypi
mkl 2020.2 256
mkl-service 2.3.0 py36he8ac12f_0
mkl_fft 1.3.0 py36h54f3939_0
mkl_random 1.1.1 py36h0573a6f_0
ncurses 6.3 h7f8727e_2
networkx 2.5.1 pypi_0 pypi
ninja 1.8.2 pypi_0 pypi
numpy 1.19.5 pypi_0 pypi
numpy-base 1.19.2 py36hfa32c7d_0
oauthlib 3.1.1 pypi_0 pypi
olefile 0.46 py36_0
opencv-python 4.5.4.60 pypi_0 pypi
opencv-python-headless 4.5.4.60 pypi_0 pypi
openjpeg 2.4.0 h3ad879b_0
openssl 1.1.1l h7f8727e_0
pillow 8.4.0 pypi_0 pypi
pip 21.2.2 py36h06a4308_0
protobuf 3.19.1 pypi_0 pypi
pyasn1 0.4.8 pypi_0 pypi
pyasn1-modules 0.2.8 pypi_0 pypi
pyparsing 3.0.6 pypi_0 pypi
python 3.6.13 h12debd9_1
python-dateutil 2.8.2 pypi_0 pypi
pytorch 1.5.0 py3.6_cuda10.2.89_cudnn7.6.5_0 pytorch
pywavelets 1.1.1 pypi_0 pypi
pyyaml 6.0 pypi_0 pypi
readline 8.1 h27cfd23_0
requests 2.26.0 pypi_0 pypi
requests-oauthlib 1.3.0 pypi_0 pypi
rsa 4.8 pypi_0 pypi
scikit-image 0.17.2 pypi_0 pypi
scipy 1.5.0 pypi_0 pypi
setuptools 58.0.4 py36h06a4308_0
shapely 1.8.0 pypi_0 pypi
six 1.16.0 pyhd3eb1b0_0
sqlite 3.36.0 hc218d9a_0
sysroot_linux-64 2.17 h57e8cba_10
tensorboard 2.7.0 pypi_0 pypi
tensorboard-data-server 0.6.1 pypi_0 pypi
tensorboard-plugin-wit 1.8.0 pypi_0 pypi
tifffile 2020.9.3 pypi_0 pypi
tk 8.6.11 h1ccaba5_0
torchvision 0.6.0 py36_cu102 pytorch
typing-extensions 4.0.1 pypi_0 pypi
urllib3 1.26.7 pypi_0 pypi
werkzeug 2.0.2 pypi_0 pypi
wheel 0.37.0 pyhd3eb1b0_1
xz 5.2.5 h7b6447c_0
zipp 3.6.0 pypi_0 pypi
zlib 1.2.11 h7b6447c_3
zstd 1.4.9 haebb681_0
ANSWER
Answered 2021-Dec-12 at 16:12
Just to share, not sure it will help you. However, it shows that in standard conditions it is possible to use the conda gcc, as described in the documentation, instead of the system gcc.
# system gcc
which gcc && gcc --version
# /usr/bin/gcc
# gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
# creating a conda env with gcc
conda create -n gcc gcc
# activate the environment
conda activate gcc
which gcc && gcc --version
# /opt/conda/envs/gcc/bin/gcc
# gcc (GCC) 11.2.0
Here is the list of packages installed in a fresh environment created with only gcc.
# packages in environment at /opt/conda/envs/gcc:
#
# Name Version Build Channel
_libgcc_mutex 0.1 conda_forge conda-forge
_openmp_mutex 4.5 1_gnu conda-forge
binutils_impl_linux-64 2.36.1 h193b22a_2 conda-forge
gcc 11.2.0 h702ea55_2 conda-forge
gcc_impl_linux-64 11.2.0 h82a94d6_11 conda-forge
kernel-headers_linux-64 2.6.32 he073ed8_15 conda-forge
ld_impl_linux-64 2.36.1 hea4e1c9_2 conda-forge
libgcc-devel_linux-64 11.2.0 h0952999_11 conda-forge
libgcc-ng 11.2.0 h1d223b6_11 conda-forge
libgomp 11.2.0 h1d223b6_11 conda-forge
libsanitizer 11.2.0 he4da1e4_11 conda-forge
libstdcxx-ng 11.2.0 he4da1e4_11 conda-forge
sysroot_linux-64 2.12 he073ed8_15 conda-forge
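If the CUDA extension build still picks up /usr/bin/gcc, one workaround (a sketch only; the compiler paths below are taken from this question's environment and are not universal) is to export CC/CXX explicitly before the extension is compiled, since PyTorch's JIT extension builder reads the compiler from the environment:
import os

# Assumption: the conda compilers live under the environment prefix, as shown
# by `echo $CC` above. Adjust the prefix to your own environment.
prefix = os.environ.get("CONDA_PREFIX", "/home/envs/segmentation_base")
os.environ["CC"] = os.path.join(prefix, "bin", "x86_64-conda-linux-gnu-cc")
os.environ["CXX"] = os.path.join(prefix, "bin", "x86_64-conda-linux-gnu-c++")
print("CC :", os.environ["CC"])
print("CXX:", os.environ["CXX"])
# Building the 'fused' op after this point (via torch.utils.cpp_extension.load)
# should now use the conda toolchain instead of the system gcc 9.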
QUESTION
I'm working on CI for my Python + Django project. I have to use the python:3.9-alpine image. A weird error is popping up in my CI pipelines:
WARNING: Discarding https://files.pythonhosted.org/packages/aa/8a/7c80e7e44fb1b4277e89bd9ca509aefdd4dd1b2c547c6f293afe9f7ffd04/psycopg2-2.9.1.tar.gz#sha256=de5303a6f1d0a7a34b9d40e4d3bef684ccc44a49bbe3eb85e3c0bffb4a131b7c (from https://pypi.org/simple/psycopg2/) (requires-python:>=3.6). Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
ERROR: Could not find a version that satisfies the requirement psycopg2==2.9.1 (from versions: 2.0.10, 2.0.11, 2.0.12, 2.0.13, 2.0.14, 2.2.0, 2.2.1, 2.2.2, 2.3.0, 2.3.1, 2.3.2, 2.4, 2.4.1, 2.4.2, 2.4.3, 2.4.4, 2.4.5, 2.4.6, 2.5, 2.5.1, 2.5.2, 2.5.3, 2.5.4, 2.5.5, 2.6, 2.6.1, 2.6.2, 2.7, 2.7.1, 2.7.2, 2.7.3, 2.7.3.1, 2.7.3.2, 2.7.4, 2.7.5, 2.7.6, 2.7.6.1, 2.7.7, 2.8, 2.8.1, 2.8.2, 2.8.3, 2.8.4, 2.8.5, 2.8.6, 2.9, 2.9.1, 2.9.2)
Preparing metadata (setup.py): finished with status 'error'
ERROR: No matching distribution found for psycopg2==2.9.1
I see 2.9.1 in the list of available versions.
My .gitlab-ci.yml
stages:
- linter
- build_pip
- build
- meta
- code_quality
- deploy
.except-tags:
except:
- tags
build_pip:build_dist:
stage: build_pip
# image: $CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX/python:3.9-alpine
image: python:3.9-alpine
variables:
OPENCV_VERSION: "4.5.3.56"
before_script:
- pip install --upgrade pip setuptools wheel
- apk update
- apk add -q --update --no-cache postgresql-dev musl-dev
...
- pip install -r requirements/production.txt --no-cache
script:
- python setup.py bdist_wheel
- echo PIP_CI_JOB_ID=$CI_JOB_ID > PIP_CI_JOB_ID.env
dependencies: []
artifacts:
expire_in: 1 hour
paths:
- dist/
- version
reports:
dotenv: PIP_CI_JOB_ID.env
extends:
- .except-tags
...
requirements/production.txt
djangorestframework==3.12.4
drf-extra-fields==3.1.1
djangorestframework-camel-case==1.2.0 # https://pypi.org/project/djangorestframework-camel-case/
Pillow==8.3.2
python-dateutil==2.8.2 # datetime formatting
psycopg2==2.9.1
opencv-python==4.5.3.56
drf-yasg==1.20.0
sentry-sdk==1.4.3
gunicorn==20.1.0
requests==2.26.0
yarl==1.7.0
googlemaps==4.5.3
django_redis==5.0.0
celery==5.2.0
channels==3.0.4
channels_redis==3.3.1
Full gitlab ci log: https://pastebin.com/QhMhErF7
What is the reason for this error?
I tried to replace psycopg2 with psycopg2-binary, but the same error occurs.
ANSWER
Answered 2021-Dec-05 at 17:35
What is the reason of my error?
Did you read my previous answer to a similar question of yours? The last part warns about certain combinations of Alpine + Python, and that seems to be what is happening right now.
I tried to replace psycopg2 with psycopg2-binary but have the same error
The problem here might be a Python library that has a dependency on gcc, which is not shipped on Alpine by default.
Try replacing this:
before_script:
- pip install --upgrade pip setuptools wheel
- apk update
- apk add -q --update --no-cache postgresql-dev musl-dev
with:
before_script:
- pip install --upgrade pip setuptools wheel
- apk update
- apk add -q --no-cache postgresql-dev gcc python3-dev musl-dev
Notice that adding gcc will increase the image size, since it might be a dependency for either psycopg2 or psycopg2-binary. If the image size grows a lot, I see no point in sticking with Alpine; you could avoid more Python headaches by switching to a Debian-based image.
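Once the build dependencies are present, a quick hedged check (the connection string is a placeholder, not a working DSN) confirms that the psycopg2 C extension built and imports:
import psycopg2

print(psycopg2.__version__)  # importing proves the C extension compiled
# A real connection needs a reachable server; the DSN below is a placeholder.
# conn = psycopg2.connect("dbname=app user=postgres password=secret host=db")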
QUESTION
I am working on CI/CD for my Python/Django project in GitLab.
I have an error -- GitLab CI: Failed building wheel for opencv-python
Full gitlab ci log -- https://pastebin.com/pZdZ6ws2
The error occurs on the build_pip stage. gitlab-ci.yaml:
stages:
- linter
- build_pip
- build
- meta
- code_quality
- deploy
.except-tags:
except:
- tags
build_pip:build_dist:
stage: build_pip
# image: $CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX/python:3.9-alpine
image: python:3.9-alpine
before_script:
- apk update && apk add postgresql-dev gcc python3-dev musl-dev g++ jpeg-dev zlib-dev
- pip install pip --upgrade
- pip install -r requirements/production.txt --no-cache
script:
- python setup.py bdist_wheel
- echo PIP_CI_JOB_ID=$CI_JOB_ID > PIP_CI_JOB_ID.env
dependencies: []
artifacts:
expire_in: 1 hour
paths:
- dist/
- version
reports:
dotenv: PIP_CI_JOB_ID.env
extends:
- .except-tags
meta:version:
stage: meta
needs:
- job: build_pip:build_dist
artifacts: true
script:
- cat version
artifacts:
expire_in: never
paths:
- version
extends: .except-tags
build:build_api:
stage: build
image: registry.ml.bastion-tech.ru:8843/ansible/infrastructure/ansible_tools:2.9
needs:
- job: build_pip:build_dist
artifacts: true
before_script:
- ansible-vault decrypt /ansible/infrastructure/secrets/ansible@infrastructure/id_rsa --vault-password-file=${ANSIBLE_VAULT_PASSWORD}
script:
- |
ansible-playbook -i /ansible/infrastructure/inventories/ml.inventory \
--vault-password-file=${ANSIBLE_VAULT_PASSWORD} \
--private-key /ansible/infrastructure/secrets/ansible@infrastructure/id_rsa \
-e ansible_ssh_user=deploy \
-e smartconstructions_pip_ci_job_id=${PIP_CI_JOB_ID} \
-e build=true -e smartconstructions_build_ref=${CI_COMMIT_BRANCH} \
/ansible/infrastructure/ml_smartconstructions.yml
tags:
- linux-docker
deploy:deploy_api:
stage: deploy
image: registry.ml.bastion-tech.ru:8843/ansible/infrastructure/ansible_tools:2.9
needs:
- job: build_pip:build_dist
artifacts: true
when: manual
only:
- master
- dev
before_script:
- ansible-vault decrypt /ansible/infrastructure/secrets/ansible@infrastructure/id_rsa --vault-password-file=${ANSIBLE_VAULT_PASSWORD}
script:
- |
ansible-playbook -i /ansible/infrastructure/inventories/ml.inventory \
--vault-password-file=${ANSIBLE_VAULT_PASSWORD} \
--private-key /ansible/infrastructure/secrets/ansible@infrastructure/id_rsa \
-e ansible_ssh_user=deploy \
-e smartconstructions_pip_ci_job_id=${PIP_CI_JOB_ID} \
-e run=true -e frontend_restart=true \
/ansible/infrastructure/ml_smartconstructions.yml
tags:
- linux-docker
include:
- local: .gitlab/ci/code-quality.yml
requirements/production.txt
djangorestframework==3.12.4
drf-extra-fields==3.1.1
djangorestframework-camel-case==1.2.0 # https://pypi.org/project/djangorestframework-camel-case/
Pillow==8.3.2
python-dateutil==2.8.2 # datetime formatting
psycopg2==2.9.1
opencv-python==4.5.3.56
drf-yasg==1.20.0
sentry-sdk==1.4.3
gunicorn==20.1.0
requests==2.26.0
yarl==1.7.0
googlemaps==4.5.3
django_redis==5.0.0
celery==5.2.0
channels==3.0.4
channels_redis==3.3.1
ANSWER
Answered 2021-Dec-04 at 23:03
In your logs, we can see the following error:
gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -DTHREAD_STACK_SIZE=0x100000 -fPIC -DUSE__THREAD -DHAVE_SYNC_SYNCHRONIZE -I/usr/include/ffi -I/usr/include/libffi -I/usr/local/include/python3.9 -c c/_cffi_backend.c -o build/temp.linux-x86_64-3.9/c/_cffi_backend.o
c/_cffi_backend.c:15:10: fatal error: ffi.h: No such file or directory
   15 | #include <ffi.h>
      |          ^~~~~~~
compilation terminated.
error: command '/usr/bin/gcc' failed with exit code 1
Errors like this suggest that you're missing header files.
On Alpine, the ffi.h file is part of libffi-dev. Try this:
apk add libffi-dev
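After adding libffi-dev, a minimal sketch to confirm that cffi (the package whose build failed above) now compiles and imports:
import cffi  # the package that failed without ffi.h

ffi = cffi.FFI()
ffi.cdef("int abs(int);")           # declare a libc function
C = ffi.dlopen(None)                # open the C standard library
print(cffi.__version__, C.abs(-5))  # expect the version string and 5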
QUESTION
I am trying to install the Tensorflow Object Detection API on a Google Colab and the part that installs the API, shown below, takes a very long time to execute (in excess of one hour) and eventually fails to install.
# Install the Object Detection API
%%bash
cd models/research/
protoc object_detection/protos/*.proto --python_out=.
cp object_detection/packages/tf2/setup.py .
python -m pip install .
To discover what I was doing wrong, I reverted to the "Eager Few Shot Object Detection Colab" example available at https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb in a Google Colab Pro notebook, and the "python -m pip install" part hangs there as well. Normally, this Colab runs in under 10 minutes, but in Colab Pro it is not running at all.
I can't seem to pinpoint what is causing this installation to fail. Does anyone have any idea why the Object Detection API is no longer installing on Google Colab notebooks?
Update: yesterday the installation took over two hours and then failed; this is the output:
Processing /content/models/research
Collecting avro-python3
Using cached avro-python3-1.10.2.tar.gz (38 kB)
Collecting apache-beam
Using cached apache_beam-2.34.0-cp37-cp37m-manylinux2010_x86_64.whl (9.8 MB)
Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (7.1.2)
Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (4.2.6)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (3.2.2)
Requirement already satisfied: Cython in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.29.24)
Requirement already satisfied: contextlib2 in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (0.5.5)
Collecting tf-slim
Using cached tf_slim-1.1.0-py2.py3-none-any.whl (352 kB)
Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.15.0)
Requirement already satisfied: pycocotools in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (2.0.2)
Collecting lvis
Using cached lvis-0.5.3-py3-none-any.whl (14 kB)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.4.1)
Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from object-detection==0.1) (1.1.5)
Collecting tf-models-official>=2.5.1
Using cached tf_models_official-2.7.0-py2.py3-none-any.whl (1.8 MB)
Collecting tensorflow_io
Using cached tensorflow_io-0.22.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (22.7 MB)
Collecting keras==2.6.0
Using cached keras-2.6.0-py2.py3-none-any.whl (1.3 MB)
Collecting tensorflow-addons
Using cached tensorflow_addons-0.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)
Requirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.5.12)
Requirement already satisfied: gin-config in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)
Collecting sacrebleu
Using cached sacrebleu-2.0.0-py3-none-any.whl (90 kB)
Requirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.4.8)
Collecting py-cpuinfo>=3.3.0
Using cached py-cpuinfo-8.0.0.tar.gz (99 kB)
Collecting tensorflow-text>=2.7.0
Using cached tensorflow_text-2.7.0-cp37-cp37m-manylinux2010_x86_64.whl (4.9 MB)
Requirement already satisfied: oauth2client in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.1.3)
Collecting seqeval
Using cached seqeval-1.2.2.tar.gz (43 kB)
Requirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.19.5)
Collecting sentencepiece
Using cached sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)
Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.0.1)
Collecting tensorflow-model-optimization>=0.4.1
Using cached tensorflow_model_optimization-0.7.0-py2.py3-none-any.whl (213 kB)
Requirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.12.0)
Collecting opencv-python-headless
Using cached opencv_python_headless-4.5.4.58-cp37-cp37m-manylinux2014_x86_64.whl (47.6 MB)
Collecting tensorflow>=2.7.0
Using cached tensorflow-2.7.0-cp37-cp37m-manylinux2010_x86_64.whl (489.6 MB)
Requirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.7/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.12.8)
Collecting pyyaml>=5.1
Using cached PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)
Requirement already satisfied: google-auth>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.35.0)
Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.1)
Requirement already satisfied: google-api-core<2dev,>=1.21.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.26.3)
Requirement already satisfied: httplib2<1dev,>=0.15.0 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.17.4)
Requirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.7/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.0.4)
Requirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (21.2)
Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.23.0)
Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.53.0)
Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.17.3)
Requirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2018.9)
Requirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (57.4.0)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.7.2)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.2.8)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.2.4)
Requirement already satisfied: urllib3 in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.24.3)
Requirement already satisfied: python-slugify in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (5.0.2)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (4.62.3)
Requirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2021.10.8)
Requirement already satisfied: python-dateutil in /usr/local/lib/python3.7/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2.8.2)
Requirement already satisfied: pyparsing<3,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=14.3->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.4.7)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth>=1.16.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.4.8)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.10)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0dev,>=2.18.0->google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (3.0.4)
INFO: pip is looking at multiple versions of six to determine which version is compatible with other requirements. This could take a while.
Collecting six
Using cached six-1.16.0-py2.py3-none-any.whl (11 kB)
Using cached six-1.15.0-py2.py3-none-any.whl (10 kB)
Using cached six-1.14.0-py2.py3-none-any.whl (10 kB)
Using cached six-1.13.0-py2.py3-none-any.whl (10 kB)
INFO: pip is looking at multiple versions of scipy to determine which version is compatible with other requirements. This could take a while.
Collecting scipy
Using cached scipy-1.7.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (38.2 MB)
INFO: pip is looking at multiple versions of six to determine which version is compatible with other requirements. This could take a while.
Using cached scipy-1.7.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (28.5 MB)
INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking
Using cached scipy-1.7.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (28.5 MB)
Using cached scipy-1.6.3-cp37-cp37m-manylinux1_x86_64.whl (27.4 MB)
Using cached scipy-1.6.2-cp37-cp37m-manylinux1_x86_64.whl (27.4 MB)
Using cached scipy-1.6.1-cp37-cp37m-manylinux1_x86_64.whl (27.4 MB)
Using cached scipy-1.6.0-cp37-cp37m-manylinux1_x86_64.whl (27.4 MB)
INFO: pip is looking at multiple versions of scipy to determine which version is compatible with other requirements. This could take a while.
Using cached scipy-1.5.4-cp37-cp37m-manylinux1_x86_64.whl (25.9 MB)
Using cached scipy-1.5.3-cp37-cp37m-manylinux1_x86_64.whl (25.9 MB)
Using cached scipy-1.5.2-cp37-cp37m-manylinux1_x86_64.whl (25.9 MB)
Using cached scipy-1.5.1-cp37-cp37m-manylinux1_x86_64.whl (25.9 MB)
Using cached scipy-1.5.0-cp37-cp37m-manylinux1_x86_64.whl (25.9 MB)
INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking
Using cached scipy-1.4.1-cp37-cp37m-manylinux1_x86_64.whl (26.1 MB)
Using cached scipy-1.4.0-cp37-cp37m-manylinux1_x86_64.whl (26.1 MB)
Using cached scipy-1.3.3-cp37-cp37m-manylinux1_x86_64.whl (25.2 MB)
Using cached scipy-1.3.2-cp37-cp37m-manylinux1_x86_64.whl (25.2 MB)
Using cached scipy-1.3.1-cp37-cp37m-manylinux1_x86_64.whl (25.2 MB)
Using cached scipy-1.3.0-cp37-cp37m-manylinux1_x86_64.whl (25.2 MB)
Using cached scipy-1.2.3-cp37-cp37m-manylinux1_x86_64.whl (24.8 MB)
Using cached scipy-1.2.2-cp37-cp37m-manylinux1_x86_64.whl (24.8 MB)
Using cached scipy-1.2.1-cp37-cp37m-manylinux1_x86_64.whl (24.8 MB)
Using cached scipy-1.2.0-cp37-cp37m-manylinux1_x86_64.whl (26.6 MB)
Using cached scipy-1.1.0-cp37-cp37m-manylinux1_x86_64.whl (31.2 MB)
Using cached scipy-1.0.1.tar.gz (15.5 MB)
Using cached scipy-1.0.0.tar.gz (15.2 MB)
Using cached scipy-0.19.1.tar.gz (14.1 MB)
INFO: pip is looking at multiple versions of rsa to determine which version is compatible with other requirements. This could take a while.
Collecting rsa<5,>=3.1.4
Using cached rsa-4.7.2-py3-none-any.whl (34 kB)
Using cached rsa-4.7.1-py3-none-any.whl (36 kB)
Using cached rsa-4.7-py3-none-any.whl (34 kB)
Using cached rsa-4.6-py3-none-any.whl (47 kB)
Using cached rsa-4.5-py2.py3-none-any.whl (36 kB)
Using cached rsa-4.4.1-py2.py3-none-any.whl (33 kB)
Using cached rsa-4.3-py2.py3-none-any.whl (36 kB)
INFO: pip is looking at multiple versions of rsa to determine which version is compatible with other requirements. This could take a while.
Using cached rsa-4.2.tar.gz (46 kB)
Using cached rsa-4.1-py3-none-any.whl (32 kB)
Using cached rsa-4.0-py2.py3-none-any.whl (38 kB)
Using cached rsa-3.4.2-py2.py3-none-any.whl (46 kB)
Using cached rsa-3.4.1-py2.py3-none-any.whl (46 kB)
INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking
Using cached rsa-3.4-py2.py3-none-any.whl (46 kB)
Using cached rsa-3.3-py2.py3-none-any.whl (44 kB)
Using cached rsa-3.2.3-py2.py3-none-any.whl (44 kB)
Using cached rsa-3.2.2-py2.py3-none-any.whl (44 kB)
Using cached rsa-3.2-py2.py3-none-any.whl (43 kB)
Using cached rsa-3.1.4.tar.gz (36 kB)
INFO: pip is looking at multiple versions of idna to determine which version is compatible with other requirements. This could take a while.
Collecting idna<3,>=2.5
Using cached idna-2.10-py2.py3-none-any.whl (58 kB)
Using cached idna-2.9-py2.py3-none-any.whl (58 kB)
Using cached idna-2.8-py2.py3-none-any.whl (58 kB)
Using cached idna-2.7-py2.py3-none-any.whl (58 kB)
Using cached idna-2.6-py2.py3-none-any.whl (56 kB)
Using cached idna-2.5-py2.py3-none-any.whl (55 kB)
INFO: pip is looking at multiple versions of chardet to determine which version is compatible with other requirements. This could take a while.
Collecting chardet<4,>=3.0.2
Using cached chardet-3.0.4-py2.py3-none-any.whl (133 kB)
INFO: pip is looking at multiple versions of idna to determine which version is compatible with other requirements. This could take a while.
INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking
Downloading chardet-3.0.3-py2.py3-none-any.whl (133 kB)
Downloading chardet-3.0.2-py2.py3-none-any.whl (133 kB)
INFO: pip is looking at multiple versions of certifi to determine which version is compatible with other requirements. This could take a while.
Collecting certifi
Downloading certifi-2021.10.8-py2.py3-none-any.whl (149 kB)
INFO: pip is looking at multiple versions of chardet to determine which version is compatible with other requirements. This could take a while.
Downloading certifi-2021.5.30-py2.py3-none-any.whl (145 kB)
Downloading certifi-2020.12.5-py2.py3-none-any.whl (147 kB)
INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking
Downloading certifi-2020.11.8-py2.py3-none-any.whl (155 kB)
Downloading certifi-2020.6.20-py2.py3-none-any.whl (156 kB)
Downloading certifi-2020.4.5.2-py2.py3-none-any.whl (157 kB)
Downloading certifi-2020.4.5.1-py2.py3-none-any.whl (157 kB)
INFO: pip is looking at multiple versions of certifi to determine which version is compatible with other requirements. This could take a while.
Downloading certifi-2020.4.5-py2.py3-none-any.whl (156 kB)
Downloading certifi-2019.11.28-py2.py3-none-any.whl (156 kB)
Downloading certifi-2019.9.11-py2.py3-none-any.whl (154 kB)
Downloading certifi-2019.6.16-py2.py3-none-any.whl (157 kB)
Downloading certifi-2019.3.9-py2.py3-none-any.whl (158 kB)
DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.
pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.
ERROR: Exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/pip/_internal/cli/base_command.py", line 180, in _main
status = self.run(options, args)
File "/usr/local/lib/python3.7/dist-packages/pip/_internal/cli/req_command.py", line 199, in wrapper
return func(self, options, args)
File "/usr/local/lib/python3.7/dist-packages/pip/_internal/commands/install.py", line 319, in run
reqs, check_supported_wheels=not options.target_dir
File "/usr/local/lib/python3.7/dist-packages/pip/_internal/resolution/resolvelib/resolver.py", line 128, in resolve
requirements, max_rounds=try_to_avoid_resolution_too_deep
File "/usr/local/lib/python3.7/dist-packages/pip/_vendor/resolvelib/resolvers.py", line 473, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/usr/local/lib/python3.7/dist-packages/pip/_vendor/resolvelib/resolvers.py", line 384, in resolve
raise ResolutionTooDeep(max_rounds)
pip._vendor.resolvelib.resolvers.ResolutionTooDeep: 2000000
Ivan
ANSWER
Answered 2021-Nov-19 at 00:16
QUESTION
I have an import problem when executing my code:
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
2021-10-06 22:27:14.064885: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2021-10-06 22:27:14.064974: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Traceback (most recent call last):
File "C:\Data\breast-cancer-classification\train_model.py", line 10, in
from cancernet.cancernet import CancerNet
File "C:\Data\breast-cancer-classification\cancernet\cancernet.py", line 2, in
from keras.layers.normalization import BatchNormalization
ImportError: cannot import name 'BatchNormalization' from 'keras.layers.normalization' (C:\Users\Catalin\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\layers\normalization\__init__.py)
- Keras version: 2.6.0
- Tensorflow: 2.6.0
- Python version: 3.9.7
The libraries were installed with:
pip install numpy opencv-python pillow tensorflow keras imutils scikit-learn matplotlib
Do you have any ideas?
ANSWER
Answered 2021-Oct-06 at 20:27
You're using outdated imports for tf.keras. Layers can now be imported directly from tensorflow.keras.layers:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
    BatchNormalization, SeparableConv2D, MaxPooling2D, Activation, Flatten, Dropout, Dense
)
from tensorflow.keras import backend as K

class CancerNet:
    @staticmethod
    def build(width, height, depth, classes):
        model = Sequential()
        shape = (height, width, depth)
        channelDim = -1
        if K.image_data_format() == "channels_first":
            shape = (depth, height, width)
            channelDim = 1
        model.add(SeparableConv2D(32, (3, 3), padding="same", input_shape=shape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(SeparableConv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(SeparableConv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(SeparableConv2D(128, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=channelDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model

model = CancerNet()
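Note that build is a static factory method, so the network is obtained by calling CancerNet.build rather than by instantiating the class. A hedged usage sketch continuing the snippet above (the 48x48x3 input and 2 classes are placeholder values, not from the original post):
model = CancerNet.build(width=48, height=48, depth=3, classes=2)  # placeholder dims
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()  # prints the layer stack to verify the architecture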
Community Discussions, Code Snippets contain sources that include Stack Exchange Network
Vulnerabilities
No vulnerabilities reported
Install opencv-python
If you have previous/other manually installed (= not installed via pip) version of OpenCV installed (e.g. cv2 module in the root of Python's site-packages), remove it before installation to avoid conflicts.
Make sure that your pip version is up-to-date (19.3 is the minimum supported version): pip install --upgrade pip. Check the version with pip -V. For example, Linux distributions usually ship with very old pip versions, which cause a lot of unexpected problems, especially with the manylinux format.
Select the correct package for your environment. There are four different packages (see options 1, 2, 3 and 4 below) and you should SELECT ONLY ONE OF THEM. Do not install multiple different packages in the same environment. There is no plugin architecture: all the packages use the same namespace (cv2). If you installed multiple different packages in the same environment, uninstall them all with pip uninstall and reinstall only one package.
a. Packages for standard desktop environments (Windows, macOS, almost any GNU/Linux distribution):
Option 1 - Main modules package: pip install opencv-python
Option 2 - Full package (contains both main modules and contrib/extra modules): pip install opencv-contrib-python (check the contrib/extra modules listing from the OpenCV documentation)
b. Packages for server (headless) environments (such as Docker, cloud environments etc.) with no GUI library dependencies. These packages are smaller than the two packages above because they do not contain any GUI functionality (they are not compiled with Qt or other GUI components). This means the packages avoid a heavy dependency chain to X11 libraries, and you will get, for example, smaller Docker images as a result. You should always use these packages if you do not use cv2.imshow et al. or you are using some other package (such as PyQt) than OpenCV to create your GUI:
Option 3 - Headless main modules package: pip install opencv-python-headless
Option 4 - Headless full package (contains both main modules and contrib/extra modules): pip install opencv-contrib-python-headless (check the contrib/extra modules listing from the OpenCV documentation)
Import the package: import cv2. All packages contain Haar cascade files. cv2.data.haarcascades can be used as a shortcut to the data folder. For example: cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
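As a minimal sketch of the haarcascades shortcut (the image path is a placeholder; the blank-frame fallback just keeps the sketch runnable without a photo):
import cv2
import numpy as np

# Load the bundled frontal-face cascade via the data-folder shortcut.
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

img = cv2.imread("photo.jpg", cv2.IMREAD_GRAYSCALE)  # placeholder path
if img is None:
    img = np.zeros((100, 100), np.uint8)  # blank frame so the sketch still runs

faces = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5)
print(len(faces), "face(s) found")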
Read OpenCV documentation
Before opening a new issue, read the FAQ below and have a look at the other issues which are already open.
The project is structured like a normal Python package with a standard setup.py file. The build process for a single entry in the build matrices is as follows (see for example the appveyor.yml file):
1. In Linux and macOS builds: get OpenCV's optional C dependencies that we compile against
2. Checkout repository and submodules: OpenCV is included as a submodule and the version is updated manually by maintainers when a new OpenCV release has been made; contrib modules are also included as a submodule
3. Find OpenCV version from the sources
4. Build OpenCV: tests are disabled, otherwise build time increases too much; there are 4 build matrix entries for each build combination (with and without contrib modules, with and without GUI/headless); Linux builds run in manylinux Docker containers (CentOS 5); source distributions are separate entries in the build matrix
5. Rearrange OpenCV's build result, add our custom files and generate the wheel (Linux and macOS wheels are transformed with auditwheel and delocate, correspondingly)
6. Install the generated wheel
7. Test that Python can import the library and run some sanity checks
8. Use twine to upload the generated wheel to PyPI (only in release builds)
Steps 1--4 are handled by pip wheel.
CI_BUILD. Set to 1 to emulate the CI environment build behaviour. Used only in CI builds to force certain build flags on in setup.py. Do not use this unless you know what you are doing.
ENABLE_CONTRIB and ENABLE_HEADLESS. Set to 1 to build the contrib and/or headless version
ENABLE_JAVA. Set to 1 to enable the Java client build. This is disabled by default.
CMAKE_ARGS. Additional arguments for OpenCV's CMake invocation. You can use this to make a custom build.