
I'm attempting to detect a piece of paper in a photo on the iPhone using OpenCV. I'm using the code from this question: OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection

Here's the code:

- (void)findEdges {
    image = [[UIImage imageNamed:@"photo.JPG"] retain];
    Mat matImage = [image CVMat];
    find_squares(matImage, points);

    UIImageView *imageView = [[[UIImageView alloc] initWithImage:image] autorelease];
    [imageView setFrame:CGRectMake(0.0f, 0.0f, self.frame.size.width, self.frame.size.height)];
    [self addSubview:imageView];
    [imageView setAlpha:0.3f];
}
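
A note on the ivars: -findEdges and -drawRect: read and write image and points without showing their declarations. Presumably the view declares them roughly like this (the class name below is made up, and prototypes for find_squares and angle would also need to appear above -findEdges for the file to compile):

// Hypothetical interface for the view above; `image` and `points` are the
// ivars that -findEdges fills in and -drawRect: later reads.
@interface PaperDetectionView : UIView {
    UIImage *image;                               // the photo being analysed
    std::vector<std::vector<cv::Point> > points;  // quads returned by find_squares()
}
- (void)findEdges;
@end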

- (void)drawRect:(CGRect)rect {
    [super drawRect:rect];

    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetRGBStrokeColor(context, 1.0, 0.0, 0.0, 0.8);

    CGFloat scaleX = self.frame.size.width / image.size.width;
    CGFloat scaleY = self.frame.size.height / image.size.height;

    // Draw the detected squares.
    for( vector<vector<cv::Point> >::const_iterator it = points.begin(); it != points.end(); it++ ) {
        vector<cv::Point> square = *it;

        cv::Point p1 = square[0];
        cv::Point p2 = square[1];
        cv::Point p3 = square[2];
        cv::Point p4 = square[3];

        CGContextBeginPath(context);

        CGContextMoveToPoint(context, p1.x * scaleX, p1.y * scaleY); //start point
        CGContextAddLineToPoint(context, p2.x * scaleX, p2.y * scaleY);
        CGContextAddLineToPoint(context, p3.x * scaleX, p3.y * scaleY);
        CGContextAddLineToPoint(context, p4.x * scaleX, p4.y * scaleY); // end path

        CGContextClosePath(context);
        CGContextSetLineWidth(context, 4.0);
        CGContextStrokePath(context);
    }
}

double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 ) {
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
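
For context, angle() returns the cosine of the angle at pt0 between the vectors to pt1 and pt2; find_squares() below takes its absolute value and keeps only quads whose largest corner cosine stays under 0.3, i.e. every corner is within roughly 17.5° of a right angle (acos 0.3 ≈ 72.5°). A throwaway illustration with made-up points:

// Rough sanity check of the cosine convention (values are illustrative only):
cv::Point corner(0, 0);
double rightAngle = angle(cv::Point(100, 0), cv::Point(0, 100), corner);  // ~0.0  -> passes maxCosine < 0.3
double skewed     = angle(cv::Point(100, 0), cv::Point(100, 60), corner); // ~0.86 -> rejected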

void find_squares(Mat& image, vector<vector<cv::Point> >& squares) {
    // Blur first to reduce noise before edge detection.
    // Note: Mat blurred(image) would only copy the header and share pixel data
    // with `image`; clone() keeps the blur from overwriting the caller's matrix.
    Mat blurred = image.clone();
    medianBlur(image, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<cv::Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);

                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, Mat(), cv::Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            vector<cv::Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;

                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}
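
Since the comment further down suggests the detection may be fine and only the on-screen drawing is off, one way to separate the two is to draw the detected quads straight onto the cv::Mat and inspect that image, bypassing the Core Graphics scaling entirely. A debug-only sketch, assuming the same squares vector as above (draw_squares is a name I made up, not part of the original code):

// Debug helper: draw each detected quad directly onto the OpenCV image so the
// result can be checked independently of the UIKit drawing code.
static void draw_squares(Mat& image, const vector<vector<cv::Point> >& squares) {
    for (size_t i = 0; i < squares.size(); i++) {
        const cv::Point* pts = &squares[i][0];
        int npts = (int)squares[i].size();
        cv::polylines(image, &pts, &npts, 1, true, cv::Scalar(0, 0, 255), 4);
    }
}

Calling draw_squares(matImage, points) right after find_squares(...) in -findEdges and displaying matImage via whatever Mat-to-UIImage conversion your category provides would show whether the quads are already wrong at detection time or only misplaced by the drawRect: scaling.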

Here's the input image:

[input image]

Here's the result:

[result image]

What am I doing wrong?

JWood
  • I think the problem is the scale at which you draw the points on the screen not how you detect them. OpenGL has a different reference system than the image. – Sam Aug 07 '12 at 14:25
  • @JWood I have the same issue. Did you find a solution for this? Please give me a suggestion, thanks. – QueueOverFlow Oct 27 '12 at 09:31
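
Regarding Sam's point about the coordinate reference system: one thing worth checking is which coordinate space the detected points live in. image.size is reported after the UIImage's imageOrientation has been applied, while the Mat produced by [image CVMat] typically reflects the raw pixel layout of the underlying CGImage, so for camera photos the two can disagree. A sketch of scaling the overlay against the Mat's own dimensions inside -drawRect: instead, assuming matImage is kept around as an extra ivar (that ivar is not in the original code):

// Scale detected points against the Mat that was actually searched,
// rather than against image.size (which already reflects orientation).
CGFloat scaleX = self.frame.size.width  / (CGFloat)matImage.cols;
CGFloat scaleY = self.frame.size.height / (CGFloat)matImage.rows;

for (size_t i = 0; i < points.size(); i++) {
    const vector<cv::Point>& square = points[i];
    CGContextBeginPath(context);
    CGContextMoveToPoint(context, square[0].x * scaleX, square[0].y * scaleY);
    for (size_t j = 1; j < square.size(); j++) {
        CGContextAddLineToPoint(context, square[j].x * scaleX, square[j].y * scaleY);
    }
    CGContextClosePath(context);
    CGContextSetLineWidth(context, 4.0);
    CGContextStrokePath(context);
}

If the overlay still lands in the wrong place, logging matImage.cols and matImage.rows next to image.size.width and image.size.height is a quick way to confirm whether an orientation mismatch is involved.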

0 Answers