//
//  UIImage+Extension.m
//  knowledgeBase
//
//  Created by 王洪亮 on 16/9/20.
//  Copyright © 2016年 wanghongliang. All rights reserved.
//

#import "UIImage+Extension.h"
// vImage convolution / matrix-multiply routines used below.
// (#import is idempotent, so this is safe even if the header already pulls it in.)
#import <Accelerate/Accelerate.h>

#ifndef YY_SWAP
// Swap two lvalues of the same type.
#define YY_SWAP(_a_, _b_) \
    do { __typeof__(_a_) _tmp_ = (_a_); (_a_) = (_b_); (_b_) = _tmp_; } while (0)
#endif

// Forward declaration: -imageByBlurRadius:... takes this function's address
// before its definition appears further down the file.
static void _yy_cleanupBuffer(void *userData, void *buf_data);

@implementation UIImage (Extension)

/// Returns a 1x1 pt image filled with `color` at the given `alpha`.
/// Useful as a stretchable solid background (e.g. for bars or buttons).
///
/// @param color The fill color. May be nil (produces a clear image).
/// @param alpha Alpha component applied to `color` (0.0 - 1.0).
+ (UIImage *)imageWithBgColor:(UIColor *)color alpha:(CGFloat)alpha {
    UIImage *transparentBackground;
    // Use the main screen's scale. (The original read `.layer.contentsScale`
    // off the UINavigationBar *appearance proxy*, which is not a valid
    // appearance selector and is unreliable.)
    UIGraphicsBeginImageContextWithOptions(CGSizeMake(1, 1), NO, [UIScreen mainScreen].scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(context, [[color colorWithAlphaComponent:alpha] CGColor]);
    UIRectFill(CGRectMake(0, 0, 1, 1));
    transparentBackground = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return transparentBackground;
}

/// Returns the receiver redrawn with a 180° rotation applied via Quartz,
/// preserving the original image's scale and orientation metadata.
- (UIImage *)horTransform {
    UIImage *srcImage = self;
    CGRect rect = CGRectMake(0, 0, srcImage.size.width, srcImage.size.height);
    // Use the image's own scale. (The original hard-coded 2, which renders
    // incorrectly sized bitmaps on 1x and 3x devices.)
    UIGraphicsBeginImageContextWithOptions(rect.size, NO, srcImage.scale);
    CGContextRef currentContext = UIGraphicsGetCurrentContext();
    CGContextClipToRect(currentContext, rect);
    // Rotate 180°, then translate the coordinate system so the rotated
    // drawing lands back inside the visible rect.
    CGContextRotateCTM(currentContext, M_PI);
    CGContextTranslateCTM(currentContext, -rect.size.width, -rect.size.height);
    CGContextDrawImage(currentContext, rect, srcImage.CGImage);
    UIImage *drawImage = UIGraphicsGetImageFromCurrentImageContext();
    // Fix: the original never ended this context, leaking it on the
    // UIGraphics context stack.
    UIGraphicsEndImageContext();
    UIImage *flipImage = [UIImage imageWithCGImage:drawImage.CGImage
                                             scale:srcImage.scale
                                       orientation:srcImage.imageOrientation];
    return flipImage;
}

/// Returns the receiver drawn into `size` with all four corners rounded
/// by `radius` (content outside the rounded rect is clipped away).
///
/// @param radius Corner radius in points.
/// @param size   Output image size in points.
- (UIImage *)imageAddCornerWithRadius:(CGFloat)radius andSize:(CGSize)size {
    CGRect rect = CGRectMake(0, 0, size.width, size.height);
    UIGraphicsBeginImageContextWithOptions(size, NO, [UIScreen mainScreen].scale);
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    UIBezierPath *path = [UIBezierPath bezierPathWithRoundedRect:rect
                                               byRoundingCorners:UIRectCornerAllCorners
                                                     cornerRadii:CGSizeMake(radius, radius)];
    // Clip to the rounded rect, then draw the image inside it.
    CGContextAddPath(ctx, path.CGPath);
    CGContextClip(ctx);
    [self drawInRect:rect];
    // (A CGContextDrawPath(kCGPathFillStroke) call that followed here in the
    // original was a no-op: CGContextClip already consumed the path.)
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}

/// Returns a box-blurred copy of `image` using three vImage convolve passes
/// (approximating a Gaussian blur).
///
/// @param blur  Blur strength in 0.0 - 1.0; out-of-range values fall back to 0.5.
/// @param image The source image. Alpha is flattened via a JPEG round-trip.
+ (UIImage *)boxblurImageWithBlur:(CGFloat)blur andImage:(UIImage *)image {
    // Round-trip through JPEG to flatten alpha and normalize the backing
    // bitmap layout before treating it as ARGB8888 below.
    NSData *imageData = UIImageJPEGRepresentation(image, 1); // convert to jpeg
    UIImage *destImage = [UIImage imageWithData:imageData];

    if (blur < 0.f || blur > 1.f) {
        blur = 0.5f;
    }
    // vImageBoxConvolve requires an odd kernel size.
    int boxSize = (int)(blur * 40);
    boxSize = boxSize - (boxSize % 2) + 1;

    CGImageRef img = destImage.CGImage;

    vImage_Buffer inBuffer, outBuffer, outBuffer2;
    vImage_Error error;

    // Wrap the CGImage's backing bytes as the (read-only) input buffer.
    CGDataProviderRef inProvider = CGImageGetDataProvider(img);
    CFDataRef inBitmapData = CGDataProviderCopyData(inProvider);
    inBuffer.width = CGImageGetWidth(img);
    inBuffer.height = CGImageGetHeight(img);
    inBuffer.rowBytes = CGImageGetBytesPerRow(img);
    inBuffer.data = (void *)CFDataGetBytePtr(inBitmapData);

    // Two working buffers to ping-pong the convolve passes through.
    size_t bufferSize = CGImageGetBytesPerRow(img) * CGImageGetHeight(img);
    void *pixelBuffer = malloc(bufferSize);
    void *pixelBuffer2 = malloc(bufferSize);
    if (pixelBuffer == NULL || pixelBuffer2 == NULL) {
        // Fix: the original only logged the failed malloc and then used the
        // NULL buffer anyway. Bail out, returning the unmodified input.
        NSLog(@"No pixelbuffer");
        free(pixelBuffer);
        free(pixelBuffer2);
        CFRelease(inBitmapData);
        return image;
    }
    outBuffer.data = pixelBuffer;
    outBuffer.width = CGImageGetWidth(img);
    outBuffer.height = CGImageGetHeight(img);
    outBuffer.rowBytes = CGImageGetBytesPerRow(img);
    outBuffer2.data = pixelBuffer2;
    outBuffer2.width = CGImageGetWidth(img);
    outBuffer2.height = CGImageGetHeight(img);
    outBuffer2.rowBytes = CGImageGetBytesPerRow(img);

    // Three convolve passes. Fix: the original wrote the second pass back
    // into inBuffer — i.e. into the immutable bytes owned by the CFDataRef
    // (CFDataGetBytePtr returns a const pointer), which is undefined
    // behavior. Ping-pong between the two malloc'd buffers instead,
    // finishing in outBuffer.
    error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, NULL, 0, 0,
                                       boxSize, boxSize, NULL, kvImageEdgeExtend);
    if (error) {
        NSLog(@"error from convolution %ld", error);
    }
    error = vImageBoxConvolve_ARGB8888(&outBuffer, &outBuffer2, NULL, 0, 0,
                                       boxSize, boxSize, NULL, kvImageEdgeExtend);
    if (error) {
        NSLog(@"error from convolution %ld", error);
    }
    error = vImageBoxConvolve_ARGB8888(&outBuffer2, &outBuffer, NULL, 0, 0,
                                       boxSize, boxSize, NULL, kvImageEdgeExtend);
    if (error) {
        NSLog(@"error from convolution %ld", error);
    }

    // Re-wrap the blurred pixels as a CGImage / UIImage.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(outBuffer.data,
                                             outBuffer.width,
                                             outBuffer.height,
                                             8,
                                             outBuffer.rowBytes,
                                             colorSpace,
                                             (CGBitmapInfo)kCGImageAlphaNoneSkipLast);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *returnImage = [UIImage imageWithCGImage:imageRef];

    // Clean up.
    CGContextRelease(ctx);
    CGColorSpaceRelease(colorSpace);
    free(pixelBuffer);
    free(pixelBuffer2);
    CFRelease(inBitmapData);
    CGImageRelease(imageRef);

    return returnImage;
}

/// Applies a Gaussian-approximating blur, optional saturation adjustment,
/// optional tint, and optional mask to the receiver. (Ported from YYKit /
/// Apple's UIImageEffects sample.)
///
/// @param blurRadius    Blur radius in points; <= 0 disables blur.
/// @param tintColor     Optional tint overlaid with `tintBlendMode`.
/// @param tintBlendMode Blend mode used for `tintColor`.
/// @param saturation    1.0 leaves saturation unchanged.
/// @param maskImage     Optional mask; must be CGImage-backed if non-nil.
/// @return The processed image, or nil on invalid input / vImage failure.
- (UIImage *)imageByBlurRadius:(CGFloat)blurRadius
                     tintColor:(UIColor *)tintColor
                      tintMode:(CGBlendMode)tintBlendMode
                    saturation:(CGFloat)saturation
                     maskImage:(UIImage *)maskImage {
    if (self.size.width < 1 || self.size.height < 1) {
        NSLog(@"UIImage+YYAdd error: invalid size: (%.2f x %.2f). Both dimensions must be >= 1: %@", self.size.width, self.size.height, self);
        return nil;
    }
    if (!self.CGImage) {
        NSLog(@"UIImage+YYAdd error: inputImage must be backed by a CGImage: %@", self);
        return nil;
    }
    if (maskImage && !maskImage.CGImage) {
        NSLog(@"UIImage+YYAdd error: effectMaskImage must be backed by a CGImage: %@", maskImage);
        return nil;
    }

    // iOS7 and above can use new func. (Weak-link check: the symbols are
    // non-NULL only when the newer vImage<->CGImage APIs are available.)
    BOOL hasNewFunc = (long)vImageBuffer_InitWithCGImage != 0 && (long)vImageCreateCGImageFromBuffer != 0;
    BOOL hasBlur = blurRadius > __FLT_EPSILON__;
    BOOL hasSaturation = fabs(saturation - 1.0) > __FLT_EPSILON__;

    CGSize size = self.size;
    CGRect rect = { CGPointZero, size };
    CGFloat scale = self.scale;
    CGImageRef imageRef = self.CGImage;
    BOOL opaque = NO;

    // Nothing to convolve: just merge tint/mask and return.
    if (!hasBlur && !hasSaturation) {
        return [self _yy_mergeImageRef:imageRef tintColor:tintColor tintBlendMode:tintBlendMode maskImage:maskImage opaque:opaque];
    }

    vImage_Buffer effect = { 0 }, scratch = { 0 };
    vImage_Buffer *input = NULL, *output = NULL;
    vImage_CGImageFormat format = {
        .bitsPerComponent = 8,
        .bitsPerPixel = 32,
        .colorSpace = NULL,
        .bitmapInfo = kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Little, //requests a BGRA buffer.
        .version = 0,
        .decode = NULL,
        .renderingIntent = kCGRenderingIntentDefault
    };

    if (hasNewFunc) {
        // Modern path: vImage manages its own buffers.
        vImage_Error err;
        err = vImageBuffer_InitWithCGImage(&effect, &format, NULL, imageRef, kvImagePrintDiagnosticsToConsole);
        if (err != kvImageNoError) {
            NSLog(@"UIImage+YYAdd error: vImageBuffer_InitWithCGImage returned error code %zi for inputImage: %@", err, self);
            return nil;
        }
        err = vImageBuffer_Init(&scratch, effect.height, effect.width, format.bitsPerPixel, kvImageNoFlags);
        if (err != kvImageNoError) {
            NSLog(@"UIImage+YYAdd error: vImageBuffer_Init returned error code %zi for inputImage: %@", err, self);
            return nil;
        }
    } else {
        // Legacy path: stack two UIGraphics image contexts and borrow their
        // bitmap storage as the effect/scratch buffers. Both contexts are
        // popped in the output stage below.
        UIGraphicsBeginImageContextWithOptions(size, opaque, scale);
        CGContextRef effectCtx = UIGraphicsGetCurrentContext();
        // Flip the context so the CGImage draws upright.
        CGContextScaleCTM(effectCtx, 1.0, -1.0);
        CGContextTranslateCTM(effectCtx, 0, -size.height);
        CGContextDrawImage(effectCtx, rect, imageRef);
        effect.data = CGBitmapContextGetData(effectCtx);
        effect.width = CGBitmapContextGetWidth(effectCtx);
        effect.height = CGBitmapContextGetHeight(effectCtx);
        effect.rowBytes = CGBitmapContextGetBytesPerRow(effectCtx);
        UIGraphicsBeginImageContextWithOptions(size, opaque, scale);
        CGContextRef scratchCtx = UIGraphicsGetCurrentContext();
        scratch.data = CGBitmapContextGetData(scratchCtx);
        scratch.width = CGBitmapContextGetWidth(scratchCtx);
        scratch.height = CGBitmapContextGetHeight(scratchCtx);
        scratch.rowBytes = CGBitmapContextGetBytesPerRow(scratchCtx);
    }

    input = &effect;
    output = &scratch;

    if (hasBlur) {
        // A description of how to compute the box kernel width from the Gaussian
        // radius (aka standard deviation) appears in the SVG spec:
        // http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement
        //
        // For larger values of 's' (s >= 2.0), an approximation can be used: Three
        // successive box-blurs build a piece-wise quadratic convolution kernel, which
        // approximates the Gaussian kernel to within roughly 3%.
        //
        // let d = floor(s * 3*sqrt(2*pi)/4 + 0.5)
        //
        // ... if d is odd, use three box-blurs of size 'd', centered on the output pixel.
        CGFloat inputRadius = blurRadius * scale;
        if (inputRadius - 2.0 < __FLT_EPSILON__) inputRadius = 2.0;
        uint32_t radius = floor((inputRadius * 3.0 * sqrt(2 * M_PI) / 4 + 0.5) / 2);
        radius |= 1; // force radius to be odd so that the three box-blur methodology works.
        int iterations;
        if (blurRadius * scale < 0.5) iterations = 1;
        else if (blurRadius * scale < 1.5) iterations = 2;
        else iterations = 3;
        // Pre-size the temp buffer once, then reuse it for every pass.
        NSInteger tempSize = vImageBoxConvolve_ARGB8888(input, output, NULL, 0, 0, radius, radius, NULL,
                                                        kvImageGetTempBufferSize | kvImageEdgeExtend);
        void *temp = malloc(tempSize);
        for (int i = 0; i < iterations; i++) {
            vImageBoxConvolve_ARGB8888(input, output, temp, 0, 0, radius, radius, NULL, kvImageEdgeExtend);
            YY_SWAP(input, output);
        }
        free(temp);
    }

    if (hasSaturation) {
        // These values appear in the W3C Filter Effects spec:
        // https://dvcs.w3.org/hg/FXTF/raw-file/default/filters/Publish.html#grayscaleEquivalent
        CGFloat s = saturation;
        CGFloat matrixFloat[] = {
            0.0722 + 0.9278 * s, 0.0722 - 0.0722 * s, 0.0722 - 0.0722 * s, 0,
            0.7152 - 0.7152 * s, 0.7152 + 0.2848 * s, 0.7152 - 0.7152 * s, 0,
            0.2126 - 0.2126 * s, 0.2126 - 0.2126 * s, 0.2126 + 0.7873 * s, 0,
            0,                   0,                   0,                   1,
        };
        // vImage takes a fixed-point matrix; scale by 256 and round.
        const int32_t divisor = 256;
        NSUInteger matrixSize = sizeof(matrixFloat) / sizeof(matrixFloat[0]);
        int16_t matrix[matrixSize];
        for (NSUInteger i = 0; i < matrixSize; ++i) {
            matrix[i] = (int16_t)roundf(matrixFloat[i] * divisor);
        }
        vImageMatrixMultiply_ARGB8888(input, output, matrix, divisor, NULL, NULL, kvImageNoFlags);
        YY_SWAP(input, output);
    }

    UIImage *outputImage = nil;
    if (hasNewFunc) {
        CGImageRef effectCGImage = NULL;
        // kvImageNoAllocate hands buffer ownership to the CGImage; the
        // cleanup callback frees it when the image is released.
        effectCGImage = vImageCreateCGImageFromBuffer(input, &format, &_yy_cleanupBuffer, NULL, kvImageNoAllocate, NULL);
        if (effectCGImage == NULL) {
            // Fallback: let vImage copy the buffer, then free ours manually.
            effectCGImage = vImageCreateCGImageFromBuffer(input, &format, NULL, NULL, kvImageNoFlags, NULL);
            free(input->data);
        }
        free(output->data);
        outputImage = [self _yy_mergeImageRef:effectCGImage tintColor:tintColor tintBlendMode:tintBlendMode maskImage:maskImage opaque:opaque];
        CGImageRelease(effectCGImage);
    } else {
        // Legacy path: the final result lives in whichever stacked context
        // `input` points at. Pop contexts in LIFO order, snapshotting the
        // right one (scratch is the top context, effect is beneath it).
        CGImageRef effectCGImage;
        UIImage *effectImage;
        if (input != &effect) effectImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        if (input == &effect) effectImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        effectCGImage = effectImage.CGImage;
        outputImage = [self _yy_mergeImageRef:effectCGImage tintColor:tintColor tintBlendMode:tintBlendMode maskImage:maskImage opaque:opaque];
    }
    return outputImage;
}

// Helper function to handle deferred cleanup of a buffer handed to
// vImageCreateCGImageFromBuffer with kvImageNoAllocate.
static void _yy_cleanupBuffer(void *userData, void *buf_data) {
    free(buf_data);
}

/// Helper: composites `effectCGImage` with optional tint and mask, using the
/// receiver's size/scale. Returns the merged image (or a plain wrapper when
/// neither tint nor mask applies).
/// NOTE(review): the no-tint/no-mask path drops the receiver's scale and
/// orientation metadata — presumably acceptable to callers; verify if points
/// vs pixels sizing matters downstream.
- (UIImage *)_yy_mergeImageRef:(CGImageRef)effectCGImage
                     tintColor:(UIColor *)tintColor
                 tintBlendMode:(CGBlendMode)tintBlendMode
                     maskImage:(UIImage *)maskImage
                        opaque:(BOOL)opaque {
    BOOL hasTint = tintColor != nil && CGColorGetAlpha(tintColor.CGColor) > __FLT_EPSILON__;
    BOOL hasMask = maskImage != nil;
    CGSize size = self.size;
    CGRect rect = { CGPointZero, size };
    CGFloat scale = self.scale;

    if (!hasTint && !hasMask) {
        return [UIImage imageWithCGImage:effectCGImage];
    }

    UIGraphicsBeginImageContextWithOptions(size, opaque, scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // Flip the context so CGImages draw upright.
    CGContextScaleCTM(context, 1.0, -1.0);
    CGContextTranslateCTM(context, 0, -size.height);
    if (hasMask) {
        // Draw the unprocessed image first, then clip subsequent drawing
        // (the effect + tint) to the mask.
        CGContextDrawImage(context, rect, self.CGImage);
        CGContextSaveGState(context);
        CGContextClipToMask(context, rect, maskImage.CGImage);
    }
    CGContextDrawImage(context, rect, effectCGImage);
    if (hasTint) {
        CGContextSaveGState(context);
        CGContextSetBlendMode(context, tintBlendMode);
        CGContextSetFillColorWithColor(context, tintColor.CGColor);
        CGContextFillRect(context, rect);
        CGContextRestoreGState(context);
    }
    if (hasMask) {
        CGContextRestoreGState(context);
    }
    UIImage *outputImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return outputImage;
}

/// Returns a copy of the receiver redrawn so its pixel data is upright and
/// its imageOrientation is Up. Returns self unchanged if already upright.
- (UIImage *)fixOrientation {
    // No-op if the orientation is already correct.
    if (self.imageOrientation == UIImageOrientationUp) {
        return self;
    }

    // We need to calculate the proper transformation to make the image upright.
    // We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
    CGAffineTransform transform = CGAffineTransformIdentity;
    switch (self.imageOrientation) {
        case UIImageOrientationDown:
        case UIImageOrientationDownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;
        case UIImageOrientationLeft:
        case UIImageOrientationLeftMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0);
            transform = CGAffineTransformRotate(transform, M_PI_2);
            break;
        case UIImageOrientationRight:
        case UIImageOrientationRightMirrored:
            transform = CGAffineTransformTranslate(transform, 0, self.size.height);
            transform = CGAffineTransformRotate(transform, -M_PI_2);
            break;
        case UIImageOrientationUp:
        case UIImageOrientationUpMirrored:
            break;
    }
    switch (self.imageOrientation) {
        case UIImageOrientationUpMirrored:
        case UIImageOrientationDownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0);
            transform = CGAffineTransformScale(transform, -1, 1);
            break;
        case UIImageOrientationLeftMirrored:
        case UIImageOrientationRightMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.height, 0);
            transform = CGAffineTransformScale(transform, -1, 1);
            break;
        case UIImageOrientationUp:
        case UIImageOrientationDown:
        case UIImageOrientationLeft:
        case UIImageOrientationRight:
            break;
    }

    // Now we draw the underlying CGImage into a new context, applying the transform
    // calculated above.
    CGContextRef ctx = CGBitmapContextCreate(NULL,
                                             self.size.width,
                                             self.size.height,
                                             CGImageGetBitsPerComponent(self.CGImage),
                                             0,
                                             CGImageGetColorSpace(self.CGImage),
                                             CGImageGetBitmapInfo(self.CGImage));
    CGContextConcatCTM(ctx, transform);
    switch (self.imageOrientation) {
        case UIImageOrientationLeft:
        case UIImageOrientationLeftMirrored:
        case UIImageOrientationRight:
        case UIImageOrientationRightMirrored:
            // 90°-rotated orientations: width/height are swapped when drawing.
            CGContextDrawImage(ctx, CGRectMake(0, 0, self.size.height, self.size.width), self.CGImage);
            break;
        default:
            CGContextDrawImage(ctx, CGRectMake(0, 0, self.size.width, self.size.height), self.CGImage);
            break;
    }

    // And now we just create a new UIImage from the drawing context.
    CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
    UIImage *img = [UIImage imageWithCGImage:cgimg];
    CGContextRelease(ctx);
    CGImageRelease(cgimg);
    return img;
}

/// Loads the named asset-catalog image with AlwaysOriginal rendering mode
/// (i.e. never tinted by the surrounding tintColor).
+ (UIImage *)originImageWithName:(NSString *)name {
    return [[UIImage imageNamed:name] imageWithRenderingMode:UIImageRenderingModeAlwaysOriginal];
}

/// Returns the receiver clipped to a circle whose diameter is the smaller of
/// the image's two dimensions, anchored at the image's top-left corner.
- (UIImage *)circleImage {
    CGSize size = self.size;
    CGFloat drawWH = size.width < size.height ? size.width : size.height;
    // 1. Open an image context. Fix: use the image's own scale (the original
    // used UIGraphicsBeginImageContext, which is always 1x and blurry on
    // retina screens). NO keeps the area outside the circle transparent.
    UIGraphicsBeginImageContextWithOptions(CGSizeMake(drawWH, drawWH), NO, self.scale);
    // 2. Clip to a circular region.
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGRect clipRect = CGRectMake(0, 0, drawWH, drawWH);
    CGContextAddEllipseInRect(context, clipRect);
    CGContextClip(context);
    // 3. Draw the full image; only the circular region survives the clip.
    CGRect drawRect = CGRectMake(0, 0, size.width, size.height);
    [self drawInRect:drawRect];
    // 4. Snapshot the result.
    UIImage *resultImage = UIGraphicsGetImageFromCurrentImageContext();
    // 5. Close the context.
    UIGraphicsEndImageContext();
    return resultImage;
}

@end