getFinalImageData(from:) takes a UIImage, downscales its longest dimension (to 400 points in this example), and returns it as Data compressed to fit within a byte limit (using the two UIImage extension methods). The function works, but I would love to get some other eyes on it.
/// A byte count, expressed as `Int64` to match the width used by the
/// compression APIs in this file.
typealias Bytes = Int64

extension Bytes {
    // Common size thresholds, named for readability at call sites.
    // NOTE(review): because these hang off `Int64` itself, they are visible
    // on every Int64 in the module — a caseless enum namespace would avoid
    // that pollution if it ever becomes a problem.
    static let KB300: Int64 = 300 * 1_000   // 300 kB (decimal)
    static let MB1: Int64 = 1 << 20         // 1 MiB
    static let MB2: Int64 = 2 << 20         // 2 MiB
    static let MB80: Int64 = 80 << 20       // 80 MiB
    static let MB100: Int64 = 100 << 20     // 100 MiB
    static let MB120: Int64 = 120 << 20     // 120 MiB
}
extension UIImage {
    /// Returns the image encoded as JPEG `Data` no larger than `limit` bytes,
    /// or `nil` if even maximum compression (quality 0) cannot fit the limit.
    ///
    /// Re-encodes at progressively lower quality (1.0 down to 0.0 in steps
    /// of 0.1) until the output fits. Worst case this encodes the image
    /// 11 times; a binary search over the quality range would cut that to
    /// ~4 encodes if this ever shows up in profiling.
    ///
    /// - Parameter limit: Maximum allowed size of the returned data, in bytes.
    /// - Returns: JPEG data whose `count` is `<= limit`, or `nil`.
    func compressedToJPEGData(maxBytes limit: Int64) -> Data? {
        // 1.0 = least compression, 0.0 = most; the 0.1 step defines the
        // granularity of the search. 10 * 0.1 rounds to exactly 1.0 in
        // IEEE double, so the stride does reach 0.0.
        for quality in stride(from: 1.0, through: 0.0, by: -0.1) {
            if let data = jpegData(compressionQuality: CGFloat(quality)),
               data.count <= limit {
                return data
            }
        }
        return nil
    }

    /// Returns a copy of the image whose longest side is at most `limit`
    /// points, preserving the aspect ratio. Returns `self` unchanged when
    /// it already fits.
    ///
    /// - Parameter limit: Maximum length, in points, of the longer dimension.
    func downscaled(maxPoints limit: CGFloat) -> UIImage {
        let longestSide = max(size.width, size.height)
        guard longestSide > limit else {
            return self
        }

        let scaleFactor = limit / longestSide
        let targetSize = CGSize(width: size.width * scaleFactor,
                                height: size.height * scaleFactor)

        // Preserve the source image's scale. The renderer's default format
        // uses the main screen's scale, so on a 2x/3x device the result's
        // pixel dimensions (and encoded byte size) would be multiplied far
        // beyond what the point limit implies.
        let format = UIGraphicsImageRendererFormat.default()
        format.scale = scale

        return UIGraphicsImageRenderer(size: targetSize, format: format).image { _ in
            draw(in: CGRect(origin: .zero, size: targetSize))
        }
    }
}
/// Downscales `image` so its longest side is at most `maxPoints` points,
/// then returns it as JPEG data compressed to fit within `maxBytes`.
///
/// - Parameters:
///   - image: The source image.
///   - maxPoints: Maximum length of the longer dimension, in points.
///     Defaults to 400.
///   - maxBytes: Maximum size of the returned data, in bytes.
///     Defaults to 300 kB.
/// - Returns: JPEG data within the byte limit, or `nil` if the limit cannot
///   be met even at maximum compression.
func getFinalImageData(from image: UIImage,
                       maxPoints: CGFloat = 400,
                       maxBytes: Int64 = Bytes.KB300) -> Data? {
    // The original's if-let/return-nil was redundant: the optional result
    // can be returned directly.
    return image.downscaled(maxPoints: maxPoints)
        .compressedToJPEGData(maxBytes: maxBytes)
}