| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license | |
| 5 * that can be found in the LICENSE file in the root of the source | |
| 6 * tree. An additional intellectual property rights grant can be found | |
| 7 * in the file PATENTS. All contributing project authors may | |
| 8 * be found in the AUTHORS file in the root of the source tree. | |
| 9 */ | |
| 10 | |
| 11 | |
| 12 #include "onyx_int.h" | |
| 13 | |
/* Accumulate SSIM statistics over one 16x16 block.
 * s, r:   source and reference pixel blocks; sp, rp are their row strides.
 * The pixel sums, sums of squares and cross products are ADDED into the
 * caller-provided accumulators, which must be initialized by the caller. */
void vp8_ssim_parms_16x16_c
(
    unsigned char *s,
    int sp,
    unsigned char *r,
    int rp,
    unsigned long *sum_s,
    unsigned long *sum_r,
    unsigned long *sum_sq_s,
    unsigned long *sum_sq_r,
    unsigned long *sum_sxr
)
{
    int row, col;

    for (row = 0; row < 16; row++)
    {
        for (col = 0; col < 16; col++)
        {
            unsigned long sv = s[col];
            unsigned long rv = r[col];

            *sum_s    += sv;
            *sum_r    += rv;
            *sum_sq_s += sv * sv;
            *sum_sq_r += rv * rv;
            *sum_sxr  += sv * rv;
        }

        s += sp;
        r += rp;
    }
}
/* Accumulate SSIM statistics over one 8x8 block.
 * Same contract as vp8_ssim_parms_16x16_c, for an 8x8 window: the sums,
 * sums of squares and cross products are ADDED into the caller-provided
 * accumulators, which must be initialized by the caller. */
void vp8_ssim_parms_8x8_c
(
    unsigned char *s,
    int sp,
    unsigned char *r,
    int rp,
    unsigned long *sum_s,
    unsigned long *sum_r,
    unsigned long *sum_sq_s,
    unsigned long *sum_sq_r,
    unsigned long *sum_sxr
)
{
    int row, col;

    for (row = 0; row < 8; row++)
    {
        for (col = 0; col < 8; col++)
        {
            unsigned long sv = s[col];
            unsigned long rv = r[col];

            *sum_s    += sv;
            *sum_r    += rv;
            *sum_sq_s += sv * sv;
            *sum_sq_r += rv * rv;
            *sum_sxr  += sv * rv;
        }

        s += sp;
        r += rp;
    }
}
| 66 | |
/* SSIM stabilizing constants, pre-scaled for a 64-pixel window:
 * cc1 = (64^2 * (.01*255)^2), cc2 = (64^2 * (.03*255)^2). */
static const int64_t cc1 = 26634;
static const int64_t cc2 = 239708;

/* Compute the SSIM score for one block from its accumulated statistics.
 * count is the pixel count of the window (64 for 8x8, 256 for 16x16).
 *
 * All pixel-sum products are evaluated in int64_t. On ILP32 targets
 * unsigned long is 32 bits, and for a 16x16 block 2*sum_s*sum_r can reach
 * ~2*65280^2 (> 2^32), so the original unsigned-long products wrapped
 * before being widened by the int64_t addend. */
static double similarity
(
    unsigned long sum_s,
    unsigned long sum_r,
    unsigned long sum_sq_s,
    unsigned long sum_sq_r,
    unsigned long sum_sxr,
    int count
)
{
    int64_t ssim_n, ssim_d;
    int64_t c1, c2;

    /* scale the constants by number of pixels */
    c1 = (cc1 * count * count) >> 12;
    c2 = (cc2 * count * count) >> 12;

    ssim_n = ((int64_t)2 * sum_s * sum_r + c1) *
             ((int64_t)2 * count * sum_sxr - (int64_t)2 * sum_s * sum_r + c2);

    ssim_d = ((int64_t)sum_s * sum_s + (int64_t)sum_r * sum_r + c1) *
             ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s +
              (int64_t)count * sum_sq_r - (int64_t)sum_r * sum_r + c2);

    return ssim_n * 1.0 / ssim_d;
}
| 96 | |
/* SSIM of one 16x16 block (256 pixels). */
static double ssim_16x16(unsigned char *s, int sp, unsigned char *r, int rp)
{
    unsigned long sum_s = 0;
    unsigned long sum_r = 0;
    unsigned long sum_sq_s = 0;
    unsigned long sum_sq_r = 0;
    unsigned long sum_sxr = 0;

    vp8_ssim_parms_16x16(s, sp, r, rp,
                         &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);

    return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
}
/* SSIM of one 8x8 block (64 pixels). */
static double ssim_8x8(unsigned char *s, int sp, unsigned char *r, int rp)
{
    unsigned long sum_s = 0;
    unsigned long sum_r = 0;
    unsigned long sum_sq_s = 0;
    unsigned long sum_sq_r = 0;
    unsigned long sum_sxr = 0;

    vp8_ssim_parms_8x8(s, sp, r, rp,
                       &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);

    return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
}
| 109 | |
| 110 // TODO: (jbb) tried to scale this function such that we may be able to use it | |
| 111 // for distortion metric in mode selection code ( provided we do a reconstructio
n) | |
| 112 long dssim(unsigned char *s,int sp, unsigned char *r,int rp) | |
| 113 { | |
| 114 unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0; | |
| 115 int64_t ssim3; | |
| 116 int64_t ssim_n1,ssim_n2; | |
| 117 int64_t ssim_d1,ssim_d2; | |
| 118 int64_t ssim_t1,ssim_t2; | |
| 119 int64_t c1, c2; | |
| 120 | |
| 121 // normalize by 256/64 | |
| 122 c1 = cc1*16; | |
| 123 c2 = cc2*16; | |
| 124 | |
| 125 vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &su
m_sxr); | |
| 126 ssim_n1 = (2*sum_s*sum_r+ c1); | |
| 127 | |
| 128 ssim_n2 =((int64_t) 2*256*sum_sxr-(int64_t) 2*sum_s*sum_r+c2); | |
| 129 | |
| 130 ssim_d1 =((int64_t)sum_s*sum_s +(int64_t)sum_r*sum_r+c1); | |
| 131 | |
| 132 ssim_d2 = (256 * (int64_t) sum_sq_s-(int64_t) sum_s*sum_s + | |
| 133 (int64_t) 256*sum_sq_r-(int64_t) sum_r*sum_r +c2) ; | |
| 134 | |
| 135 ssim_t1 = 256 - 256 * ssim_n1 / ssim_d1; | |
| 136 ssim_t2 = 256 - 256 * ssim_n2 / ssim_d2; | |
| 137 | |
| 138 ssim3 = 256 *ssim_t1 * ssim_t2; | |
| 139 if(ssim3 <0 ) | |
| 140 ssim3=0; | |
| 141 return (long)( ssim3 ); | |
| 142 } | |
| 143 | |
// We are using a 8x8 moving window with starting location of each 8x8 window
// on the 4x4 pixel grid. Such arrangement allows the windows to overlap
// block boundaries to penalize blocking artifacts.
double vp8_ssim2
(
    unsigned char *img1,
    unsigned char *img2,
    int stride_img1,
    int stride_img2,
    int width,
    int height
)
{
    int i, j;
    int samples = 0;
    double ssim_total = 0;

    // sample point start with each 4x4 location
    for (i = 0; i < height - 8; i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4)
    {
        for (j = 0; j < width - 8; j += 4)
        {
            double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2);
            ssim_total += v;
            samples++;
        }
    }

    // Degenerate frames (width or height <= 8) produce no windows; the
    // original divided 0/0 here and returned NaN. Treat an unsampled
    // frame as perfectly similar instead.
    if (samples == 0)
        return 1.0;

    ssim_total /= samples;
    return ssim_total;
}
| 174 double vp8_calc_ssim | |
| 175 ( | |
| 176 YV12_BUFFER_CONFIG *source, | |
| 177 YV12_BUFFER_CONFIG *dest, | |
| 178 int lumamask, | |
| 179 double *weight | |
| 180 ) | |
| 181 { | |
| 182 double a, b, c; | |
| 183 double ssimv; | |
| 184 | |
| 185 a = vp8_ssim2(source->y_buffer, dest->y_buffer, | |
| 186 source->y_stride, dest->y_stride, source->y_width, | |
| 187 source->y_height); | |
| 188 | |
| 189 b = vp8_ssim2(source->u_buffer, dest->u_buffer, | |
| 190 source->uv_stride, dest->uv_stride, source->uv_width, | |
| 191 source->uv_height); | |
| 192 | |
| 193 c = vp8_ssim2(source->v_buffer, dest->v_buffer, | |
| 194 source->uv_stride, dest->uv_stride, source->uv_width, | |
| 195 source->uv_height); | |
| 196 | |
| 197 ssimv = a * .8 + .1 * (b + c); | |
| 198 | |
| 199 *weight = 1; | |
| 200 | |
| 201 return ssimv; | |
| 202 } | |
| 203 | |
| 204 double vp8_calc_ssimg | |
| 205 ( | |
| 206 YV12_BUFFER_CONFIG *source, | |
| 207 YV12_BUFFER_CONFIG *dest, | |
| 208 double *ssim_y, | |
| 209 double *ssim_u, | |
| 210 double *ssim_v | |
| 211 ) | |
| 212 { | |
| 213 double ssim_all = 0; | |
| 214 double a, b, c; | |
| 215 | |
| 216 a = vp8_ssim2(source->y_buffer, dest->y_buffer, | |
| 217 source->y_stride, dest->y_stride, source->y_width, | |
| 218 source->y_height); | |
| 219 | |
| 220 b = vp8_ssim2(source->u_buffer, dest->u_buffer, | |
| 221 source->uv_stride, dest->uv_stride, source->uv_width, | |
| 222 source->uv_height); | |
| 223 | |
| 224 c = vp8_ssim2(source->v_buffer, dest->v_buffer, | |
| 225 source->uv_stride, dest->uv_stride, source->uv_width, | |
| 226 source->uv_height); | |
| 227 *ssim_y = a; | |
| 228 *ssim_u = b; | |
| 229 *ssim_v = c; | |
| 230 ssim_all = (a * 4 + b + c) /6; | |
| 231 | |
| 232 return ssim_all; | |
| 233 } | |
| OLD | NEW |