@@ -227,7 +227,7 @@ enum CambiTVIBisectFlag {
     CAMBI_TVI_BISECT_TOO_BIG
 };

-static FORCE_INLINE inline int clip(int value, int low, int high) {
+static FORCE_INLINE int clip(int value, int low, int high) {
     return value < low ? low : (value > high ? high : value);
 }
@@ -282,7 +282,7 @@ static int get_tvi_for_diff(int diff, double tvi_threshold, int bitdepth, VmafLu
     }
 }

-static FORCE_INLINE inline void adjust_window_size(uint16_t *window_size,
+static FORCE_INLINE void adjust_window_size(uint16_t *window_size,
                                                    unsigned input_width,
                                                    unsigned input_height)
 {
@@ -725,7 +725,7 @@ static void filter_mode(const VmafPicture *image, int width, int height, uint16_
     }
 }

-static FORCE_INLINE inline uint16_t ceil_log2(uint32_t num) {
+static FORCE_INLINE uint16_t ceil_log2(uint32_t num) {
     if (num == 0)
         return 0;
@@ -738,7 +738,7 @@ static FORCE_INLINE inline uint16_t ceil_log2(uint32_t num) {
     return shift;
 }

-static FORCE_INLINE inline uint16_t get_mask_index(unsigned input_width, unsigned input_height,
+static FORCE_INLINE uint16_t get_mask_index(unsigned input_width, unsigned input_height,
                                                    uint16_t filter_size) {
     uint32_t shifted_wh = (input_width >> 6) * (input_height >> 6);
     return (filter_size * filter_size + 3 * (ceil_log2(shifted_wh) - 11) - 1)>>1;
@@ -853,7 +853,7 @@ static float c_value_pixel(const uint16_t *histograms, uint16_t value, const int
     return c_value;
 }

-static FORCE_INLINE inline void update_histogram_subtract_edge(uint16_t *histograms, uint16_t *image, uint16_t *mask,
+static FORCE_INLINE void update_histogram_subtract_edge(uint16_t *histograms, uint16_t *image, uint16_t *mask,
                                                                int i, int j, int width, ptrdiff_t stride, uint16_t pad_size,
                                                                const uint16_t num_diffs, VmafRangeUpdater dec_range_callback) {
     uint16_t mask_val = mask[(i - pad_size - 1) * stride + j];
@@ -863,7 +863,7 @@ static FORCE_INLINE inline void update_histogram_subtract_edge(uint16_t *histogr
     }
 }

-static FORCE_INLINE inline void update_histogram_subtract(uint16_t *histograms, uint16_t *image, uint16_t *mask,
+static FORCE_INLINE void update_histogram_subtract(uint16_t *histograms, uint16_t *image, uint16_t *mask,
                                                           int i, int j, int width, ptrdiff_t stride, uint16_t pad_size,
                                                           const uint16_t num_diffs, VmafRangeUpdater dec_range_callback) {
     uint16_t mask_val = mask[(i - pad_size - 1) * stride + j];
@@ -873,7 +873,7 @@ static FORCE_INLINE inline void update_histogram_subtract(uint16_t *histograms,
     }
 }

-static FORCE_INLINE inline void update_histogram_add_edge(uint16_t *histograms, uint16_t *image, uint16_t *mask,
+static FORCE_INLINE void update_histogram_add_edge(uint16_t *histograms, uint16_t *image, uint16_t *mask,
                                                           int i, int j, int width, ptrdiff_t stride, uint16_t pad_size,
                                                           const uint16_t num_diffs, VmafRangeUpdater inc_range_callback) {
     uint16_t mask_val = mask[(i + pad_size) * stride + j];
@@ -883,7 +883,7 @@ static FORCE_INLINE inline void update_histogram_add_edge(uint16_t *histograms,
     }
 }

-static FORCE_INLINE inline void update_histogram_add(uint16_t *histograms, uint16_t *image, uint16_t *mask,
+static FORCE_INLINE void update_histogram_add(uint16_t *histograms, uint16_t *image, uint16_t *mask,
                                                      int i, int j, int width, ptrdiff_t stride, uint16_t pad_size,
                                                      const uint16_t num_diffs, VmafRangeUpdater inc_range_callback) {
     uint16_t mask_val = mask[(i + pad_size) * stride + j];
@@ -893,7 +893,7 @@ static FORCE_INLINE inline void update_histogram_add(uint16_t *histograms, uint1
     }
 }

-static FORCE_INLINE inline void update_histogram_add_edge_first_pass(uint16_t *histograms, uint16_t *image, uint16_t *mask,
+static FORCE_INLINE void update_histogram_add_edge_first_pass(uint16_t *histograms, uint16_t *image, uint16_t *mask,
                                                                      int i, int j, int width, ptrdiff_t stride, uint16_t pad_size,
                                                                      const uint16_t num_diffs, VmafRangeUpdater inc_range_callback) {
     uint16_t mask_val = mask[i * stride + j];
@@ -903,7 +903,7 @@ static FORCE_INLINE inline void update_histogram_add_edge_first_pass(uint16_t *h
     }
 }

-static FORCE_INLINE inline void update_histogram_add_first_pass(uint16_t *histograms, uint16_t *image, uint16_t *mask,
+static FORCE_INLINE void update_histogram_add_first_pass(uint16_t *histograms, uint16_t *image, uint16_t *mask,
                                                                 int i, int j, int width, ptrdiff_t stride, uint16_t pad_size,
                                                                 const uint16_t num_diffs, VmafRangeUpdater inc_range_callback) {
     uint16_t mask_val = mask[i * stride + j];
@@ -913,7 +913,7 @@ static FORCE_INLINE inline void update_histogram_add_first_pass(uint16_t *histog
     }
 }

-static FORCE_INLINE inline void calculate_c_values_row(float *c_values, uint16_t *histograms, uint16_t *image,
+static FORCE_INLINE void calculate_c_values_row(float *c_values, uint16_t *histograms, uint16_t *image,
                                                        uint16_t *mask, int row, int width, ptrdiff_t stride,
                                                        const uint16_t num_diffs, const uint16_t *tvi_for_diff,
                                                        const int *diff_weights, const int *all_diffs) {
@@ -1050,13 +1050,13 @@ static double spatial_pooling(float *c_values, double topk, unsigned width, unsi
     return average_topk_elements(c_values, topk_num_elements);
 }

-static FORCE_INLINE inline uint16_t get_pixels_in_window(uint16_t window_length) {
+static FORCE_INLINE uint16_t get_pixels_in_window(uint16_t window_length) {
     uint16_t odd_length = 2 * (window_length >> 1) + 1;
     return odd_length * odd_length;
 }

 // Inner product weighting scores for each scale
-static FORCE_INLINE inline double weight_scores_per_scale(double *scores_per_scale, uint16_t normalization) {
+static FORCE_INLINE double weight_scores_per_scale(double *scores_per_scale, uint16_t normalization) {
     double score = 0.0;
     for (unsigned scale = 0; scale < NUM_SCALES; scale++)
         score += (scores_per_scale[scale] * g_scale_weights[scale]);
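Note on the change: every hunk in this diff drops the extra `inline` keyword from declarations that already carry `FORCE_INLINE`. The sketch below shows why that keyword is redundant, assuming a typical definition of such a macro; the #if ladder and the file name are illustrative assumptions, not libvmaf's actual macros header.

/* sketch_force_inline.c -- illustrative only; the FORCE_INLINE definition
 * below is an assumption about how such macros are commonly spelled. */
#if defined(_MSC_VER)
  #define FORCE_INLINE __forceinline                      /* already implies inline */
#elif defined(__GNUC__) || defined(__clang__)
  #define FORCE_INLINE __attribute__((always_inline)) inline
#else
  #define FORCE_INLINE inline                             /* plain fallback */
#endif

/* Post-change form: the single 'inline' comes from the macro expansion. */
static FORCE_INLINE int clip(int value, int low, int high) {
    return value < low ? low : (value > high ? high : value);
}

int main(void) {
    return clip(42, 0, 10) == 10 ? 0 : 1;   /* 42 clamped to [0, 10] -> 10 */
}

With a macro like the one above, the old "FORCE_INLINE inline" spelling expanded to a doubled "inline inline". C99 and later allow a function specifier to appear more than once with the same meaning as a single occurrence, so removing the duplicate is a cleanup with no behavioral effect; it may also avoid duplicate-specifier diagnostics that some compilers emit.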