5
\$\begingroup\$

I want to begin by sincerely thanking the community for the invaluable feedback on my previous BRESort byte-sorting research. Your insights about enums, named constants, loop optimizations, and test harness design directly influenced the development of this more robust universal version.

BRESort (Bitwise Relationship Extraction) uses runtime pattern detection — including bit-entropy analysis, XOR pattern recognition, and component-wise entropy analysis — to automatically select a sorting algorithm for 32/64-bit integer and floating-point data.

Runtime pattern detection analyzes data characteristics like:

Sequential patterns (insertion sort for nearly-sorted data)

Byte structure (radix sort for small-range or structured bytes)

Random distribution (quicksort for truly random data)

Real-world distributions (GPS clustering, price power-laws, timestamp sequences)

The system automatically selects between insertion sort, quicksort, and radix sort based on these patterns.

Initial testing shows significant speedups on real-world patterns while maintaining performance safety.

I would appreciate your review and reality check on this approach.

bresort_research.h

/**
 * BRESort Research - Bitwise Relationship Extraction Sort
 * Intelligent Adaptive Sorting Engine for 32/64-bit & Floating-Point Data
 *
 * Advanced data pattern detection using bit entropy analysis,
 * XOR pattern recognition, and component-wise entropy analysis.
 * Universal intelligent algorithm selection across major data types.
 *
 * Research demonstrates significant speedup over traditional qsort
 * through data-aware algorithm selection.
 *
 * Author: Vladimir Kalinin
 * License: MIT
 */

#ifndef BRESORT_RESEARCH_H
#define BRESORT_RESEARCH_H

#include <stdint.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

// ============================================================================
// UTILITY FUNCTIONS
// ============================================================================

// Timing and calculation utilities
double get_time_ms(void);
double get_time_us(void);
double calculate_safe_speedup(double bresort_time, double qsort_time);
void print_timing_result(const char* label, double bresort_time, double qsort_time, int use_microseconds);

// ============================================================================
// STRATEGY ENUMERATION
// ============================================================================

// Sorting strategies the analysis engine can select.
// Only RADIX_SORT, QUICKSORT, INSERTION_SORT and FLOAT_OPTIMIZED have
// implementations in bresort_research.c; the others currently fall back
// to quicksort in the dispatchers.
typedef enum {
    STRATEGY_RADIX_SORT,        // byte-wise counting (radix) sort
    STRATEGY_QUICKSORT,         // median-of-three quicksort with insertion cutoff
    STRATEGY_MERGE_SORT,        // declared but not implemented here
    STRATEGY_INSERTION_SORT,    // small or nearly-sorted inputs
    STRATEGY_HYBRID_RADIX,      // experimental; currently falls back to quicksort
    STRATEGY_BITMASK_SORT,      // declared but not implemented here
    STRATEGY_FLOAT_OPTIMIZED    // float radix via order-preserving bit transform
} ResearchSortStrategy;


// ANALYSIS STRUCTURES
 

// Result of sampling-based analysis of a 32-bit array.
// Values are computed from a bounded sample (see SAMPLE_SIZE in the .c file),
// so min/max/range may under-represent the full array.
typedef struct {
    uint32_t min_val;              // smallest sampled value
    uint32_t max_val;              // largest sampled value
    uint32_t range;                // max_val - min_val (of the sample)
    float sorted_ratio;            // fraction of adjacent sampled pairs with a[i] >= a[i-1]
    float reverse_sorted_ratio;    // fraction of adjacent sampled pairs with a[i] <= a[i-1]
    int byte_patterns[4]; // 0=random, 1=fixed, 2=sequential
} QuickAnalysis;

// 64-bit analogue of QuickAnalysis; byte patterns are derived from
// per-byte bit entropy rather than first/last comparison.
typedef struct {
    uint64_t min_val;
    uint64_t max_val;
    uint64_t range;
    float sorted_ratio;
    int byte_patterns[8]; // 0=random, 1=fixed, 2=sequential
} QuickAnalysis64;

// Sampling-based analysis of a float array (sign, exponent structure,
// clustering).  value_range is max - min of the sample.
typedef struct {
    float min_val;
    float max_val;
    float value_range;
    float sorted_ratio;
    int fixed_sign; // 0=mixed, 1=all positive, -1=all negative
    int structured_exponent;       // 1 if exponent entropy is low OR data is clustered/small-range
} FloatAnalysis32;


// CORE RESEARCH INTERFACE


bool has_expensive_unsortedness(uint32_t arr[], size_t n);

// 32-bit integer sorting
void BRESort_research_32bit(uint32_t arr[], size_t n);
QuickAnalysis quick_analyze_32bit(const uint32_t arr[], int n);
ResearchSortStrategy choose_optimized_strategy(const QuickAnalysis* analysis, int n);

// 64-bit integer sorting
void BRESort_research_64bit(uint64_t arr[], size_t n);
QuickAnalysis64 quick_analyze_64bit(const uint64_t arr[], int n);
ResearchSortStrategy choose_64bit_strategy(const QuickAnalysis64* analysis, int n);

// 32-bit float sorting
void BRESort_research_float32(float arr[], size_t n);
FloatAnalysis32 quick_analyze_float32(const float arr[], int n);
ResearchSortStrategy choose_float32_strategy(const FloatAnalysis32* analysis, int n);


// OPTIMIZED ALGORITHM IMPLEMENTATIONS


// Core algorithms
void insertion_sort_32bit(uint32_t arr[], size_t n);
void insertion_sort_64bit(uint64_t arr[], size_t n);
void insertion_sort_float32(float arr[], size_t n);

void quick_sort_optimized_32bit(uint32_t arr[], size_t n);
void quick_sort_optimized_64bit(uint64_t arr[], size_t n);
void quick_sort_optimized_float32(float arr[], size_t n);

// Radix variants
void radix_sort_32bit(uint32_t arr[], size_t n);
void radix_sort_64bit(uint64_t arr[], int n);

// Hybrid algorithms
void hybrid_radix_32bit(uint32_t arr[], int n, const QuickAnalysis* analysis);


// TESTING & VALIDATION FRAMEWORK


// Data generators
void generate_sequential_32bit(uint32_t arr[], int n);
void generate_random_32bit(uint32_t arr[], int n);
void generate_timestamp_32bit(uint32_t arr[], int n);
void generate_small_range_32bit(uint32_t arr[], int n);

void generate_sequential_64bit(uint64_t arr[], int n);
void generate_random_64bit(uint64_t arr[], int n);
void generate_timestamp_64bit(uint64_t arr[], int n);

void generate_uniform_floats(float arr[], int n);
void generate_clustered_floats(float arr[], int n);
void generate_sorted_floats(float arr[], int n);
void generate_small_range_floats(float arr[], int n);

// Performance testing
void test_performance_32bit(void);
void test_performance_all_types(void);

// Analysis validation
void test_analysis_validation(void);

#ifdef __cplusplus
}
#endif

#endif /* BRESORT_RESEARCH_H */

bresort_research.c

#include "bresort_research.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <time.h>


// CONFIGURATION & THRESHOLDS (OPTIMIZED)


#define RADIX_SORT_THRESHOLD         1000             // NOTE(review): not referenced in the code shown here — confirm usage elsewhere
#define INSERTION_SORT_THRESHOLD     32               // at or below this size, always insertion-sort
#define QUICKSORT_THRESHOLD          50               // NOTE(review): not referenced in the code shown here — confirm usage elsewhere
#define SAMPLE_SIZE                  500              // For large arrays: analysis inspects only the first SAMPLE_SIZE elements

// 32-bit thresholds
#define MAX_RANGE_FOR_RADIX          (1 << 20)        // ~1 million values (the old comment's "16 million" was wrong)
#define HUGE_RANGE_THRESHOLD_32BIT   (1 << 28)        // 268 million values

// 64-bit thresholds
#define RADIX_RANGE_THRESHOLD_64BIT  (1ULL << 40)     // ~1.1 trillion values
#define HUGE_RANGE_THRESHOLD_64BIT   (1ULL << 50)     // ~1.1 quadrillion values

// Analysis thresholds
#define LOW_SORTED_RATIO_THRESHOLD   0.6f             // below this sorted_ratio the data is treated as unordered
#define STRUCTURED_BYTES_THRESHOLD   3                // NOTE(review): choose_optimized_strategy uses a literal 3, not this macro
#define HIGHLY_STRUCTURED_BYTES_64BIT 4
#define FLOAT_SMALL_RANGE_THRESHOLD  100.0f           // NOTE(review): not referenced in the code shown here — confirm usage


// UTILITY FUNCTIONS

void float_radix_sort(float arr[], size_t n);

// qsort comparator for uint32_t: returns -1, 0 or 1 without overflow.
static int compare_uint32(const void* a, const void* b) {
    const uint32_t lhs = *(const uint32_t*)a;
    const uint32_t rhs = *(const uint32_t*)b;

    if (lhs < rhs) return -1;
    if (lhs > rhs) return 1;
    return 0;
}

// Exchange two uint32_t values through pointers (safe when a == b).
static void swap_uint32(uint32_t* a, uint32_t* b) {
    const uint32_t saved = *a;

    *a = *b;
    *b = saved;
}

/**
 * Heuristic safety check: does a "nearly sorted" looking array actually
 * contain structure that makes insertion sort expensive?
 *
 * Samples up to 1000 evenly spaced adjacent pairs and reports true when it
 * finds (a) two or more large value jumps (block boundaries) or (b) more
 * than 10% of sampled pairs out of order.
 *
 * FIX: the original always took 1000 samples, so for n < 1000 the same
 * index was sampled repeatedly — a single large jump was then counted
 * multiple times and immediately tripped the ">= 2 jumps" exit.  The
 * sample count is now clamped to n and duplicate indices are skipped.
 * Indices use size_t throughout (the original mixed int and size_t).
 */
bool has_expensive_unsortedness(uint32_t arr[], size_t n) {
    if (n <= 1) return false;

    const uint32_t LARGE_JUMP_THRESHOLD = 500000000u; // named, was a magic number
    const size_t MAX_SAMPLES = 1000;
    const size_t samples = (n < MAX_SAMPLES) ? n : MAX_SAMPLES;

    // Pass 1: look for large discontinuities between adjacent elements.
    int large_jumps = 0;
    size_t prev_index = 0;
    for (size_t s = 0; s < samples; s++) {
        size_t i = (s * n) / samples;
        if (i == 0 || i == prev_index) continue; // skip first element and repeats
        prev_index = i;

        uint32_t diff = (arr[i] > arr[i-1]) ? (arr[i] - arr[i-1]) : (arr[i-1] - arr[i]);
        if (diff > LARGE_JUMP_THRESHOLD) {
            if (++large_jumps >= 2) {
                return true; // early exit: multiple distinct blocks
            }
        }
    }

    // Pass 2: count out-of-order adjacent pairs.
    int out_of_order = 0;
    prev_index = 0;
    for (size_t s = 0; s < samples; s++) {
        size_t i = (s * n) / samples;
        if (i == 0 || i == prev_index) continue;
        prev_index = i;

        if (arr[i] < arr[i-1]) {
            if (++out_of_order > (int)(samples / 10)) { // more than 10% out of order
                return true;
            }
        }
    }

    return large_jumps >= 2;
}

// Print one benchmark line, auto-selecting microsecond or millisecond
// display depending on magnitude.  `use_microseconds` says which unit the
// incoming times are expressed in.
void print_timing_result(const char* label, double bresort_time, double qsort_time, int use_microseconds) {
    const double speedup = calculate_safe_speedup(bresort_time, qsort_time);
    double b_us, q_us, b_ms, q_ms;
    int show_us;

    if (use_microseconds) {
        // Inputs are in us; show us unless both are >= 1 ms.
        show_us = (bresort_time < 1000 || qsort_time < 1000);
        b_us = bresort_time;
        q_us = qsort_time;
        b_ms = bresort_time / 1000.0;
        q_ms = qsort_time / 1000.0;
    } else {
        // Inputs are in ms; show us only for very small BRESort times.
        show_us = (bresort_time < 0.1);
        b_us = bresort_time * 1000.0;
        q_us = qsort_time * 1000.0;
        b_ms = bresort_time;
        q_ms = qsort_time;
    }

    if (show_us) {
        printf("   |-- %s: BRESort: %5.0f us, qsort: %5.0f us, Speedup: %5.2fx\n",
               label, b_us, q_us, speedup);
    } else {
        printf("   |-- %s: BRESort: %5.2f ms, qsort: %5.2f ms, Speedup: %5.2fx\n",
               label, b_ms, q_ms, speedup);
    }
}

 
// DATA GENERATORS


// Fill arr with strictly increasing values starting at 0x12345000.
void generate_sequential_32bit(uint32_t arr[], int n) {
    uint32_t value = 0x12345000;
    for (int i = 0; i < n; i++) {
        arr[i] = value;
        value++;
    }
}

/**
 * Fill arr with pseudo-random 32-bit values.
 *
 * FIX: the original computed `rand() ^ (rand() << 16)` in signed int
 * arithmetic; left-shifting a rand() result >= 1<<15 by 16 overflows int,
 * which is undefined behavior.  The shift is now done on uint32_t.
 */
void generate_random_32bit(uint32_t arr[], int n) {
    for (int i = 0; i < n; i++) {
        arr[i] = (uint32_t)rand() ^ ((uint32_t)rand() << 16);
    }
}

// Fill arr with timestamp-like values: fixed base plus an offset that
// cycles through 0..999.
void generate_timestamp_32bit(uint32_t arr[], int n) {
    const uint32_t base = 0x5F3A0000;
    int offset = 0;

    for (int i = 0; i < n; i++) {
        arr[i] = base + offset;
        offset = (offset + 1 == 1000) ? 0 : offset + 1;
    }
}

// Fill arr with values confined to a 1000-wide window at a random base
// below 2^20 (small-range data that favours radix sort).
void generate_small_range_32bit(uint32_t arr[], int n) {
    const uint32_t base = rand() % (1 << 20);
    int i = 0;

    while (i < n) {
        arr[i] = base + (rand() % 1000);
        i++;
    }
}


// FAST ANALYSIS ENGINE (OPTIMIZED)


/**
 * Ultra-fast analysis with minimal overhead.
 *
 * Inspects only the FIRST min(n, SAMPLE_SIZE) elements — a prefix, not a
 * spread sample — so min/max/range and the sortedness ratios describe that
 * prefix, and may under-represent the rest of the array.
 *
 * Returns a QuickAnalysis with min/max/range, the fraction of adjacent
 * pairs in non-decreasing (sorted_ratio) and non-increasing
 * (reverse_sorted_ratio) order, and a per-byte pattern guess.
 */
QuickAnalysis quick_analyze_32bit(const uint32_t arr[], int n) {
    // min_val starts at UINT32_MAX so any element replaces it.
    QuickAnalysis analysis = {0xFFFFFFFF, 0, 0, 0.0f, 0.0f, {0}};

    if (n <= 1) {
        if (n == 1) analysis.min_val = analysis.max_val = arr[0];
        return analysis;
    }

    int effective_n = n > SAMPLE_SIZE ? SAMPLE_SIZE : n;

    int sorted_count = 0;
    int reverse_sorted_count = 0;
    uint32_t prev = arr[0];
    analysis.min_val = arr[0];
    analysis.max_val = arr[0];

    // Single pass for min/max and sortedness
    for (int i = 1; i < effective_n; i++) {
        uint32_t current = arr[i];

        if (current < analysis.min_val) analysis.min_val = current;
        if (current > analysis.max_val) analysis.max_val = current;
        // Equal neighbours count toward BOTH ratios, so the two ratios can
        // sum to more than 1 on data with many duplicates.
        if (current >= prev) sorted_count++;
        if (current <= prev) reverse_sorted_count++;

        prev = current;
    }

    analysis.range = analysis.max_val - analysis.min_val;
    analysis.sorted_ratio = (float)sorted_count / (effective_n - 1);
    analysis.reverse_sorted_ratio = (float)reverse_sorted_count / (effective_n - 1);

    // Quick byte pattern detection (much faster than full entropy)
    // NOTE(review): this classifies each byte lane from ONLY the first and
    // last sampled elements — cheap, but easily fooled; confirm acceptable.
    if (effective_n > 10) {
        uint32_t first = arr[0];
        uint32_t last = arr[effective_n - 1];

        for (int byte = 0; byte < 4; byte++) {
            uint8_t first_byte = (first >> (byte * 8)) & 0xFF;
            uint8_t last_byte = (last >> (byte * 8)) & 0xFF;

            if (first_byte == last_byte) {
                analysis.byte_patterns[byte] = 1; // Fixed byte
            } else if (abs((int)first_byte - (int)last_byte) < effective_n / 10) {
                analysis.byte_patterns[byte] = 2; // Sequential byte
            } else {
                analysis.byte_patterns[byte] = 0; // Random byte
            }
        }
    }

    return analysis;
}


// CORE SORTING ALGORITHMS


/**
 * Insertion sort for small arrays
 */
/**
 * Ascending in-place insertion sort — intended for small arrays.
 */
void insertion_sort_32bit(uint32_t arr[], size_t n) {
    if (n <= 1) return;

    for (size_t i = 1; i < n; i++) {
        const uint32_t value = arr[i];
        size_t pos = i;

        // Shift larger elements right until value's slot is found.
        for (; pos > 0 && arr[pos - 1] > value; pos--) {
            arr[pos] = arr[pos - 1];
        }
        arr[pos] = value;
    }
}


// INSERTION SORT


// NOTE(review): this is byte-for-byte the same algorithm as
// insertion_sort_32bit above — consider deleting one of the two.
void insertion_sort_32bit_OPTIMIZED(uint32_t arr[], size_t n) {
    if (n <= 1) return;

    size_t i = 1;
    while (i < n) {
        uint32_t pending = arr[i];
        size_t slot = i;

        while (slot > 0 && arr[slot - 1] > pending) {
            arr[slot] = arr[slot - 1];
            slot--;
        }
        arr[slot] = pending;
        i++;
    }
}

/**
 * LSD radix sort for 32-bit unsigned keys: four stable counting-sort
 * passes, one per byte, from the LEAST significant byte up.
 *
 * BUG FIX: the original iterated from byte 3 down to byte 0, making the
 * final (deciding) pass the least significant byte, so the output was
 * ordered by (byte0, byte1, byte2, byte3) — e.g. {0x0100, 0x00FF} stayed
 * unsorted.  LSD radix is only correct when each stable pass refines the
 * ordering produced by the previous, less significant pass.
 *
 * Also: counting loops use size_t (the original used int against a size_t
 * bound).  On malloc failure the array is left unchanged — the void
 * interface gives the caller no error signal.
 */
void radix_sort_32bit(uint32_t arr[], size_t n) {
    if (n <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_32bit(arr, n);
        return;
    }

    uint32_t* buffer = (uint32_t*)malloc(n * sizeof(uint32_t));
    if (!buffer) return;

    for (int byte = 0; byte < 4; byte++) {
        size_t count[256] = {0};

        // Count occurrences of each byte value.
        for (size_t i = 0; i < n; i++) {
            count[(arr[i] >> (byte * 8)) & 0xFF]++;
        }

        // Prefix sums: count[b] becomes one past the last slot for b.
        for (int b = 1; b < 256; b++) {
            count[b] += count[b - 1];
        }

        // Stable scatter: walk backwards so equal keys keep their order.
        for (size_t i = n; i-- > 0; ) {
            uint8_t byte_val = (arr[i] >> (byte * 8)) & 0xFF;
            buffer[--count[byte_val]] = arr[i];
        }

        memcpy(arr, buffer, n * sizeof(uint32_t));
    }

    free(buffer);
}

/**
 * Hybrid radix - only on high bytes when they're structured
 */
    //didn't pass the test, removed for inefficiency, needs more work

/**
 * Recursive quicksort over arr[left..right] (inclusive bounds).
 *
 * Partitions of at most INSERTION_SORT_THRESHOLD elements are handed to
 * insertion sort.  The pivot is the median of arr[left], arr[mid],
 * arr[right] (those three are put in order in place first), and the
 * partition is a Hoare-style two-index sweep.
 */
static void quick_sort_recursive(uint32_t arr[], int left, int right) {
    if (right - left <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_32bit(arr + left, right - left + 1);
        return;
    }

    // Median-of-three pivot
    int mid = left + (right - left) / 2;
    if (arr[mid] < arr[left]) swap_uint32(&arr[left], &arr[mid]);
    if (arr[right] < arr[left]) swap_uint32(&arr[left], &arr[right]);
    if (arr[right] < arr[mid]) swap_uint32(&arr[mid], &arr[right]);

    // Pivot captured by value: remains valid while elements are swapped.
    uint32_t pivot = arr[mid];
    int i = left, j = right;

    while (i <= j) {
        while (arr[i] < pivot) i++;
        while (arr[j] > pivot) j--;
        if (i <= j) {
            swap_uint32(&arr[i], &arr[j]);
            i++;
            j--;
        }
    }

    // After the sweep j < i; elements between them are already placed.
    if (left < j) quick_sort_recursive(arr, left, j);
    if (i < right) quick_sort_recursive(arr, i, right);
}

// Public quicksort entry point: guard trivial sizes, then recurse.
void quick_sort_optimized_32bit(uint32_t arr[], size_t n) {
    if (n < 2) return;
    quick_sort_recursive(arr, 0, (int)n - 1);
}


// STRATEGY DECISION ENGINE


// Map a 32-bit analysis result to a sorting strategy.
ResearchSortStrategy choose_optimized_strategy(const QuickAnalysis* analysis, int n) {
    // Nearly sorted: insertion sort finishes in close to linear time.
    if (analysis->sorted_ratio > 0.95f) {
        return STRATEGY_INSERTION_SORT;
    }

    // Nearly reverse-sorted: small inputs still suit insertion sort.
    if (analysis->reverse_sorted_ratio > 0.95f) {
        return (n <= 1000) ? STRATEGY_INSERTION_SORT : STRATEGY_QUICKSORT;
    }

    // Huge value range with little pre-existing order looks random.
    if (analysis->range > (HUGE_RANGE_THRESHOLD_32BIT) &&
        analysis->sorted_ratio < (LOW_SORTED_RATIO_THRESHOLD)) {
        return STRATEGY_QUICKSORT;
    }

    // Compact value range: counting-based radix excels.
    if (analysis->range < MAX_RANGE_FOR_RADIX) {
        return STRATEGY_RADIX_SORT;
    }

    // Count byte lanes that look fixed or sequential.
    int structured_bytes = 0;
    for (int byte = 0; byte < 4; byte++) {
        structured_bytes += (analysis->byte_patterns[byte] != 0);
    }

    // Hybrid radix only when structure is near-certain.
    if (structured_bytes >= 3) {
        return STRATEGY_HYBRID_RADIX;
    }

    return STRATEGY_QUICKSORT;
}

// ============================================================================
// MAIN INTERFACE
// ============================================================================

// Top-level 32-bit sort: analyse the data, pick a strategy, dispatch.
void BRESort_research_32bit(uint32_t arr[], size_t n) {
    if (n <= 1) return;

    // Tiny inputs skip analysis entirely.
    if (n <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_32bit(arr, n);
        return;
    }

    QuickAnalysis analysis = quick_analyze_32bit(arr, (int)n);
    ResearchSortStrategy strategy = choose_optimized_strategy(&analysis, (int)n);

    // Safety net: prefix sampling can mislabel data with scattered large
    // jumps as "nearly sorted"; fall back to quicksort in that case.
    if (strategy == STRATEGY_INSERTION_SORT && has_expensive_unsortedness(arr, n)) {
        quick_sort_optimized_32bit(arr, n);
        return;
    }

    if (strategy == STRATEGY_INSERTION_SORT) {
        insertion_sort_32bit(arr, n);
    } else if (strategy == STRATEGY_RADIX_SORT) {
        radix_sort_32bit(arr, n);
    } else {
        // QUICKSORT plus every not-yet-implemented strategy.
        quick_sort_optimized_32bit(arr, n);
    }
}

// Top-level float sort: analyse, pick a strategy, dispatch.
void BRESort_research_float32(float arr[], size_t n) {
    if (n <= 1) return;

    // Tiny inputs skip analysis entirely.
    if (n <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_float32(arr, n);
        return;
    }

    FloatAnalysis32 analysis = quick_analyze_float32(arr, (int)n);

    switch (choose_float32_strategy(&analysis, (int)n)) {
        case STRATEGY_INSERTION_SORT:
            insertion_sort_float32(arr, n);
            break;
        case STRATEGY_FLOAT_OPTIMIZED:
            float_radix_sort(arr, n);
            break;
        default:
            // QUICKSORT, RADIX (no float implementation yet) and the rest.
            quick_sort_optimized_float32(arr, n);
            break;
    }
}



// 64-BIT SUPPORT


// Utility functions for 64-bit


// ENTROPY CALCULATION


float calculate_entropy(int ones, int total) {
    if (ones == 0 || ones == total) return 0.0f;
    float p = (float)ones / total;
    return -p * logf(p) / logf(2.0f) - (1.0f - p) * logf(1.0f - p) / logf(2.0f);
}

// qsort comparator for uint64_t: -1, 0 or 1 without overflow.
static int compare_uint64(const void* a, const void* b) {
    const uint64_t lhs = *(const uint64_t*)a;
    const uint64_t rhs = *(const uint64_t*)b;

    if (lhs < rhs) return -1;
    if (lhs > rhs) return 1;
    return 0;
}

// Exchange two uint64_t values through pointers (safe when a == b).
static void swap_uint64(uint64_t* a, uint64_t* b) {
    const uint64_t saved = *a;

    *a = *b;
    *b = saved;
}

// 64-bit data generators

// Fill arr with strictly increasing values from a fixed 64-bit base.
void generate_sequential_64bit(uint64_t arr[], int n) {
    const uint64_t base = 0x1234567800000000ULL;

    for (int i = 0; i < n; i++) {
        arr[i] = base + (uint64_t)i;
    }
}

// Fill arr with pseudo-random 64-bit values built from two rand() calls.
// NOTE(review): rand() typically yields only 31 bits, so bit 31 and the
// top bit of the high half are never set — confirm that is acceptable.
void generate_random_64bit(uint64_t arr[], int n) {
    int i = 0;
    while (i < n) {
        arr[i] = ((uint64_t)rand() << 32) | rand();
        i++;
    }
}

/**
 * Fill arr with timestamp-like values: a fixed epoch plus a 1000-tick
 * stride per element.
 *
 * FIX: the stride is computed in 64-bit arithmetic.  The original
 * `i * 1000` was a signed int product, which overflows (undefined
 * behavior) once i exceeds roughly 2.1 million.
 */
void generate_timestamp_64bit(uint64_t arr[], int n) {
    const uint64_t base = 0x5F3A2B0100000000ULL;
    for (int i = 0; i < n; i++) arr[i] = base + (uint64_t)i * 1000u;
}

// 64-bit analysis WITH ENTROPY

/**
 * Sampling analysis of a 64-bit array: min/max/range, sortedness ratio,
 * and a per-byte pattern classification driven by bit entropy.
 *
 * Only the FIRST min(n, SAMPLE_SIZE) elements are inspected (a prefix,
 * not a spread sample), so the results may under-represent the rest of
 * the array.
 */
QuickAnalysis64 quick_analyze_64bit(const uint64_t arr[], int n) {
    QuickAnalysis64 analysis = {UINT64_MAX, 0, 0, 0.0f, {0}};

    if (n <= 1) {
        if (n == 1) analysis.min_val = analysis.max_val = arr[0];
        return analysis;
    }

    int sample_size = (n > SAMPLE_SIZE) ? SAMPLE_SIZE : n;
    // NOTE(review): bit_ones is filled below but never read afterwards —
    // the per-byte loop recomputes its own counts.  Dead work; confirm
    // whether whole-word entropy was meant to be used.
    int bit_ones[64] = {0};
    int sorted_count = 0;
    uint64_t prev = arr[0];
    analysis.min_val = arr[0];
    analysis.max_val = arr[0];

    // Single pass for min/max, sortedness, and bit counting
    for (int i = 0; i < sample_size; i++) {
        uint64_t value = arr[i];

        if (value < analysis.min_val) analysis.min_val = value;
        if (value > analysis.max_val) analysis.max_val = value;
        if (i > 0 && value >= prev) sorted_count++;
        prev = value;

        // Count ones for each bit position (ENTROPY ANALYSIS!)
        for (int bit = 0; bit < 64; bit++) {
            if (value & (1ULL << bit)) bit_ones[bit]++;
        }
    }

    analysis.range = analysis.max_val - analysis.min_val;
    analysis.sorted_ratio = (float)sorted_count / (sample_size - 1);

    // Classify each byte lane by the mean entropy of its 8 bits.
    for (int byte_idx = 0; byte_idx < 8; byte_idx++) {
        int byte_ones[8] = {0};

        // Re-analyze to get byte-level patterns
        for (int i = 0; i < sample_size; i++) {
            uint8_t byte_val = (arr[i] >> (byte_idx * 8)) & 0xFF;
            for (int bit = 0; bit < 8; bit++) {
                if (byte_val & (1 << bit)) byte_ones[bit]++;
            }
        }

        // Mean binary entropy across the byte's 8 bit positions.
        float byte_entropy = 0.0f;
        for (int bit = 0; bit < 8; bit++) {
            byte_entropy += calculate_entropy(byte_ones[bit], sample_size);
        }
        byte_entropy /= 8.0f;

        // Low entropy => the byte barely varies across the sample.
        if (byte_entropy < 0.05f) {
            analysis.byte_patterns[byte_idx] = 1; // Fixed byte
        } else if (byte_entropy < 0.2f) {
            analysis.byte_patterns[byte_idx] = 2; // Structured byte
        } else {
            analysis.byte_patterns[byte_idx] = 0; // Random byte
        }
    }

    return analysis;
}

// 64-bit sorting algorithms
    /**
 * Ascending in-place insertion sort for uint64_t, intended for small arrays.
 *
 * FIX: indices are size_t throughout, matching insertion_sort_32bit.  The
 * original used `int j = i - 1` against a size_t loop bound, which mixes
 * signedness and breaks for arrays with more than INT_MAX elements.
 * Also adds the n <= 1 early-out the 32-bit version already had.
 */
void insertion_sort_64bit(uint64_t arr[], size_t n) {
    if (n <= 1) return;

    for (size_t i = 1; i < n; i++) {
        uint64_t key = arr[i];
        size_t j = i;

        while (j > 0 && arr[j - 1] > key) {
            arr[j] = arr[j - 1];
            j--;
        }
        arr[j] = key;
    }
}

/**
 * LSD radix sort for 64-bit unsigned keys: eight stable counting-sort
 * passes, one per byte, from the LEAST significant byte up.
 *
 * BUG FIX: the original iterated from byte 7 down to byte 0, so the final
 * (deciding) pass was the least significant byte and the output was not
 * sorted numerically (same defect as the 32-bit version).  LSD radix
 * requires least-significant-first pass order with stable passes.
 *
 * On malloc failure the array is left unchanged — the void interface
 * gives the caller no error signal.
 */
void radix_sort_64bit(uint64_t arr[], int n) {
    if (n <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_64bit(arr, n);
        return;
    }

    uint64_t* buffer = (uint64_t*)malloc(n * sizeof(uint64_t));
    if (!buffer) return;

    for (int byte = 0; byte < 8; byte++) {
        int count[256] = {0};

        // Count occurrences of each byte value.
        for (int i = 0; i < n; i++) {
            count[(arr[i] >> (byte * 8)) & 0xFF]++;
        }

        // Prefix sums: count[b] becomes one past the last slot for b.
        for (int b = 1; b < 256; b++) {
            count[b] += count[b - 1];
        }

        // Stable scatter: walk backwards so equal keys keep their order.
        for (int i = n - 1; i >= 0; i--) {
            uint8_t byte_val = (arr[i] >> (byte * 8)) & 0xFF;
            buffer[--count[byte_val]] = arr[i];
        }

        memcpy(arr, buffer, n * sizeof(uint64_t));
    }

    free(buffer);
}

/**
 * Recursive quicksort over arr[left..right] (inclusive bounds).
 * Mirrors quick_sort_recursive (32-bit): insertion-sort cutoff,
 * median-of-three pivot, Hoare-style two-index partition.
 */
static void quick_sort_64bit_recursive(uint64_t arr[], int left, int right) {
    if (right - left <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_64bit(arr + left, right - left + 1);
        return;
    }

    // Median-of-three: order arr[left], arr[mid], arr[right] in place.
    int mid = left + (right - left) / 2;
    if (arr[mid] < arr[left]) swap_uint64(&arr[left], &arr[mid]);
    if (arr[right] < arr[left]) swap_uint64(&arr[left], &arr[right]);
    if (arr[right] < arr[mid]) swap_uint64(&arr[mid], &arr[right]);

    // Pivot captured by value: remains valid while elements are swapped.
    uint64_t pivot = arr[mid];
    int i = left, j = right;

    while (i <= j) {
        while (arr[i] < pivot) i++;
        while (arr[j] > pivot) j--;
        if (i <= j) {
            swap_uint64(&arr[i], &arr[j]);
            i++;
            j--;
        }
    }

    // After the sweep j < i; elements between them are already placed.
    if (left < j) quick_sort_64bit_recursive(arr, left, j);
    if (i < right) quick_sort_64bit_recursive(arr, i, right);
}

// Public 64-bit quicksort entry point: guard trivial sizes, then recurse.
void quick_sort_optimized_64bit(uint64_t arr[], size_t n) {
    if (n < 2) return;
    quick_sort_64bit_recursive(arr, 0, (int)n - 1);
}

// 64-bit strategy selection
/**
 * Map a 64-bit analysis result to a sorting strategy.
 *
 * FIX: the original ended with
 *     if (huge range && low sorted ratio) return STRATEGY_QUICKSORT;
 *     return STRATEGY_QUICKSORT;
 * — a dead conditional returning the same value as the fall-through.
 * The redundant branch is removed; behavior is unchanged.
 */
ResearchSortStrategy choose_64bit_strategy(const QuickAnalysis64* analysis, int n) {
    if (n <= INSERTION_SORT_THRESHOLD) return STRATEGY_INSERTION_SORT;
    if (analysis->sorted_ratio > 0.95f) return STRATEGY_INSERTION_SORT;

    // Count byte lanes classified as fixed or structured.
    int structured_bytes = 0;
    for (int i = 0; i < 8; i++) {
        if (analysis->byte_patterns[i] != 0) structured_bytes++;
    }

    // For 64-bit keys, be more aggressive with radix due to larger data.
    if (analysis->range < RADIX_RANGE_THRESHOLD_64BIT) {
        return STRATEGY_RADIX_SORT;
    }

    // Highly structured bytes: full radix still wins on 64-bit keys.
    if (structured_bytes > HIGHLY_STRUCTURED_BYTES_64BIT) {
        return STRATEGY_RADIX_SORT;
    }

    return STRATEGY_QUICKSORT;
}

// Top-level 64-bit sort: analyse the data, pick a strategy, dispatch.
void BRESort_research_64bit(uint64_t arr[], size_t n) {
    if (n <= 1) return;

    // Tiny inputs skip analysis entirely.
    if (n <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_64bit(arr, n);
        return;
    }

    QuickAnalysis64 analysis = quick_analyze_64bit(arr, n);

    switch (choose_64bit_strategy(&analysis, n)) {
        case STRATEGY_RADIX_SORT:
            radix_sort_64bit(arr, n);
            break;
        case STRATEGY_INSERTION_SORT:
            insertion_sort_64bit(arr, n);
            break;
        default:
            // QUICKSORT plus every not-yet-implemented strategy.
            quick_sort_optimized_64bit(arr, n);
            break;
    }
}


// FLOAT32 SUPPORT

// Utility functions for float32

// qsort comparator for float: -1, 0 or 1.
// NOTE(review): NaN compares neither < nor >, so NaNs yield 0 — confirm.
static int compare_float32(const void* a, const void* b) {
    const float lhs = *(const float*)a;
    const float rhs = *(const float*)b;

    if (lhs < rhs) return -1;
    if (lhs > rhs) return 1;
    return 0;
}

// Exchange two floats through pointers (safe when a == b).
static void swap_float32(float* a, float* b) {
    const float saved = *a;

    *a = *b;
    *b = saved;
}

// Float32 data generators

// Fill arr with uniform pseudo-random floats in [-1000, 1000].
void generate_uniform_floats(float arr[], int n) {
    for (int i = 0; i < n; i++) {
        const float unit = (float)rand() / RAND_MAX;  // in [0, 1]
        arr[i] = unit * 2000.0f - 1000.0f;
    }
}

// Fill arr with values clustered (+/- 5) around three centers, cycling
// through the centers element by element.
void generate_clustered_floats(float arr[], int n) {
    const float centers[] = {-50.0f, 0.0f, 50.0f};

    for (int i = 0; i < n; i++) {
        const float noise = (float)rand() / RAND_MAX * 10.0f - 5.0f;
        arr[i] = centers[i % 3] + noise;
    }
}

// Fill arr with an already-sorted ramp: 0.0, 0.1, 0.2, ...
void generate_sorted_floats(float arr[], int n) {
    for (int i = 0; i < n; i++) {
        arr[i] = 0.1f * (float)i;
    }
}

// Fill arr with floats packed into the narrow window [10.0, 10.1].
void generate_small_range_floats(float arr[], int n) {
    const float base = 10.0f;
    int i = 0;

    while (i < n) {
        arr[i] = base + (float)rand() / RAND_MAX * 0.1f;
        i++;
    }
}

// Float32 analysis WITH ENTROPY

/**
 * Sampling analysis of a float array: min/max/range, sortedness, sign
 * uniformity, exponent entropy and a cheap clustering check.
 *
 * Only the FIRST min(n, SAMPLE_SIZE) elements are inspected.
 * NOTE(review): for n == 1 this returns an all-zero struct (min/max not
 * set from arr[0]) — inconsistent with the integer analyzers; confirm.
 */
FloatAnalysis32 quick_analyze_float32(const float arr[], int n) {
    FloatAnalysis32 analysis = {0};

    if (n <= 1) return analysis;

    int sample_size = (n > SAMPLE_SIZE) ? SAMPLE_SIZE : n;
    // NOTE(review): sign_ones and mantissa_ones are accumulated below but
    // never read afterwards (sign uses positive_count, mantissa entropy is
    // never computed) — dead work; confirm intent.
    int sign_ones = 0;
    int exponent_ones[8] = {0};
    int mantissa_ones[23] = {0};
    int sorted_count = 0;
    int positive_count = 0;
    float prev = arr[0];
    analysis.min_val = arr[0];
    analysis.max_val = arr[0];

    // Single pass analysis
    for (int i = 0; i < sample_size; i++) {
        float value = arr[i];
        // NOTE(review): pointer-cast type punning violates strict aliasing;
        // prefer memcpy(&int_bits, &value, sizeof int_bits).
        uint32_t int_bits = *(uint32_t*)&value;

        // Min/max and sortedness
        if (value < analysis.min_val) analysis.min_val = value;
        if (value > analysis.max_val) analysis.max_val = value;
        if (value >= 0) positive_count++;
        if (i > 0 && value >= prev) sorted_count++;
        prev = value;

        // Sign bit
        if (int_bits & 0x80000000) sign_ones++;

        // Exponent bits (8 bits)
        uint8_t exponent = (int_bits >> 23) & 0xFF;
        for (int bit = 0; bit < 8; bit++) {
            if (exponent & (1 << bit)) exponent_ones[bit]++;
        }

        // Mantissa bits (23 bits)
        uint32_t mantissa = int_bits & 0x7FFFFF;
        for (int bit = 0; bit < 23; bit++) {
            if (mantissa & (1 << bit)) mantissa_ones[bit]++;
        }
    }

    analysis.value_range = analysis.max_val - analysis.min_val;
    analysis.sorted_ratio = (float)sorted_count / (sample_size - 1);

    // Simple sign analysis
    if (positive_count == sample_size) analysis.fixed_sign = 1;
    else if (positive_count == 0) analysis.fixed_sign = -1;
    else analysis.fixed_sign = 0;

    // Mean binary entropy across the 8 exponent bit positions.
    float exponent_entropy = 0.0f;
    for (int i = 0; i < 8; i++) {
        exponent_entropy += calculate_entropy(exponent_ones[i], sample_size);
    }
    exponent_entropy /= 8.0f;

    // Low exponent entropy => values share a narrow magnitude band.
    analysis.structured_exponent = (exponent_entropy < 0.3f);

    // ============================================================================
    // CLUSTERING DETECTION
    // ============================================================================

    // Greedy clustering over (at most) the first 50 elements: a value
    // within `tolerance` of an already-seen representative joins that
    // cluster, otherwise it becomes a new representative.
    int unique_count = 0;
    float unique_values[50];
    int clustering_sample_size = (sample_size < 50) ? sample_size : 50;

    // Tolerance scales with the observed value range.
    float tolerance;
    if (analysis.value_range > 10.0f) {
        tolerance = analysis.value_range * 0.02f; // 2% for large ranges
    } else if (analysis.value_range > 1.0f) {
        tolerance = analysis.value_range * 0.05f; // 5% for medium ranges
    } else {
        tolerance = 0.05f; // Fixed for very small ranges
    }

    // Minimum tolerance
    if (tolerance < 0.1f && analysis.value_range > 1.0f) {
        tolerance = 0.1f;
    }

    // Count unique clusters
    for (int i = 0; i < clustering_sample_size; i++) {
        int is_unique = 1;
        for (int j = 0; j < unique_count; j++) {
            if (fabsf(arr[i] - unique_values[j]) <= tolerance) {
                is_unique = 0;
                break;
            }
        }
        if (is_unique && unique_count < 50) {
            unique_values[unique_count++] = arr[i];
        }
    }

    float clustering_ratio = (float)unique_count / clustering_sample_size;

    // Mark as structured if clustered or small range
    if (clustering_ratio < 0.3f ||
        (unique_count <= 5 && clustering_sample_size >= 20) ||
        analysis.value_range < 50.0f) {
        analysis.structured_exponent = 1;
    }

    return analysis;
}

// Float32 sorting algorithms
    /**
 * Ascending in-place insertion sort for floats, intended for small arrays.
 *
 * FIX: indices are size_t throughout, matching insertion_sort_32bit.  The
 * original compared a signed int counter against the size_t bound
 * (implicit signed/unsigned conversion) and used `int j`, which breaks
 * for arrays with more than INT_MAX elements.  Adds the n <= 1 early-out.
 */
void insertion_sort_float32(float arr[], size_t n) {
    if (n <= 1) return;

    for (size_t i = 1; i < n; i++) {
        float key = arr[i];
        size_t j = i;

        while (j > 0 && arr[j - 1] > key) {
            arr[j] = arr[j - 1];
            j--;
        }
        arr[j] = key;
    }
}

/**
 * Recursive quicksort over arr[left..right] (inclusive bounds).
 * Mirrors quick_sort_recursive (32-bit): insertion-sort cutoff,
 * median-of-three pivot, Hoare-style two-index partition.
 * NOTE(review): behavior with NaN inputs is not defined here — NaN
 * comparisons are always false, which can derail the partition; confirm
 * callers never pass NaN.
 */
static void quick_sort_float32_recursive(float arr[], int left, int right) {
    if (right - left <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_float32(arr + left, right - left + 1);
        return;
    }

    // Median-of-three: order arr[left], arr[mid], arr[right] in place.
    int mid = left + (right - left) / 2;
    if (arr[mid] < arr[left]) swap_float32(&arr[left], &arr[mid]);
    if (arr[right] < arr[left]) swap_float32(&arr[left], &arr[right]);
    if (arr[right] < arr[mid]) swap_float32(&arr[mid], &arr[right]);

    // Pivot captured by value: remains valid while elements are swapped.
    float pivot = arr[mid];
    int i = left, j = right;

    while (i <= j) {
        while (arr[i] < pivot) i++;
        while (arr[j] > pivot) j--;
        if (i <= j) {
            swap_float32(&arr[i], &arr[j]);
            i++;
            j--;
        }
    }

    // After the sweep j < i; elements between them are already placed.
    if (left < j) quick_sort_float32_recursive(arr, left, j);
    if (i < right) quick_sort_float32_recursive(arr, i, right);
}

// Public float quicksort entry point: guard trivial sizes, then recurse.
void quick_sort_optimized_float32(float arr[], size_t n) {
    if (n < 2) return;
    quick_sort_float32_recursive(arr, 0, (int)n - 1);
}

// FLOAT RADIX SORT HERE
/**
 * Sort floats by radix-sorting an order-preserving integer image of each
 * value.  The standard IEEE-754 key transform is:
 *   negative (sign bit set): flip ALL bits   -> keys in [0x00000000, 0x7FFFFFFF]
 *   positive / +0           : set sign bit   -> keys in [0x80000000, 0xFFFFFFFF]
 * so every negative key is below every positive key and ordering within
 * each half matches numeric order.
 *
 * BUG FIX: the original used `0x80000000 - bits` for negatives, which
 * (mod 2^32) lands in the SAME range as the positive keys, interleaving
 * negatives after positives — e.g. -1.0f sorted after +1.0f.
 *
 * Also replaces the strict-aliasing pointer casts with memcpy.
 * NOTE(review): NaN ordering is unspecified by this transform — confirm
 * callers never pass NaN.  On malloc failure the array is left unchanged.
 */
void float_radix_sort(float arr[], size_t n) {
    if (n <= INSERTION_SORT_THRESHOLD) {
        insertion_sort_float32(arr, n);
        return;
    }

    uint32_t* keys = (uint32_t*)malloc(n * sizeof(uint32_t));
    if (!keys) return;

    // Forward transform: float bits -> monotonically ordered unsigned key.
    for (size_t i = 0; i < n; i++) {
        uint32_t bits;
        memcpy(&bits, &arr[i], sizeof bits);
        keys[i] = (bits & 0x80000000u) ? ~bits : (bits | 0x80000000u);
    }

    radix_sort_32bit(keys, n);

    // Inverse transform: a set sign bit in the key marks a non-negative
    // original (the forward map cleared it for negatives via ~bits).
    for (size_t i = 0; i < n; i++) {
        uint32_t key = keys[i];
        uint32_t bits = (key & 0x80000000u) ? (key ^ 0x80000000u) : ~key;
        memcpy(&arr[i], &bits, sizeof bits);
    }

    free(keys);
}

// Float32 strategy selection

/**
 * Select a sorting strategy for float32 data from its quick analysis.
 *
 * analysis - precomputed FloatAnalysis32 (structured_exponent flag,
 *            value_range, sorted_ratio); must be non-NULL.
 * n        - element count.
 *
 * Returns STRATEGY_INSERTION_SORT for tiny or nearly-sorted inputs,
 * STRATEGY_FLOAT_OPTIMIZED for structured data in a small value range,
 * and STRATEGY_QUICKSORT otherwise.
 *
 * NOTE(review): the printf calls below are debug output hard-wired into a
 * selection routine; the two early returns also bypass that output, so the
 * trace is incomplete for small/nearly-sorted inputs. Consider a debug flag.
 */
ResearchSortStrategy choose_float32_strategy(const FloatAnalysis32* analysis, int n) {
    // Tiny or almost-sorted input: insertion sort wins regardless of pattern.
    if (n <= INSERTION_SORT_THRESHOLD) return STRATEGY_INSERTION_SORT;
    if (analysis->sorted_ratio > 0.95f) return STRATEGY_INSERTION_SORT;

    // DEBUG: Print analysis info
    printf("  [FLOAT ANALYSIS] n=%d, structured=%d, range=%.2f, sorted_ratio=%.3f -> ",
           n, analysis->structured_exponent, analysis->value_range, analysis->sorted_ratio);

    // For UNSTRUCTURED data (most uniform random floats), use QUICKSORT
    if (!analysis->structured_exponent) {
        printf("QUICKSORT (unstructured)\n");
        return STRATEGY_QUICKSORT;
    }

    // For structured data with small range, use specialized approach
    if (analysis->value_range < 1000.0f) {
        printf("FLOAT_OPTIMIZED (structured, small range)\n");
        return STRATEGY_FLOAT_OPTIMIZED;
    }

    printf("QUICKSORT (fallback)\n");
    return STRATEGY_QUICKSORT;
}

// ============================================================================
// PERFORMANCE TESTING FRAMEWORK
// ============================================================================

/**
 * Return a CPU-time timestamp in milliseconds (clock() based).
 *
 * Callers use it pairwise: get_time_ms() - start. The previous version ran
 * a 1000-iteration "calibration" loop and subtracted its duration from the
 * absolute timestamp; any constant overhead cancels in end - start anyway,
 * so the loop only added per-call jitter to every measurement.
 *
 * NOTE(review): clock() measures processor time, not wall time, and its
 * resolution can be coarse; CLOCK_MONOTONIC would be a better benchmark
 * clock on POSIX systems.
 */
double get_time_ms() {
    return (double)clock() / CLOCKS_PER_SEC * 1000.0;
}

// Microsecond version
double get_time_us() {

    clock_t start = clock();

    // Small calibration loop to measure overhead
    volatile int dummy = 0;
    for (int i = 0; i < 100; i++) {
        dummy += i;
    }
    clock_t end = clock();

    // Calculate overhead in microseconds
    double overhead_us = (double)(end - start) / CLOCKS_PER_SEC * 1000000.0;

    // Get current time and subtract overhead
    double current_time = (double)clock() / CLOCKS_PER_SEC * 1000000.0;

    return current_time - overhead_us;
}



// HELPER FUNCTION for test_performance_all_types

/* Guarded speedup ratio (qsort_time / bresort_time).
 * Times at or below 50 microseconds are treated as unmeasurable timer
 * noise: a fast BRESort gets a capped estimate, a fast qsort returns a
 * token 0.1x, and two unmeasurable times report parity (1.0x). */
double calculate_safe_speedup(double bresort_time, double qsort_time) {
    const double min_threshold = 50.0; /* microseconds */
    const int bres_measurable = bresort_time > min_threshold;
    const int qs_measurable = qsort_time > min_threshold;

    if (bres_measurable && qs_measurable) {
        return qsort_time / bresort_time;           /* normal case */
    }
    if (!bres_measurable && qs_measurable) {
        /* BRESort below the noise floor: estimate against the floor, cap at 100x. */
        double estimate = qsort_time / min_threshold;
        return estimate > 100.0 ? 100.0 : estimate;
    }
    if (bres_measurable) {
        return 0.1;  /* qsort unmeasurably fast (unlikely) */
    }
    return 1.0;      /* both below the noise floor */
}

/**
 * Benchmark BRESort against qsort() for 32-bit, 64-bit and float32 data
 * across several array sizes, averaging over multiple runs.
 *
 * FIX: malloc results were previously passed unchecked to memcpy (UB on
 * NULL). Failed allocations now skip the run, and averages are taken over
 * the runs that actually completed.
 */
void test_performance_all_types() {
    printf("=== BRESort Research - ALL DATA TYPES Performance ===\n\n");

    const int sizes[] = {10000, 50000, 100000};
    const int num_tests = sizeof(sizes) / sizeof(sizes[0]);
    const int num_runs = 5;

    for (int test = 0; test < num_tests; test++) {
        int n = sizes[test];
        printf(" Testing with %d elements:\n", n);

        // ==================== 32-BIT INTEGERS ====================
        printf("   32-bit Integers:\n");
        double bresort_total_us = 0, qsort_total_us = 0;
        int runs_done = 0;

        for (int run = 0; run < num_runs; run++) {
            uint32_t* data1 = malloc((size_t)n * sizeof *data1);
            uint32_t* data2 = malloc((size_t)n * sizeof *data2);
            if (!data1 || !data2) {
                fprintf(stderr, "   allocation failed, skipping run\n");
                free(data1); free(data2);
                continue;
            }

            generate_sequential_32bit(data1, n);
            memcpy(data2, data1, (size_t)n * sizeof *data1);

            double start = get_time_us();
            BRESort_research_32bit(data1, n);
            bresort_total_us += get_time_us() - start;

            start = get_time_us();
            qsort(data2, n, sizeof(uint32_t), compare_uint32);
            qsort_total_us += get_time_us() - start;

            free(data1); free(data2);
            runs_done++;
        }

        // Average over completed runs only.
        double bresort_avg_us_32 = runs_done ? bresort_total_us / runs_done : 0.0;
        double qsort_avg_us_32 = runs_done ? qsort_total_us / runs_done : 0.0;

        print_timing_result("32-bit Integers", bresort_avg_us_32, qsort_avg_us_32, 1);

        // ==================== 64-BIT INTEGERS ====================
        printf("   64-bit Integers:\n");
        bresort_total_us = 0; qsort_total_us = 0;
        runs_done = 0;

        for (int run = 0; run < num_runs; run++) {
            uint64_t* data1 = malloc((size_t)n * sizeof *data1);
            uint64_t* data2 = malloc((size_t)n * sizeof *data2);
            if (!data1 || !data2) {
                fprintf(stderr, "   allocation failed, skipping run\n");
                free(data1); free(data2);
                continue;
            }

            generate_sequential_64bit(data1, n);
            memcpy(data2, data1, (size_t)n * sizeof *data1);

            double start = get_time_us();
            BRESort_research_64bit(data1, n);
            bresort_total_us += get_time_us() - start;

            start = get_time_us();
            qsort(data2, n, sizeof(uint64_t), compare_uint64);
            qsort_total_us += get_time_us() - start;

            free(data1); free(data2);
            runs_done++;
        }

        double bresort_avg_us_64 = runs_done ? bresort_total_us / runs_done : 0.0;
        double qsort_avg_us_64 = runs_done ? qsort_total_us / runs_done : 0.0;

        print_timing_result("64-bit Integers", bresort_avg_us_64, qsort_avg_us_64, 1);

        // ==================== FLOAT32 ====================
        printf("   Float32:\n");
        bresort_total_us = 0; qsort_total_us = 0;
        runs_done = 0;

        for (int run = 0; run < num_runs; run++) {
            float* data1 = malloc((size_t)n * sizeof *data1);
            float* data2 = malloc((size_t)n * sizeof *data2);
            if (!data1 || !data2) {
                fprintf(stderr, "   allocation failed, skipping run\n");
                free(data1); free(data2);
                continue;
            }

            generate_uniform_floats(data1, n);
            memcpy(data2, data1, (size_t)n * sizeof *data1);

            double start = get_time_us();
            BRESort_research_float32(data1, n);
            bresort_total_us += get_time_us() - start;

            start = get_time_us();
            qsort(data2, n, sizeof(float), compare_float32);
            qsort_total_us += get_time_us() - start;

            free(data1); free(data2);
            runs_done++;
        }

        double bresort_avg_us_float = runs_done ? bresort_total_us / runs_done : 0.0;
        double qsort_avg_us_float = runs_done ? qsort_total_us / runs_done : 0.0;

        print_timing_result("Float32", bresort_avg_us_float, qsort_avg_us_float, 1);

        printf("\n");
    }
}


// ============================================================================
// ULTIMATE STRESS TESTING
// ============================================================================

/* TEST 1: tiny and degenerate inputs — the cases most likely to trip an
 * adaptive sorter's strategy selection. */
void stress_test_edge_cases() {
    printf("=== BRESort RESEARCH - ULTIMATE STRESS TEST ===\n\n");

    printf(" TEST 1: EXTREME EDGE CASES\n");

    // One element: must come back unchanged.
    uint32_t lone[] = {42u};
    BRESort_research_32bit(lone, 1);
    printf("   Single element: %s\n", lone[0] == 42 ? "PASS" : "FAIL");

    // Out-of-order pair must end up ordered.
    uint32_t duo[] = {2u, 1u};
    BRESort_research_32bit(duo, 2);
    printf("   Two elements: %s\n", (duo[0] == 1 && duo[1] == 2) ? "PASS" : "FAIL");

    // 1000 copies of one value: content must survive the sort untouched.
    uint32_t same[1000];
    for (size_t i = 0; i < 1000; i++) same[i] = 0xDEADBEEF;
    BRESort_research_32bit(same, 1000);
    int same_ok = 1;
    for (size_t i = 1; i < 1000; i++)
        if (same[i] != same[i - 1]) same_ok = 0;
    printf("   All identical: %s\n", same_ok ? "PASS" : "FAIL");

    // Already-ascending input: ordering must be preserved.
    uint32_t ascending[1000];
    for (size_t i = 0; i < 1000; i++) ascending[i] = (uint32_t)i;
    BRESort_research_32bit(ascending, 1000);
    int asc_ok = 1;
    for (size_t i = 1; i < 1000; i++)
        if (ascending[i] < ascending[i - 1]) asc_ok = 0;
    printf("   Already sorted: %s\n", asc_ok ? "PASS" : "FAIL");

    // Strictly descending input: classic worst case.
    uint32_t descending[1000];
    for (size_t i = 0; i < 1000; i++) descending[i] = (uint32_t)(999 - i);
    BRESort_research_32bit(descending, 1000);
    int desc_ok = 1;
    for (size_t i = 1; i < 1000; i++)
        if (descending[i] < descending[i - 1]) desc_ok = 0;
    printf("   Reverse sorted: %s\n", desc_ok ? "PASS" : "FAIL");
}

/* TEST 2: extreme 32-bit bit patterns — full range, all-zero, and an
 * alternating min/max pattern. */
void stress_test_corner_cases_32bit() {
    printf("\n TEST 2: 32-BIT CORNER CASES\n");

    // Alternating 0 / UINT32_MAX exercises the full value range.
    uint32_t extremes[1000];
    for (size_t i = 0; i < 1000; i++)
        extremes[i] = (i & 1u) ? 0xFFFFFFFFu : 0u;
    BRESort_research_32bit(extremes, 1000);
    int extremes_ok = 1;
    for (size_t i = 1; i < 1000; i++)
        if (extremes[i] < extremes[i - 1]) extremes_ok = 0;
    printf("   Max range (0-0xFFFFFFFF): %s\n", extremes_ok ? "PASS" : "FAIL");

    // All zeros must remain all zeros.
    uint32_t zeros[1000] = {0};
    BRESort_research_32bit(zeros, 1000);
    int zeros_ok = 1;
    for (size_t i = 1; i < 1000; i++)
        if (zeros[i] != 0) zeros_ok = 0;
    printf("   All zeros: %s\n", zeros_ok ? "PASS" : "FAIL");

    // Alternating bit pattern: every-other-element min/max again,
    // spelled with explicit constants as in the original harness.
    uint32_t alternating[1000];
    for (size_t i = 0; i < 1000; i++)
        alternating[i] = (i & 1u) ? 0xFFFFFFFFu : 0x00000000u;
    BRESort_research_32bit(alternating, 1000);
    int alternating_ok = 1;
    for (size_t i = 1; i < 1000; i++)
        if (alternating[i] < alternating[i - 1]) alternating_ok = 0;
    printf("   Bit pattern 0101: %s\n", alternating_ok ? "PASS" : "FAIL");
}

/* TEST 3: IEEE-754 special values — NaN, +/- infinity, and subnormals. */
void stress_test_float_special() {
    printf("\n TEST 3: FLOAT SPECIAL CASES\n");

    // Every tenth entry is NaN; the rest are ascending integers.
    float with_nans[100];
    for (size_t i = 0; i < 100; i++)
        with_nans[i] = (i % 10 == 0) ? NAN : (float)i;
    BRESort_research_float32(with_nans, 100);
    printf("   NaN handling: COMPLETED (behavior depends on NaN policy)\n");

    // Thirds: -inf, finite, +inf — result must be non-decreasing.
    float with_infs[100];
    for (size_t i = 0; i < 100; i++) {
        if (i < 33)      with_infs[i] = -INFINITY;
        else if (i < 66) with_infs[i] = (float)(i - 33);
        else             with_infs[i] = INFINITY;
    }
    BRESort_research_float32(with_infs, 100);
    int infs_ok = 1;
    for (size_t i = 1; i < 100; i++) {
        // Skip comparisons involving NaN (none expected here, but safe).
        if (isnan(with_infs[i]) || isnan(with_infs[i - 1])) continue;
        if (with_infs[i] < with_infs[i - 1]) infs_ok = 0;
    }
    printf("   Infinity handling: %s\n", infs_ok ? "PASS" : "FAIL");

    // Mix of tiny subnormals and ordinary values.
    float subnormals[100];
    for (size_t i = 0; i < 100; i++)
        subnormals[i] = (i % 2 == 0) ? 1.0e-45f : (float)i; // very small denormal
    BRESort_research_float32(subnormals, 100);
    printf("   Denormal numbers: COMPLETED\n");
}

/**
 * TEST 4: sort large sequential and random arrays (100K-1M) and verify
 * the result is non-decreasing.
 *
 * FIXES: malloc results are now checked before use, and the random fill
 * no longer evaluates `rand() << 16` in signed int, which is undefined
 * behavior when RAND_MAX >= 2^15 (it is 2^31-1 on glibc).
 */
void stress_test_large_scale() {
    printf("\n TEST 4: LARGE SCALE PERFORMANCE\n");

    const int large_sizes[] = {100000, 500000, 1000000};
    const int num_tests = sizeof(large_sizes) / sizeof(large_sizes[0]);

    for (int test = 0; test < num_tests; test++) {
        int n = large_sizes[test];
        printf("   Testing %d elements:\n", n);

        // Large sequential
        uint32_t* large_seq = malloc((size_t)n * sizeof *large_seq);
        if (!large_seq) {
            fprintf(stderr, "     allocation failed for %d elements\n", n);
            continue;
        }
        for (int i = 0; i < n; i++) large_seq[i] = (uint32_t)i;

        double start = get_time_ms();
        BRESort_research_32bit(large_seq, n);
        double time_seq = get_time_ms() - start;

        // Verify ordering
        int seq_ok = 1;
        for (int i = 1; i < n; i++) {
            if (large_seq[i] < large_seq[i-1]) {
                seq_ok = 0;
                break;
            }
        }

        printf("     Sequential: %.2f ms, %s\n", time_seq, seq_ok ? "PASS" : "FAIL");
        free(large_seq);

        // Large random
        uint32_t* large_rand = malloc((size_t)n * sizeof *large_rand);
        if (!large_rand) {
            fprintf(stderr, "     allocation failed for %d elements\n", n);
            continue;
        }
        // Shift in unsigned arithmetic to avoid signed overflow UB.
        for (int i = 0; i < n; i++)
            large_rand[i] = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

        start = get_time_ms();
        BRESort_research_32bit(large_rand, n);
        double time_rand = get_time_ms() - start;

        // Verify ordering
        int rand_ok = 1;
        for (int i = 1; i < n; i++) {
            if (large_rand[i] < large_rand[i-1]) {
                rand_ok = 0;
                break;
            }
        }

        printf("     Random:     %.2f ms, %s\n", time_rand, rand_ok ? "PASS" : "FAIL");
        free(large_rand);
    }
}

/**
 * TEST 5: exercise sizes straddling the insertion-sort threshold and a
 * mixed pattern designed to confuse the pattern analyzer.
 *
 * FIX: malloc is now checked before the buffer is written.
 */
void stress_test_algorithm_transitions() {
    printf("\n TEST 5: ALGORITHM TRANSITION BOUNDARIES\n");

    // Test around insertion sort threshold (32 elements)
    for (int size = 28; size <= 36; size++) {
        uint32_t* data = malloc((size_t)size * sizeof *data);
        if (!data) {
            fprintf(stderr, "   allocation failed for size %d\n", size);
            continue;
        }
        for (int i = 0; i < size; i++) data[i] = (uint32_t)(size - i - 1); // reverse sorted

        BRESort_research_32bit(data, size);

        int ok = 1;
        for (int i = 1; i < size; i++) {
            if (data[i] < data[i-1]) ok = 0;
        }

        printf("   Size %d (near threshold): %s\n", size, ok ? "PASS" : "FAIL");
        free(data);
    }

    // Mixed patterns that might confuse the analyzer: five distinct
    // regimes concatenated into one array.
    uint32_t mixed[1000];
    for (int i = 0; i < 1000; i++) {
        if (i < 100) mixed[i] = i;                    // Sequential
        else if (i < 200) mixed[i] = 0x12345000;      // All identical
        else if (i < 300) mixed[i] = rand();          // Random
        else if (i < 400) mixed[i] = 1000 + (i % 10); // Small range
        else mixed[i] = 0x5F3A0000 + (i % 100);       // Timestamp-like
    }

    BRESort_research_32bit(mixed, 1000);
    int mixed_ok = 1;
    for (int i = 1; i < 1000; i++) {
        if (mixed[i] < mixed[i-1]) mixed_ok = 0;
    }
    printf("   Mixed pattern data: %s\n", mixed_ok ? "PASS" : "FAIL");
}

/**
 * TEST 6: allocate, sort, spot-check, and free many small arrays in
 * batches, to shake out allocator-interaction problems.
 *
 * BUG FIX: the pointer array was uninitialized; when an allocation failed
 * mid-batch the code broke out of the fill loop and the cleanup loop then
 * called free() on indeterminate pointer values (undefined behavior).
 * Every slot is now NULL-initialized first, and free(NULL) is a no-op so
 * the redundant guard is gone.
 */
void stress_test_memory_pressure() {
    printf("\n TEST 6: MEMORY PRESSURE TESTS\n");

    printf("   Many small arrays:\n");
    int total_passed = 0;
    const int array_size = 100;
    const int num_arrays = 50;  // Reduced from 100 for stability

    for (int batch = 0; batch < 10; batch++) {
        uint32_t* small_arrays[num_arrays];
        // NULL-init so cleanup is always safe, even after a failed malloc.
        for (int i = 0; i < num_arrays; i++) small_arrays[i] = NULL;
        int batch_ok = 1;

        // Create and sort arrays
        for (int i = 0; i < num_arrays; i++) {
            small_arrays[i] = malloc((size_t)array_size * sizeof(uint32_t));
            if (!small_arrays[i]) {
                printf("     Batch %d: MEMORY ALLOCATION FAILED\n", batch + 1);
                batch_ok = 0;
                break;
            }

            // Reverse-sorted fill: predictable and easy to verify.
            for (int j = 0; j < array_size; j++) {
                small_arrays[i][j] = (uint32_t)(array_size - j - 1);
            }

            BRESort_research_32bit(small_arrays[i], array_size);
        }

        // Spot-check a random sample of arrays rather than all of them.
        if (batch_ok) {
            int verification_passed = 1;
            for (int sample = 0; sample < 5; sample++) {
                int idx = rand() % num_arrays;
                for (int j = 1; j < array_size; j++) {
                    if (small_arrays[idx][j] < small_arrays[idx][j-1]) {
                        verification_passed = 0;
                        break;
                    }
                }
                if (!verification_passed) break;
            }

            if (verification_passed) {
                printf("     Batch %d: PASS\n", batch + 1);
                total_passed++;
            } else {
                printf("     Batch %d: FAIL (verification)\n", batch + 1);
            }
        }

        // Cleanup: free(NULL) is a no-op, no guard needed.
        for (int i = 0; i < num_arrays; i++) {
            free(small_arrays[i]);
        }
    }

    printf("   Overall: %d/10 batches passed\n", total_passed);
}

/* Driver: run every stress-test suite in a fixed order.
 * Each suite prints its own PASS/FAIL lines; nothing here aggregates the
 * results, so failures are visible only in stdout and never affect the
 * process exit code (see the review comment about main() returning 0). */
void run_comprehensive_stress_test() {
    printf(" BRESort RESEARCH - ULTIMATE STRESS TEST SUITE\n");
    printf("==================================================\n\n");

    stress_test_edge_cases();            // tiny/degenerate inputs
    stress_test_corner_cases_32bit();    // extreme 32-bit bit patterns
    stress_test_float_special();         // NaN / infinity / denormals
    stress_test_large_scale();           // up to 1M elements
    stress_test_algorithm_transitions(); // threshold boundary sizes
    stress_test_memory_pressure();       // many small allocations

    printf("\n STRESS TESTING COMPLETE!\n");
}



// ============================================================================
// ANALYSIS VALIDATION
// ============================================================================


/**
 * Run the 32-bit quick analyzer over four canonical data shapes and print
 * the detected byte patterns plus the recommended strategy.
 *
 * FIXES: string literals are now bound to const char* (writing through a
 * non-const pointer to a literal is UB), the case count is derived from
 * the table instead of being hard-coded to 4, the buffer size uses an enum
 * constant (a `const int` is not a constant expression in C, so the old
 * code silently created a VLA), and the switch indentation is repaired.
 */
void test_analysis_validation() {
    printf("=== Optimized Analysis Validation ===\n\n");

    enum { N = 1000 };        // true compile-time constant, no VLA
    uint32_t data[N];

    static const struct {
        const char* name;                       // literal: must be const
        void (*generator)(uint32_t[], int);
    } tests[] = {
        {"Sequential", generate_sequential_32bit},
        {"Random", generate_random_32bit},
        {"Timestamp", generate_timestamp_32bit},
        {"Small Range", generate_small_range_32bit},
    };
    const int num_cases = (int)(sizeof tests / sizeof tests[0]);

    for (int i = 0; i < num_cases; i++) {
        tests[i].generator(data, N);
        QuickAnalysis analysis = quick_analyze_32bit(data, N);

        printf(" %s Data:\n", tests[i].name);
        printf("   Range: %u, Sorted Ratio: %.3f\n", analysis.range, analysis.sorted_ratio);
        printf("   Byte Patterns: ");
        for (int j = 3; j >= 0; j--) {
            const char* pattern = "RAND";
            if (analysis.byte_patterns[j] == 1) pattern = "FIXED";
            else if (analysis.byte_patterns[j] == 2) pattern = "SEQ";
            printf("B%d=%s ", j, pattern);
        }
        printf("\n");

        ResearchSortStrategy strategy = choose_optimized_strategy(&analysis, N);
        const char* strategy_name = "UNKNOWN";
        switch (strategy) {
            case STRATEGY_RADIX_SORT:      strategy_name = "RADIX_SORT"; break;
            case STRATEGY_QUICKSORT:       strategy_name = "QUICKSORT"; break;
            case STRATEGY_MERGE_SORT:      strategy_name = "MERGE_SORT"; break;
            case STRATEGY_INSERTION_SORT:  strategy_name = "INSERTION_SORT"; break;
            case STRATEGY_HYBRID_RADIX:    strategy_name = "HYBRID_RADIX"; break;
            case STRATEGY_BITMASK_SORT:    strategy_name = "BITMASK_SORT"; break;
            case STRATEGY_FLOAT_OPTIMIZED: strategy_name = "FLOAT_OPTIMIZED"; break;
            default:                       strategy_name = "QUICKSORT"; break;
        }
        printf("   → Recommended: %s\n\n", strategy_name);
    }
}

// ============================================================================
// COMPREHENSIVE VALIDATION (Addresses qsort overhead concern)
// ============================================================================

/**
 * Compare BRESort with a direct quicksort call and with qsort() to
 * separate algorithmic gains from qsort's comparator-callback overhead.
 *
 * FIXES: allocations are checked before memcpy (UB on NULL), and the
 * mangled "ANDOM" comment is corrected.  Note that the CONCLUSION block
 * deliberately reuses the totals of the LAST size tested (final_n),
 * because the totals are reset at the top of each size iteration.
 */
void validate_performance_fairness() {
    printf("=== BRESort - FAIR PERFORMANCE VALIDATION ===\n\n");
    printf("Addressing qsort function pointer overhead concern\n\n");

    const int sizes[] = {10000, 50000, 100000};
    const int num_tests = sizeof(sizes) / sizeof(sizes[0]);
    const int num_runs = 2;  // Quick runs for demonstration

    double bresort_total = 0, quick_direct_total = 0, qsort_total = 0;
    int final_n = 0;

    for (int test = 0; test < num_tests; test++) {
        int n = sizes[test];
        printf(" Testing with %d elements:\n", n);

        // Reset totals for each test size
        bresort_total = 0;
        quick_direct_total = 0;
        qsort_total = 0;

        for (int run = 0; run < num_runs; run++) {
            uint32_t* data1 = malloc((size_t)n * sizeof *data1);
            uint32_t* data2 = malloc((size_t)n * sizeof *data2);
            uint32_t* data3 = malloc((size_t)n * sizeof *data3);
            if (!data1 || !data2 || !data3) {
                fprintf(stderr, "   allocation failed, skipping run\n");
                free(data1); free(data2); free(data3);
                continue;
            }

            // RANDOM data instead of sequential, so no strategy gets a free win.
            generate_random_32bit(data1, n);
            memcpy(data2, data1, (size_t)n * sizeof *data1);
            memcpy(data3, data1, (size_t)n * sizeof *data1);

            // Test BRESort
            double start = get_time_ms();
            BRESort_research_32bit(data1, n);
            bresort_total += get_time_ms() - start;

            // Test DIRECT QuickSort (no comparator callback)
            start = get_time_ms();
            quick_sort_optimized_32bit(data2, n);
            quick_direct_total += get_time_ms() - start;

            // Test QSORT (with function pointer overhead)
            start = get_time_ms();
            qsort(data3, n, sizeof(uint32_t), compare_uint32);
            qsort_total += get_time_ms() - start;

            free(data1); free(data2); free(data3);
        }

        double bresort_time = bresort_total / num_runs;
        double quick_time = quick_direct_total / num_runs;
        double qsort_time = qsort_total / num_runs;

        printf("   |-- BRESort:        %7.2f ms\n", bresort_time);
        printf("   |-- Direct Quick:   %7.2f ms (Gain: %.2fx)\n",
               quick_time, quick_time / bresort_time);
        printf("   |-- System qsort:   %7.2f ms (Gain: %.2fx)\n",
               qsort_time, qsort_time / bresort_time);
        printf("\n");

        final_n = n;  // remember the last size for the conclusion below
    }

    // These averages reflect only the last tested size (totals reset per size).
    double bresort_avg = bresort_total / num_runs;
    double quick_avg = quick_direct_total / num_runs;
    double qsort_avg = qsort_total / num_runs;

    printf("CONCLUSION: For %d elements, BRESort intelligence provides ~%.1fx gain\n",
           final_n, quick_avg / bresort_avg);
    printf("over direct quicksort, plus additional ~%.1fx from avoiding\n",
           qsort_avg / quick_avg);
    printf("qsort function pointer overhead.\n");
}

// REAL-WORLD DATA TESTING
// ============================================================================

/**
 * Benchmark BRESort vs qsort on three synthetic "real-world" patterns.
 *
 * MAJOR BUG FIX: the original timed qsort on the SAME buffer that BRESort
 * had just sorted, so qsort always received pre-sorted input and every
 * reported speedup was meaningless.  Each test now sorts an identical
 * copy.  Also fixed: unchecked malloc, and `rand() << 16` evaluated in
 * signed int (UB when RAND_MAX >= 2^15) in the random third of Test 3.
 */
void test_real_world_patterns() {
    printf("=== BRESort - REAL-WORLD DATA VALIDATION ===\n\n");

    const int n = 50000;
    printf("Testing with %d elements of real-world patterns:\n\n", n);

    // Test 1: Database Timestamps (common in real apps)
    printf(" Database Timestamps:\n");
    uint32_t* timestamps = malloc((size_t)n * sizeof *timestamps);
    uint32_t* timestamps_copy = malloc((size_t)n * sizeof *timestamps_copy);
    if (!timestamps || !timestamps_copy) {
        fprintf(stderr, "   allocation failed\n");
        free(timestamps); free(timestamps_copy);
        return;
    }
    uint32_t base_time = 0x5F3A0000; // Some base timestamp
    for (int i = 0; i < n; i++) {
        // Real timestamp pattern: base + random gaps + some duplicates
        if (i % 100 == 0) {
            timestamps[i] = base_time; // Some duplicates
        } else {
            timestamps[i] = base_time + (i * 60) + (rand() % 3600); // Seconds with gaps
        }
    }
    memcpy(timestamps_copy, timestamps, (size_t)n * sizeof *timestamps);

    double start = get_time_ms();
    BRESort_research_32bit(timestamps, n);
    double bresort_time = get_time_ms() - start;

    start = get_time_ms();
    qsort(timestamps_copy, n, sizeof(uint32_t), compare_uint32); // identical unsorted copy
    double qsort_time = get_time_ms() - start;

    printf("   |-- BRESort: %6.2f ms, qsort: %6.2f ms, Speedup: %.2fx\n",
           bresort_time, qsort_time, qsort_time / bresort_time);
    free(timestamps); free(timestamps_copy);

    // Test 2: Sensor Data (noisy with patterns)
    printf(" Sensor Readings (noisy with trends):\n");
    float* sensor_data = malloc((size_t)n * sizeof *sensor_data);
    float* sensor_copy = malloc((size_t)n * sizeof *sensor_copy);
    if (!sensor_data || !sensor_copy) {
        fprintf(stderr, "   allocation failed\n");
        free(sensor_data); free(sensor_copy);
        return;
    }
    float trend = 0.0f;
    for (int i = 0; i < n; i++) {
        trend += (float)(rand() % 100 - 50) / 1000.0f; // Slow trend
        sensor_data[i] = 25.0f + trend + (float)(rand() % 200 - 100) / 10.0f; // Noise
    }
    memcpy(sensor_copy, sensor_data, (size_t)n * sizeof *sensor_data);

    start = get_time_ms();
    BRESort_research_float32(sensor_data, n);
    bresort_time = get_time_ms() - start;

    start = get_time_ms();
    qsort(sensor_copy, n, sizeof(float), compare_float32);
    qsort_time = get_time_ms() - start;

    printf("   |-- BRESort: %6.2f ms, qsort: %6.2f ms, Speedup: %.2fx\n",
           bresort_time, qsort_time, qsort_time / bresort_time);
    free(sensor_data); free(sensor_copy);

    // Test 3: Mixed Structured Data (very common)
    printf(" Mixed Structured Data:\n");
    uint32_t* mixed_data = malloc((size_t)n * sizeof *mixed_data);
    uint32_t* mixed_copy = malloc((size_t)n * sizeof *mixed_copy);
    if (!mixed_data || !mixed_copy) {
        fprintf(stderr, "   allocation failed\n");
        free(mixed_data); free(mixed_copy);
        return;
    }
    for (int i = 0; i < n; i++) {
        if (i < n/3) {
            mixed_data[i] = 0x40000000 + (i % 1000);  // Memory addresses
        } else if (i < 2*n/3) {
            mixed_data[i] = 0x12345000 + (i * 10);    // Sequential IDs
        } else {
            // Shift in unsigned arithmetic to avoid signed overflow UB.
            mixed_data[i] = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
        }
    }
    memcpy(mixed_copy, mixed_data, (size_t)n * sizeof *mixed_data);

    start = get_time_ms();
    BRESort_research_32bit(mixed_data, n);
    bresort_time = get_time_ms() - start;

    start = get_time_ms();
    qsort(mixed_copy, n, sizeof(uint32_t), compare_uint32);
    qsort_time = get_time_ms() - start;

    printf("   |-- BRESort: %6.2f ms, qsort: %6.2f ms, Speedup: %.2fx\n",
           bresort_time, qsort_time, qsort_time / bresort_time);
    free(mixed_data); free(mixed_copy);

}

/**
 * Benchmark BRESort vs qsort on documented real-world distributions
 * (e-commerce prices, server response times, clustered GPS coordinates).
 * Copies are made before timing, so both sorts see identical input.
 *
 * FIXES: allocations are checked before use, and the price tier is now
 * chosen from a SINGLE rand() draw — the original drew rand()%100 twice,
 * which made the actual split roughly 80/19/1 instead of the documented
 * 80/15/5.
 */
void test_documented_real_patterns() {
    printf("=== BRESort - DOCUMENTED REAL-WORLD PATTERNS ===\n\n");

    const int n = 50000;
    printf("Testing with %d elements of documented real-world distributions:\n\n", n);

    // Test 1: E-Commerce Product Prices (from Amazon dataset analysis)
    printf(" E-Commerce Prices (clustered, power-law):\n");
    uint32_t* prices = malloc((size_t)n * sizeof *prices);
    uint32_t* prices_copy = malloc((size_t)n * sizeof *prices_copy);
    if (!prices || !prices_copy) {
        fprintf(stderr, "   allocation failed\n");
        free(prices); free(prices_copy);
        return;
    }

    for (int i = 0; i < n; i++) {
        int bucket = rand() % 100; // one draw decides the tier
        if (bucket < 80) {
            prices[i] = 1000 + (rand() % 9000);     // $10-100 range (80%)
        } else if (bucket < 95) {
            prices[i] = 10000 + (rand() % 90000);   // $100-1000 (15%)
        } else {
            prices[i] = 100000 + (rand() % 900000); // $1000+ (5%) - outliers
        }
    }

    memcpy(prices_copy, prices, (size_t)n * sizeof *prices);

    double start = get_time_ms();
    BRESort_research_32bit(prices, n);
    double bresort_time = get_time_ms() - start;

    start = get_time_ms();
    qsort(prices_copy, n, sizeof(uint32_t), compare_uint32);
    double qsort_time = get_time_ms() - start;

    printf("   |-- BRESort: %6.2f ms, qsort: %6.2f ms, Speedup: %.2fx\n",
           bresort_time, qsort_time, qsort_time / bresort_time);
    free(prices); free(prices_copy);

    // Test 2: Web Server Response Times (from Apache log analysis)
    printf(" Server Response Times (long-tail distribution):\n");
    uint32_t* response_times = malloc((size_t)n * sizeof *response_times);
    uint32_t* response_times_copy = malloc((size_t)n * sizeof *response_times_copy);
    if (!response_times || !response_times_copy) {
        fprintf(stderr, "   allocation failed\n");
        free(response_times); free(response_times_copy);
        return;
    }

    for (int i = 0; i < n; i++) {
        // Real pattern: most responses fast, a long tail of slow ones.
        if (rand() % 100 < 90) {
            response_times[i] = 10 + (rand() % 90);    // 10-100ms (90%)
        } else {
            response_times[i] = 100 + (rand() % 9000); // 100ms-10s (10%)
        }
    }

    memcpy(response_times_copy, response_times, (size_t)n * sizeof *response_times);

    start = get_time_ms();
    BRESort_research_32bit(response_times, n);
    bresort_time = get_time_ms() - start;

    start = get_time_ms();
    qsort(response_times_copy, n, sizeof(uint32_t), compare_uint32);
    qsort_time = get_time_ms() - start;

    printf("   |-- BRESort: %6.2f ms, qsort: %6.2f ms, Speedup: %.2fx\n",
           bresort_time, qsort_time, qsort_time / bresort_time);
    free(response_times); free(response_times_copy);

    // Test 3: GPS Coordinates (from OpenStreetMap data)
    printf(" GPS Coordinates (spatial clustering):\n");
    uint32_t* coordinates = malloc((size_t)n * sizeof *coordinates);
    uint32_t* coordinates_copy = malloc((size_t)n * sizeof *coordinates_copy);
    if (!coordinates || !coordinates_copy) {
        fprintf(stderr, "   allocation failed\n");
        free(coordinates); free(coordinates_copy);
        return;
    }

    // Real pattern: coordinates cluster around cities/points of interest.
    for (int i = 0; i < n; i++) {
        int city = i % 5; // 5 major cities
        uint32_t base_lat = 0x12340000 + (city * 0x00010000);
        coordinates[i] = base_lat + (rand() % 0x0000FFFF); // Cluster around city
    }

    memcpy(coordinates_copy, coordinates, (size_t)n * sizeof *coordinates);

    start = get_time_ms();
    BRESort_research_32bit(coordinates, n);
    bresort_time = get_time_ms() - start;

    start = get_time_ms();
    qsort(coordinates_copy, n, sizeof(uint32_t), compare_uint32);
    qsort_time = get_time_ms() - start;

    printf("   |-- BRESort: %6.2f ms, qsort: %6.2f ms, Speedup: %.2fx\n",
           bresort_time, qsort_time, qsort_time / bresort_time);
    free(coordinates); free(coordinates_copy);

    printf("\n");
}

// ============================================================================
// ENTROPY ANALYSIS VALIDATION
// ============================================================================

/* Print which byte positions (shown as B7..B0) the 64-bit analyzer flagged
 * as structured, followed by a newline. */
static void report_structured_bytes64(const QuickAnalysis64* a) {
    for (int i = 0; i < 8; i++) {
        if (a->byte_patterns[i] != 0) printf("B%d ", 7 - i);
    }
    printf("\n");
}

/* Sanity-check the entropy analyzers: feed them 64-bit sequential, random
 * and timestamp-shaped data, plus clustered and narrow-range floats, and
 * print what structure they detect. */
void test_entropy_analysis_validation() {
    printf("=== ENTROPY ANALYSIS VALIDATION ===\n\n");

    const int n = 1000;
    uint64_t seq_vals[1000], rand_vals[1000], ts_vals[1000];

    // Build the three representative 64-bit datasets.
    for (int i = 0; i < n; i++) {
        seq_vals[i] = 0x1234567800000000ULL + i;
        rand_vals[i] = ((uint64_t)rand() << 32) | rand();
        ts_vals[i] = 0x5F3A2B0100000000ULL + (i * 1000);
    }

    QuickAnalysis64 seq_analysis = quick_analyze_64bit(seq_vals, n);
    QuickAnalysis64 rand_analysis = quick_analyze_64bit(rand_vals, n);
    QuickAnalysis64 ts_analysis = quick_analyze_64bit(ts_vals, n);

    printf("64-bit Sequential Data:\n");
    printf("  Structured bytes detected: ");
    report_structured_bytes64(&seq_analysis);

    printf("64-bit Random Data:\n");
    printf("  Structured bytes detected: ");
    report_structured_bytes64(&rand_analysis);

    printf("64-bit Timestamps:\n");
    printf("  Structured bytes detected: ");
    report_structured_bytes64(&ts_analysis);
    printf("\n");

    // Float analysis: three clusters vs. one very narrow band of values.
    float clustered[1000], narrow[1000];
    const float centers[] = {-50.0f, 0.0f, 50.0f};
    for (int i = 0; i < n; i++) {
        clustered[i] = centers[i % 3] + (float)rand() / RAND_MAX * 10.0f - 5.0f;
        narrow[i] = 10.0f + (float)rand() / RAND_MAX * 0.1f;
    }

    FloatAnalysis32 cluster_analysis = quick_analyze_float32(clustered, n);
    FloatAnalysis32 small_analysis = quick_analyze_float32(narrow, n);

    printf("Float Clustered Data: %s\n", cluster_analysis.structured_exponent ? "STRUCTURED" : "RANDOM");
    printf("Float Small Range: %s\n", small_analysis.structured_exponent ? "STRUCTURED" : "RANDOM");
}

// ============================================================================
// MAIN FUNCTION
// ============================================================================

/* Program entry point.
 * srand(42): fixed seed so every "random" dataset is reproducible between
 * runs.  Each suite prints its own PASS/FAIL lines to stdout.
 * NOTE(review): the process always exits 0 regardless of test outcomes —
 * nothing aggregates failures into the return code. */
int main() {
    srand(42);

    printf(" BRESort Research Project - UNIVERSAL VERSION (32/64/Float)\n");
    printf("===============================================================\n\n");


    // Test the new entropy analysis first
    test_entropy_analysis_validation();
    printf("\n");

    test_analysis_validation();
    printf("\n");

    test_performance_all_types();
    printf("\n");

    validate_performance_fairness();
    printf("\n");

    test_real_world_patterns();
    printf("\n");

    run_comprehensive_stress_test();
    printf("\n");

    test_documented_real_patterns();




    return 0;
}
\$\endgroup\$
2
  • \$\begingroup\$ Please do not edit the question, especially the code, after an answer has been posted. Changing the question may cause answer invalidation. Everyone needs to be able to see what the reviewer was referring to. What to do after the question has been answered. \$\endgroup\$ Commented Oct 12 at 14:12
  • \$\begingroup\$ I'm confused by the test program - always seems to return 0, without actually checking whether the results are sorted. \$\endgroup\$ Commented Oct 13 at 8:36

2 Answers 2

9
\$\begingroup\$

I advise against having an interface start out as elaborate as in bresort_research.h - imagine having to keep everything backwards compatible.

I see a lot of code repeating - a maintenance nightmare if nothing else.
For undisclosed reasons the implementation language is C - no support for genericity.

Have the preprocessor include a snippet more than once:
qa.snippet:

#define E       uint##BITS##_t
/* Conceptually uses sampling for large arrays - 1st few for now.
 * Expanded once per width via the BITS macro; REVERSE selects whether
 * reverse-sortedness is also tracked (one extra compare per element).
 * Note: `#if REVERSE` treats an undefined REVERSE as 0, so the
 * non-REVERSE branches below must compile on their own. */
QuickAnalysis##BITS quick_analyze_##BITS##bit(const E arr[], const size_t n) {
    /* Sentinel init: min = all-ones, max = 0, ratios 0, patterns "random". */
    QuickAnalysis##BITS analysis = {~(E)0, 0, 0, 0.0f, 0.0f, {0}};

    if (n < 1)
        return analysis;
    analysis.min_val = analysis.max_val = arr[0];
    if (n == 1)
        return analysis;

    const size_t effective_n = n > SAMPLE_SIZE ? SAMPLE_SIZE : n;

    size_t ascending = 0;
#if REVERSE
    size_t descending = 0;
#endif
    E prev = arr[0];

    // Single pass for min/max and sortedness
    for (size_t i = 1; i < effective_n; i++) {
        E current = arr[i];
        // compare to one extremum at most
        if (current > prev) {
            ascending++;
            if (current > analysis.max_val)
                analysis.max_val = current;
        } else
#if REVERSE
        if (current < prev) {
            descending++;
#else
        {
#endif
            if (current < analysis.min_val)
                analysis.min_val = current;
        }
        prev = current;
    }

    analysis.range = analysis.max_val - analysis.min_val;
#if REVERSE
    /* Non-descending neighbor pairs; equal neighbors count as sorted. */
    const size_t sorted_count = effective_n - 1 - descending;
#else
    /* BUG FIX: the original read `descending` here unconditionally, which
     * does not compile when REVERSE is 0 (or undefined). Without descent
     * tracking we fall back to strictly-ascending pairs, so equal
     * neighbors are not counted toward sortedness in this configuration. */
    const size_t sorted_count = ascending;
#endif
    analysis.sorted_ratio = (double)sorted_count / (effective_n - 1);
#if REVERSE
    const size_t reverse_sorted_count = effective_n - 1 - ascending;
    analysis.reverse_sorted_ratio = (double)reverse_sorted_count / (effective_n - 1);
#endif

    // Quick byte pattern detection (much faster than full entropy)
    if (effective_n > 10) {
        E   first = arr[0],
            last = arr[effective_n - 1];

        /* size_t index avoids the signed/unsigned compare with sizeof(E) */
        for (size_t byte = 0; byte < sizeof(E); byte++) {
            uint8_t first_byte = (first >> (byte * 8)) & 0xFF;
            uint8_t last_byte = (last >> (byte * 8)) & 0xFF;

            if (first_byte == last_byte) {
                analysis.byte_patterns[byte] = 1; // Fixed byte
            } else if ((size_t)abs((int)first_byte - (int)last_byte) < effective_n / 10) {
                /* abs() is non-negative, so the size_t cast is safe. */
                analysis.byte_patterns[byte] = 2; // Sequential byte
            } else {
                analysis.byte_patterns[byte] = 0; // Random byte
            }
        }
    }

    return analysis;
}
#undef E

In bresort_research.c:

/* Expand qa.snippet once per element width. REVERSE is defined explicitly
 * for BOTH expansions: relying on `#if REVERSE` treating an undefined
 * macro as 0 is fragile (and `-Wundef` flags it), so the 64-bit variant
 * states its intent to skip reverse-sortedness tracking. */
#define BITS    32
#define REVERSE 1
#include "qa.snippet"
#undef REVERSE
#undef BITS

#define BITS    64
#define REVERSE 0   /* explicit: no reverse-sortedness tracking for 64-bit */
#include "qa.snippet"
#undef REVERSE
#undef BITS

(Uses QuickAnalysis32 rather than QuickAnalysis.) Extension to more types and functions left as an exercise…

Now I can see that counting bits set per bit position may be slow, esp. in quick_analyze_64bit(). (? I thought that was quick_ due to not investing in establishing entropies?!) And pointless while the counts are never used…
There is counting all bits in parallel, albeit giving the counts "corner bended":

    /* Count set bits per bit position across a[], all positions in
     * parallel: counts[power] holds bit `power` of every per-position
     * counter ("corner bended" / bit-sliced layout).
     * FIX: the initializer was `{ a[0]; }` — a stray semicolon inside a
     * brace initializer is a syntax error. Plane 0 is seeded with the
     * first element; all higher planes start at zero. */
    T counts[LOG2MAXCOUNT] = { a[0] };
    for (size_t i = 1; i < length; i++) {
        T carry_in = a[i];
        for (int power = 0; carry_in; power++) {
            T const
                count_bits = counts[power],
                carry_out = carry_in & count_bits;  // software incrementers
            counts[power] ^= carry_in;              // sum bit of the add
            carry_in = carry_out;                   // propagate carries upward
        }
    }

There is no need to separately count the number of bits set per position in a byte when the counts for a bigger size are known:

    /*
     * Fold per-bit-position set-bit counts down to 8 per-octet-position
     * counts: position p of the wide word contributes to slot p % 8.
     * counts_in_octet must have room for 8 entries; it is cleared first.
     */
    void bits_set_per_position_in_octet(
            const size_t counted[], int positions,
            size_t counts_in_octet[]) {
        for (int slot = 0; slot < 8; slot++)
            counts_in_octet[slot] = 0;
        for (int pos = 0; pos < positions; pos++)
            counts_in_octet[pos & 7] += counted[pos];  /* pos % 8 for pos >= 0 */
    }
\$\endgroup\$
1
  • 1
    \$\begingroup\$ Yes, the maintenance is a nightmare. For this research version I thought that it would benefit from separate 32/64-bit implementations for readability and debuggability, but your template approach is definitely superior in the long run. I'll refactor using this pattern for the next version. \$\endgroup\$ Commented Oct 12 at 7:40
8
\$\begingroup\$

Naming

A consistent prefix is useful, yet code makes public

bresort....
BRESORT_...
BRESort_...
bres_...
get_time...
calculate_safe...
print_timing...
ResearchSortStrategy
QuickAnalysis...
has_expensive_unsortedness
insertion_sort_...
many more

I recommend 2 in bresort.h:

bresort....
BRESORT_...

and others in the test code.

In other words: the key components destined for public use should carve out a limited namespace related to the work.

Units

Make clear physicals units by comment or name

// double calculate_safe_speedup(double bresort_time, double qsort_time);
double calculate_safe_speedup(double bresort_time /* us */, double qsort_time);
double calculate_safe_speedup(double bresort_time_us, double qsort_time);

int may be 16-bit

Code like (1 << 20) is certainly a problem.

If not wanting to support such int, maybe a static_assert(INT_MAX >= 0x7FFFFFFF, "Only 32+ bit int");

... or re-write macro. Perhaps 0x100000?

Minor: long long maybe more than 64-bit

Rather than trying to build a 64-bit value out of some integer type of unspecified width:

//#define RADIX_RANGE_THRESHOLD_64BIT  (1ULL << 40) 
#define RADIX_RANGE_THRESHOLD_64BIT  (UINT64_C(1) << 40) 
#define RADIX_RANGE_THRESHOLD_64BIT  ((uint64_t)1 << 40) 

Unclear comment

int byte_patterns[8]; // 0=random, 1=fixed, 2=sequential

Why magic number 8?

float

quick_analyze_float32() should detail how it handles NaNs and +/-0.0. As is it appears, NaNs invoke undefined behavior with return (fa > fb) - (fa < fb); as the sorting process loses total ordering.

quick_analyze_float32() assumes that a float has 8 exponent bits and 23 significand (mantissa) bits. Surely this is common (IEEE-754 binary32), yet it is not required by C. Some judicious static_assert()s would help detect the odd ones.

Too many naked magic numbers for my taste. Consider using named macros or named constants.

Use consistent type

Save time: enable more warnings

void insertion_sort_float32(float arr[], size_t n) {
    // for (int i = 1; i < n; i++) {
    for (size_t i = 1; i < n; i++) {

When moving to size_t avoid uncontrolled subtraction:

//static void quick_sort_recursive(uint32_t arr[], int left, int right) {
    //if (right - left <= INSERTION_SORT_THRESHOLD) {
static void quick_sort_recursive(uint32_t arr[], size_t left, size_t right) {
    if (right <= left + INSERTION_SORT_THRESHOLD) {

Minor: Consider integers for the analysis math

The thresholds do not require FP math. Scale to say 0-100 or 0-1000 and use integer types.

// analysis->sorted_ratio > 0.95f
analysis->sorted_ratio > 95 /* percent */
\$\endgroup\$
3
  • \$\begingroup\$ "Why magic number 8?" There are 8 bit positions in a byte/uint8_t, in all likelihood. Why call an octet byte? \$\endgroup\$ Commented Oct 14 at 15:34
  • 1
    \$\begingroup\$ @greybeard Yes there are 8 bits in a uint8_t, yet that fact is not obviously relevant to code int byte_patterns[8];. Avoiding magic numbers remains a good coding practice. \$\endgroup\$ Commented Oct 14 at 15:39
  • 1
    \$\begingroup\$ All valid points. I was rushing to get the code reviewed and got tangled up with tests and debugging, mixing and matching working and broken blocks. Looking at it with fresh eyes now... well, it's a mess. I have so much to fix. Thank you for taking the time! \$\endgroup\$ Commented Oct 14 at 18:50

You must log in to answer this question.

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.