The following is OK for low-dimensional (<= ~5) spaces, but only sufficient as a pre-conditioning step and not an efficient solution for high-dimensional (10+) spaces for reasons I will discuss.
- If you don't already have input inequalities, then to generate example data, set up some random points and calculate a convex hull
- If you don't already know a feasible point well inside the hull, generate one via linear programming
- Calculate a halfspace intersection.
- From the halfspace intersection vertices, perform an eigendecomposition and calculate a whitening transform.
- Affine-adjust the whitening matrix to put intersection extrema on the hyperplanes of the unit hypercube.
- Invert to get the colouring transform. Intuitively this is a transform from unit space to a (potentially much narrower) halfspace intersection.
- Generate a locus of random points in unit space.
- Apply the colouring transform.
- Apply the inequality matrix to check for containment.
- Reject non-contained points.
The final rejection step (applying the inequality matrix and discarding non-contained points) has a fractional yield that is computationally acceptable for low-dimensional spaces, but this yield gets exponentially worse for high-dimensional spaces. Still, it would be faster than the loop shown in the question.
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import milp, LinearConstraint, Bounds
from scipy.spatial import ConvexHull, HalfspaceIntersection
def get_feasible_point(halfspaces: np.ndarray, epsilon: float = 1e-3) -> np.ndarray:
    """
    Find an arbitrary point comfortably inside the polytope described by `halfspaces`.

    qhull asks that the feasible point be "clearly" within the hull, so every face
    is tightened inward by `epsilon`. Hacky but fast enough.

    :param halfspaces: (m, n+1) array; each row encodes c0*x0 + ... + c{n-1}*x{n-1} + cn <= 0.
    :param epsilon: strict-interior margin pulled in from every face.
    :return: an n-vector strictly inside all halfspaces.
    :raises ValueError: if the LP solver reports infeasibility.
    """
    # Rearranged: coeffs @ x <= -offset - epsilon
    dims = halfspaces.shape[1] - 1
    coeffs = halfspaces[:, :-1]
    offsets = halfspaces[:, -1]
    feasibility = LinearConstraint(A=coeffs, ub=-offsets - epsilon)
    # Zero objective: any feasible point will do. All variables continuous, unbounded.
    result = milp(
        c=np.zeros(dims), integrality=0, bounds=Bounds(), constraints=feasibility,
    )
    if not result.success:
        raise ValueError(result.message)
    return result.x
def get_whitening(intersections: np.ndarray) -> np.ndarray:
    """
    Compute a PCA whitening (as in 'white noise') matrix W such that the covariance of
    `intersections @ W` is approximately the identity.
    https://en.wikipedia.org/wiki/Whitening_transformation

    :param intersections: (m, n) array of points, one per row.
    :return: (n, n) whitening matrix.
    """
    # The covariance matrix is symmetric, so use eigh rather than eig: eigh guarantees
    # real eigenvalues/eigenvectors (eig can return spuriously complex results from
    # floating-point asymmetry) and is numerically more stable for symmetric input.
    w, v = np.linalg.eigh(np.cov(intersections.T))
    # Dividing each eigenvector column by sqrt(its eigenvalue): v @ diag(1/sqrt(w)).
    return v / np.sqrt(w)
def get_fit(c: np.ndarray, validate: bool = True) -> tuple[np.ndarray, HalfspaceIntersection]:
    """
    From an inequality matrix `c` whose last column is the offset, calculate a colouring
    transformation from unit space to a frame closely bounding the halfspace intersection polytope.

    :param c: (m, n+1) array; each row encodes a@x + b <= 0 with the offset b in the last column.
    :param validate: when True, sanity-check the feasible point and polytope vertices.
    :return: the (n+1, n+1) homogeneous colouring matrix (row-vector convention:
             apply as [x, 1] @ colouring) and the HalfspaceIntersection object.
    """
    n = c.shape[1] - 1  # spatial dimensionality; last column of c is the offset
    feasible = get_feasible_point(c)
    if validate:
        # The feasible point, homogenised to [x, 1], must be strictly inside every halfspace.
        check = c @ np.concat((feasible, [1]))
        assert (check < -1e-4).all()
    halfspaces = HalfspaceIntersection(halfspaces=c, interior_point=feasible)
    intersections = halfspaces.intersections
    if validate:
        # Every polytope vertex must satisfy all inequalities to within tolerance.
        check = c @ np.hstack((
            intersections, np.ones((len(intersections), 1)),
        )).T
        assert (check < 1e-12).all()
    whiten = get_whitening(intersections)
    whitened = intersections @ whiten
    # What follows is not strictly a whitening matrix anymore, but a pseudo-whitening matrix that
    # is combined with an affine transformation of the halfspace intersections to unit space.
    wmin = whitened.min(axis=0)
    wmax = whitened.max(axis=0)
    scale = 1/(wmax - wmin)
    # Homogeneous (row-vector) form: [x, 1] @ M = [(x @ whiten - wmin) * scale, 1],
    # mapping the whitened extrema onto the faces of the unit hypercube.
    whiten_homogeneous = np.zeros((n + 1, n + 1))
    whiten_homogeneous[:n, :n] = whiten @ np.diag(scale)
    whiten_homogeneous[-1, :-1] = -wmin * scale
    whiten_homogeneous[-1, -1] = 1
    # Inverting yields the colouring transform: unit space -> bounding frame.
    colouring = np.linalg.inv(whiten_homogeneous)
    # Scrub numerical noise: the last column of a homogeneous transform is exactly [0, ..., 0, 1].
    colouring[:, -1] = 0
    colouring[-1, -1] = 1
    return colouring, halfspaces
def apply_fit(
    colouring: np.ndarray, white_points: np.ndarray, halfspaces: np.ndarray,
) -> tuple[np.ndarray, np.ndarray]:
    """
    Map uniformly-distributed "white" (as in noise) unit-space points through the
    colouring transformation from get_fit(), then test each transformed point
    against the halfspace inequalities.

    :param colouring: (n+1, n+1) homogeneous colouring matrix.
    :param white_points: (m, n) points uniformly distributed in unit space.
    :param halfspaces: (k, n+1) inequality matrix, offset in the last column.
    :return: the (m, n) transformed points and their (m,) boolean containment mask.
    """
    count = white_points.shape[0]
    # Homogenise to [x, 1] rows, then apply the colouring transform.
    homogeneous = np.hstack((white_points, np.ones((count, 1))))
    coloured = homogeneous @ colouring
    # Containment: a@x + b <= 0 for every halfspace row [a, b].
    inside = np.all(coloured @ halfspaces.T <= 0, axis=1)
    return coloured[:, :-1], inside
def demo() -> None:
    """
    Build an example narrow polytope, fit the colouring transform, report the
    random-point acceptance yield, and plot the result in 3D.
    """
    rand = np.random.default_rng(seed=0)
    n = 3 # eventually 10 for OP
    print('Dimensions:', n)
    # Form an example hull guaranteed to be feasible and having some dimensions with very narrow ranges
    p = 100
    example_points = np.empty((p, n))
    example_points[:, :-1] = rand.uniform(
        low= [0.1,0.70],
        high=[0.2,0.71],
        size=(p, n-1),
    )
    # Highly-correlated axis example
    example_points[:, -1] = example_points[:, 1] + rand.uniform(low=-0.005, high=0.005, size=p)
    # s.t. example_points @ hull.equations[:, :-1].T + hull.equations[:, -1] <= 0
    hull = ConvexHull(example_points)
    c = hull.equations
    print('Inequalities:', len(c))
    colouring, halfspaces = get_fit(c)
    print('Colouring matrix:')
    np.set_printoptions(precision=2, linewidth=200)
    print(colouring)
    # Sample a large uniform locus in unit space; the printed yield is the fraction
    # of samples that land inside the polytope after colouring.
    m = 1_000_000
    transformed, contained = apply_fit(colouring, white_points=rand.random((m, n)), halfspaces=c)
    print(f'{np.count_nonzero(contained)}/{m} random point yield')
    fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
    ax.scatter(*halfspaces.intersections.T, label='Halfspace intersections')
    # Plot every 100th sample with heavy transparency so the figure stays responsive.
    ax.scatter(*transformed[contained][::100].T, alpha=0.05)
    ax.scatter(*transformed[~contained][::100].T, alpha=0.05)
    ax.set_title('Bounding and contained random locus')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.legend()
    plt.show()


if __name__ == '__main__':
    demo()
Dimensions: 3
Inequalities: 62
Colouring matrix:
[[ 9.91e-02 9.36e-04 3.61e-03 0.00e+00]
[ 6.67e-04 -9.85e-03 -1.57e-02 0.00e+00]
[-7.90e-05 -5.90e-03 3.69e-03 0.00e+00]
[ 1.01e-01 7.13e-01 7.10e-01 1.00e+00]]
556320/1000000 random point yield
