diff --git a/geos-geomechanics/src/geos/geomechanics/model/MohrCircle.py b/geos-geomechanics/src/geos/geomechanics/model/MohrCircle.py index b0916da13..228722ab6 100644 --- a/geos-geomechanics/src/geos/geomechanics/model/MohrCircle.py +++ b/geos-geomechanics/src/geos/geomechanics/model/MohrCircle.py @@ -137,4 +137,7 @@ def computePrincipalComponents( self: Self, stressVector: npt.NDArray[ np.float6 Stress vector must follow GEOS convention (XX, YY, ZZ, YZ, XZ, XY) """ # Get stress principal components - self.p3, self.p2, self.p1 = ( computeStressPrincipalComponentsFromStressVector( stressVector ) ) + vp1, vp2, vp3 = np.unstack( + np.swapaxes( computeStressPrincipalComponentsFromStressVector( stressVector.reshape( -1, 6 ) )[ 0 ], 0, + 1 ) ) + self.p1, self.p2, self.p3 = ( vp1[ 0 ], vp2[ 0 ], vp3[ 0 ] ) diff --git a/geos-geomechanics/src/geos/geomechanics/processing/geomechanicsCalculatorFunctions.py b/geos-geomechanics/src/geos/geomechanics/processing/geomechanicsCalculatorFunctions.py index 84f21f0fe..745f98b9f 100644 --- a/geos-geomechanics/src/geos/geomechanics/processing/geomechanicsCalculatorFunctions.py +++ b/geos-geomechanics/src/geos/geomechanics/processing/geomechanicsCalculatorFunctions.py @@ -672,21 +672,41 @@ def criticalPorePressure( assert ( frictionAngle >= 0.0 ) and ( frictionAngle < np.pi / 2.0 ), ( "Fristion angle " + "must range between 0 and pi/2." 
) - minimumPrincipalStress: npt.NDArray[ np.float64 ] = np.full( stressVector.shape[ 0 ], np.nan ) - maximumPrincipalStress: npt.NDArray[ np.float64 ] = np.copy( minimumPrincipalStress ) - for i in range( minimumPrincipalStress.shape[ 0 ] ): - p3, p2, p1 = computeStressPrincipalComponentsFromStressVector( stressVector[ i ] ) - minimumPrincipalStress[ i ] = p3 - maximumPrincipalStress[ i ] = p1 + pca, _ = computeStressPrincipalComponentsFromStressVector( stressVector ) # assertion frictionAngle < np.pi/2., so sin(frictionAngle) != 1 cohesiveTerm: npt.NDArray[ np.floating[ Any ] ] = ( rockCohesion * np.cos( frictionAngle ) / ( 1.0 - np.sin( frictionAngle ) ) ) - residualTerm: npt.NDArray[ np.floating[ Any ] ] = ( 3.0 * minimumPrincipalStress - maximumPrincipalStress ) / 2.0 + residualTerm: npt.NDArray[ np.floating[ Any ] ] = ( 3.0 * np.min( pca, axis=1 ) - np.max( pca, axis=1 ) ) / 2.0 return cohesiveTerm + residualTerm +def principalAxesAndDirections( + stressVector: npt.NDArray[ np.float64 ], ) -> tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ] ]: + r"""Getting the principal axes and directions separately by fast eigh decomposition. + + Beware they are ordered in reverse natural ordre so that p1>=p2>=p3. + + Args: + stressVector (npt.NDArray[np.float64]): stress vector of Nx6 dims + (:math:`\sigma` - Pa). + + Returns: + tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ]: tuple containing principal stresses values and directions + (p1, p2, p3) and (d1, d2, d3) where p1>=p2>=p3 and d1, d2, d3 are the corresponding eigenvectors. + + """ + assert stressVector is not None, "Stress vector must be defined" + assert stressVector.shape[ 1 ] == 6, "Stress vector must be of size 6." 
+ + principalStresses: npt.NDArray[ np.float64 ] = np.empty( shape=( stressVector.shape[ 0 ], 3 ) ) + principalDirs: npt.NDArray[ np.float64 ] = np.empty( shape=( stressVector.shape[ 0 ], 3, 3 ) ) + principalStresses, principalDirs = computeStressPrincipalComponentsFromStressVector( stressVector ) + + return principalStresses, principalDirs + + def criticalPorePressureThreshold( pressure: npt.NDArray[ np.float64 ], criticalPorePressure: npt.NDArray[ np.float64 ] ) -> npt.NDArray[ np.float64 ]: r"""Compute the critical pore pressure threshold. @@ -882,62 +902,49 @@ def shearCapacityUtilization( traction: npt.NDArray[ np.float64 ], rockCohesion: def computeStressPrincipalComponentsFromStressVector( - stressVector: npt.NDArray[ np.float64 ], ) -> tuple[ float, float, float ]: + stressVector: npt.NDArray[ np.float64 ], ) -> tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ] ]: """Compute stress principal components from stress vector. Args: stressVector (npt.NDArray[np.float64]): stress vector. Returns: - tuple[float, float, float]: Principal components sorted in ascending + tuple[float, float, float]: Principal components sorted in descending order. """ - assert stressVector.size == 6, "Stress vector dimension is wrong." + assert stressVector.shape[ 1 ] == 6, "Stress vector dimension is wrong." stressTensor: npt.NDArray[ np.float64 ] = getAttributeMatrixFromVector( stressVector ) - return computeStressPrincipalComponents( stressTensor ) - - -def computeStressPrincipalComponents( stressTensor: npt.NDArray[ np.float64 ], ) -> tuple[ float, float, float ]: - """Compute stress principal components. - - Args: - stressTensor (npt.NDArray[np.float64]): stress tensor. - - Returns: - tuple[float, float, float]: Principal components sorted in ascending - order. 
- - """ - # get eigen values - e_val, e_vec = np.linalg.eig( stressTensor ) # sort principal stresses from smallest to largest - p3, p2, p1 = np.sort( e_val ) - return ( p3, p2, p1 ) + eVal, eVec = np.linalg.eigh( stressTensor ) + + return eVal[ :, ::-1 ], eVec[ :, :, ::-1 ] -def computeNormalShearStress( stressTensor: npt.NDArray[ np.float64 ], - directionVector: npt.NDArray[ np.float64 ] ) -> tuple[ float, float ]: +def computeNormalShearStress( + stressTensors: npt.NDArray[ np.float64 ], + directionVectors: npt.NDArray[ np.float64 ] ) -> tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ] ]: """Compute normal and shear stress according to stress tensor and direction. Args: - stressTensor (npt.NDArray[np.float64]): 3x3 stress tensor - directionVector (npt.NDArray[np.float64]): direction vector + stressTensors (npt.NDArray[np.float64]): stress vector. + directionVectors: npt.NDArray[ np.float64 ]: the direction vectors of the local frame. Returns: - tuple[float, float]: normal and shear stresses. + tuple[npt.NDArray[np.float64] , npt.NDArray[np.float64]]: normal and shear stresses. """ - assert stressTensor.shape == ( 3, 3 ), "Stress tensor must be 3x3 matrix." - assert directionVector.size == 3, "Direction vector must have 3 components" + assert stressTensors.shape[ 0 ] == directionVectors.shape[ 0 ], "First dim should be number of points" + assert stressTensors.shape[ 1: ] == ( 3, 3 ), "Stress tensor must be nx3x3 matrix." 
+ assert directionVectors.shape[ 1 ] == 3, "Direction vector must have 3 components" # normalization of direction vector - directionVector = directionVector / np.linalg.norm( directionVector ) + directionVectors = directionVectors / np.linalg.norm( directionVectors, axis=1, keepdims=True ) # stress vector - T: npt.NDArray[ np.float64 ] = np.dot( stressTensor, directionVector ) + T: npt.NDArray[ np.float64 ] = np.einsum( 'nij,nj->ni', stressTensors, directionVectors ) # normal stress - sigmaN: float = np.dot( T, directionVector ) + sigmaN: npt.NDArray[ np.float64 ] = np.einsum( 'ni,ni->n', T, directionVectors ) # shear stress - tauVec: npt.NDArray[ np.float64 ] = T - np.dot( sigmaN, directionVector ) - tau: float = float( np.linalg.norm( tauVec ) ) + tauVec: npt.NDArray[ np.float64 ] = T - np.einsum( 'n,nj->nj', sigmaN, directionVectors ) + tau: npt.NDArray[ np.float64 ] = np.linalg.norm( tauVec, axis=1 ) return ( sigmaN, tau ) diff --git a/geos-geomechanics/tests/testsFunctionsGeomechanicsCalculator.py b/geos-geomechanics/tests/testsFunctionsGeomechanicsCalculator.py index 7fc8b4b5a..3f9875bb8 100644 --- a/geos-geomechanics/tests/testsFunctionsGeomechanicsCalculator.py +++ b/geos-geomechanics/tests/testsFunctionsGeomechanicsCalculator.py @@ -37,7 +37,7 @@ verticalStress: npt.NDArray[ np.float64 ] = effectiveStress[ :, 2 ] horizontalStress: npt.NDArray[ np.float64 ] = np.min( effectiveStress[ :, :2 ], axis=1 ) -stressVector: npt.NDArray[ np.float64 ] = np.array( [ 1.8, 2.5, 5.2, 0.3, 0.4, 0.1 ] ) +stressVector: npt.NDArray[ np.float64 ] = np.array( [ 1.8, 2.5, 5.2, 0.3, 0.4, 0.1 ] ).reshape( 1, 6 ) stressTensor: npt.NDArray[ np.float64 ] = getAttributeMatrixFromVector( stressVector ) @@ -250,28 +250,21 @@ def test_shearCapacityUtilization( self: Self ) -> None: self.assertSequenceEqual( np.round( obtained, 3 ).flatten().tolist(), expected ) - def test_computeStressPrincipalComponents( self: Self ) -> None: - """Test calculation of stress principal components from 
stress tensor.""" - obtained: list[ float ] = [ round( val, 3 ) for val in fcts.computeStressPrincipalComponents( stressTensor ) ] - expected: tuple[ float, float, float ] = ( 1.748, 2.471, 5.281 ) - self.assertSequenceEqual( tuple( obtained ), expected ) - def test_computeStressPrincipalComponentsFromStressVector( self: Self ) -> None: """Test calculation of stress principal components from stress vector.""" - obtained: list[ float ] = [ - round( val, 3 ) for val in fcts.computeStressPrincipalComponentsFromStressVector( stressVector ) - ] - expected: tuple[ float, float, float ] = ( 1.748, 2.471, 5.281 ) - self.assertSequenceEqual( tuple( obtained ), expected ) + obtained: npt.NDArray[ np.float64 ] = fcts.computeStressPrincipalComponentsFromStressVector( stressVector )[ 0 ] + expected: npt.NDArray[ np.float64 ] = np.array( [ 5.281, 2.471, 1.748 ] ).reshape( 1, -1 ) + assert np.linalg.norm( obtained - expected ) < 1e-3 def test_computeNormalShearStress( self: Self ) -> None: """Test calculation of normal and shear stress.""" - directionVector = np.array( [ 1.0, 1.0, 0.0 ] ) - obtained: list[ float ] = [ - round( val, 3 ) for val in fcts.computeNormalShearStress( stressTensor, directionVector ) - ] - expected: tuple[ float, float ] = ( 2.250, 0.606 ) - self.assertSequenceEqual( tuple( obtained ), expected ) + directionVector = np.array( [ 1.0, 1.0, 0.0 ] ).reshape( 1, 3 ) + dv = np.vstack( [ directionVector, directionVector ] ) + sv = np.vstack( [ stressTensor, stressTensor ] ) + obtained: tuple[ npt.NDArray[ np.float64 ], + npt.NDArray[ np.float64 ] ] = fcts.computeNormalShearStress( sv, dv ) + expected: tuple[ list[ float ], list[ float ] ] = ( [ 2.250, 2.250 ], [ 0.606, 0.606 ] ) + assert np.linalg.norm( [ obt - exp for obt, exp in zip( obtained, expected ) ] ) < 1e-3 if __name__ == "__main__": diff --git a/geos-mesh/src/geos/mesh/utils/genericHelpers.py b/geos-mesh/src/geos/mesh/utils/genericHelpers.py index d6c2321ad..2e1031ebd 100644 --- 
a/geos-mesh/src/geos/mesh/utils/genericHelpers.py +++ b/geos-mesh/src/geos/mesh/utils/genericHelpers.py @@ -7,12 +7,11 @@ from typing import ( Iterator, List, Sequence, Any, Union, Tuple ) from vtkmodules.util.numpy_support import ( numpy_to_vtk, vtk_to_numpy ) -from vtkmodules.vtkCommonCore import vtkIdList, vtkPoints, reference, vtkDataArray, vtkLogger, vtkFloatArray +from vtkmodules.vtkCommonCore import vtkIdList, vtkPoints, reference, vtkLogger from vtkmodules.vtkCommonDataModel import ( vtkUnstructuredGrid, vtkMultiBlockDataSet, vtkPolyData, vtkDataSet, vtkDataObject, vtkPlane, vtkCellTypes, vtkIncrementalOctreePointLocator, VTK_TRIANGLE, vtkSelection, vtkSelectionNode ) -from vtkmodules.vtkFiltersCore import ( vtk3DLinearGridPlaneCutter, vtkPolyDataNormals, vtkPolyDataTangents ) -from vtkmodules.vtkFiltersTexture import vtkTextureMapToPlane +from vtkmodules.vtkFiltersCore import ( vtk3DLinearGridPlaneCutter, vtkPolyDataNormals ) from vtkmodules.vtkFiltersGeometry import vtkDataSetSurfaceFilter, vtkGeometryFilter from vtkmodules.vtkFiltersGeneral import vtkDataSetTriangleFilter from vtkmodules.util.numpy_support import numpy_to_vtkIdTypeArray @@ -22,7 +21,7 @@ from geos.mesh.utils.multiblockHelpers import ( getBlockElementIndexesFlatten, getBlockFromFlatIndex ) from geos.utils.algebraFunctions import ( getAttributeMatrixFromVector, getAttributeVectorFromMatrix ) -from geos.utils.geometryFunctions import ( getChangeOfBasisMatrix, CANONICAL_BASIS_3D ) +from geos.utils.geometryFunctions import ( getChangeOfBasisMatrix, CANONICAL_BASIS_3D, _normalize, _cross ) from geos.utils.Logger import ( getLogger, Logger, VTKCaptureLog, RegexExceptionFilter ) from geos.utils.Errors import ( VTKError ) @@ -397,7 +396,7 @@ def createVertices( cellPtsCoord: list[ npt.NDArray[ np.float64 ] ], return points, cellVertexMapAll -def convertAttributeFromLocalToXYZForOneCell( +def convertAttributeFromLocalToXYZ( vector: npt.NDArray[ np.float64 ], localBasisVectors: tuple[ npt.NDArray[ 
np.float64 ], npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ] ] ) -> npt.NDArray[ np.float64 ]: @@ -421,10 +420,11 @@ def convertAttributeFromLocalToXYZForOneCell( transformMatrix: npt.NDArray[ np.float64 ] = getChangeOfBasisMatrix( localBasisVectors, CANONICAL_BASIS_3D ) # Apply transformation - arrayXYZ: npt.NDArray[ np.float64 ] = transformMatrix @ matrix3x3 @ transformMatrix.T + # arrayXYZ: npt.NDArray[ np.float64 ] = np.einsum( 'nij,njk,nlk->nil', transformMatrix, matrix3x3, transformMatrix ) + arrayXYZ: npt.NDArray[ np.float64 ] = np.einsum( 'nki,nkl,nlj->nij', transformMatrix, matrix3x3, transformMatrix ) # Convert back to GEOS type attribute and return - return getAttributeVectorFromMatrix( arrayXYZ, vector.size ) + return getAttributeVectorFromMatrix( arrayXYZ, vector.shape ) def getNormalVectors( surface: vtkPolyData ) -> npt.NDArray[ np.float64 ]: @@ -460,20 +460,15 @@ def getTangentsVectors( surface: vtkPolyData ) -> Tuple[ npt.NDArray[ np.float64 Returns: Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]: The tangents vectors of the input surface. """ - # Get first tangential component - vtkTangents: Union[ vtkFloatArray, None ] = surface.GetCellData().GetTangents() # type: ignore[no-untyped-call] - tangents1: npt.NDArray[ np.float64 ] - try: - tangents1 = vtk_to_numpy( vtkTangents ) + tangents1: npt.NDArray[ np.float64 ] = _normalize( vtk_to_numpy( surface.GetCellData().GetTangents() ) ) + # Compute second tangential component + normals: npt.NDArray[ np.float64 ] = getNormalVectors( surface ) + tangents2: npt.NDArray[ np.float64 ] = _cross( normals, tangents1 ) + tangents2 = _normalize( tangents2 ) except AttributeError as err: context: str = f"No tangential attribute found in the mesh. 
Use the computeTangents function beforehand.\n{ err }" raise VTKError( context ) from err - else: - # Compute second tangential component - normals: npt.NDArray[ np.float64 ] = getNormalVectors( surface ) - - tangents2: npt.NDArray[ np.float64 ] = np.cross( normals, tangents1, axis=1 ).astype( np.float64 ) return ( tangents1, tangents2 ) @@ -513,7 +508,7 @@ def getLocalBasisVectors( surfaceWithTangents: vtkPolyData = computeTangents( surfaceWithNormals, logger ) tangents = getTangentsVectors( surfaceWithTangents ) - return np.array( ( normals, *tangents ) ) + return np.stack( [ normals, *tangents ], axis=2 ) def computeNormals( @@ -581,94 +576,27 @@ def computeTangents( vtkPolyData: The surface with tangent attribute """ # Need to compute texture coordinates required for tangent calculation - surface1: vtkPolyData = computeSurfaceTextureCoordinates( triangulatedSurface, logger ) + # surface1: vtkPolyData = computeSurfaceTextureCoordinates( triangulatedSurface, logger ) # TODO: triangulate the surface before computation of the tangents if needed. 
# Compute tangents - vtkErrorLogger: Logger - if logger is None: - vtkErrorLogger = getLogger( "Compute Surface Tangents vtkError Logger", True ) - else: - vtkErrorLogger = logging.getLogger( f"{ logger.name } vtkError Logger" ) - vtkErrorLogger.setLevel( logging.INFO ) - vtkErrorLogger.addHandler( logger.handlers[ 0 ] ) - vtkErrorLogger.propagate = False - - vtkLogger.SetStderrVerbosity( vtkLogger.VERBOSITY_ERROR ) - vtkErrorLogger.addFilter( RegexExceptionFilter() ) # will raise VTKError if captured VTK Error - - with VTKCaptureLog() as capturedLog: - - tangentFilter: vtkPolyDataTangents = vtkPolyDataTangents() - tangentFilter.SetInputData( surface1 ) - tangentFilter.ComputeCellTangentsOn() - tangentFilter.ComputePointTangentsOff() - tangentFilter.Update() - surfaceOut: vtkPolyData = tangentFilter.GetOutput() - - capturedLog.seek( 0 ) - captured = capturedLog.read().decode() - - if captured != "": - vtkErrorLogger.error( captured.strip() ) - - if surfaceOut is None: - raise VTKError( "Something went wrong in VTK calculation." ) + ptArray: npt.NDArray[ np.float64 ] = vtk_to_numpy( triangulatedSurface.GetPoints().GetData() ) + # Cooking the indexes + ids: list[ list[ int ] ] = [ [ + triangulatedSurface.GetCell( i ).GetPointIds().GetId( j ) + for j in range( triangulatedSurface.GetCell( i ).GetNumberOfPoints() ) + ] for i in range( triangulatedSurface.GetNumberOfCells() ) ] + # to honor orientation t1 is always p1-p0 normalized !! Beware to pinched cells + array = _normalize( ptArray[ ids ][ :, 1, : ] - ptArray[ ids ][ :, 0, : ] ) # copy tangents attributes into filter input surface because surface attributes # are not transferred to the output (bug that should be corrected by Kitware) - array: vtkDataArray = surfaceOut.GetCellData().GetTangents() - if array is None: - raise VTKError( "Attribute Tangents is not in the mesh." 
) - - surface1.GetCellData().SetTangents( array ) - surface1.GetCellData().Modified() - surface1.Modified() - return surface1 - - -def computeSurfaceTextureCoordinates( - surface: vtkPolyData, - logger: Union[ Logger, None ] = None, -) -> vtkPolyData: - """Generate the 2D texture coordinates required for tangent computation. The dataset points are mapped onto a plane generated automatically. - - Args: - surface (vtkPolyData): The input surface. - logger (Union[Logger, None], optional): A logger to manage the output messages. - Defaults to None, an internal logger is used. - - Returns: - vtkPolyData: The input surface with generated texture map. - """ - # Need to compute texture coordinates required for tangent calculation - vtkErrorLogger: Logger - if logger is None: - vtkErrorLogger = getLogger( "Compute Surface Texture Coordinates vtkError Logger", True ) - else: - vtkErrorLogger = logging.getLogger( f"{ logger.name } vtkError Logger" ) - vtkErrorLogger.setLevel( logging.INFO ) - vtkErrorLogger.addHandler( logger.handlers[ 0 ] ) - vtkErrorLogger.propagate = False - - vtkLogger.SetStderrVerbosity( vtkLogger.VERBOSITY_ERROR ) - vtkErrorLogger.addFilter( RegexExceptionFilter() ) # will raise VTKError if captured VTK Error - - with VTKCaptureLog() as capturedLog: - - textureFilter: vtkTextureMapToPlane = vtkTextureMapToPlane() - textureFilter.SetInputData( surface ) - textureFilter.AutomaticPlaneGenerationOn() - textureFilter.Update() - - capturedLog.seek( 0 ) - captured = capturedLog.read().decode() - - if captured != "": - vtkErrorLogger.error( captured.strip() ) - return textureFilter.GetOutput() + triangulatedSurface.GetCellData().SetTangents( numpy_to_vtk( array ) ) + triangulatedSurface.GetCellData().Modified() + triangulatedSurface.Modified() + return triangulatedSurface def extractCellSelection( mesh: vtkUnstructuredGrid, ids: list[ int ] ) -> vtkUnstructuredGrid: diff --git a/geos-mesh/tests/test_QualityMetricSummary.py 
b/geos-mesh/tests/test_QualityMetricSummary.py index 91335f8d5..ccc8166a2 100644 --- a/geos-mesh/tests/test_QualityMetricSummary.py +++ b/geos-mesh/tests/test_QualityMetricSummary.py @@ -7,7 +7,7 @@ import pandas as pd import random as rd import pytest -from typing import Iterator +from typing import Iterator, Any from matplotlib.figure import Figure from geos.mesh.model.QualityMetricSummary import QualityMetricSummary, StatTypes @@ -24,7 +24,7 @@ cellType_all: tuple[ int, ...] = ( VTK_TRIANGLE, VTK_QUAD, VTK_TETRA, VTK_PYRAMID, VTK_WEDGE, VTK_HEXAHEDRON, VTK_POLYGON, VTK_POLYHEDRON ) -cellMetricIndexes_all: tuple[ int, ...] = ( +cellMetricIndexes_all: tuple[ Any, ...] = ( ( vtkMeshQuality.QualityMeasureTypes.EDGE_RATIO, vtkMeshQuality.QualityMeasureTypes.AREA, vtkMeshQuality.QualityMeasureTypes.CONDITION ), ( vtkMeshQuality.QualityMeasureTypes.MED_ASPECT_FROBENIUS, vtkMeshQuality.QualityMeasureTypes.AREA, @@ -41,7 +41,7 @@ ( vtkMeshQuality.QualityMeasureTypes.SHAPE, vtkMeshQuality.QualityMeasureTypes.VOLUME ), ) -metricIndexesFail_all: tuple[ int, ...] = ( +metricIndexesFail_all: tuple[ Any, ...] = ( ( vtkMeshQuality.QualityMeasureTypes.MED_ASPECT_FROBENIUS, vtkMeshQuality.QualityMeasureTypes.VOLUME, 200 ), ( vtkMeshQuality.QualityMeasureTypes.NORMALIZED_INRADIUS, vtkMeshQuality.QualityMeasureTypes.VOLUME, 200 ), ( vtkMeshQuality.QualityMeasureTypes.ODDY, vtkMeshQuality.QualityMeasureTypes.AREA, 200 ), @@ -58,7 +58,7 @@ class TestCase_StatsType: """Test case.""" __test__ = False statType: StatTypes - values: tuple[ float, ...] 
+ values: npt.NDArray[ Any ] expected: int | float @@ -135,7 +135,7 @@ def __generate_failed_test_data() -> Iterator[ TestCase ]: """ for metricIndexes, cellType in zip( metricIndexesFail_all, cellType_all, strict=True ): otherMetricIndexes: tuple = () - values: tuple[ float, float, float, float, int ] = ( 0., 0., 0., 0., 0., 0 ) + values: tuple[ float, float, float, float, int ] = ( 0., 0., 0., 0., 0 ) yield TestCase( metricIndexes, otherMetricIndexes, cellType, statTypes, values ) @@ -225,4 +225,4 @@ def test_QualityMetricSummary_plotSummaryFigure( test_case: TestCase ) -> None: assert fig is not None, "Figure is undefined" # metrics + global counts + cell count + legend nbAxesExp: int = len( test_case.cellMetricIndexes ) + len( test_case.otherMetricIndexes ) + 3 - assert len( fig.get_axes() ) == nbAxesExp, f"Number of Axes is expected to be {nbAxesExp}." \ No newline at end of file + assert len( fig.get_axes() ) == nbAxesExp, f"Number of Axes is expected to be {nbAxesExp}." diff --git a/geos-mesh/tests/test_genericHelpers_normalAndTangents.py b/geos-mesh/tests/test_genericHelpers_normalAndTangents.py index 5cfc3f276..abbc2235c 100644 --- a/geos-mesh/tests/test_genericHelpers_normalAndTangents.py +++ b/geos-mesh/tests/test_genericHelpers_normalAndTangents.py @@ -12,68 +12,109 @@ from vtkmodules.vtkCommonCore import vtkPoints from vtkmodules.util.numpy_support import vtk_to_numpy -from geos.mesh.utils.genericHelpers import ( computeSurfaceTextureCoordinates, computeTangents, computeNormals, - getLocalBasisVectors, getTangentsVectors, getNormalVectors, - convertAttributeFromLocalToXYZForOneCell ) +from geos.mesh.utils.genericHelpers import ( computeTangents, computeNormals, getLocalBasisVectors, getTangentsVectors, + getNormalVectors, convertAttributeFromLocalToXYZ ) from geos.utils.Errors import VTKError # yapf: disable pointsCoordsAll: list[ list[ list[ float ] ] ] = [ [ [ 0., 0., 0. ], [ 0., 1., 0. ], [ 0., 2., 0. ], [ 0., 2., 1. ], [ 0., 1., 1. ], [ 0., 0., 1. 
], ], - [ [ 0., 0., 0. ], [ 0., 1., 0. ], [ 0., 2., 0. ], [ 0., 2., 1. ], [ 1., 1., 1.5 ], [ 0., 0., 1. ], ], + [ [ 0., 0., 0. ], [ 0., 1., 0. ], [ 0., 2., 0. ], [ 0., 2., 1. ], [ 1., 1., 1. ], [ 0., 0., 1. ], ], ] trianglesAll: list[ list[ tuple[ int, int, int ] ] ] = [ - [ ( 0, 1, 5 ), ( 1, 4, 5 ), ( 1, 2, 3 ), ( 1, 4, 3 ) ], - [ ( 0, 1, 5 ), ( 1, 4, 5 ), ( 1, 2, 3 ), ( 1, 4, 3 ) ], + [ ( 0, 1, 5 ), ( 1, 4, 5 ), ( 1, 2, 3 ), ( 3, 4, 1 ) ], + [ ( 0, 1, 5 ), ( 1, 4, 5 ), ( 1, 2, 3 ), ( 3, 4, 1 ) ], ] -expectedNormalsAll: list[ npt.NDArray[ np.float64 ] ] = [ - np.array( [ [ 1., 0., 0. ], [ 1., 0., 0. ], [ 1., 0., 0. ], [ 1., 0., 0. ], ] ), - np.array( [ [ 1., 0., 0. ], [ 0.7276069, -0.48507124, -0.48507124 ], [ 1., 0., 0. ], - [ 0.7276069, 0.48507124, -0.48507124 ] ] ), + +B_015: npt.NDArray[ np.float64 ] = np.array([ + [1.0, 0.0, 0.0], # n + [0.0, 1.0, 0.0], # t1 + [0.0, 0.0, 1.0], # t2 +]) + +B_145: list[ npt.NDArray[ np.float64 ] ] = [ +np.array([ + [1.0, 0.0, 0.0], # n + [0.0, 0.0, 1.0], # t1 + [0.0, -1.0, 0.0], # t2 +]), +np.array([ + [ 0.577350, -0.577350, -0.577350], # n + [ 0.707107, 0.000000, 0.707107], # t1 + [-0.408248, -0.816497, 0.408248], # t2 +]) ] -expectedTangentsAll: list[ npt.NDArray[ np.float64 ] ] = [ - np.array( [ [ [ 0., 2., 0. ], [ -0., 2., 0. ], [ 0., 2., 0. ], [ 0., 2., 0. ] ], - [ [ 0., 0., 2. ], [ 0., -0., 2. ], [ 0., 0., 2. ], [ 0., 0., 2. ], - ] ] ), - np.array( [ [ [ 0., 2., 0. ], [ 0.8301887, 2., -0.754717 ], [ 0., 2., 0. ], [ -0.8301887, 2., 0.754717 ] ], - [ [ 0., 0., 2. ], [ 1.33623397, 0.14643663, 1.85791445 ], [ 0., 0., 2. 
], - [ 1.33623397, -0.14643663, 1.85791445 ] ] ] ), + +B_123: npt.NDArray[ np.float64 ] = np.array([ + [1.0, 0.0, 0.0], # n + [0.0, 1.0, 0.0], # t1 + [0.0, 0.0, 1.0], # t2 +]) + +B_341: list[ npt.NDArray[ np.float64 ] ] = [ +np.array([ + [1.0, 0.0, 0.0], # n + [0.0, -1.0, 0.0], # t1 + [0.0, 0.0, -1.0], # t2 +]), +np.array([ + [ 0.577350, 0.577350, -0.577350], # n + [ 0.707107, -0.707107, 0.000000], # t1 + [-0.408248, -0.408248, -0.816497], # t2 +]) ] -expectedTexturedCoordsAll: list[ npt.NDArray[ np.float64 ] ] = [ - np.array( [ [ 0., 0. ], [ 0.5, 0. ], [ 1., 0. ], [ 1., 1. ], [ 0.5, 1. ], [ 0., 1. ], - ] ), - np.array( [ [ [ 0., 0. ], [ 0.5, 0. ], [ 1., 0. ], [ 1., 0.41509435 ], [ 0.5, 1. ], [ 0., 0.41509435 ] ], - ] ), + +expectedBasisAll: list[ npt.NDArray[ np.float64 ] ] = [ + np.array([B_015.T, B_145[0].T, B_123.T, B_341[0].T]), + np.array([B_015.T, B_145[1].T, B_123.T, B_341[1].T]), ] + # Same attribute for all cells, test for vector size 3, 6, 9 -attributes: list[ npt.NDArray[ np.float64 ] ] = [ np.arange( 1., 4., dtype=np.float64 ), np.arange( 1., 7., dtype=np.float64 ), np.arange( 1., 10., dtype=np.float64 ) +attributes: list[ npt.NDArray[ np.float64 ] ] = [ np.arange( 1., 4., dtype=np.float64 ).reshape(1,3), +np.arange( 1., 7., dtype=np.float64 ).reshape(1,6), np.arange( 1., 10., dtype=np.float64 ).reshape(1,9) ] -expectedAttributesXYZ: list[ list[ npt.NDArray[ np.float64 ] ] ] = [ - [ - np.array( [ [ 1., 8., 12. ], [ 1., 8., 12. ], [ 1., 8., 12. ], [ 1., 8., 12. ], - ] ), - np.array( [ [ 1., 8., 12., 16., 10., 12. ], [ 1., 8., 12., 16., 10., 12. ], [ 1., 8., 12., 16., 10., 12. ], - [ 1., 8., 12., 16., 10., 12. ] ] ), - np.array( [ [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], - [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], - [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], [ 1., 8., 12., 16., 10., 12., 28., 16., 18. 
] ] ) + +# -2.309401, 2.828427,-0.816497 +expectedAttributesXYZ: list[ list[ list[ npt.NDArray[ np.float64 ] ] ] ] = [[ + [ + np.array( [ 1., 2., 3. ] ), + np.array( [ 1., 3., 2. ] ), + np.array( [ 1., 2., 3. ] ), + np.array( [ 1., 2., 3. ] ), ], + [ + np.array( [ 1., 2., 3., 4., 5., 6.] ), + np.array( [ 1., 3., 2., -4., 6. , -5.] ), + np.array( [ 1., 2., 3., 4., 5., 6.] ), + np.array( [ 1., 2., 3., 4., -5., -6.] ) ], + [ + + np.array( [1.,2.,3.,4.,5.,6.,7.,8.,9.] ), + np.array( [1., 3., 2., -7., 6., -5., -4., 9., -8. ] ), + np.array( [1.,2.,3.,4.,5.,6.,7.,8.,9.] ), + np.array( [1.,2.,3., 4., -5., -6., 7., -8., -9.] ) + ] ], # case 1 [ - np.array( [ [ 1., 8., 12. ], [ 7.26440201, 8.29962517, 11.73002787 ], [ 1., 8., 12. ], - [ 7.26440201, 8.29962517, 11.73002787 ] ] ), - np.array( [ [ 1., 8., 12., 16., 10., 12. ], - [ 33.11015535, -1.70942051, -4.10667954, 3.96829788, 5.78481908, 18.33796323 ], - [ 1., 8., 12., 16., 10., 12. ], - [ 0.86370966, 16.88802688, 9.54231792, 17.62557587, 12.93534579, 16.64449814 ] ] ), - np.array( [ [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], - [ 41.16704654, -3.95432477, -9.91866643, 0.51321919, -0.39322437, 23.20275907, 13.51039649, - 12.82015999, 23.3879596 - ], [ 1., 8., 12., 16., 10., 12., 28., 16., 18. ], - [ -1.35966323, 18.70673795, 9.9469796, 14.59669037, 15.22437721, 25.39830602, 32.57499969, - 14.01099303, 21.0552047 - ] ] ) - ], # case 2 + [ + np.array( [ 1., 2., 3. ] ), + np.array( [1.99999814e+00, 2.00000124e+00, 2.00000042e+00, 5.77350037e-01, 1.15470000e-06,-8.16496453e-01] ), + np.array( [ 1., 2. , 3. ] ), + np.array([ 1.99999814, 1.50000093,2.50000134, 0.28867502, 0.70710768,-0.40824823 ]) + ], + [ + np.array( [1.,2.,3.,4.,5.,6.] 
), + np.array( [-2.66666418,7.00000433,1.66666919,-5.1961574,1.88561586,-4.89897872]), + np.array( [1.,2.,3.,4.,5.,6.]), + np.array( [2.54962629e-17,-4.50000279e+00,1.04999973e+01,-2.88675726e-01,-4.24263915e+00,-8.16496453e-01]) + ], + [ + np.array([1.,2.,3.,4.,5.,6.,7.,8.,9.]), + np.array([-3.66666325,8.50000526,1.16666991,-7.79423469,4.71404139,-7.34846808,-6.06218458,0.47140223,-4.89897872]), + np.array([1.,2.,3.,4.,5.,6.,7.,8.,9.]), + np.array([-0.99999907,-6.00000371,12.9999962,0.57734933,-3.53553321,0.40824823,-1.15470078,-7.77817236,-2.04124113]) + ],# case 2 +] ] # yapf: enable @@ -83,13 +124,11 @@ class Options( IntEnum ): - RAW : bare mesh - NORMALS : only normals are calculated - - NORMALS_TEXTURED : normals and textured coordinates are computed - TANGENTS : normals and tangents are present in the mesh """ RAW = 0 NORMALS = 1 - NORMALS_TEXTURED = 2 - TANGENTS = 3 + TANGENTS = 2 @dataclass @@ -98,10 +137,10 @@ class TriangulatedSurfaceTestCase: pointsCoords: list[ list[ float ] ] triangles: list[ tuple[ int, int, int ] ] attribute: list[ npt.NDArray ] - expectedNormals: npt.NDArray - expectedTangents: npt.NDArray - expectedTexturedCoords: npt.NDArray - expectedAttributeInXYZ: list[ npt.NDArray ] + # expectedNormals: npt.NDArray + # expectedTangents: npt.NDArray + expectedBasis: npt.NDArray + expectedAttributeInXYZ: list[ list[ npt.NDArray ] ] options: Options mesh: vtkPolyData = field( init=False ) @@ -127,16 +166,9 @@ def __post_init__( self: Self ) -> None: if self.options == Options.NORMALS: self.mesh = computeNormals( polydata ) - elif self.options == Options.NORMALS_TEXTURED: - mesh: vtkPolyData = computeSurfaceTextureCoordinates( polydata ) - mesh2: vtkPolyData = computeNormals( mesh ) - self.mesh = mesh2 - elif self.options == Options.TANGENTS: - mesh = computeSurfaceTextureCoordinates( polydata ) - mesh2 = computeNormals( mesh ) - mesh3: vtkPolyData = computeTangents( mesh2 ) - self.mesh = mesh3 + mesh2 = computeNormals( polydata ) + self.mesh = 
computeTangents( mesh2 ) else: # Unknown cases and case 0 @@ -155,25 +187,7 @@ def __generateSurfacicTestCase( options: tuple[ Options, ...] ) -> Iterator[ Tri for opt in options: for i in range( len( pointsCoordsAll ) ): yield TriangulatedSurfaceTestCase( pointsCoordsAll[ i ], trianglesAll[ i ], attributes, - expectedNormalsAll[ i ], expectedTangentsAll[ i ], - expectedTexturedCoordsAll[ i ], expectedAttributesXYZ[ i ], opt ) - - -@pytest.mark.parametrize( "case", __generateSurfacicTestCase( options=( Options.RAW, ) ) ) -def test_computeTextureCoords( case: TriangulatedSurfaceTestCase ) -> None: - """Test the computation of texture coordinates.""" - stc: vtkPolyData = computeSurfaceTextureCoordinates( case.mesh ) - texturedMap: npt.NDArray[ np.float64 ] = vtk_to_numpy( stc.GetPointData().GetArray( "Texture Coordinates" ) ) - assert np.allclose( texturedMap, case.expectedTexturedCoords ) - - -@pytest.mark.parametrize( "case", __generateSurfacicTestCase( options=( Options.NORMALS_TEXTURED, ) ) ) -def test_computeTangents( case: TriangulatedSurfaceTestCase ) -> None: - """Test the computation of tangents.""" - surfaceWithTangents: vtkPolyData = computeTangents( case.mesh ) - tangents: npt.NDArray[ np.float64 ] = vtk_to_numpy( surfaceWithTangents.GetCellData().GetTangents() ) - - assert np.allclose( tangents, case.expectedTangents[ 0 ] ) + expectedBasisAll[ i ], expectedAttributesXYZ[ i ], opt ) @pytest.mark.parametrize( "case", __generateSurfacicTestCase( options=( Options.TANGENTS, ) ) ) @@ -184,8 +198,8 @@ def test_getTangents( case: TriangulatedSurfaceTestCase ) -> None: tangents1, tangents2 = getTangentsVectors( case.mesh ) - assert np.allclose( tangents1, case.expectedTangents[ 0 ], rtol=1e-3 ) - assert np.allclose( tangents2, case.expectedTangents[ 1 ], rtol=1e-3 ) + assert np.allclose( tangents1, case.expectedBasis[ :, :, 1 ], rtol=1e-3 ) + assert np.allclose( tangents2, case.expectedBasis[ :, :, 2 ], rtol=1e-3 ) @pytest.mark.parametrize( "case", 
__generateSurfacicTestCase( options=( Options.RAW, ) ) ) @@ -195,7 +209,7 @@ def test_computeNormals( case: TriangulatedSurfaceTestCase ) -> None: normals = vtk_to_numpy( surfaceWithNormals.GetCellData().GetNormals() ) - assert np.allclose( normals, case.expectedNormals, rtol=1e-3 ) + assert np.allclose( normals, case.expectedBasis[ :, :, 0 ], rtol=1e-3 ) @pytest.mark.parametrize( "case", __generateSurfacicTestCase( options=( Options.NORMALS, ) ) ) @@ -203,7 +217,7 @@ def test_getNormals( case: TriangulatedSurfaceTestCase ) -> None: """Test normals getter.""" normals = getNormalVectors( case.mesh ) - assert np.allclose( normals, case.expectedNormals, rtol=1e-3 ) + assert np.allclose( normals, case.expectedBasis[ :, :, 0 ], rtol=1e-3 ) @pytest.mark.parametrize( "case", @@ -214,11 +228,9 @@ def test_getNormals( case: TriangulatedSurfaceTestCase ) -> None: ) ) ) def test_getLocalBasis( case: TriangulatedSurfaceTestCase ) -> None: """Test local basis getter.""" - normals, tangents1, tangents2 = getLocalBasisVectors( case.mesh ) + basis = getLocalBasisVectors( case.mesh ) - assert np.allclose( normals, case.expectedNormals, rtol=1e-3 ) - assert np.allclose( tangents1, case.expectedTangents[ 0 ], rtol=1e-3 ) - assert np.allclose( tangents2, case.expectedTangents[ 1 ], rtol=1e-3 ) + assert np.linalg.norm( basis - case.expectedBasis ) < 1e-3 ######################################################################################### @@ -234,18 +246,6 @@ def emptySurface() -> vtkPolyData: return vtkPolyData() -def test_failingComputeSurfaceTextureCoords( emptySurface: vtkPolyData ) -> None: - """Test VTK error raising of texture coordinate calculation.""" - with pytest.raises( VTKError ): - computeSurfaceTextureCoordinates( emptySurface ) - - -def test_failingComputeTangents( emptySurface: vtkPolyData ) -> None: - """Test VTK error raising during tangent calculation.""" - with pytest.raises( VTKError ): - computeTangents( emptySurface ) - - def test_failingComputeNormals( 
emptySurface: vtkPolyData ) -> None: """Test VTK error raising of normals calculation.""" with pytest.raises( VTKError ): @@ -271,9 +271,7 @@ def test_failingGetNormals( emptySurface: vtkPolyData ) -> None: class AttributeConversionTestCase: """Test case for attribute conversion from local basis to XYZ basis for one cell.""" vector: npt.NDArray[ np.float64 ] - normal: npt.NDArray[ np.float64 ] - tangent1: npt.NDArray[ np.float64 ] - tangent2: npt.NDArray[ np.float64 ] + basis: npt.NDArray[ np.float64 ] expectedVectorXYZ: npt.NDArray[ np.float64 ] @@ -286,10 +284,9 @@ def __generateAttributeConversionTestCase() -> Iterator[ AttributeConversionTest for nvec, localAttribute in enumerate( attributes ): for ncell in range( testcase.mesh.GetNumberOfCells() ): - yield AttributeConversionTestCase( localAttribute, testcase.expectedNormals[ ncell ], - testcase.expectedTangents[ 0 ][ ncell ], - testcase.expectedTangents[ 1 ][ ncell ], - testcase.expectedAttributeInXYZ[ nvec ][ ncell ] ) + yield AttributeConversionTestCase( + localAttribute, testcase.expectedBasis[ ncell, :, : ].reshape( 1, 3, 3 ), + testcase.expectedAttributeInXYZ[ nvec ][ ncell ].reshape( 1, -1 ) ) @pytest.mark.parametrize( "testcase", __generateAttributeConversionTestCase() ) @@ -297,6 +294,5 @@ def test_convertAttributesToXYZ( testcase: AttributeConversionTestCase ) -> None """Test the conversion of one cell attribute from local to canonic basis.""" localAttr: npt.NDArray[ np.float64 ] = testcase.vector - attrXYZ: npt.NDArray[ np.float64 ] = convertAttributeFromLocalToXYZForOneCell( - localAttr, ( testcase.normal, testcase.tangent1, testcase.tangent2 ) ) + attrXYZ: npt.NDArray[ np.float64 ] = convertAttributeFromLocalToXYZ( localAttr, testcase.basis ) assert np.allclose( attrXYZ, testcase.expectedVectorXYZ, rtol=1e-6 ) diff --git a/geos-mesh/tests/test_meshQualityHelpers.py b/geos-mesh/tests/test_meshQualityHelpers.py index 2b077d96f..67a0e6add 100644 --- a/geos-mesh/tests/test_meshQualityHelpers.py +++ 
b/geos-mesh/tests/test_meshQualityHelpers.py @@ -361,4 +361,4 @@ def test_getAllCellTypesExtended() -> None: def test_getAllCellTypes() -> None: """Test of getAllCellTypes method.""" cellTypesExp: list[ int ] = [ VTK_TRIANGLE, VTK_QUAD, VTK_TETRA, VTK_PYRAMID, VTK_WEDGE, VTK_HEXAHEDRON ] - assert cellTypesExp == getAllCellTypes(), "Cell types differ." \ No newline at end of file + assert cellTypesExp == getAllCellTypes(), "Cell types differ." diff --git a/geos-processing/src/geos/processing/post_processing/GeomechanicsCalculator.py b/geos-processing/src/geos/processing/post_processing/GeomechanicsCalculator.py index da65b869c..960ddfd58 100644 --- a/geos-processing/src/geos/processing/post_processing/GeomechanicsCalculator.py +++ b/geos-processing/src/geos/processing/post_processing/GeomechanicsCalculator.py @@ -10,6 +10,10 @@ import numpy as np import numpy.typing as npt +import sys + +sys.path.insert( 0, "/data/pau901/SIM_CS/04_WORKSPACE/USERS/jfranc/geosPythonPackages/geos-processing/src" ) + import geos.geomechanics.processing.geomechanicsCalculatorFunctions as fcts from geos.mesh.utils.arrayModifiers import createAttribute @@ -47,6 +51,7 @@ - Total initial stress, total current stress and total stress ratio - Elastic stain - Real reservoir stress path and reservoir stress path in oedometric condition + - Average stress Principal values and direction The advanced geomechanics properties computed on the mesh are: - Fracture index and threshold @@ -146,11 +151,16 @@ RSP_REAL: AttributeEnum = PostProcessingOutputsEnum.RSP_REAL RSP_OED: AttributeEnum = PostProcessingOutputsEnum.RSP_OED STRESS_EFFECTIVE_RATIO_OED: AttributeEnum = PostProcessingOutputsEnum.STRESS_EFFECTIVE_RATIO_OED +PRINCIPAL_AXIS_VAL: AttributeEnum = PostProcessingOutputsEnum.PRINCIPAL_AXIS_VAL +PRINCIPAL_AXIS_DIR_1: AttributeEnum = PostProcessingOutputsEnum.PRINCIPAL_AXIS_DIR_1 +PRINCIPAL_AXIS_DIR_2: AttributeEnum = PostProcessingOutputsEnum.PRINCIPAL_AXIS_DIR_2 +PRINCIPAL_AXIS_DIR_3: 
AttributeEnum = PostProcessingOutputsEnum.PRINCIPAL_AXIS_DIR_3 BASIC_PROPERTIES: tuple[ AttributeEnum, ...] = ( BIOT_COEFFICIENT, COMPRESSIBILITY, COMPRESSIBILITY_OED, COMPRESSIBILITY_REAL, SPECIFIC_GRAVITY, STRESS_EFFECTIVE_RATIO_REAL, STRESS_TOTAL, STRESS_TOTAL_T0, STRESS_TOTAL_RATIO_REAL, LITHOSTATIC_STRESS, AVERAGE_STRAIN, STRESS_TOTAL_DELTA, - RSP_REAL, RSP_OED, STRESS_EFFECTIVE_RATIO_OED ) + RSP_REAL, RSP_OED, STRESS_EFFECTIVE_RATIO_OED, PRINCIPAL_AXIS_VAL, + PRINCIPAL_AXIS_DIR_1, PRINCIPAL_AXIS_DIR_2, PRINCIPAL_AXIS_DIR_3 ) # Advanced properties: CRITICAL_TOTAL_STRESS_RATIO: AttributeEnum = PostProcessingOutputsEnum.CRITICAL_TOTAL_STRESS_RATIO @@ -423,6 +433,10 @@ class BasicProperties: _rspReal: npt.NDArray[ np.float64 ] | None = None _rspOed: npt.NDArray[ np.float64 ] | None = None _effectiveStressRatioOed: npt.NDArray[ np.float64 ] | None = None + _principalAxesVal: npt.NDArray[ np.float64 ] | None = None + _principalAxesDir1: npt.NDArray[ np.float64 ] | None = None + _principalAxesDir2: npt.NDArray[ np.float64 ] | None = None + _principalAxesDir3: npt.NDArray[ np.float64 ] | None = None @property def biotCoefficient( self: Self ) -> npt.NDArray[ np.float64 ] | None: @@ -600,6 +614,14 @@ def getBasicPropertyValue( self: Self, name: str ) -> npt.NDArray[ np.float64 ] return self.rspOed elif name == STRESS_EFFECTIVE_RATIO_OED.attributeName: return self.effectiveStressRatioOed + elif name == PRINCIPAL_AXIS_VAL.attributeName: + return self._principalAxesVal + elif name == PRINCIPAL_AXIS_DIR_1.attributeName: + return self._principalAxesDir1 + elif name == PRINCIPAL_AXIS_DIR_2.attributeName: + return self._principalAxesDir2 + elif name == PRINCIPAL_AXIS_DIR_3.attributeName: + return self._principalAxesDir3 else: raise NameError( f"The property { name } is not a basic property." 
) @@ -896,7 +918,19 @@ def _computeBasicProperties( self: Self ) -> None: self._computeEffectiveStressRatioOed() self._computeReservoirStressPathOed() self._computeReservoirStressPathReal() + self._computePrincipalAxesAndDirections() + + self.logger.info( "All geomechanics basic properties have been successfully computed." ) + return + def _computePrincipalAxesAndDirections( self: Self ) -> None: + """Compute the Principal axis and directions.""" + self._basicProperties._principalAxesVal, dir = fcts.computeStressPrincipalComponentsFromStressVector( + self._mandatoryProperties._effectiveStress ) + self._basicProperties._principalAxesDir1, self._basicProperties._principalAxesDir2, self._basicProperties._principalAxesDir3 = np.unstack( + dir, axis=2 ) + self._attributesToCreate.extend( + [ PRINCIPAL_AXIS_VAL, PRINCIPAL_AXIS_DIR_1, PRINCIPAL_AXIS_DIR_2, PRINCIPAL_AXIS_DIR_3 ] ) self.logger.info( "All geomechanics basic properties have been successfully computed." ) return diff --git a/geos-processing/src/geos/processing/post_processing/SurfaceGeomechanics.py b/geos-processing/src/geos/processing/post_processing/SurfaceGeomechanics.py index 843ac9222..335ceeab9 100644 --- a/geos-processing/src/geos/processing/post_processing/SurfaceGeomechanics.py +++ b/geos-processing/src/geos/processing/post_processing/SurfaceGeomechanics.py @@ -13,7 +13,7 @@ from geos.mesh.utils.arrayModifiers import createAttribute from geos.mesh.utils.arrayHelpers import ( getArrayInObject, getAttributeSet, isAttributeInObject ) -from geos.mesh.utils.genericHelpers import ( getLocalBasisVectors, convertAttributeFromLocalToXYZForOneCell ) +from geos.mesh.utils.genericHelpers import ( getLocalBasisVectors, convertAttributeFromLocalToXYZ ) import geos.geomechanics.processing.geomechanicsCalculatorFunctions as fcts from geos.utils.pieceEnum import Piece from geos.utils.Logger import ( getLogger, Logger, CountVerbosityHandler, isHandlerInLogger, getLoggerHandlerType ) @@ -359,20 +359,19 @@ def 
__computeXYZCoordinates( Raises: ValueError: Error with the shape of attrArray or the computation of the attribute coordinates. """ - attrXYZ: npt.NDArray[ np.float64 ] = np.full_like( attrArray, np.nan ) + attrXYZ: npt.NDArray[ np.float64 ] = np.zeros_like( attrArray ) # Get all local basis vectors localBasis: npt.NDArray[ np.float64 ] = getLocalBasisVectors( self.outputMesh, self.logger ) - for i, cellAttribute in enumerate( attrArray ): - if len( cellAttribute ) not in ( 3, 6, 9 ): - raise ValueError( - f"Inconsistent number of components for attribute. Expected 3, 6 or 9 but got { len( cellAttribute.shape ) }." - ) + if len( attrArray[ 0 ] ) not in ( 3, 6, 9 ): + raise ValueError( + f"Inconsistent number of components for attribute. Expected 3, 6 or 9 but got { len( attrArray[0].shape ) }." + ) # Compute attribute XYZ components - cellLocalBasis: npt.NDArray[ np.float64 ] = localBasis[ :, i, : ] - attrXYZ[ i ] = convertAttributeFromLocalToXYZForOneCell( cellAttribute, cellLocalBasis ) + # cellLocalBasis: npt.NDArray[ np.float64 ] = localBasis[ :, i, : ] + attrXYZ = convertAttributeFromLocalToXYZ( attrArray, localBasis ) if not np.any( np.isfinite( attrXYZ ) ): raise ValueError( "Attribute new coordinates calculation failed." 
) diff --git a/geos-processing/src/geos/processing/pre_processing/MeshQualityEnhanced.py b/geos-processing/src/geos/processing/pre_processing/MeshQualityEnhanced.py index 84ad960f4..6690fbb30 100644 --- a/geos-processing/src/geos/processing/pre_processing/MeshQualityEnhanced.py +++ b/geos-processing/src/geos/processing/pre_processing/MeshQualityEnhanced.py @@ -778,6 +778,7 @@ def _computeSquishIndex( self: Self ) -> None: copyData: vtkUnstructuredGrid = vtkUnstructuredGrid() copyData.ShallowCopy( self._outputMesh ) points: vtkPoints = copyData.GetPoints() + for c in range( copyData.GetNumberOfCells() ): cell: vtkCell = copyData.GetCell( c ) # Applies only to polyhedra @@ -796,10 +797,12 @@ def _computeSquishIndex( self: Self ) -> None: for i in range( ptsIdsList.GetNumberOfIds() ): ptsIds.InsertNextValue( ptsIdsList.GetId( i ) ) faceCenter: npt.NDArray[ np.float64 ] = self._getCellCenter( face, ptsIds, points ) + # TODO use vtkFilter instead of reinventing the wheel a 1000th time !!! faceNormal: npt.NDArray[ np.float64 ] = self._getNormalVector( points, face ) vec: npt.NDArray[ np.float64 ] = cellCenter - faceCenter - angle: float = vtkMath.AngleBetweenVectors( vec, faceNormal ) # type: ignore[arg-type] + # TODO vtk Batch ?? + angle: float = vtkMath.AngleBetweenVectors( vec, faceNormal[ 0 ] ) # type: ignore[arg-type] squishIndex[ f ] = np.sin( angle ) newArray.InsertValue( c, np.nanmax( squishIndex ) ) @@ -861,4 +864,6 @@ def _getNormalVector( self: Self, points: vtkPoints, face: vtkCell ) -> npt.NDAr ptsCoords: npt.NDArray[ np.float64 ] = np.zeros( ( 3, 3 ), dtype=float ) for i in range( 3 ): points.GetPoint( facePtsIds.GetId( i ), ptsCoords[ i ] ) - return geom.computeNormalFromPoints( ptsCoords[ 0 ], ptsCoords[ 1 ], ptsCoords[ 2 ] ) + # TODO vectorize !!!
+ return geom.computeNormalFromPoints( np.array( [ ptsCoords[ 0 ] ] ), np.array( [ ptsCoords[ 1 ] ] ), + np.array( [ ptsCoords[ 2 ] ] ) ) diff --git a/geos-processing/tests/test_GeomechanicsCalculator.py b/geos-processing/tests/test_GeomechanicsCalculator.py index 78e319185..2f73baf18 100644 --- a/geos-processing/tests/test_GeomechanicsCalculator.py +++ b/geos-processing/tests/test_GeomechanicsCalculator.py @@ -8,6 +8,8 @@ from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid +from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridWriter + from geos.mesh.utils.arrayHelpers import isAttributeInObject from geos.processing.post_processing.GeomechanicsCalculator import ( GeomechanicsCalculator, BASIC_PROPERTIES, ADVANCED_PROPERTIES, LITHOSTATIC_STRESS ) @@ -36,3 +38,9 @@ def test_GeomechanicsCalculator( if computeAdvancedProperties: for attribute in ADVANCED_PROPERTIES: assert isAttributeInObject( output, attribute.attributeName, attribute.piece ) + + w = vtkXMLUnstructuredGridWriter() + w.SetInputData( output ) + w.SetFileName( "/data/pau901/SIM_CS/04_WORKSPACE/USERS/jfranc/tmp/testNicola.vtu" ) + w.Update() + w.Write() diff --git a/geos-processing/tests/test_MeshQualityEnhanced.py b/geos-processing/tests/test_MeshQualityEnhanced.py index e60c16eb1..6984c42d2 100644 --- a/geos-processing/tests/test_MeshQualityEnhanced.py +++ b/geos-processing/tests/test_MeshQualityEnhanced.py @@ -81,6 +81,7 @@ class TestCase: ] ) def test_MeshQualityEnhanced( dataSetTest: Any, case: int ) -> None: """Test of MeshQualityEnhanced filter.""" + print( f"test [{meshName_all[ case ]}]" ) test_case: TestCase = TestCase( dataSetTest( meshName_all[ case ] ), cellTypes_all[ case ], qualityMetrics_all[ case ], cellTypeCounts_all[ case ], metricsSummary_all[ case ] ) mesh: vtkUnstructuredGrid = test_case.mesh diff --git a/geos-processing/tests/test_MeshToMeshInterpolator.py b/geos-processing/tests/test_MeshToMeshInterpolator.py index 9a9eb937a..1a63de2ff 100644 --- 
def getAttributeMatrixFromVector( attrArray: npt.NDArray[ np.float64 ], ) -> npt.NDArray[ np.float64 ]:
    r"""Get the 3x3 attribute matrices from the attribute vectors.

    Component ordering follows the GEOS convention: a 6-component vector
    (XX, YY, ZZ, YZ, XZ, XY) expands to a symmetric matrix, a 9-component
    vector fills the full matrix (upper triangle from components 3-5, lower
    triangle from components 6-8), and a 3-component vector fills only the
    diagonal.

    Args:
        attrArray (npt.NDArray[np.float64]): Attribute values, either a batch
            of shape (n, 3|6|9) or, for backward compatibility, a single 1-D
            vector of size 3, 6 or 9.

    Raises:
        ValueError: If the number of components is not 3, 6 or 9.

    Returns:
        npt.NDArray[np.float64]: Matrices of shape (n, 3, 3) for a batch
        input, or (3, 3) for a single 1-D vector.
    """
    # Backward compatibility: treat a single vector as a batch of one and
    # squeeze the batch axis back out before returning.
    singleVector: bool = attrArray.ndim == 1
    vectors: npt.NDArray[ np.float64 ] = attrArray.reshape( 1, -1 ) if singleVector else attrArray

    nComp: int = vectors.shape[ 1 ]
    if nComp not in ( 3, 6, 9 ):
        raise ValueError( "Vectorial attribute must contain 3, 6 or 9 components." )

    matrices: npt.NDArray[ np.float64 ] = np.zeros( shape=( vectors.shape[ 0 ], 3, 3 ) )

    # diagonal terms
    matrices[ :, 0, 0 ] = vectors[ :, 0 ]
    matrices[ :, 1, 1 ] = vectors[ :, 1 ]
    matrices[ :, 2, 2 ] = vectors[ :, 2 ]

    # shear stress components
    if nComp == 6:
        # symmetric case: components 5, 4, 3 are XY, XZ, YZ respectively
        matrices[ :, 0, 1 ] = matrices[ :, 1, 0 ] = vectors[ :, 5 ]
        matrices[ :, 0, 2 ] = matrices[ :, 2, 0 ] = vectors[ :, 4 ]
        matrices[ :, 1, 2 ] = matrices[ :, 2, 1 ] = vectors[ :, 3 ]
    elif nComp == 9:
        # full (non-symmetric) case
        matrices[ :, 0, 1 ] = vectors[ :, 5 ]  # v1
        matrices[ :, 1, 0 ] = vectors[ :, 8 ]  # v9
        matrices[ :, 0, 2 ] = vectors[ :, 4 ]  # v5
        matrices[ :, 2, 0 ] = vectors[ :, 7 ]  # v8
        matrices[ :, 1, 2 ] = vectors[ :, 3 ]  # v4
        matrices[ :, 2, 1 ] = vectors[ :, 6 ]  # v7

    return matrices[ 0 ] if singleVector else matrices
attribute values from the matrix. Matrix to vector conversion is the following: @@ -124,7 +130,7 @@ def getAttributeVectorFromMatrix( attrMatrix: npt.NDArray[ np.float64 ], size: i Args: attrMatrix (npt.NDArray[np.float64]): Matrix of attribute values. - size (int): Size of the final vector. Values accepted are 3, 9 or 6. + shape (tuple[int, int, int]): Shape of the output vector (n, 3), (n, 6) or (n, 9). Raises: ValueError: The output vector size requested can only be 3, 9 or 6. @@ -133,27 +139,29 @@ def getAttributeVectorFromMatrix( attrMatrix: npt.NDArray[ np.float64 ], size: i Returns: npt.NDArray[np.float64]: Vector of attribute values. """ - attrArray: npt.NDArray[ np.float64 ] = np.full( size, np.nan ) - if size not in ( 3, 6, 9 ): + attrArray: npt.NDArray[ np.float64 ] = np.zeros( shape ) + if shape[ 1 ] not in ( 3, 6, 9 ): raise ValueError( "Requested output size can only be 3, 9 or 6 (symmetrical case)." ) - if attrMatrix.shape != ( 3, 3 ): + if attrMatrix.shape[ 1: ] != ( 3, 3 ): raise ValueError( "Input matrix shape must be (3,3)." 
) # Diagonal terms - attrArray[ :3 ] = np.diag( attrMatrix ) + attrArray[ :, 0 ] = attrMatrix[ :, 0, 0 ] + attrArray[ :, 1 ] = attrMatrix[ :, 1, 1 ] + attrArray[ :, 2 ] = attrMatrix[ :, 2, 2 ] # shear stress components if attrArray.size == 6: - attrArray[ 3 ] = attrMatrix[ 1, 2 ] - attrArray[ 4 ] = attrMatrix[ 0, 2 ] - attrArray[ 5 ] = attrMatrix[ 0, 1 ] + attrArray[ :, 3 ] = attrMatrix[ :, 1, 2 ] + attrArray[ :, 4 ] = attrMatrix[ :, 0, 2 ] + attrArray[ :, 5 ] = attrMatrix[ :, 0, 1 ] elif attrArray.size == 9: - attrArray[ 3 ] = attrMatrix[ 1, 2 ] - attrArray[ 4 ] = attrMatrix[ 0, 2 ] - attrArray[ 5 ] = attrMatrix[ 0, 1 ] - attrArray[ 6 ] = attrMatrix[ 2, 1 ] - attrArray[ 7 ] = attrMatrix[ 2, 0 ] - attrArray[ 8 ] = attrMatrix[ 1, 0 ] + attrArray[ :, 3 ] = attrMatrix[ :, 1, 2 ] + attrArray[ :, 4 ] = attrMatrix[ :, 0, 2 ] + attrArray[ :, 5 ] = attrMatrix[ :, 0, 1 ] + attrArray[ :, 6 ] = attrMatrix[ :, 2, 1 ] + attrArray[ :, 7 ] = attrMatrix[ :, 2, 0 ] + attrArray[ :, 8 ] = attrMatrix[ :, 1, 0 ] return attrArray diff --git a/geos-utils/src/geos/utils/geometryFunctions.py b/geos-utils/src/geos/utils/geometryFunctions.py index eb1843758..ed2a0164d 100644 --- a/geos-utils/src/geos/utils/geometryFunctions.py +++ b/geos-utils/src/geos/utils/geometryFunctions.py @@ -4,226 +4,263 @@ from typing import Any import numpy as np +from scipy.linalg import polar +from packaging.version import Version +from scipy import __version__ as __scipy_version__ import numpy.typing as npt -from geos.utils.PhysicalConstants import EPSILON __doc__ = """Functions to permform geometry calculations.""" -CANONICAL_BASIS_3D: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], - npt.NDArray[ np.float64 ] ] = ( np.array( [ 1.0, 0.0, 0.0 ] ), np.array( [ 0.0, 1.0, 0.0 ] ), - np.array( [ 0.0, 0.0, 1.0 ] ) ) +CANONICAL_BASIS_3D: npt.NDArray[ np.float64 ] = np.array( [ [ 1.0, 0.0, 0.0 ], [ 0.0, 1.0, 0.0 ], [ 0.0, 0.0, 1.0 ] ] ) +# for batch cross product +EPS = np.zeros( ( 3, 3, 3 ), dtype=int ) 
+EPS[ 0, 1, 2 ] = EPS[ 1, 2, 0 ] = EPS[ 2, 0, 1 ] = 1 +EPS[ 0, 2, 1 ] = EPS[ 2, 1, 0 ] = EPS[ 1, 0, 2 ] = -1 -def getChangeOfBasisMatrix( - basisFrom: tuple[ npt.NDArray[ np.floating[ Any ] ], npt.NDArray[ np.floating[ Any ] ], - npt.NDArray[ np.floating[ Any ] ] ], - basisTo: tuple[ npt.NDArray[ np.floating[ Any ] ], npt.NDArray[ np.floating[ Any ] ], - npt.NDArray[ np.floating[ Any ] ] ], -) -> Any: - """Get the change of basis matrix from basis basisFrom to basis basisTo. - - Let's define the basis (basisFrom) (b1, b2, b3) and basis (basisTo) (c1, c2, c3) - where b1, b2, b3, and c1, c2, c3 are the vectors of the basis in the canonic form. - This method returns the change of basis matrix P from basisFrom to basisTo. - - The coordinates of a vector Vb(vb1, vb2, vb3) from the basis B in the basis - C is then Vc = P.Vb - Args: - basisFrom (tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]): Origin basis - basisTo (tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]): Destination basis - - Returns: - npt.NDArray[np.floating[Any]]: Change of basis matrix. - """ - assert ( basisFrom[ 0 ].size == basisFrom[ 1 ].size ) and ( basisFrom[ 0 ].size == basisFrom[ 2 ].size ), ( - "Origin space vectors must have the same size." ) - - assert ( basisTo[ 0 ].size == basisTo[ 1 ].size ) and ( basisTo[ 0 ].size == basisTo[ 2 ].size ), ( - "Destination space vectors must have the same size." 
) +# (n,x,x) opertors +def _normalize( arr: npt.NDArray[ np.float64 ] ) -> npt.NDArray[ np.float64 ]: + """N generatlization of normalization.""" + return np.einsum( 'ni,n->ni', arr, 1 / np.linalg.norm( arr, axis=1 ) ) - # build the matrices where columns are the vectors of the bases - B = np.transpose( np.array( basisFrom ) ) - C = np.transpose( np.array( basisTo ) ) - # compute the inverse of C - C1: npt.NDArray[ np.floating[ Any ] ] = np.linalg.inv( C ) - # get the change of basis matrix - return np.dot( C1, B ) +def _transposeProd( basisTo: npt.NDArray[ np.float64 ], + basisFrom: npt.NDArray[ np.float64 ] ) -> npt.NDArray[ np.float64 ]: + """N generalization of transpose product.""" + return np.einsum( 'nlj,nkj->nlk', basisTo, basisFrom ) -def computeCoordinatesInNewBasis( vec: npt.NDArray[ np.floating[ Any ] ], - changeOfBasisMatrix: npt.NDArray[ np.floating[ Any ] ] ) -> Any: - """Computes the coordinates of a matrix from a basis B in the new basis B'. - Args: - vec (npt.NDArray[np.floating[Any]]): Vector to compute the new coordinates - changeOfBasisMatrix (npt.NDArray[np.floating[Any]]): Change of basis matrix +def _cross( vec1: npt.NDArray[ np.float64 ], vec2: npt.NDArray[ np.float64 ] ) -> npt.NDArray[ np.float64 ]: + """N generatlization of cross product.""" + return np.einsum( 'ijk,nj,nk->ni', EPS, vec1, vec2 ) - Returns: - npt.NDArray[np.floating[Any]]: The new coordinates of the matrix in the basis - B'. 
- """ - assert ( vec.size == changeOfBasisMatrix.shape[ 1 ] ), """The size of the vector - must be equal to the number of columns of and change of basis matrix.""" - return np.dot( changeOfBasisMatrix, vec ) +def _normBasis( basis: npt.NDArray[ np.float64 ] ) -> npt.NDArray[ np.float64 ]: + """Norm and orthonormalize basis vector wise.""" + # Q, R = np.linalg.qr( basis ) + if Version( __scipy_version__ ) > Version( "1.15.3" ): + U, _ = polar( basis ) + else: + U = np.zeros_like( basis ) + for i in range( basis.shape[ 0 ] ): + U[ i, :, : ], _ = polar( basis[ i, :, : ] ) -def computePlaneFrom3Points( - pt1: npt.NDArray[ np.floating[ Any ] ], - pt2: npt.NDArray[ np.floating[ Any ] ], - pt3: npt.NDArray[ np.floating[ Any ] ], -) -> tuple[ float, float, float, float ]: - """Compute the 4 coefficients of a plane equation. - - Let's defined a plane from the following equation: ax + by + cz + d = 0. - This function determines a, b, c, d from 3 points of the plane. - - Args: - pt1 (npt.NDArray[np.floating[Any]]): 1st point of the plane. - pt2 (npt.NDArray[np.floating[Any]]): 2nd point of the plane. - pt3 (npt.NDArray[np.floating[Any]]): 3rd point of the plane. - - Returns: - tuple[float, float, float, float]: Tuple of the 4 coefficients. - """ - # plane vectors - v1: npt.NDArray[ np.floating[ Any ] ] = pt2 - pt1 - v2: npt.NDArray[ np.floating[ Any ] ] = pt3 - pt1 - # normal vector - normal: npt.NDArray[ np.floating[ Any ] ] = np.cross( v1, v2 ) - assert np.linalg.norm( normal ) != 0, "Vectors of the plane must not be colinear." - # first 3 coefficients of the plane equation - a, b, c = normal - # last coefficient of the plane equation - d: float = -np.dot( normal, pt1 ) - return a, b, c, d - - -def getCellSideAgainstPlane( - cellPtsCoords: npt.NDArray[ np.floating[ Any ] ], - planePt: npt.NDArray[ np.floating[ Any ] ], - planeNormal: npt.NDArray[ np.floating[ Any ] ], -) -> bool: - """Get the side of input cell against input plane. 
- - Input plane is defined by a point on it and its normal vector. - - Args: - cellPtsCoords (npt.NDArray[np.floating[Any]]): Coordinates of the vertices of - the cell to get the side. - planePt (npt.NDArray[np.floating[Any]]): Point on the plane. - planeNormal (npt.NDArray[np.floating[Any]]): Normal vector to the plane. - - Returns: - bool: True if the cell is in the direction of the normal vector, - False otherwise. - """ - assert ( len( cellPtsCoords ) > 1 ), "The list of points must contains more than one element" - ptCenter: npt.NDArray[ np.floating[ Any ] ] = np.mean( cellPtsCoords, axis=0 ) - return getPointSideAgainstPlane( ptCenter, planePt, planeNormal ) - - -def getPointSideAgainstPlane( - ptCoords: npt.NDArray[ np.floating[ Any ] ], - planePt: npt.NDArray[ np.floating[ Any ] ], - planeNormal: npt.NDArray[ np.floating[ Any ] ], -) -> bool: - """Get the side of input point against input plane. - - Input plane is defined by a point on it and its normal vector. - - Args: - ptCoords (npt.NDArray[np.floating[Any]]): Coordinates of the point to get - the side. - planePt (npt.NDArray[np.floating[Any]]): Point on the plane. - planeNormal (npt.NDArray[np.floating[Any]]): Normal vector to the plane. - - Returns: - bool: True if the point is in the direction of the normal vector, - False otherwise. - """ - assert ptCoords.size == 3, "Point coordinates must have 3 components" - assert planeNormal.size == 3, "Plane normal vector must have 3 components" - assert planePt.size == 3, "Plane point must have 3 components" - vec: npt.NDArray[ np.floating[ Any ] ] = ptCoords - planePt - dot: float = np.dot( planeNormal, vec ) - assert abs( dot ) > EPSILON, "The point is on the plane." - return dot > 0 - + # det = np.linalg.det( Q ) + # Q[ det < 0 ] *= -1 -def computeAngleFromPoints( pt1: npt.NDArray[ np.float64 ], pt2: npt.NDArray[ np.float64 ], - pt3: npt.NDArray[ np.float64 ] ) -> float: - """Compute angle from 3 points. 
def getChangeOfBasisMatrix(
    basisFrom: npt.NDArray[ np.float64 ],
    basisTo: npt.NDArray[ np.float64 ],
) -> Any:
    """Get the change of basis matrix from basis basisFrom to basis basisTo.

    Let's define the basis (basisFrom) (b1, b2, b3) and basis (basisTo)
    (c1, c2, c3) where b1, b2, b3, and c1, c2, c3 are the vectors of the
    basis in the canonic form. This method returns the change of basis
    matrix P from basisFrom to basisTo.

    The coordinates of a vector Vb(vb1, vb2, vb3) from the basis B in the
    basis C is then Vc = P.Vb. Both bases are orthonormalized first, so the
    inverse of the destination basis reduces to its transpose.

    Args:
        basisFrom (npt.NDArray[np.float64]): Stack of origin bases of shape
            (n, 3, 3).
        basisTo (npt.NDArray[np.float64]): Stack of destination bases of
            shape (n, 3, 3), or a single (3, 3) basis repeated over the
            whole stack.

    Returns:
        npt.NDArray[np.float64]: Stack of change of basis matrices of shape
        (n, 3, 3).
    """
    basisFrom = _normBasis( basisFrom )
    # A single destination basis is broadcast against the whole origin stack.
    if len( basisTo.shape ) < len( basisFrom.shape ):
        basisTo = np.repeat( basisTo[ None, : ], basisFrom.shape[ 0 ], axis=0 )
    basisTo = _normBasis( basisTo )

    assert ( basisFrom.shape[ 1 ] == basisFrom.shape[ 2 ] ), (
        f"Origin space vectors must have the same size. shape: {basisFrom.shape}" )
    assert ( basisTo.shape[ 1 ] == basisTo.shape[ 2 ] ), ( "Destination space vectors must have the same size." )

    # basisTo is orthonormal after _normBasis, so its transpose is its
    # inverse; sanity-check orthonormality before using the transpose product.
    assert np.linalg.norm(
        _transposeProd( basisTo, basisTo ) - np.repeat( np.eye( 3 )[ None, : ], basisFrom.shape[ 0 ], axis=0 ) ) < 1e-6
    # get the change of basis matrix: P = basisTo^T . basisFrom
    return _transposeProd( basisTo, basisFrom )
+ +# Let's defined a plane from the following equation: ax + by + cz + d = 0. +# This function determines a, b, c, d from 3 points of the plane. + +# Args: +# pt1 (npt.NDArray[np.float64: 1st point of the plane. +# pt2 (npt.NDArray[np.float64: 2nd point of the plane. +# pt3 (npt.NDArray[np.float64: 3rd point of the plane. + +# Returns: +# tuple[float, float, float, float]: Tuple of the 4 coefficients. +# """ +# assert pt1.shape[0] == pt2.shape[0] and pt2.shape[0] == pt3.shape[0] + +# # plane vectors +# v1: npt.NDArray[ np.float64 ] = pt2 - pt1 +# v2: npt.NDArray[ np.float64 ] = pt3 - pt1 +# # normal vector +# normal: npt.NDArray[ np.float64 ] = np.einsum('ijk,nj,nk->ni', EPS, v1, v2) +# assert np.linalg.norm( normal ) != 0, "Vectors of the plane must not be colinear." +# # first 3 coefficients of the plane equation +# a, b, c = np.unstack( normal, axis=1 ) +# # last coefficient of the plane equation +# d: float = -np.einsum('ni,ni->n', normal, pt1 ) +# return a, b, c, d + +# def getCellSideAgainstPlane( +# cellPtsCoords: npt.NDArray[ np.float64 ], +# planePt: npt.NDArray[ np.float64 ], +# planeNormal: npt.NDArray[ np.float64 ], +# ) -> bool: +# """Get the side of input cell against input plane. + +# Input plane is defined by a point on it and its normal vector. + +# Args: +# cellPtsCoords (npt.NDArray[np.float64: Coordinates of the vertices of +# the cell to get the side. +# planePt (npt.NDArray[np.float64: Point on the plane. +# planeNormal (npt.NDArray[np.float64: Normal vector to the plane. + +# Returns: +# bool: True if the cell is in the direction of the normal vector, +# False otherwise. 
+# """ +# assert ( len( cellPtsCoords.shape[0] ) > 1 ), "The list of points must contains more than one element" +# ptCenter: npt.NDArray[ np.float64 ] = np.mean( cellPtsCoords, axis=0 ) +# return getPointSideAgainstPlane( ptCenter, planePt, planeNormal ) + +# def getPointSideAgainstPlane( +# ptCoords: npt.NDArray[ np.float64 ], +# planePt: npt.NDArray[ np.float64 ], +# planeNormal: npt.NDArray[ np.float64 ], +# ) -> bool: +# """Get the side of input point against input plane. + +# Input plane is defined by a point on it and its normal vector. + +# Args: +# ptCoords (npt.NDArray[np.float64: Coordinates of the point to get +# the side. +# planePt (npt.NDArray[np.float64: Point on the plane. +# planeNormal (npt.NDArray[np.float64: Normal vector to the plane. + +# Returns: +# bool: True if the point is in the direction of the normal vector, +# False otherwise. +# """ +# assert ptCoords.shape[1] == 3, "Point coordinates must have 3 components" +# assert planeNormal.shape[1] == 3, "Plane normal vector must have 3 components" +# assert planePt.shape[1] == 3, "Plane point must have 3 components" +# vec: npt.NDArray[ np.float64 ] = ptCoords - planePt +# dot: float = np.einsum('ni,ni->n', planeNormal, vec ) +# assert np.linalg.norm( dot ) > EPSILON, "The point is on the plane." +# return dot > 0 + +# def computeAngleFromPoints( pt1: npt.NDArray[ np.float64 ], pt2: npt.NDArray[ np.float64 ], +# pt3: npt.NDArray[ np.float64 ] ) -> float: +# """Compute angle from 3 points. + +# Args: +# pt1 (npt.NDArray[np.float64]): First point +# pt2 (npt.NDArray[np.float64]): Second point +# pt3 (npt.NDArray[np.float64]): Third point + +# Returns: +# float: Angle +# """ +# # compute vectors +# vec1: npt.NDArray[ np.float64 ] = pt1 - pt2 +# vec2: npt.NDArray[ np.float64 ] = pt3 - pt2 +# return computeAngleFromVectors( vec1, vec2 ) + +# def computeAngleFromVectors( +# vec1: npt.NDArray[ np.float64 ], +# vec2: npt.NDArray[ np.float64 ], +# ) -> float: +# """Compute angle from 2 vectors. 
+ +# Args: +# vec1 (npt.NDArray[np.float64]): First vector +# vec2 (npt.NDArray[np.float64]): Second vector + +# Returns: +# float: angle +# """ +# assert np.where( np.abs( np.linalg.norm( vec1, axis=0 ) ) ) == 0., "First vector cannot be null" +# assert np.where( np.abs( np.linalg.norm( vec2, axis=0 ) ) ) == 0., "Second vector cannot be null" +# # normalization +# vec1_norm: npt.NDArray[ np.float64 ] = np.einsum( 'ni,nj->ni', vec1, 1. / np.linalg.norm( vec1, axis=1, keepdims=True ) ) +# vec2_norm: npt.NDArray[ np.float64 ] = np.einsum( 'ni,nj->ni' , vec2, 1. / np.linalg.norm( vec2, axis=1, keepdims=True ) ) + +# # Use normalized vectors for dot product +# dot: float = np.einsum('ni,nj->n', vec1_norm, vec2_norm) + +# # Clamp to valid range for arccos +# dot = np.clip( dot, -1.0, 1.0 ) + +# # Handle 2D vs 3D cases +# if vec1.shape[1] == 2 and vec2.shape[1] == 2: +# # For 2D, use cross product to determine orientation +# cross: float = np.einsum('ijk,nj,nk->ni', EPS, vec1_norm, vec2_norm) +# angle: float = np.arccos( dot ) +# return angle if cross >= 0 else 2.0 * np.pi - angle +# else: +# # For 3D, return angle in [0, π] +# return np.arccos( dot ) + +# def computeCosineFromVectors( +# vec1: npt.NDArray[ np.float64 ], +# vec2: npt.NDArray[ np.float64 ], +# ) -> float: +# """Compute cosine from 2 vectors. 
+ +# Args: +# vec1 (npt.NDArray[np.float64]): First vector +# vec2 (npt.NDArray[np.float64]): Second vector + +# Returns: +# float: Cosine +# """ +# assert abs( np.linalg.norm( vec1 ) ) > 0., "First vector cannot be null" +# assert abs( np.linalg.norm( vec2 ) ) > 0., "Second vector cannot be null" +# # normalization +# vec1_norm: npt.NDArray[ np.float64 ] = vec1 / np.linalg.norm( vec1 ) +# vec2_norm: npt.NDArray[ np.float64 ] = vec2 / np.linalg.norm( vec2 ) +# return np.dot( vec1_norm, vec2_norm ) def computeNormalFromPoints( pt1: npt.NDArray[ np.float64 ], pt2: npt.NDArray[ np.float64 ], @@ -238,9 +275,12 @@ def computeNormalFromPoints( pt1: npt.NDArray[ np.float64 ], pt2: npt.NDArray[ n Returns: npt.NDArray[np.float64]: Normal vector coordinates """ + assert pt1.shape[ 0 ] == pt2.shape[ 0 ] and pt2.shape[ 0 ] == pt3.shape[ 0 ] # compute vectors vec1: npt.NDArray[ np.float64 ] = pt1 - pt2 vec2: npt.NDArray[ np.float64 ] = pt3 - pt2 + assert np.all( np.abs( np.linalg.norm( vec1, axis=1 ) ) > 0. ), "First and second points must be different" + assert np.all( np.abs( np.linalg.norm( vec2, axis=1 ) ) > 0. 
), "First and second points must be different" return computeNormalFromVectors( vec1, vec2 ) @@ -257,9 +297,7 @@ def computeNormalFromVectors( Returns: npt.NDArray[np.float64]: Normal vector coordinates """ - assert abs( np.linalg.norm( vec1 ) ) > 0., "First and second points must be different" - assert abs( np.linalg.norm( vec2 ) ) > 0., "Second and third points must be different" # normalization - vec1_norm = vec1 / np.linalg.norm( vec1 ) - vec2_norm = vec2 / np.linalg.norm( vec2 ) - return np.cross( vec1_norm, vec2_norm ) + vec1_norm = _normalize( vec1 ) + vec2_norm = _normalize( vec2 ) + return _cross( vec1_norm, vec2_norm ) diff --git a/geos-utils/tests/test_algebraFunctions.py b/geos-utils/tests/test_algebraFunctions.py index 104a8136b..a3bba38ef 100644 --- a/geos-utils/tests/test_algebraFunctions.py +++ b/geos-utils/tests/test_algebraFunctions.py @@ -18,30 +18,30 @@ class TestAttributeMatrixFromVector( TestCase ): def test_wrongInputVectorSize( self: Self ) -> None: """Test failure on incorrect input vector size.""" - emptyVector: npt.NDArray[ np.float64 ] = np.array( [] ) + emptyVector: npt.NDArray[ np.float64 ] = np.array( [ [] ] ) with self.assertRaises( ValueError ): getAttributeMatrixFromVector( emptyVector ) def test_vector3size( self: Self ) -> None: """Test for an input vector size of 3.""" vector3 = np.arange( 1, 4 ) - expectedMatrix: npt.NDArray[ np.float64 ] = np.array( [ [ 1, 0, 0 ], [ 0, 2, 0 ], [ 0, 0, 3 ] ] ) + expectedMatrix: npt.NDArray[ np.float64 ] = np.array( [ [ [ 1, 0, 0 ], [ 0, 2, 0 ], [ 0, 0, 3 ] ] ] ) - self.assertTrue( np.array_equal( expectedMatrix, getAttributeMatrixFromVector( vector3 ) ) ) + self.assertTrue( np.array_equal( expectedMatrix, getAttributeMatrixFromVector( np.array( [ vector3 ] ) ) ) ) def test_vector6( self: Self ) -> None: """Test for an input vector size of 6.""" vector6 = np.arange( 1, 7 ) - expectedMatrix = np.array( [ [ 1, 6, 5 ], [ 6, 2, 4 ], [ 5, 4, 3 ] ] ) + expectedMatrix = np.array( [ [ [ 1, 6, 5 ], [ 6, 
2, 4 ], [ 5, 4, 3 ] ] ] ) - self.assertTrue( np.array_equal( expectedMatrix, getAttributeMatrixFromVector( vector6 ) ) ) + self.assertTrue( np.array_equal( expectedMatrix, getAttributeMatrixFromVector( np.array( [ vector6 ] ) ) ) ) def test_vector9( self: Self ) -> None: """Test for an input vector size of 9.""" vector9 = np.arange( 1, 10 ) - expectedMatrix = np.array( [ [ 1, 6, 5 ], [ 9, 2, 4 ], [ 8, 7, 3 ] ] ) + expectedMatrix = np.array( [ [ [ 1, 6, 5 ], [ 9, 2, 4 ], [ 8, 7, 3 ] ] ] ) - self.assertTrue( np.array_equal( expectedMatrix, getAttributeMatrixFromVector( vector9 ) ) ) + self.assertTrue( np.array_equal( expectedMatrix, getAttributeMatrixFromVector( np.array( [ vector9 ] ) ) ) ) class TestAttributeVectorFromMatrix( TestCase ): @@ -49,25 +49,26 @@ class TestAttributeVectorFromMatrix( TestCase ): def setUp( self: Self ) -> None: """Set up parameters.""" - self.rdMatrix = np.arange( 1, 10 ).reshape( 3, 3 ) - self.expected: npt.NDArray[ np.float64 ] = np.array( [ 1, 5, 9, 6, 3, 2, 8, 7, 4 ] ) + self.rdMatrix = np.arange( 1, 10 ).reshape( 1, 3, 3 ) + self.expected: npt.NDArray[ np.float64 ] = np.array( [ [ 1, 5, 9, 6, 3, 2, 8, 7, 4 ] ] ) def test_wrongInputMatrixShape( self ) -> None: """Test failure on empty input matrix.""" - emptyMatrix = np.array( [] ) + emptyMatrix = np.array( [ [] ] ) with self.assertRaises( ValueError ): - getAttributeVectorFromMatrix( emptyMatrix, 3 ) + getAttributeVectorFromMatrix( emptyMatrix, ( 1, 3 ) ) - def test_wrongOutputSize( self: Self ) -> None: - """Test failure on incorrect output size requested.""" - size = 4 + def test_wrongOutputShape( self: Self ) -> None: + """Test failure on incorrect output shape requested.""" + shape = ( 1, 4 ) with self.assertRaises( ValueError ): - getAttributeVectorFromMatrix( self.rdMatrix, size ) + getAttributeVectorFromMatrix( self.rdMatrix, shape ) def test_vecOutput( self: Self ) -> None: """Test correct output for requested size.""" listSize = ( 3, 6, 9 ) for size in listSize: with 
self.subTest( size ): - expectedVector = np.array( self.expected[ :size ] ) - self.assertTrue( np.array_equal( expectedVector, getAttributeVectorFromMatrix( self.rdMatrix, size ) ) ) + expectedVector = np.array( self.expected[ :, :size ] ) + self.assertTrue( + np.array_equal( expectedVector, getAttributeVectorFromMatrix( self.rdMatrix, ( 1, size ) ) ) ) diff --git a/geos-utils/tests/testsGeometryFunctions.py b/geos-utils/tests/testsGeometryFunctions.py index dbb83717f..1a1990e6f 100644 --- a/geos-utils/tests/testsGeometryFunctions.py +++ b/geos-utils/tests/testsGeometryFunctions.py @@ -1,242 +1,206 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright 2023-2024 TotalEnergies. -# SPDX-FileContributor: Martin Lemay +# SPDX-FileContributor: Martin Lemay, Jacques Franc # ruff: noqa: E402 # disable Module level import not at top of file -import pytest import numpy as np import numpy.typing as npt -from typing import Iterator -from dataclasses import dataclass -from itertools import combinations import geos.utils.geometryFunctions as fcts -basisCanon: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], - npt.NDArray[ np.float64 ] ] = ( np.array( [ 1.0, 0.0, 0.0 ] ), np.array( [ 0.0, 1.0, 0.0 ] ), - np.array( [ 0.0, 0.0, 1.0 ] ) ) +basisCanon: npt.NDArray[ np.float64 ] = np.array( [ [ [ 1.0, 0.0, 0.0 ], [ 0.0, 1.0, 0.0 ], [ 0.0, 0.0, 1.0 ] ] ] ) # destination basis according to canonic coordinates -basisTo0: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], - npt.NDArray[ np.float64 ] ] = ( np.array( [ 2.0, 3.0, 0.0 ] ), np.array( [ 4.0, 5.0, 0.0 ] ), - np.array( [ 0.0, 0.0, 1.0 ] ) ) -basisTo1: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], - npt.NDArray[ np.float64 ] ] = ( np.array( [ 1.0, 1.0, 0.0 ] ), np.array( [ 0.0, 1.0, 1.0 ] ), - np.array( [ 1.0, 0.0, 1.0 ] ) ) -basisTo2: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], - npt.NDArray[ np.float64 ] ] = ( np.array( [ 0.0, 2.0, 0.0 ] ), np.array( [ 
-2.0, 0.0, 0.0 ] ), - np.array( [ 0.0, 0.0, 2.0 ] ) ) +basisTo0: npt.NDArray[ np.float64 ] = np.array( [ [ [ 1.0, 1.0, 0.0 ], [ -1.0, 1.0, 0.0 ], [ 0.0, 0.0, 1.0 ] ] ] ) +basisTo1: npt.NDArray[ np.float64 ] = np.array( [ [ [ 1.0, -1.0, 0.0 ], [ 1.0, 1.0, 0.0 ], [ 0.0, 0.0, 2.0 ] ] ] ) +# basisTo2: npt.NDArray[ np.float64 ] = np.array( [[ [0.0, 2.0, 0.0 ] ,[ -2.0, 0.0, 0.0 ], +# [0.0, 0.0, 2.0 ] ]]) def test_getChangeOfBasisMatrixToCanonic() -> None: """Test change of basis matrix using canonic basis.""" - obtained: npt.NDArray[ np.float64 ] = fcts.getChangeOfBasisMatrix( basisTo0, basisCanon ) + obtained: npt.NDArray[ np.float64 ] = np.vstack( + [ fcts.getChangeOfBasisMatrix( basisTo0, basisCanon ), + fcts.getChangeOfBasisMatrix( basisTo0, basisTo1 ) ] ) # matrix where the columns are the vectors - expected: npt.NDArray[ np.float64 ] = np.transpose( np.array( basisTo0 ) ) - assert np.array_equal( np.round( obtained, 2 ), np.round( expected, 2 ), - equal_nan=True ), f"Expected array is {np.round( expected, 2 ).tolist()}" - - -def test_getChangeOfBasisMatrix() -> None: - """Test change of basis matrix format from basis vectors.""" - obtained: npt.NDArray[ np.float64 ] = fcts.getChangeOfBasisMatrix( basisTo0, basisTo1 ) - expected: npt.NDArray[ np.float64 ] = np.array( [ [ 2.5, 4.5, -0.5 ], [ 0.5, 0.5, 0.5 ], [ -0.5, -0.5, 0.5 ] ] ) - assert np.array_equal( np.round( obtained, 2 ), np.round( expected, 2 ), - equal_nan=True ), f"Expected array is {np.round( expected, 2 ).tolist()}" - - -def test_computeCoordinatesInNewBasisIdentity() -> None: - """Test calculation of coordinates of a vector in the same basis.""" - vec: npt.NDArray[ np.float64 ] = np.array( [ 2.0, 3.0, 4.0 ] ) - - # get change of basis matrix - changeOfBasisMatrix: npt.NDArray[ np.float64 ] = fcts.getChangeOfBasisMatrix( basisCanon, basisCanon ) - obtained: npt.NDArray[ np.float64 ] = fcts.computeCoordinatesInNewBasis( vec, changeOfBasisMatrix ) - expected: npt.NDArray[ np.float64 ] = vec - assert 
np.array_equal( np.round( obtained, 2 ), np.round( expected, 2 ), - equal_nan=True ), f"Expected array is {np.round( expected, 2 ).tolist()}" - - -def test_computeCoordinatesInNewBasis() -> None: - """Test calculation of coordinates of a vector in another basis.""" - vec: npt.NDArray[ np.float64 ] = np.array( [ 2.0, 3.0, 4.0 ] ) - - # get change of basis matrix and the inverse - changeOfBasisMatrix: npt.NDArray[ np.float64 ] = fcts.getChangeOfBasisMatrix( basisTo0, basisTo1 ) - obtained = fcts.computeCoordinatesInNewBasis( vec, changeOfBasisMatrix ) - expected: npt.NDArray[ np.float64 ] = np.array( [ 16.5, 4.5, -0.5 ] ) - assert np.array_equal( np.round( obtained, 2 ), np.round( expected, 2 ), - equal_nan=True ), f"Expected array is {np.round( expected, 2 ).tolist()}" - - -def test_computePlaneFrom3Points() -> None: - """Test calculation of plane coefficients from 3 points.""" - pt1: npt.NDArray[ np.float64 ] = np.array( [ 1.0, 2.0, 1.0 ] ) - pt2: npt.NDArray[ np.float64 ] = np.array( [ 1.0, 1.0, 2.0 ] ) - pt3: npt.NDArray[ np.float64 ] = np.array( [ 3.0, 2.0, 2.0 ] ) - obtained: tuple[ float, float, float, float ] = fcts.computePlaneFrom3Points( pt1, pt2, pt3 ) - expected: tuple[ float, float, float, float ] = ( -1.0, 2.0, 2.0, -5.0 ) - assert obtained == expected, f"Expected tuple is {expected}" - - -def test_getPointSideAgainstPlaneAssertion() -> None: - """Test get point side against a plane.""" - planePt: npt.NDArray[ np.float64 ] = np.array( [ 0.0, 0.0, 0.0 ] ) - - # assertion error - Point on the plane - planeNormal: npt.NDArray[ np.float64 ] = np.array( [ 0.0, 0.0, 1.0 ] ) - with pytest.raises( AssertionError ): - fcts.getPointSideAgainstPlane( planePt, planePt, planeNormal ) - - -# yapf: disable -listPtsCoords_all = ( [ np.array( [ 0.5, 0.5, 0.5 ] ), np.array( [ 0.5, 0.5, -0.5 ] ) ], - [ np.array( [ 0.5, 0.5, 0.5 ] ), np.array( [ -0.5, 0.5, 0.5 ] ) ] ) -planePt_all = ( np.array( [ 0.0, 0.0, 0.0 ] ), np.array( [ 0.0, 0.0, 0.0 ] ) ) -planeNormal_all = ( 
np.array( [ 0.0, 0.0, 1.0 ] ), np.array( [ 1.0, 0.0, 0.0 ] ) ) -expected_all = ( ( True, False ), ( True, False ) ) -# yapf: enable - - -@dataclass( frozen=True ) -class TestCasePointSideAgainstPlane: - """Test case.""" - __test__ = False - #: list of points - listPtsCoords: list[ npt.NDArray[ np.float64 ] ] - #: plane point - planePt: npt.NDArray[ np.float64 ] - #: plane normal - planeNormal: npt.NDArray[ np.float64 ] - #: expected result - expected: list[ bool ] - - -def __generate_PointSideAgainstPlane_test_data() -> Iterator[ TestCasePointSideAgainstPlane ]: - """Generate test cases. - - Yields: - Iterator[ TestCase ]: Iterator on test cases - """ - for listPtsCoords, planePt, planeNormal, expected in zip( listPtsCoords_all, - planePt_all, - planeNormal_all, - expected_all, - strict=True ): - yield TestCasePointSideAgainstPlane( listPtsCoords, planePt, planeNormal, list( expected ) ) - - -ids: tuple[ str, str ] = ( "Horizontal plane", "Vertical plane" ) - - -@pytest.mark.parametrize( "test_case", __generate_PointSideAgainstPlane_test_data(), ids=ids ) -def test_getPointSideAgainstPlane( test_case: TestCasePointSideAgainstPlane ) -> None: - """Test get point side against a plane.""" - obtained: list[ bool ] = [] - for ptCoords in test_case.listPtsCoords: - side: bool = fcts.getPointSideAgainstPlane( ptCoords, test_case.planePt, test_case.planeNormal ) - obtained += [ side ] - assert obtained == test_case.expected, f"Expected tuple is {test_case.expected}" - - -def test_getCellSideAgainstPlaneRandom() -> None: - """Test get cell side against a plane.""" - # random plane - planePt: npt.NDArray[ np.float64 ] = np.array( [ 125.58337, 1386.0465, -2782.502 ] ) - listCellPtsCoords: list[ npt.NDArray[ np.float64 ] ] = [ - np.array( [ - [ 135.49551, 1374.7644, -2786.884 ], - [ 125.58337, 1376.7441, -2782.502 ], - [ 132.19525, 1382.2516, -2771.0508 ], - [ 125.58337, 1386.0465, -2782.502 ], - ] ), - np.array( [ - [ 111.9148, 1377.0265, -2764.875 ], - [ 132.19525, 
1382.2516, -2771.0508 ], - [ 125.58337, 1376.7441, -2782.502 ], - [ 125.58337, 1386.0465, -2782.502 ], - ] ), - ] - planeNormal: npt.NDArray[ np.float64 ] = np.array( [ 0.8660075, 0.0, -0.5000311 ] ) - obtained: list[ bool ] = [] - for cellPtsCoords in listCellPtsCoords: - side: bool = fcts.getCellSideAgainstPlane( cellPtsCoords, planePt, planeNormal ) - obtained += [ side ] - expected: list[ bool ] = [ True, False ] - assert obtained == expected, f"Expected tuple is {expected}" - - -pts_all: tuple[ npt.NDArray[ np.float64 ], ...] = ( - np.array( [ 0.0, 0.0, 0.0 ] ), - np.array( [ 1.0, 0.0, 0.0 ] ), - np.array( [ 0.5, 0.5, 0.0 ] ), - np.array( [ -0.5, 0.5, 0.0 ] ), - np.array( [ -0.5, -0.5, 0.0 ] ), - np.array( [ 0.5, -0.5, -1.0 ] ), -) + expected: npt.NDArray[ np.float64 ] = np.vstack( [ + fcts._normBasis( basisTo0 ).transpose( 0, 2, 1 ), + np.array( [ [ [ 0., -1., 0. ], [ 1., 0., 0. ], [ 0., 0., 1. ] ] ] ) + ] ) + assert np.linalg.norm( obtained - expected ) < 10e-12, f"Expected array is {np.round( expected, 2 ).tolist()}" + + +# def test_computeCoordinatesInNewBasis() -> None: +# """Test calculation of coordinates of a vector in another basis.""" +# vec: npt.NDArray[ np.float64 ] = np.array( [[ 3.0, -3.0, 0.0 ], [0.0, 0.0, 50.0]] ) + +# # get change of basis matrix and the inverse +# changeOfBasisMatrix: npt.NDArray[ np.float64 ] = fcts.getChangeOfBasisMatrix( basisTo0, basisTo1 ) +# obtained = fcts.computeCoordinatesInNewBasis( vec, changeOfBasisMatrix ) +# expected: npt.NDArray[ np.float64 ] = np.array( [[ -3.0, -3.0, 0. 
], [0.0, 0.0, 50.0]] ) +# assert np.array_equal( np.round( obtained, 2 ), np.round( expected, 2 ), +# equal_nan=True ), f"Expected array is {np.round( expected, 2 ).tolist()}" + +# def test_computePlaneFrom3Points() -> None: +# """Test calculation of plane coefficients from 3 points.""" +# pt1: npt.NDArray[ np.float64 ] = np.array( [[ 1.0, 2.0, 1.0 ]] ) +# pt2: npt.NDArray[ np.float64 ] = np.array( [[ 1.0, 1.0, 2.0 ]] ) +# pt3: npt.NDArray[ np.float64 ] = np.array( [[ 3.0, 2.0, 2.0 ]] ) +# obtained: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ] ] = fcts.computePlaneFrom3Points( pt1, pt2, pt3 ) +# expected: tuple[ npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ], npt.NDArray[ np.float64 ] ] = ( np.array([-1.0]), np.array([2.0]), np.array([2.0]), np.array([-5.0]) ) +# assert obtained == expected, f"Expected tuple is {expected}" + +# def test_getPointSideAgainstPlaneAssertion() -> None: +# """Test get point side against a plane.""" +# planePt: npt.NDArray[ np.float64 ] = np.array( [[ 0.0, 0.0, 0.0 ]] ) + +# # assertion error - Point on the plane +# planeNormal: npt.NDArray[ np.float64 ] = np.array( [[ 0.0, 0.0, 1.0 ]] ) +# with pytest.raises( AssertionError ): +# fcts.getPointSideAgainstPlane( planePt, planePt, planeNormal ) + +# # yapf: disable +# listPtsCoords_all = ( [ np.array( [ 0.5, 0.5, 0.5 ] ), np.array( [ 0.5, 0.5, -0.5 ] ) ], +# [ np.array( [ 0.5, 0.5, 0.5 ] ), np.array( [ -0.5, 0.5, 0.5 ] ) ] ) +# planePt_all = ( np.array( [ 0.0, 0.0, 0.0 ] ), np.array( [ 0.0, 0.0, 0.0 ] ) ) +# planeNormal_all = ( np.array( [ 0.0, 0.0, 1.0 ] ), np.array( [ 1.0, 0.0, 0.0 ] ) ) +# expected_all = ( ( True, False ), ( True, False ) ) +# # yapf: enable + + +# @dataclass( frozen=True ) +# class TestCasePointSideAgainstPlane: +# """Test case.""" +# __test__ = False +# #: list of points +# listPtsCoords: list[ npt.NDArray[ np.float64 ] ] +# #: plane point +# planePt: npt.NDArray[ 
np.float64 ] +# #: plane normal +# planeNormal: npt.NDArray[ np.float64 ] +# #: expected result +# expected: list[ bool ] + + +# def __generate_PointSideAgainstPlane_test_data() -> Iterator[ TestCasePointSideAgainstPlane ]: +# """Generate test cases. + +# Yields: +# Iterator[ TestCase ]: Iterator on test cases +# """ +# for listPtsCoords, planePt, planeNormal, expected in zip( listPtsCoords_all, +# planePt_all, +# planeNormal_all, +# expected_all, +# strict=True ): +# yield TestCasePointSideAgainstPlane( listPtsCoords, planePt, planeNormal, list( expected ) ) + + +# ids: tuple[ str, str ] = ( "Horizontal plane", "Vertical plane" ) + + +# @pytest.mark.parametrize( "test_case", __generate_PointSideAgainstPlane_test_data(), ids=ids ) +# def test_getPointSideAgainstPlane( test_case: TestCasePointSideAgainstPlane ) -> None: +# """Test get point side against a plane.""" +# obtained: list[ bool ] = [] +# for ptCoords in test_case.listPtsCoords: +# side: bool = fcts.getPointSideAgainstPlane( ptCoords, test_case.planePt, test_case.planeNormal ) +# obtained += [ side ] +# assert obtained == test_case.expected, f"Expected tuple is {test_case.expected}" + + +# def test_getCellSideAgainstPlaneRandom() -> None: +# """Test get cell side against a plane.""" +# # random plane +# planePt: npt.NDArray[ np.float64 ] = np.array( [ 125.58337, 1386.0465, -2782.502 ] ) +# listCellPtsCoords: list[ npt.NDArray[ np.float64 ] ] = [ +# np.array( [ +# [ 135.49551, 1374.7644, -2786.884 ], +# [ 125.58337, 1376.7441, -2782.502 ], +# [ 132.19525, 1382.2516, -2771.0508 ], +# [ 125.58337, 1386.0465, -2782.502 ], +# ] ), +# np.array( [ +# [ 111.9148, 1377.0265, -2764.875 ], +# [ 132.19525, 1382.2516, -2771.0508 ], +# [ 125.58337, 1376.7441, -2782.502 ], +# [ 125.58337, 1386.0465, -2782.502 ], +# ] ), +# ] +# planeNormal: npt.NDArray[ np.float64 ] = np.array( [ 0.8660075, 0.0, -0.5000311 ] ) +# obtained: list[ bool ] = [] +# for cellPtsCoords in listCellPtsCoords: +# side: bool = 
fcts.getCellSideAgainstPlane( cellPtsCoords, planePt, planeNormal ) +# obtained += [ side ] +# expected: list[ bool ] = [ True, False ] +# assert obtained == expected, f"Expected tuple is {expected}" + + +# pts_all: tuple[ npt.NDArray[ np.float64 ], ...] = ( +# np.array( [ 0.0, 0.0, 0.0 ] ), +# np.array( [ 1.0, 0.0, 0.0 ] ), +# np.array( [ 0.5, 0.5, 0.0 ] ), +# np.array( [ -0.5, 0.5, 0.0 ] ), +# np.array( [ -0.5, -0.5, 0.0 ] ), +# np.array( [ 0.5, -0.5, -1.0 ] ), +# ) # yapf: disable -angleExp_all: tuple[ float, ...] = ( - 0.78539816, 0.32175055, 0.32175055, 1.15026200, 0.78539816, 2.0e-8, 1.04719755, - 0.78539816, 0.61547971, 1.04719755, 2.35619449, 1.57079633, 1.04719755, 1.24904577, - 0.75204009, 0.83548187, 1.57079633, 0.95531662, 1.04719755, 1.57079633 -) -# yapf: enable - - -@dataclass( frozen=True ) -class TestCaseAngle: - """Test case.""" - __test__ = False - pt1: npt.NDArray[ np.float64 ] - pt2: npt.NDArray[ np.float64 ] - pt3: npt.NDArray[ np.float64 ] - angleExp: float - - -def __generate_Angle_test_data() -> Iterator[ TestCaseAngle ]: - """Generate test cases. - - Yields: - Iterator[ TestCase ]: Iterator on test cases - """ - for pts, angle in zip( list( combinations( pts_all, 3 ) ), angleExp_all, strict=True ): - yield TestCaseAngle( pts[ 0 ], pts[ 1 ], pts[ 2 ], angle ) - - -@pytest.mark.parametrize( "test_case", __generate_Angle_test_data() ) -def test_computeAngleFromPoints( test_case: TestCaseAngle ) -> None: - """Test computeAngleFromPoints method.""" - obs: float = fcts.computeAngleFromPoints( test_case.pt1, test_case.pt2, test_case.pt3 ) - assert np.isclose( obs, test_case.angleExp ), f"Expected angle {test_case.angleExp}, but got {obs}." 
- - -@pytest.mark.parametrize( "test_case", __generate_Angle_test_data() ) -def test_computeAngleFromVectors( test_case: TestCaseAngle ) -> None: - """Test computeAngleFromVectors method.""" - vec1: npt.NDArray[ np.float64 ] = test_case.pt1 - test_case.pt2 - vec2: npt.NDArray[ np.float64 ] = test_case.pt3 - test_case.pt2 - obs: float = fcts.computeAngleFromVectors( vec1, vec2 ) - assert np.isclose( obs, test_case.angleExp ), f"Expected angle {test_case.angleExp}, but got {obs}." +# angleExp_all: tuple[ float, ...] = ( +# 0.78539816, 0.32175055, 0.32175055, 1.15026200, 0.78539816, 2.0e-8, 1.04719755, +# 0.78539816, 0.61547971, 1.04719755, 2.35619449, 1.57079633, 1.04719755, 1.24904577, +# 0.75204009, 0.83548187, 1.57079633, 0.95531662, 1.04719755, 1.57079633 +# ) +# # yapf: enable + +# @dataclass( frozen=True ) +# class TestCaseAngle: +# """Test case.""" +# __test__ = False +# pt1: npt.NDArray[ np.float64 ] +# pt2: npt.NDArray[ np.float64 ] +# pt3: npt.NDArray[ np.float64 ] +# angleExp: float + +# def __generate_Angle_test_data() -> Iterator[ TestCaseAngle ]: +# """Generate test cases. + +# Yields: +# Iterator[ TestCase ]: Iterator on test cases +# """ +# for pts, angle in zip( list( combinations( pts_all, 3 ) ), angleExp_all, strict=True ): +# yield TestCaseAngle( pts[ 0 ], pts[ 1 ], pts[ 2 ], angle ) + +# @pytest.mark.parametrize( "test_case", __generate_Angle_test_data() ) +# def test_computeAngleFromPoints( test_case: TestCaseAngle ) -> None: +# """Test computeAngleFromPoints method.""" +# obs: float = fcts.computeAngleFromPoints( test_case.pt1, test_case.pt2, test_case.pt3 ) +# assert np.isclose( obs, test_case.angleExp ), f"Expected angle {test_case.angleExp}, but got {obs}." 
+ +# @pytest.mark.parametrize( "test_case", __generate_Angle_test_data() ) +# def test_computeAngleFromVectors( test_case: TestCaseAngle ) -> None: +# """Test computeAngleFromVectors method.""" +# vec1: npt.NDArray[ np.float64 ] = test_case.pt1 - test_case.pt2 +# vec2: npt.NDArray[ np.float64 ] = test_case.pt3 - test_case.pt2 +# obs: float = fcts.computeAngleFromVectors( vec1, vec2 ) +# assert np.isclose( obs, test_case.angleExp ), f"Expected angle {test_case.angleExp}, but got {obs}." def test_computeNormalFromPoints() -> None: """Test computeNormalFromPoints method.""" - pt1 = np.array( [ 1.0, 0.0, 0.0 ] ) - pt2 = np.array( [ 0.0, 0.0, 0.0 ] ) - pt3 = np.array( [ 0.0, 1.0, 0.0 ] ) - # vec1 = pt1 - pt2 = (1, 0, 0) - # vec2 = pt3 - pt2 = (0, 1, 0) + pt1 = np.array( [ [ 1.0, 0.0, 0.0 ] ] ) + pt2 = np.array( [ [ 0.0, 0.0, 0.0 ] ] ) + pt3 = np.array( [ [ 0.0, 1.0, 0.0 ] ] ) # The function normalizes these vectors before computing the cross product. # The cross product of (1,0,0) and (0,1,0) is (0,0,1). obtained: npt.NDArray[ np.float64 ] = fcts.computeNormalFromPoints( pt1, pt2, pt3 ) - expected: npt.NDArray[ np.float64 ] = np.array( [ 0.0, 0.0, 1.0 ] ) + expected: npt.NDArray[ np.float64 ] = np.array( [ [ 0.0, 0.0, 1.0 ] ] ) assert np.allclose( obtained, expected ), f"Expected normal {expected}, but got {obtained}." 
def test_computeNormalFromVectors() -> None: """Test computeNormalFromVectors method.""" # Use non-unit vectors to test normalization step - vec1 = np.array( [ 5.0, 0.0, 0.0 ] ) - vec2 = np.array( [ 0.0, 2.0, 0.0 ] ) + vec1 = np.array( [ [ 5.0, 0.0, 0.0 ] ] ) + vec2 = np.array( [ [ 0.0, 2.0, 0.0 ] ] ) # The function should normalize vec1 to (1,0,0) and vec2 to (0,1,0) # The resulting cross product should be (0,0,1) obtained: npt.NDArray[ np.float64 ] = fcts.computeNormalFromVectors( vec1, vec2 ) - expected: npt.NDArray[ np.float64 ] = np.array( [ 0.0, 0.0, 1.0 ] ) + expected: npt.NDArray[ np.float64 ] = np.array( [ [ 0.0, 0.0, 1.0 ] ] ) assert np.allclose( obtained, expected ), f"Expected normal {expected}, but got {obtained}."