summaryrefslogtreecommitdiff
path: root/libs/assimp/contrib/draco/src
diff options
context:
space:
mode:
authorsanine <sanine.not@pm.me>2022-04-16 11:55:09 -0500
committersanine <sanine.not@pm.me>2022-04-16 11:55:09 -0500
commitdb81b925d776103326128bf629cbdda576a223e7 (patch)
tree58bea8155c686733310009f6bed7363f91fbeb9d /libs/assimp/contrib/draco/src
parent55860037b14fb3893ba21cf2654c83d349cc1082 (diff)
move 3rd-party librarys into libs/ and add built-in honeysuckle
Diffstat (limited to 'libs/assimp/contrib/draco/src')
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.cc54
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.h107
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc30
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.h34
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc28
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.h39
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc168
-rw-r--r--libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_test.cc102
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc145
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h81
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc260
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.h102
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.cc40
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.h76
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_data.h71
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_type.h30
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.cc102
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.h350
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/geometry_indices.h54
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/point_attribute.cc225
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/point_attribute.h190
-rw-r--r--libs/assimp/contrib/draco/src/draco/attributes/point_attribute_test.cc128
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc127
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.h97
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h62
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc49
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.h154
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc556
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h46
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc305
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h51
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h28
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/linear_sequencer.h51
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h58
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h360
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector.h279
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc360
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/points_sequencer.h63
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h231
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h414
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h34
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h72
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h46
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h46
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h172
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h180
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h117
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h96
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h126
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h133
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h98
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h111
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h78
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h344
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h318
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h143
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h133
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h263
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h90
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h194
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h53
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h65
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h65
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h69
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h90
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc85
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h129
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h55
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h77
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h85
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h60
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h118
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h116
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h102
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc192
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h103
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h105
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h90
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc71
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h88
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h81
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h120
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc118
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h86
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc149
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h61
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc108
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h134
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc159
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h115
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc240
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h76
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc233
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h67
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc64
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc76
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h83
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc57
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h82
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc88
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h52
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc86
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h52
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h43
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc70
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h54
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc59
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h61
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc54
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h90
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc39
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h89
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h77
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h82
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc82
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h55
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc125
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h57
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc9
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc49
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h36
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc30
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h36
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/config/compression_shared.h155
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/config/decoder_options.h34
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/config/decoder_options_test.cc67
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/config/draco_options.h249
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/config/encoder_options.h97
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/config/encoding_features.h39
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/decode.cc135
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/decode.h80
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/decode_test.cc169
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/encode.cc96
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/encode.h140
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/encode_base.h131
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/encode_test.cc407
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/ans.h527
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h53
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h164
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h290
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc147
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.h110
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc58
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc170
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc181
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.h29
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.cc376
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.h47
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/expert_encode.cc182
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/expert_encode.h147
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.cc37
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.h68
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.cc70
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.h54
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc1231
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.h228
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h47
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc195
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.h73
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc854
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h210
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h57
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc247
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h131
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h201
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h139
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h134
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h172
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h215
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h226
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.cc34
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.h84
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc116
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc169
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.h39
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc132
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h57
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/depth_first_traverser.h172
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/max_prediction_degree_traverser.h226
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h76
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h113
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/traverser_base.h87
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc26
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h330
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc26
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h371
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.cc152
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.h141
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.cc94
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.h126
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.cc45
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h314
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc45
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h404
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_compression_method.h34
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_types.h76
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/quantize_points_3.h84
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/queuing_policy.h75
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.cc199
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.h118
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.cc306
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.h158
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.cc40
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.h31
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.cc43
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.h45
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc458
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.cc42
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.h33
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.cc49
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.h43
-rw-r--r--libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoding_test.cc92
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/bit_utils.cc36
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/bit_utils.h124
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/bounding_box.cc30
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/bounding_box.h72
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/buffer_bit_coding_test.cc115
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/cycle_timer.cc49
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/cycle_timer.h51
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/data_buffer.cc61
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/data_buffer.h82
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/decoder_buffer.cc72
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/decoder_buffer.h216
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/divide.cc88
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/divide.h42
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_index_type.h183
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_index_type_vector.h83
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_test_base.h11
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_test_utils.cc80
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_test_utils.h93
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_types.cc61
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_types.h52
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/draco_version.h27
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/encoder_buffer.cc93
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/encoder_buffer.h152
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/hash_utils.cc58
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/hash_utils.h64
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/macros.h119
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/math_utils.h55
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/math_utils_test.cc22
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/options.cc94
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/options.h150
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/quantization_utils.cc42
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/quantization_utils.h82
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/quantization_utils_test.cc91
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/status.h77
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/status_or.h81
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/status_test.cc38
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/varint_decoding.h81
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/varint_encoding.h61
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/vector_d.h355
-rw-r--r--libs/assimp/contrib/draco/src/draco/core/vector_d_test.cc306
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_reader_factory.cc45
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_reader_factory.h34
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_reader_factory_test.cc85
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_reader_interface.h32
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_reader_test_common.h13
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_utils.cc110
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_utils.h73
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_utils_test.cc69
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_writer_factory.cc45
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_writer_factory.h34
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_writer_factory_test.cc70
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_writer_interface.h26
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_writer_utils.cc57
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/file_writer_utils.h38
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/mesh_io.cc87
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/mesh_io.h107
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/obj_decoder.cc708
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/obj_decoder.h129
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/obj_decoder_test.cc193
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/obj_encoder.cc346
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/obj_encoder.h92
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/obj_encoder_test.cc110
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/parser_utils.cc261
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/parser_utils.h66
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_decoder.cc320
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_decoder.h69
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_decoder_test.cc93
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_encoder.cc211
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_encoder.h54
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_property_reader.h96
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_property_writer.h94
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_reader.cc312
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_reader.h155
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/ply_reader_test.cc143
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/point_cloud_io.cc58
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/point_cloud_io.h89
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/point_cloud_io_test.cc115
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.cc103
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.h48
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/stdio_file_reader_test.cc49
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.cc59
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.h42
-rw-r--r--libs/assimp/contrib/draco/src/draco/io/stdio_file_writer_test.cc38
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.cc101
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.h73
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.cc89
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.h66
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_functions.js33
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.cc363
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.h330
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc28
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc25
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_decoder.idl52
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_encoder.idl34
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_decoder_glue_wrapper.cc28
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_encoder_glue_wrapper.cc25
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_decoder.idl283
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_encoder.idl208
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.cc359
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.h186
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/finalize.js22
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/prepareCallbacks.js38
-rw-r--r--libs/assimp/contrib/draco/src/draco/javascript/emscripten/version.js29
-rw-r--r--libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.cc265
-rw-r--r--libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.h81
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/corner_table.cc441
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/corner_table.h396
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/corner_table_iterators.h289
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh.cc40
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh.h152
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.cc205
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.h71
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent_test.cc98
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.cc211
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.h196
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.cc251
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.h65
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup_test.cc192
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.cc63
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.h98
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.cc102
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.h260
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.cc89
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.h63
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder_test.cc197
-rw-r--r--libs/assimp/contrib/draco/src/draco/mesh/valence_cache.h142
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.cc44
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.h140
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata.cc132
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata.h208
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.cc148
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.h42
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.cc97
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.h41
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder_test.cc167
-rw-r--r--libs/assimp/contrib/draco/src/draco/metadata/metadata_test.cc157
-rw-r--r--libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.cc275
-rw-r--r--libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.h244
-rw-r--r--libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.cc76
-rw-r--r--libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.h80
-rw-r--r--libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder_test.cc171
-rw-r--r--libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_test.cc132
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/draco_decoder.cc168
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/draco_encoder.cc369
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/fuzz/build.sh35
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_fuzzer.cc29
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_without_dequantization_fuzzer.cc30
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_fuzzer.cc29
-rw-r--r--libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_without_dequantization_fuzzer.cc30
-rw-r--r--libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.cc407
-rw-r--r--libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.h154
-rw-r--r--libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin_test.cc243
364 files changed, 46691 insertions, 0 deletions
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.cc b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.cc
new file mode 100644
index 0000000..eaf94a3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.cc
@@ -0,0 +1,54 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/keyframe_animation.h"
+
+namespace draco {
+
+KeyframeAnimation::KeyframeAnimation() {}
+
+bool KeyframeAnimation::SetTimestamps(
+ const std::vector<TimestampType> &timestamp) {
+ // Already added attributes.
+ const int32_t num_frames = timestamp.size();
+ if (num_attributes() > 0) {
+ // Timestamp attribute could be added only once.
+ if (timestamps()->size()) {
+ return false;
+ } else {
+ // Check if the number of frames is consistent with
+ // the existing keyframes.
+ if (num_frames != num_points()) {
+ return false;
+ }
+ }
+ } else {
+ // This is the first attribute.
+ set_num_frames(num_frames);
+ }
+
+ // Add attribute for time stamp data.
+ std::unique_ptr<PointAttribute> timestamp_att =
+ std::unique_ptr<PointAttribute>(new PointAttribute());
+ timestamp_att->Init(GeometryAttribute::GENERIC, 1, DT_FLOAT32, false,
+ num_frames);
+ for (PointIndex i(0); i < num_frames; ++i) {
+ timestamp_att->SetAttributeValue(timestamp_att->mapped_index(i),
+ &timestamp[i.value()]);
+ }
+ this->SetAttribute(kTimestampId, std::move(timestamp_att));
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.h b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.h
new file mode 100644
index 0000000..a7afb2b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
+#define DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
+
+#include <vector>
+
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+// Class for holding keyframe animation data. It will have two or more
+// attributes as a point cloud. The first attribute is always the timestamp
+// of the animation. Each KeyframeAnimation could have multiple animations with
+// the same number of frames. Each animation will be treated as a point
+// attribute.
+class KeyframeAnimation : public PointCloud {
+ public:
+ // Force time stamp to be float type.
+ using TimestampType = float;
+
+ KeyframeAnimation();
+
+ // Animation must have only one timestamp attribute.
+ // This function must be called before adding any animation data.
+ // Returns false if timestamp already exists.
+ bool SetTimestamps(const std::vector<TimestampType> &timestamp);
+
+ // Returns an id for the added animation data. This id will be used to
+ // identify this animation.
+ // Returns -1 if error, e.g. number of frames is not consistent.
+ // Type |T| should be consistent with |DataType|, e.g:
+ // float - DT_FLOAT32,
+ // int32_t - DT_INT32, ...
+ template <typename T>
+ int32_t AddKeyframes(DataType data_type, uint32_t num_components,
+ const std::vector<T> &data);
+
+ const PointAttribute *timestamps() const {
+ return GetAttributeByUniqueId(kTimestampId);
+ }
+ const PointAttribute *keyframes(int32_t animation_id) const {
+ return GetAttributeByUniqueId(animation_id);
+ }
+
+ // Number of frames should be equal to number points in the point cloud.
+ void set_num_frames(int32_t num_frames) { set_num_points(num_frames); }
+ int32_t num_frames() const { return static_cast<int32_t>(num_points()); }
+
+ int32_t num_animations() const { return num_attributes() - 1; }
+
+ private:
+ // Attribute id of timestamp is fixed to 0.
+ static constexpr int32_t kTimestampId = 0;
+};
+
+template <typename T>
+int32_t KeyframeAnimation::AddKeyframes(DataType data_type,
+ uint32_t num_components,
+ const std::vector<T> &data) {
+ // TODO(draco-eng): Verify T is consistent with |data_type|.
+ if (num_components == 0) {
+ return -1;
+ }
+ // If timestamps is not added yet, then reserve attribute 0 for timestamps.
+ if (!num_attributes()) {
+ // Add a temporary attribute with 0 points to fill attribute id 0.
+ std::unique_ptr<PointAttribute> temp_att =
+ std::unique_ptr<PointAttribute>(new PointAttribute());
+ temp_att->Init(GeometryAttribute::GENERIC, num_components, data_type, false,
+ 0);
+ this->AddAttribute(std::move(temp_att));
+
+ set_num_frames(data.size() / num_components);
+ }
+
+ if (data.size() != num_components * num_frames()) {
+ return -1;
+ }
+
+ std::unique_ptr<PointAttribute> keyframe_att =
+ std::unique_ptr<PointAttribute>(new PointAttribute());
+ keyframe_att->Init(GeometryAttribute::GENERIC, num_components, data_type,
+ false, num_frames());
+ const size_t stride = num_components;
+ for (PointIndex i(0); i < num_frames(); ++i) {
+ keyframe_att->SetAttributeValue(keyframe_att->mapped_index(i),
+ &data[i.value() * stride]);
+ }
+ return this->AddAttribute(std::move(keyframe_att));
+}
+
+} // namespace draco
+
+#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc
new file mode 100644
index 0000000..2065946
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc
@@ -0,0 +1,30 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/keyframe_animation_decoder.h"
+
+namespace draco {
+
+Status KeyframeAnimationDecoder::Decode(const DecoderOptions &options,
+ DecoderBuffer *in_buffer,
+ KeyframeAnimation *animation) {
+ const auto status = PointCloudSequentialDecoder::Decode(
+ options, in_buffer, static_cast<PointCloud *>(animation));
+ if (!status.ok()) {
+ return status;
+ }
+ return OkStatus();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.h b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.h
new file mode 100644
index 0000000..fdf086b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_decoder.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
+#define DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
+
+#include "draco/animation/keyframe_animation.h"
+#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
+
+namespace draco {
+
+// Class for decoding keyframe animation.
+class KeyframeAnimationDecoder : private PointCloudSequentialDecoder {
+ public:
+ KeyframeAnimationDecoder(){};
+
+ Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer,
+ KeyframeAnimation *animation);
+};
+
+} // namespace draco
+
+#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc
new file mode 100644
index 0000000..f7d84f3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc
@@ -0,0 +1,28 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/keyframe_animation_encoder.h"
+
+namespace draco {
+
+KeyframeAnimationEncoder::KeyframeAnimationEncoder() {}
+
+Status KeyframeAnimationEncoder::EncodeKeyframeAnimation(
+ const KeyframeAnimation &animation, const EncoderOptions &options,
+ EncoderBuffer *out_buffer) {
+ SetPointCloud(animation);
+ return Encode(options, out_buffer);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.h b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.h
new file mode 100644
index 0000000..6096c79
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoder.h
@@ -0,0 +1,39 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
+#define DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
+
+#include "draco/animation/keyframe_animation.h"
+#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
+
+namespace draco {
+
+// Class for encoding keyframe animation. It takes KeyframeAnimation as a
+// PointCloud and compress it. It's mostly a wrapper around PointCloudEncoder so
+// that the animation module could be separated from geometry compression when
+// exposed to developers.
+class KeyframeAnimationEncoder : private PointCloudSequentialEncoder {
+ public:
+ KeyframeAnimationEncoder();
+
+ // Encode an animation to a buffer.
+ Status EncodeKeyframeAnimation(const KeyframeAnimation &animation,
+ const EncoderOptions &options,
+ EncoderBuffer *out_buffer);
+};
+
+} // namespace draco
+
+#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc
new file mode 100644
index 0000000..4a6491f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc
@@ -0,0 +1,168 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/keyframe_animation.h"
+#include "draco/animation/keyframe_animation_decoder.h"
+#include "draco/animation/keyframe_animation_encoder.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+
+namespace draco {
+
+class KeyframeAnimationEncodingTest : public ::testing::Test {
+ protected:
+ KeyframeAnimationEncodingTest() {}
+
+ bool CreateAndAddTimestamps(int32_t num_frames) {
+ timestamps_.resize(num_frames);
+ for (int i = 0; i < timestamps_.size(); ++i)
+ timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
+ return keyframe_animation_.SetTimestamps(timestamps_);
+ }
+
+ int32_t CreateAndAddAnimationData(int32_t num_frames,
+ uint32_t num_components) {
+ // Create and add animation data with.
+ animation_data_.resize(num_frames * num_components);
+ for (int i = 0; i < animation_data_.size(); ++i)
+ animation_data_[i] = static_cast<float>(i);
+ return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
+ animation_data_);
+ }
+
+ template <int num_components_t>
+ void CompareAnimationData(const KeyframeAnimation &animation0,
+ const KeyframeAnimation &animation1,
+ bool quantized) {
+ ASSERT_EQ(animation0.num_frames(), animation1.num_frames());
+ ASSERT_EQ(animation0.num_animations(), animation1.num_animations());
+
+ if (quantized) {
+ // TODO(hemmer) : Add test for stable quantization.
+ // Quantization will result in slightly different values.
+ // Skip comparing values.
+ return;
+ }
+
+ // Compare time stamp.
+ const auto timestamp_att0 = animation0.timestamps();
+    const auto timestamp_att1 = animation1.timestamps();
+ for (int i = 0; i < animation0.num_frames(); ++i) {
+ std::array<float, 1> att_value0;
+ std::array<float, 1> att_value1;
+ ASSERT_TRUE((timestamp_att0->GetValue<float, 1>(
+ draco::AttributeValueIndex(i), &att_value0)));
+ ASSERT_TRUE((timestamp_att1->GetValue<float, 1>(
+ draco::AttributeValueIndex(i), &att_value1)));
+ ASSERT_FLOAT_EQ(att_value0[0], att_value1[0]);
+ }
+
+ for (int animation_id = 1; animation_id < animation0.num_animations();
+ ++animation_id) {
+ // Compare keyframe data.
+ const auto keyframe_att0 = animation0.keyframes(animation_id);
+ const auto keyframe_att1 = animation1.keyframes(animation_id);
+ ASSERT_EQ(keyframe_att0->num_components(),
+ keyframe_att1->num_components());
+ for (int i = 0; i < animation0.num_frames(); ++i) {
+ std::array<float, num_components_t> att_value0;
+ std::array<float, num_components_t> att_value1;
+ ASSERT_TRUE((keyframe_att0->GetValue<float, num_components_t>(
+ draco::AttributeValueIndex(i), &att_value0)));
+ ASSERT_TRUE((keyframe_att1->GetValue<float, num_components_t>(
+ draco::AttributeValueIndex(i), &att_value1)));
+ for (int j = 0; j < att_value0.size(); ++j) {
+ ASSERT_FLOAT_EQ(att_value0[j], att_value1[j]);
+ }
+ }
+ }
+ }
+
+ template <int num_components_t>
+ void TestKeyframeAnimationEncoding() {
+ TestKeyframeAnimationEncoding<num_components_t>(false);
+ }
+
+ template <int num_components_t>
+ void TestKeyframeAnimationEncoding(bool quantized) {
+ // Encode animation class.
+ draco::EncoderBuffer buffer;
+ draco::KeyframeAnimationEncoder encoder;
+ EncoderOptions options = EncoderOptions::CreateDefaultOptions();
+ if (quantized) {
+ // Set quantization for timestamps.
+ options.SetAttributeInt(0, "quantization_bits", 20);
+ // Set quantization for keyframes.
+ for (int i = 1; i <= keyframe_animation_.num_animations(); ++i) {
+ options.SetAttributeInt(i, "quantization_bits", 20);
+ }
+ }
+
+ ASSERT_TRUE(
+ encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer)
+ .ok());
+
+ draco::DecoderBuffer dec_decoder;
+ draco::KeyframeAnimationDecoder decoder;
+ DecoderBuffer dec_buffer;
+ dec_buffer.Init(buffer.data(), buffer.size());
+
+ // Decode animation class.
+ std::unique_ptr<KeyframeAnimation> decoded_animation(
+ new KeyframeAnimation());
+ DecoderOptions dec_options;
+ ASSERT_TRUE(
+ decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()).ok());
+
+ // Verify if animation before and after compression is identical.
+ CompareAnimationData<num_components_t>(keyframe_animation_,
+ *decoded_animation, quantized);
+ }
+
+ draco::KeyframeAnimation keyframe_animation_;
+ std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
+ std::vector<float> animation_data_;
+};
+
+TEST_F(KeyframeAnimationEncodingTest, OneComponent) {
+ const int num_frames = 1;
+ ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1);
+ TestKeyframeAnimationEncoding<1>();
+}
+
+TEST_F(KeyframeAnimationEncodingTest, ManyComponents) {
+ const int num_frames = 100;
+ ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 100), 1);
+ TestKeyframeAnimationEncoding<100>();
+}
+
+TEST_F(KeyframeAnimationEncodingTest, ManyComponentsWithQuantization) {
+ const int num_frames = 100;
+ ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 4), 1);
+ // Test compression with quantization.
+ TestKeyframeAnimationEncoding<4>(true);
+}
+
+TEST_F(KeyframeAnimationEncodingTest, MultipleAnimations) {
+ const int num_frames = 5;
+ ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 1);
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 2);
+ TestKeyframeAnimationEncoding<3>();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_test.cc b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_test.cc
new file mode 100644
index 0000000..bc92b25
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/animation/keyframe_animation_test.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/keyframe_animation.h"
+
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
+class KeyframeAnimationTest : public ::testing::Test {
+ protected:
+ KeyframeAnimationTest() {}
+
+ bool CreateAndAddTimestamps(int32_t num_frames) {
+ timestamps_.resize(num_frames);
+ for (int i = 0; i < timestamps_.size(); ++i)
+ timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
+ return keyframe_animation_.SetTimestamps(timestamps_);
+ }
+
+ int32_t CreateAndAddAnimationData(int32_t num_frames,
+ uint32_t num_components) {
+ // Create and add animation data with.
+ animation_data_.resize(num_frames * num_components);
+ for (int i = 0; i < animation_data_.size(); ++i)
+ animation_data_[i] = static_cast<float>(i);
+ return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
+ animation_data_);
+ }
+
+ template <int num_components_t>
+ void CompareAnimationData() {
+ // Compare time stamp.
+ const auto timestamp_att = keyframe_animation_.timestamps();
+ for (int i = 0; i < timestamps_.size(); ++i) {
+ std::array<float, 1> att_value;
+ ASSERT_TRUE((timestamp_att->GetValue<float, 1>(
+ draco::AttributeValueIndex(i), &att_value)));
+ ASSERT_FLOAT_EQ(att_value[0], i);
+ }
+
+ // Compare keyframe data.
+ const auto keyframe_att = keyframe_animation_.keyframes(1);
+ for (int i = 0; i < animation_data_.size() / num_components_t; ++i) {
+ std::array<float, num_components_t> att_value;
+ ASSERT_TRUE((keyframe_att->GetValue<float, num_components_t>(
+ draco::AttributeValueIndex(i), &att_value)));
+ for (int j = 0; j < num_components_t; ++j) {
+ ASSERT_FLOAT_EQ(att_value[j], i * num_components_t + j);
+ }
+ }
+ }
+
+ template <int num_components_t>
+ void TestKeyframeAnimation(int32_t num_frames) {
+ ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, num_components_t), 1);
+ CompareAnimationData<num_components_t>();
+ }
+
+ draco::KeyframeAnimation keyframe_animation_;
+ std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
+ std::vector<float> animation_data_;
+};
+
+// Test animation with 1 component and 10 frames.
+TEST_F(KeyframeAnimationTest, OneComponent) { TestKeyframeAnimation<1>(10); }
+
+// Test animation with 4 component and 10 frames.
+TEST_F(KeyframeAnimationTest, FourComponent) { TestKeyframeAnimation<4>(10); }
+
+// Test adding animation data before timestamp.
+TEST_F(KeyframeAnimationTest, AddingAnimationFirst) {
+ ASSERT_EQ(CreateAndAddAnimationData(5, 1), 1);
+ ASSERT_TRUE(CreateAndAddTimestamps(5));
+}
+
+// Test adding timestamp more than once.
+TEST_F(KeyframeAnimationTest, ErrorAddingTimestampsTwice) {
+ ASSERT_TRUE(CreateAndAddTimestamps(5));
+ ASSERT_FALSE(CreateAndAddTimestamps(5));
+}
+// Test animation with multiple animation data.
+TEST_F(KeyframeAnimationTest, MultipleAnimationData) {
+ const int num_frames = 5;
+ ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1);
+ ASSERT_EQ(CreateAndAddAnimationData(num_frames, 2), 2);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc b/libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc
new file mode 100644
index 0000000..51c3bb6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc
@@ -0,0 +1,145 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "draco/attributes/attribute_octahedron_transform.h"
+
+#include "draco/attributes/attribute_transform_type.h"
+#include "draco/compression/attributes/normal_compression_utils.h"
+
+namespace draco {
+
+bool AttributeOctahedronTransform::InitFromAttribute(
+ const PointAttribute &attribute) {
+ const AttributeTransformData *const transform_data =
+ attribute.GetAttributeTransformData();
+ if (!transform_data ||
+ transform_data->transform_type() != ATTRIBUTE_OCTAHEDRON_TRANSFORM) {
+ return false; // Wrong transform type.
+ }
+ quantization_bits_ = transform_data->GetParameterValue<int32_t>(0);
+ return true;
+}
+
+void AttributeOctahedronTransform::CopyToAttributeTransformData(
+ AttributeTransformData *out_data) const {
+ out_data->set_transform_type(ATTRIBUTE_OCTAHEDRON_TRANSFORM);
+ out_data->AppendParameterValue(quantization_bits_);
+}
+
+bool AttributeOctahedronTransform::TransformAttribute(
+ const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
+ PointAttribute *target_attribute) {
+ return GeneratePortableAttribute(attribute, point_ids,
+ target_attribute->size(), target_attribute);
+}
+
+bool AttributeOctahedronTransform::InverseTransformAttribute(
+ const PointAttribute &attribute, PointAttribute *target_attribute) {
+ if (target_attribute->data_type() != DT_FLOAT32) {
+ return false;
+ }
+
+ const int num_points = target_attribute->size();
+ const int num_components = target_attribute->num_components();
+ if (num_components != 3) {
+ return false;
+ }
+ constexpr int kEntrySize = sizeof(float) * 3;
+ float att_val[3];
+ const int32_t *source_attribute_data = reinterpret_cast<const int32_t *>(
+ attribute.GetAddress(AttributeValueIndex(0)));
+ uint8_t *target_address =
+ target_attribute->GetAddress(AttributeValueIndex(0));
+ OctahedronToolBox octahedron_tool_box;
+ if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_)) {
+ return false;
+ }
+ for (uint32_t i = 0; i < num_points; ++i) {
+ const int32_t s = *source_attribute_data++;
+ const int32_t t = *source_attribute_data++;
+ octahedron_tool_box.QuantizedOctahedralCoordsToUnitVector(s, t, att_val);
+
+ // Store the decoded floating point values into the attribute buffer.
+ std::memcpy(target_address, att_val, kEntrySize);
+ target_address += kEntrySize;
+ }
+ return true;
+}
+
+void AttributeOctahedronTransform::SetParameters(int quantization_bits) {
+ quantization_bits_ = quantization_bits;
+}
+
+bool AttributeOctahedronTransform::EncodeParameters(
+ EncoderBuffer *encoder_buffer) const {
+ if (is_initialized()) {
+ encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
+ return true;
+ }
+ return false;
+}
+
+bool AttributeOctahedronTransform::DecodeParameters(
+ const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
+ uint8_t quantization_bits;
+ if (!decoder_buffer->Decode(&quantization_bits)) {
+ return false;
+ }
+ quantization_bits_ = quantization_bits;
+ return true;
+}
+
+bool AttributeOctahedronTransform::GeneratePortableAttribute(
+ const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
+ int num_points, PointAttribute *target_attribute) const {
+ DRACO_DCHECK(is_initialized());
+
+ // Quantize all values in the order given by point_ids into portable
+ // attribute.
+ int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
+ target_attribute->GetAddress(AttributeValueIndex(0)));
+ float att_val[3];
+ int32_t dst_index = 0;
+ OctahedronToolBox converter;
+ if (!converter.SetQuantizationBits(quantization_bits_)) {
+ return false;
+ }
+ if (!point_ids.empty()) {
+ for (uint32_t i = 0; i < point_ids.size(); ++i) {
+ const AttributeValueIndex att_val_id =
+ attribute.mapped_index(point_ids[i]);
+ attribute.GetValue(att_val_id, att_val);
+ // Encode the vector into a s and t octahedral coordinates.
+ int32_t s, t;
+ converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
+ portable_attribute_data[dst_index++] = s;
+ portable_attribute_data[dst_index++] = t;
+ }
+ } else {
+ for (PointIndex i(0); i < num_points; ++i) {
+ const AttributeValueIndex att_val_id = attribute.mapped_index(i);
+ attribute.GetValue(att_val_id, att_val);
+ // Encode the vector into a s and t octahedral coordinates.
+ int32_t s, t;
+ converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
+ portable_attribute_data[dst_index++] = s;
+ portable_attribute_data[dst_index++] = t;
+ }
+ }
+
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h b/libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h
new file mode 100644
index 0000000..21a1725
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h
@@ -0,0 +1,81 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
+#define DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
+
+#include "draco/attributes/attribute_transform.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Attribute transform for attributes transformed to octahedral coordinates.
+class AttributeOctahedronTransform : public AttributeTransform {
+ public:
+ AttributeOctahedronTransform() : quantization_bits_(-1) {}
+
+ // Return attribute transform type.
+ AttributeTransformType Type() const override {
+ return ATTRIBUTE_OCTAHEDRON_TRANSFORM;
+ }
+ // Try to init transform from attribute.
+ bool InitFromAttribute(const PointAttribute &attribute) override;
+ // Copy parameter values into the provided AttributeTransformData instance.
+ void CopyToAttributeTransformData(
+ AttributeTransformData *out_data) const override;
+
+ bool TransformAttribute(const PointAttribute &attribute,
+ const std::vector<PointIndex> &point_ids,
+ PointAttribute *target_attribute) override;
+
+ bool InverseTransformAttribute(const PointAttribute &attribute,
+ PointAttribute *target_attribute) override;
+
+ // Set number of quantization bits.
+ void SetParameters(int quantization_bits);
+
+ // Encode relevant parameters into buffer.
+ bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;
+
+ bool DecodeParameters(const PointAttribute &attribute,
+ DecoderBuffer *decoder_buffer) override;
+
+ bool is_initialized() const { return quantization_bits_ != -1; }
+ int32_t quantization_bits() const { return quantization_bits_; }
+
+ protected:
+ DataType GetTransformedDataType(
+ const PointAttribute &attribute) const override {
+ return DT_UINT32;
+ }
+ int GetTransformedNumComponents(
+ const PointAttribute &attribute) const override {
+ return 2;
+ }
+
+ // Perform the actual transformation.
+ bool GeneratePortableAttribute(const PointAttribute &attribute,
+ const std::vector<PointIndex> &point_ids,
+ int num_points,
+ PointAttribute *target_attribute) const;
+
+ private:
+ int32_t quantization_bits_;
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc b/libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc
new file mode 100644
index 0000000..a7f93a4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc
@@ -0,0 +1,260 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/attributes/attribute_quantization_transform.h"
+
+#include "draco/attributes/attribute_transform_type.h"
+#include "draco/core/quantization_utils.h"
+
+namespace draco {
+
+// Restores the transform parameters (quantization bits, per-component minimum
+// values and the overall range) from the AttributeTransformData stored on
+// |attribute|. Returns false if the attribute carries no transform data or the
+// data belongs to a different transform type.
+bool AttributeQuantizationTransform::InitFromAttribute(
+ const PointAttribute &attribute) {
+ const AttributeTransformData *const transform_data =
+ attribute.GetAttributeTransformData();
+ if (!transform_data ||
+ transform_data->transform_type() != ATTRIBUTE_QUANTIZATION_TRANSFORM) {
+ return false; // Wrong transform type.
+ }
+ // Parameter layout (4 bytes each): int32 quantization bits, one float
+ // minimum per component, then a single float range. Must stay in sync with
+ // CopyToAttributeTransformData().
+ int32_t byte_offset = 0;
+ quantization_bits_ = transform_data->GetParameterValue<int32_t>(byte_offset);
+ byte_offset += 4;
+ min_values_.resize(attribute.num_components());
+ for (int i = 0; i < attribute.num_components(); ++i) {
+ min_values_[i] = transform_data->GetParameterValue<float>(byte_offset);
+ byte_offset += 4;
+ }
+ range_ = transform_data->GetParameterValue<float>(byte_offset);
+ return true;
+}
+
+// Copy parameter values into the provided AttributeTransformData instance.
+// The serialization order (bits, per-component minima, range) must match the
+// layout that InitFromAttribute() reads back.
+void AttributeQuantizationTransform::CopyToAttributeTransformData(
+ AttributeTransformData *out_data) const {
+ out_data->set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM);
+ out_data->AppendParameterValue(quantization_bits_);
+ for (int i = 0; i < min_values_.size(); ++i) {
+ out_data->AppendParameterValue(min_values_[i]);
+ }
+ out_data->AppendParameterValue(range_);
+}
+
+// Quantizes |attribute| into |target_attribute|. An empty |point_ids| selects
+// the 1:1 mapping overload (all |target_attribute->size()| points in order);
+// otherwise values are quantized in the order given by |point_ids|.
+// Always returns true.
+bool AttributeQuantizationTransform::TransformAttribute(
+ const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
+ PointAttribute *target_attribute) {
+ if (point_ids.empty()) {
+ GeneratePortableAttribute(attribute, target_attribute->size(),
+ target_attribute);
+ } else {
+ GeneratePortableAttribute(attribute, point_ids, target_attribute->size(),
+ target_attribute);
+ }
+ return true;
+}
+
+// Dequantizes the int32 values of |attribute| back into float values stored in
+// |target_attribute| (adding back the per-component minimum). Fails if the
+// target is not DT_FLOAT32 or the dequantizer cannot be initialized.
+bool AttributeQuantizationTransform::InverseTransformAttribute(
+ const PointAttribute &attribute, PointAttribute *target_attribute) {
+ if (target_attribute->data_type() != DT_FLOAT32) {
+ return false;
+ }
+
+ // Convert all quantized values back to floats.
+ const int32_t max_quantized_value =
+ (1u << static_cast<uint32_t>(quantization_bits_)) - 1;
+ const int num_components = target_attribute->num_components();
+ const int entry_size = sizeof(float) * num_components;
+ const std::unique_ptr<float[]> att_val(new float[num_components]);
+ int quant_val_id = 0;
+ int out_byte_pos = 0;
+ Dequantizer dequantizer;
+ if (!dequantizer.Init(range_, max_quantized_value)) {
+ return false;
+ }
+ // Source values are read sequentially, component-interleaved, starting at
+ // the first attribute value.
+ const int32_t *const source_attribute_data =
+ reinterpret_cast<const int32_t *>(
+ attribute.GetAddress(AttributeValueIndex(0)));
+
+ const int num_values = target_attribute->size();
+
+ // NOTE(review): |i| is unsigned while |num_values| is signed int; harmless
+ // for non-negative sizes but a mixed-sign comparison.
+ for (uint32_t i = 0; i < num_values; ++i) {
+ for (int c = 0; c < num_components; ++c) {
+ float value =
+ dequantizer.DequantizeFloat(source_attribute_data[quant_val_id++]);
+ value = value + min_values_[c];
+ att_val[c] = value;
+ }
+ // Store the floating point value into the attribute buffer.
+ target_attribute->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
+ out_byte_pos += entry_size;
+ }
+ return true;
+}
+
+// Returns true if |quantization_bits| is within the supported [1, 30] range.
+bool AttributeQuantizationTransform::IsQuantizationValid(
+ int quantization_bits) {
+ // Currently we allow only up to 30 bit quantization.
+ return quantization_bits >= 1 && quantization_bits <= 30;
+}
+
+// Sets the transform parameters directly from caller-supplied values.
+// |min_values| must point to |num_components| floats. Returns false (leaving
+// the object unmodified) if |quantization_bits| is outside [1, 30].
+bool AttributeQuantizationTransform::SetParameters(int quantization_bits,
+ const float *min_values,
+ int num_components,
+ float range) {
+ if (!IsQuantizationValid(quantization_bits)) {
+ return false;
+ }
+ quantization_bits_ = quantization_bits;
+ min_values_.assign(min_values, min_values + num_components);
+ range_ = range;
+ return true;
+}
+
+// Derives the transform parameters from the attribute data itself: the
+// per-component minimum over all values, and the range as the largest
+// (max - min) delta over all components. Fails if the transform was already
+// initialized, |quantization_bits| is invalid, or any min/max is NaN/infinite.
+bool AttributeQuantizationTransform::ComputeParameters(
+ const PointAttribute &attribute, const int quantization_bits) {
+ if (quantization_bits_ != -1) {
+ return false; // already initialized.
+ }
+ if (!IsQuantizationValid(quantization_bits)) {
+ return false;
+ }
+ quantization_bits_ = quantization_bits;
+
+ const int num_components = attribute.num_components();
+ range_ = 0.f;
+ min_values_ = std::vector<float>(num_components, 0.f);
+ const std::unique_ptr<float[]> max_values(new float[num_components]);
+ const std::unique_ptr<float[]> att_val(new float[num_components]);
+ // Compute minimum values and max value difference.
+ // Seed min/max with the first value, then scan the remaining values.
+ attribute.GetValue(AttributeValueIndex(0), att_val.get());
+ attribute.GetValue(AttributeValueIndex(0), min_values_.data());
+ attribute.GetValue(AttributeValueIndex(0), max_values.get());
+
+ for (AttributeValueIndex i(1); i < static_cast<uint32_t>(attribute.size());
+ ++i) {
+ attribute.GetValue(i, att_val.get());
+ for (int c = 0; c < num_components; ++c) {
+ if (min_values_[c] > att_val[c]) {
+ min_values_[c] = att_val[c];
+ }
+ if (max_values[c] < att_val[c]) {
+ max_values[c] = att_val[c];
+ }
+ }
+ }
+ for (int c = 0; c < num_components; ++c) {
+ // Reject non-finite extremes; quantizing NaN/inf would be meaningless.
+ if (std::isnan(min_values_[c]) || std::isinf(min_values_[c]) ||
+ std::isnan(max_values[c]) || std::isinf(max_values[c])) {
+ return false;
+ }
+ const float dif = max_values[c] - min_values_[c];
+ if (dif > range_) {
+ range_ = dif;
+ }
+ }
+
+ // In case all values are the same, initialize the range to unit length. This
+ // will ensure that all values are quantized properly to the same value.
+ if (range_ == 0.f) {
+ range_ = 1.f;
+ }
+
+ return true;
+}
+
+// Serializes the parameters to |encoder_buffer|: the raw min-value floats,
+// the range, then the quantization bits as a single byte. Must mirror
+// DecodeParameters(). Returns false if the transform was never initialized.
+bool AttributeQuantizationTransform::EncodeParameters(
+ EncoderBuffer *encoder_buffer) const {
+ if (is_initialized()) {
+ encoder_buffer->Encode(min_values_.data(),
+ sizeof(float) * min_values_.size());
+ encoder_buffer->Encode(range_);
+ encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
+ return true;
+ }
+ return false;
+}
+
+// Deserializes the parameters from |decoder_buffer| in the order written by
+// EncodeParameters(); |attribute| only supplies the component count. Returns
+// false on a short buffer or an out-of-range bit count (untrusted input).
+bool AttributeQuantizationTransform::DecodeParameters(
+ const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
+ min_values_.resize(attribute.num_components());
+ if (!decoder_buffer->Decode(&min_values_[0],
+ sizeof(float) * min_values_.size())) {
+ return false;
+ }
+ if (!decoder_buffer->Decode(&range_)) {
+ return false;
+ }
+ uint8_t quantization_bits;
+ if (!decoder_buffer->Decode(&quantization_bits)) {
+ return false;
+ }
+ // Validate before accepting: the decoded byte comes from external data.
+ if (!IsQuantizationValid(quantization_bits)) {
+ return false;
+ }
+ quantization_bits_ = quantization_bits;
+ return true;
+}
+
+// 1:1 overload: quantizes the first |num_points| points of |attribute| in
+// order, writing component-interleaved int32 values directly into
+// |target_attribute|'s storage. Requires the transform to be initialized.
+void AttributeQuantizationTransform::GeneratePortableAttribute(
+ const PointAttribute &attribute, int num_points,
+ PointAttribute *target_attribute) const {
+ DRACO_DCHECK(is_initialized());
+
+ const int num_components = attribute.num_components();
+
+ // Quantize all values using the order given by point_ids.
+ int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
+ target_attribute->GetAddress(AttributeValueIndex(0)));
+ // Largest representable quantized value for the configured bit count.
+ const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
+ Quantizer quantizer;
+ quantizer.Init(range(), max_quantized_value);
+ int32_t dst_index = 0;
+ const std::unique_ptr<float[]> att_val(new float[num_components]);
+ for (PointIndex i(0); i < num_points; ++i) {
+ const AttributeValueIndex att_val_id = attribute.mapped_index(i);
+ attribute.GetValue(att_val_id, att_val.get());
+ for (int c = 0; c < num_components; ++c) {
+ // Shift by the component minimum so quantization maps [0, range].
+ const float value = (att_val[c] - min_values()[c]);
+ const int32_t q_val = quantizer.QuantizeFloat(value);
+ portable_attribute_data[dst_index++] = q_val;
+ }
+ }
+}
+
+// Mapped overload: quantizes values in the order given by |point_ids|.
+// |num_points| is unused here; iteration is driven by |point_ids|. Output is
+// written component-interleaved into |target_attribute|'s storage.
+void AttributeQuantizationTransform::GeneratePortableAttribute(
+ const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
+ int num_points, PointAttribute *target_attribute) const {
+ DRACO_DCHECK(is_initialized());
+
+ const int num_components = attribute.num_components();
+
+ // Quantize all values using the order given by point_ids.
+ int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
+ target_attribute->GetAddress(AttributeValueIndex(0)));
+ // Largest representable quantized value for the configured bit count.
+ const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
+ Quantizer quantizer;
+ quantizer.Init(range(), max_quantized_value);
+ int32_t dst_index = 0;
+ const std::unique_ptr<float[]> att_val(new float[num_components]);
+ for (uint32_t i = 0; i < point_ids.size(); ++i) {
+ const AttributeValueIndex att_val_id = attribute.mapped_index(point_ids[i]);
+ attribute.GetValue(att_val_id, att_val.get());
+ for (int c = 0; c < num_components; ++c) {
+ // Shift by the component minimum so quantization maps [0, range].
+ const float value = (att_val[c] - min_values()[c]);
+ const int32_t q_val = quantizer.QuantizeFloat(value);
+ portable_attribute_data[dst_index++] = q_val;
+ }
+ }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.h b/libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.h
new file mode 100644
index 0000000..f1122b6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_quantization_transform.h
@@ -0,0 +1,102 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
+#define DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
+
+#include <vector>
+
+#include "draco/attributes/attribute_transform.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Attribute transform for quantized attributes. Maps float attribute values
+// into unsigned integers using a per-component minimum and a shared range;
+// parameters can be set directly, computed from the data, or (de)serialized.
+class AttributeQuantizationTransform : public AttributeTransform {
+ public:
+ // Starts uninitialized: -1 bits is the "not yet configured" sentinel.
+ AttributeQuantizationTransform() : quantization_bits_(-1), range_(0.f) {}
+ // Return attribute transform type.
+ AttributeTransformType Type() const override {
+ return ATTRIBUTE_QUANTIZATION_TRANSFORM;
+ }
+ // Try to init transform from attribute.
+ bool InitFromAttribute(const PointAttribute &attribute) override;
+ // Copy parameter values into the provided AttributeTransformData instance.
+ void CopyToAttributeTransformData(
+ AttributeTransformData *out_data) const override;
+
+ bool TransformAttribute(const PointAttribute &attribute,
+ const std::vector<PointIndex> &point_ids,
+ PointAttribute *target_attribute) override;
+
+ bool InverseTransformAttribute(const PointAttribute &attribute,
+ PointAttribute *target_attribute) override;
+
+ // Sets parameters explicitly; returns false if |quantization_bits| is
+ // outside the supported [1, 30] range.
+ bool SetParameters(int quantization_bits, const float *min_values,
+ int num_components, float range);
+
+ // Derives min values and range from |attribute|'s data; returns false if
+ // already initialized, bits are invalid, or the data is NaN/infinite.
+ bool ComputeParameters(const PointAttribute &attribute,
+ const int quantization_bits);
+
+ // Encode relevant parameters into buffer.
+ bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;
+
+ // Decodes and validates parameters previously written by EncodeParameters().
+ bool DecodeParameters(const PointAttribute &attribute,
+ DecoderBuffer *decoder_buffer) override;
+
+ // Parameter accessors.
+ int32_t quantization_bits() const { return quantization_bits_; }
+ float min_value(int axis) const { return min_values_[axis]; }
+ const std::vector<float> &min_values() const { return min_values_; }
+ float range() const { return range_; }
+ // True once parameters have been set/computed/decoded (-1 is the sentinel).
+ bool is_initialized() const { return quantization_bits_ != -1; }
+
+ protected:
+ // Create portable attribute using 1:1 mapping between points in the input and
+ // output attribute.
+ void GeneratePortableAttribute(const PointAttribute &attribute,
+ int num_points,
+ PointAttribute *target_attribute) const;
+
+ // Create portable attribute using custom mapping between input and output
+ // points.
+ void GeneratePortableAttribute(const PointAttribute &attribute,
+ const std::vector<PointIndex> &point_ids,
+ int num_points,
+ PointAttribute *target_attribute) const;
+
+ // Quantized output is stored as unsigned 32-bit integers, one per component.
+ DataType GetTransformedDataType(
+ const PointAttribute &attribute) const override {
+ return DT_UINT32;
+ }
+ int GetTransformedNumComponents(
+ const PointAttribute &attribute) const override {
+ return attribute.num_components();
+ }
+
+ // Returns true for bit counts in the supported [1, 30] range.
+ static bool IsQuantizationValid(int quantization_bits);
+
+ private:
+ int32_t quantization_bits_;
+
+ // Minimal dequantized value for each component of the attribute.
+ std::vector<float> min_values_;
+
+ // Bounds of the dequantized attribute (max delta over all components).
+ float range_;
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.cc b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.cc
new file mode 100644
index 0000000..174e6b8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.cc
@@ -0,0 +1,40 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/attributes/attribute_transform.h"
+
+namespace draco {
+
+// Serializes this transform's parameters into a fresh AttributeTransformData
+// and attaches it to |attribute| (ownership transferred). Always returns true.
+bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const {
+ std::unique_ptr<AttributeTransformData> transform_data(
+ new AttributeTransformData());
+ this->CopyToAttributeTransformData(transform_data.get());
+ attribute->SetAttributeTransformData(std::move(transform_data));
+ return true;
+}
+
+// Allocates a new attribute shaped for this transform's output: the data type
+// and component count come from the derived class, the attribute type is
+// copied from |src_attribute|, storage is sized for |num_entries| values, and
+// an identity point-to-value mapping is installed.
+std::unique_ptr<PointAttribute> AttributeTransform::InitTransformedAttribute(
+ const PointAttribute &src_attribute, int num_entries) {
+ const int num_components = GetTransformedNumComponents(src_attribute);
+ const DataType dt = GetTransformedDataType(src_attribute);
+ GeometryAttribute va;
+ // Tightly packed: stride equals the size of one value, no offset, and the
+ // attribute is not normalized.
+ va.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false,
+ num_components * DataTypeLength(dt), 0);
+ std::unique_ptr<PointAttribute> transformed_attribute(new PointAttribute(va));
+ transformed_attribute->Reset(num_entries);
+ transformed_attribute->SetIdentityMapping();
+ return transformed_attribute;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.h b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.h
new file mode 100644
index 0000000..62aad60
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
+#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
+
+#include "draco/attributes/attribute_transform_data.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Virtual base class for various attribute transforms, enforcing common
+// interface where possible.
+class AttributeTransform {
+ public:
+ virtual ~AttributeTransform() = default;
+
+ // Return attribute transform type.
+ virtual AttributeTransformType Type() const = 0;
+ // Try to init transform from attribute.
+ virtual bool InitFromAttribute(const PointAttribute &attribute) = 0;
+ // Copy parameter values into the provided AttributeTransformData instance.
+ virtual void CopyToAttributeTransformData(
+ AttributeTransformData *out_data) const = 0;
+ // Attaches this transform's parameters to |attribute| as transform data.
+ bool TransferToAttribute(PointAttribute *attribute) const;
+
+ // Applies the transform to |attribute| and stores the result in
+ // |target_attribute|. |point_ids| is an optional vector that can be used to
+ // remap values during the transform.
+ virtual bool TransformAttribute(const PointAttribute &attribute,
+ const std::vector<PointIndex> &point_ids,
+ PointAttribute *target_attribute) = 0;
+
+ // Applies an inverse transform to |attribute| and stores the result in
+ // |target_attribute|. In this case, |attribute| is an attribute that was
+ // already transformed (e.g. quantized) and |target_attribute| is the
+ // attribute before the transformation.
+ virtual bool InverseTransformAttribute(const PointAttribute &attribute,
+ PointAttribute *target_attribute) = 0;
+
+ // Encodes all data needed by the transformation into the |encoder_buffer|.
+ virtual bool EncodeParameters(EncoderBuffer *encoder_buffer) const = 0;
+
+ // Decodes all data needed to transform |attribute| back to the original
+ // format.
+ virtual bool DecodeParameters(const PointAttribute &attribute,
+ DecoderBuffer *decoder_buffer) = 0;
+
+ // Initializes a transformed attribute that can be used as target in the
+ // TransformAttribute() function call.
+ virtual std::unique_ptr<PointAttribute> InitTransformedAttribute(
+ const PointAttribute &src_attribute, int num_entries);
+
+ protected:
+ // Data type and component count of the transformed (portable) attribute,
+ // supplied by each concrete transform.
+ virtual DataType GetTransformedDataType(
+ const PointAttribute &attribute) const = 0;
+ virtual int GetTransformedNumComponents(
+ const PointAttribute &attribute) const = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_data.h b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_data.h
new file mode 100644
index 0000000..96ed073
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_data.h
@@ -0,0 +1,71 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
+#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
+
+#include <memory>
+
+#include "draco/attributes/attribute_transform_type.h"
+#include "draco/core/data_buffer.h"
+
+namespace draco {
+
+// Class for holding parameter values for an attribute transform of a
+// PointAttribute. This can be for example quantization data for an attribute
+// that holds quantized values. This class provides only a basic storage for
+// attribute transform parameters and it should be accessed only through wrapper
+// classes for a specific transform (e.g. AttributeQuantizationTransform).
+class AttributeTransformData {
+ public:
+ AttributeTransformData() : transform_type_(ATTRIBUTE_INVALID_TRANSFORM) {}
+ AttributeTransformData(const AttributeTransformData &data) = default;
+
+ // Returns the type of the attribute transform that is described by the class.
+ AttributeTransformType transform_type() const { return transform_type_; }
+ void set_transform_type(AttributeTransformType type) {
+ transform_type_ = type;
+ }
+
+ // Returns a parameter value on a given |byte_offset|.
+ // Reads raw bytes from the internal buffer; the caller is responsible for
+ // using the same type and offset that were used when writing.
+ template <typename DataTypeT>
+ DataTypeT GetParameterValue(int byte_offset) const {
+ DataTypeT out_data;
+ buffer_.Read(byte_offset, &out_data, sizeof(DataTypeT));
+ return out_data;
+ }
+
+ // Sets a parameter value on a given |byte_offset|.
+ // Grows the buffer as needed so the write always fits.
+ template <typename DataTypeT>
+ void SetParameterValue(int byte_offset, const DataTypeT &in_data) {
+ if (byte_offset + sizeof(DataTypeT) > buffer_.data_size()) {
+ buffer_.Resize(byte_offset + sizeof(DataTypeT));
+ }
+ buffer_.Write(byte_offset, &in_data, sizeof(DataTypeT));
+ }
+
+ // Sets a parameter value at the end of the |buffer_|.
+ template <typename DataTypeT>
+ void AppendParameterValue(const DataTypeT &in_data) {
+ SetParameterValue(static_cast<int>(buffer_.data_size()), in_data);
+ }
+
+ private:
+ AttributeTransformType transform_type_;
+ DataBuffer buffer_;
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_type.h b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_type.h
new file mode 100644
index 0000000..51ce6f3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/attribute_transform_type.h
@@ -0,0 +1,30 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
+#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
+
+namespace draco {
+
+// List of all currently supported attribute transforms.
+enum AttributeTransformType {
+ ATTRIBUTE_INVALID_TRANSFORM = -1,
+ ATTRIBUTE_NO_TRANSFORM = 0,
+ ATTRIBUTE_QUANTIZATION_TRANSFORM = 1,
+ ATTRIBUTE_OCTAHEDRON_TRANSFORM = 2,
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.cc b/libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.cc
new file mode 100644
index 0000000..b624784
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.cc
@@ -0,0 +1,102 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/attributes/geometry_attribute.h"
+
+namespace draco {
+
+// Default-constructs an unbound attribute: no buffer, a single FLOAT32
+// component, zero stride/offset, and INVALID attribute type.
+GeometryAttribute::GeometryAttribute()
+ : buffer_(nullptr),
+ num_components_(1),
+ data_type_(DT_FLOAT32),
+ byte_stride_(0),
+ byte_offset_(0),
+ attribute_type_(INVALID),
+ unique_id_(0) {}
+
+// Binds the attribute to |buffer| (which may be null) and sets its layout.
+// When a buffer is provided, its id and update count are captured in the
+// buffer descriptor so buffer identity can later be compared cheaply.
+// Note: |unique_id_| is intentionally left untouched here.
+void GeometryAttribute::Init(GeometryAttribute::Type attribute_type,
+ DataBuffer *buffer, int8_t num_components,
+ DataType data_type, bool normalized,
+ int64_t byte_stride, int64_t byte_offset) {
+ buffer_ = buffer;
+ if (buffer) {
+ buffer_descriptor_.buffer_id = buffer->buffer_id();
+ buffer_descriptor_.buffer_update_count = buffer->update_count();
+ }
+ num_components_ = num_components;
+ data_type_ = data_type;
+ normalized_ = normalized;
+ byte_stride_ = byte_stride;
+ byte_offset_ = byte_offset;
+ attribute_type_ = attribute_type;
+}
+
+// Copies all metadata from |src_att| and deep-copies its buffer contents into
+// this attribute's existing buffer. If the source has a buffer but this
+// attribute does not, the copy fails (this attribute does not allocate one).
+bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) {
+ num_components_ = src_att.num_components_;
+ data_type_ = src_att.data_type_;
+ normalized_ = src_att.normalized_;
+ byte_stride_ = src_att.byte_stride_;
+ byte_offset_ = src_att.byte_offset_;
+ attribute_type_ = src_att.attribute_type_;
+ buffer_descriptor_ = src_att.buffer_descriptor_;
+ unique_id_ = src_att.unique_id_;
+ if (src_att.buffer_ == nullptr) {
+ buffer_ = nullptr;
+ } else {
+ if (buffer_ == nullptr) {
+ // Destination must already own a buffer to receive the data.
+ return false;
+ }
+ buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size());
+ }
+ return true;
+}
+
+// Equality compares attribute type, buffer descriptor (id + update count),
+// component count, data type, stride and offset. Note that |normalized_| and
+// |unique_id_| are not part of the comparison.
+bool GeometryAttribute::operator==(const GeometryAttribute &va) const {
+ if (attribute_type_ != va.attribute_type_) {
+ return false;
+ }
+ // It's OK to compare just the buffer descriptors here. We don't need to
+ // compare the buffers themselves.
+ if (buffer_descriptor_.buffer_id != va.buffer_descriptor_.buffer_id) {
+ return false;
+ }
+ if (buffer_descriptor_.buffer_update_count !=
+ va.buffer_descriptor_.buffer_update_count) {
+ return false;
+ }
+ if (num_components_ != va.num_components_) {
+ return false;
+ }
+ if (data_type_ != va.data_type_) {
+ return false;
+ }
+ if (byte_stride_ != va.byte_stride_) {
+ return false;
+ }
+ if (byte_offset_ != va.byte_offset_) {
+ return false;
+ }
+ return true;
+}
+
+// Rebinds the attribute to a different |buffer| with a new stride and offset,
+// refreshing the buffer descriptor. |buffer| must be non-null (it is
+// dereferenced unconditionally). Type, data type and component count are kept.
+void GeometryAttribute::ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
+ int64_t byte_offset) {
+ buffer_ = buffer;
+ buffer_descriptor_.buffer_id = buffer->buffer_id();
+ buffer_descriptor_.buffer_update_count = buffer->update_count();
+ byte_stride_ = byte_stride;
+ byte_offset_ = byte_offset;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.h b/libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.h
new file mode 100644
index 0000000..f4d099b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/geometry_attribute.h
@@ -0,0 +1,350 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
+#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
+
+#include <array>
+#include <limits>
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/core/data_buffer.h"
+#include "draco/core/hash_utils.h"
+
+namespace draco {
+
+// The class provides access to a specific attribute which is stored in a
+// DataBuffer, such as normals or coordinates. However, the GeometryAttribute
+// class does not own the buffer and the buffer itself may store other data
+// unrelated to this attribute (such as data for other attributes in which case
+// we can have multiple GeometryAttributes accessing one buffer). Typically,
+// all attributes for a point (or corner, face) are stored in one block, which
+// is advantageous in terms of memory access. The length of the entire block is
+// given by the byte_stride, the position where the attribute starts is given by
+// the byte_offset, the actual number of bytes that the attribute occupies is
+// given by the data_type and the number of components.
class GeometryAttribute {
 public:
  // Supported attribute types.
  enum Type {
    INVALID = -1,
    // Named attributes start here. The difference between named and generic
    // attributes is that for named attributes we know their purpose and we
    // can apply some special methods when dealing with them (e.g. during
    // encoding).
    POSITION = 0,
    NORMAL,
    COLOR,
    TEX_COORD,
    // A special id used to mark attributes that are not assigned to any known
    // predefined use case. Such attributes are often used for a shader specific
    // data.
    GENERIC,
    // Total number of different attribute types.
    // Always keep behind all named attributes.
    NAMED_ATTRIBUTES_COUNT,
  };

  GeometryAttribute();
  // Initializes and enables the attribute.
  void Init(Type attribute_type, DataBuffer *buffer, int8_t num_components,
            DataType data_type, bool normalized, int64_t byte_stride,
            int64_t byte_offset);
  // The attribute is considered valid once it has been attached to a buffer.
  bool IsValid() const { return buffer_ != nullptr; }

  // Copies data from the source attribute to the this attribute.
  // This attribute must have a valid buffer allocated otherwise the operation
  // is going to fail and return false.
  bool CopyFrom(const GeometryAttribute &src_att);

  // Function for getting a attribute value with a specific format.
  // Unsafe. Caller must ensure the accessed memory is valid.
  // T is the attribute data type.
  // att_components_t is the number of attribute components.
  template <typename T, int att_components_t>
  std::array<T, att_components_t> GetValue(
      AttributeValueIndex att_index) const {
    // Byte address of the attribute index.
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    std::array<T, att_components_t> out;
    buffer_->Read(byte_pos, &(out[0]), sizeof(out));
    return out;
  }

  // Function for getting a attribute value with a specific format.
  // T is the attribute data type.
  // att_components_t is the number of attribute components.
  // Returns false (without reading anything) when the read would extend past
  // the end of the buffer.
  template <typename T, int att_components_t>
  bool GetValue(AttributeValueIndex att_index,
                std::array<T, att_components_t> *out) const {
    // Byte address of the attribute index.
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    // Check we are not reading past end of data.
    if (byte_pos + sizeof(*out) > buffer_->data_size()) {
      return false;
    }
    buffer_->Read(byte_pos, &((*out)[0]), sizeof(*out));
    return true;
  }

  // Returns the byte position of the attribute entry in the data buffer.
  inline int64_t GetBytePos(AttributeValueIndex att_index) const {
    return byte_offset_ + byte_stride_ * att_index.value();
  }

  inline const uint8_t *GetAddress(AttributeValueIndex att_index) const {
    const int64_t byte_pos = GetBytePos(att_index);
    return buffer_->data() + byte_pos;
  }
  inline uint8_t *GetAddress(AttributeValueIndex att_index) {
    const int64_t byte_pos = GetBytePos(att_index);
    return buffer_->data() + byte_pos;
  }
  // NOTE(review): only the upper bound is validated here; an address before
  // the start of the buffer would still be reported as valid. Callers are
  // expected to pass addresses derived from GetAddress().
  inline bool IsAddressValid(const uint8_t *address) const {
    return ((buffer_->data() + buffer_->data_size()) > address);
  }

  // Fills out_data with the raw value of the requested attribute entry.
  // out_data must be at least byte_stride_ long.
  void GetValue(AttributeValueIndex att_index, void *out_data) const {
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    buffer_->Read(byte_pos, out_data, byte_stride_);
  }

  // Sets a value of an attribute entry. The input value must be allocated to
  // cover all components of a single attribute entry.
  // NOTE(review): unlike GetValue(), byte_offset_ is not added to the write
  // position here -- presumably this is only ever called on attributes that
  // own their buffer and have a zero offset; confirm before using elsewhere.
  void SetAttributeValue(AttributeValueIndex entry_index, const void *value) {
    const int64_t byte_pos = entry_index.value() * byte_stride();
    buffer_->Write(byte_pos, value, byte_stride());
  }

  // DEPRECATED: Use
  //   ConvertValue(AttributeValueIndex att_id,
  //                int out_num_components,
  //                OutT *out_val);
  //
  // Function for conversion of a attribute to a specific output format.
  // OutT is the desired data type of the attribute.
  // out_att_components_t is the number of components of the output format.
  // Returns false when the conversion failed.
  template <typename OutT, int out_att_components_t>
  bool ConvertValue(AttributeValueIndex att_id, OutT *out_val) const {
    return ConvertValue(att_id, out_att_components_t, out_val);
  }

  // Function for conversion of a attribute to a specific output format.
  // |out_val| needs to be able to store |out_num_components| values.
  // OutT is the desired data type of the attribute.
  // Returns false when the conversion failed.
  template <typename OutT>
  bool ConvertValue(AttributeValueIndex att_id, int8_t out_num_components,
                    OutT *out_val) const {
    if (out_val == nullptr) {
      return false;
    }
    // Dispatch on the stored data type to the typed conversion helper.
    switch (data_type_) {
      case DT_INT8:
        return ConvertTypedValue<int8_t, OutT>(att_id, out_num_components,
                                               out_val);
      case DT_UINT8:
        return ConvertTypedValue<uint8_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_INT16:
        return ConvertTypedValue<int16_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT16:
        return ConvertTypedValue<uint16_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_INT32:
        return ConvertTypedValue<int32_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT32:
        return ConvertTypedValue<uint32_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_INT64:
        return ConvertTypedValue<int64_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT64:
        return ConvertTypedValue<uint64_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_FLOAT32:
        return ConvertTypedValue<float, OutT>(att_id, out_num_components,
                                              out_val);
      case DT_FLOAT64:
        return ConvertTypedValue<double, OutT>(att_id, out_num_components,
                                               out_val);
      case DT_BOOL:
        return ConvertTypedValue<bool, OutT>(att_id, out_num_components,
                                             out_val);
      default:
        // Wrong attribute type.
        return false;
    }
  }

  // Function for conversion of a attribute to a specific output format.
  // The |out_value| must be able to store all components of a single attribute
  // entry.
  // OutT is the desired data type of the attribute.
  // Returns false when the conversion failed.
  template <typename OutT>
  bool ConvertValue(AttributeValueIndex att_index, OutT *out_value) const {
    return ConvertValue<OutT>(att_index, num_components_, out_value);
  }

  // Utility function. Returns |attribute_type| as std::string.
  static std::string TypeToString(Type attribute_type) {
    switch (attribute_type) {
      case INVALID:
        return "INVALID";
      case POSITION:
        return "POSITION";
      case NORMAL:
        return "NORMAL";
      case COLOR:
        return "COLOR";
      case TEX_COORD:
        return "TEX_COORD";
      case GENERIC:
        return "GENERIC";
      default:
        return "UNKNOWN";
    }
  }

  bool operator==(const GeometryAttribute &va) const;

  // Returns the type of the attribute indicating the nature of the attribute.
  Type attribute_type() const { return attribute_type_; }
  void set_attribute_type(Type type) { attribute_type_ = type; }
  // Returns the data type that is stored in the attribute.
  DataType data_type() const { return data_type_; }
  // Returns the number of components that are stored for each entry.
  // For position attribute this is usually three (x,y,z),
  // while texture coordinates have two components (u,v).
  int8_t num_components() const { return num_components_; }
  // Indicates whether the data type should be normalized before interpretation,
  // that is, it should be divided by the max value of the data type.
  bool normalized() const { return normalized_; }
  // The buffer storing the entire data of the attribute.
  const DataBuffer *buffer() const { return buffer_; }
  // Returns the number of bytes between two attribute entries, this is, at
  // least size of the data types times number of components.
  int64_t byte_stride() const { return byte_stride_; }
  // The offset where the attribute starts within the block of size byte_stride.
  int64_t byte_offset() const { return byte_offset_; }
  void set_byte_offset(int64_t byte_offset) { byte_offset_ = byte_offset; }
  DataBufferDescriptor buffer_descriptor() const { return buffer_descriptor_; }
  uint32_t unique_id() const { return unique_id_; }
  void set_unique_id(uint32_t id) { unique_id_ = id; }

 protected:
  // Sets a new internal storage for the attribute.
  void ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
                   int64_t byte_offset);

 private:
  // Function for conversion of an attribute to a specific output format given a
  // format of the stored attribute.
  // T is the stored attribute data type.
  // OutT is the desired data type of the attribute.
  template <typename T, typename OutT>
  bool ConvertTypedValue(AttributeValueIndex att_id, int8_t out_num_components,
                         OutT *out_value) const {
    const uint8_t *src_address = GetAddress(att_id);

    // Convert all components available in both the original and output formats.
    for (int i = 0; i < std::min(num_components_, out_num_components); ++i) {
      if (!IsAddressValid(src_address)) {
        return false;
      }
      const T in_value = *reinterpret_cast<const T *>(src_address);

      // Make sure the in_value fits within the range of values that OutT
      // is able to represent. Perform the check only for integral types.
      // For unsigned input types T the value can never be negative, so the
      // lower bound used for the check is zero.
      if (std::is_integral<T>::value && std::is_integral<OutT>::value) {
        static constexpr OutT kOutMin =
            std::is_signed<T>::value ? std::numeric_limits<OutT>::lowest() : 0;
        if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
          return false;
        }
      }

      out_value[i] = static_cast<OutT>(in_value);
      // When converting integer to floating point, normalize the value if
      // necessary.
      if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
          normalized_) {
        out_value[i] /= static_cast<OutT>(std::numeric_limits<T>::max());
      }
      // TODO(ostava): Add handling of normalized attributes when converting
      // between different integer representations. If the attribute is
      // normalized, integer values should be converted as if they represent 0-1
      // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
      // should be converted to range <0, 2^8 - 1>.
      src_address += sizeof(T);
    }
    // Fill empty data for unused output components if needed.
    for (int i = num_components_; i < out_num_components; ++i) {
      out_value[i] = static_cast<OutT>(0);
    }
    return true;
  }

  DataBuffer *buffer_;
  // The buffer descriptor is stored at the time the buffer is attached to this
  // attribute. The purpose is to detect if any changes happened to the buffer
  // since the time it was attached.
  DataBufferDescriptor buffer_descriptor_;
  int8_t num_components_;
  DataType data_type_;
  bool normalized_;
  int64_t byte_stride_;
  int64_t byte_offset_;

  Type attribute_type_;

  // Unique id of this attribute. No two attributes could have the same unique
  // id. It is used to identify each attribute, especially when there are
  // multiple attribute of the same type in a point cloud.
  uint32_t unique_id_;

  friend struct GeometryAttributeHasher;
};
+
+// Hashing support
+
+// Function object for using Attribute as a hash key.
+struct GeometryAttributeHasher {
+ size_t operator()(const GeometryAttribute &va) const {
+ size_t hash = HashCombine(va.buffer_descriptor_.buffer_id,
+ va.buffer_descriptor_.buffer_update_count);
+ hash = HashCombine(va.num_components_, hash);
+ hash = HashCombine(static_cast<int8_t>(va.data_type_), hash);
+ hash = HashCombine(static_cast<int8_t>(va.attribute_type_), hash);
+ hash = HashCombine(va.byte_stride_, hash);
+ return HashCombine(va.byte_offset_, hash);
+ }
+};
+
// Function object for using GeometryAttribute::Type as a hash key.
struct GeometryAttributeTypeHasher {
  size_t operator()(const GeometryAttribute::Type &at) const {
    // The numeric enum value itself serves directly as the hash.
    return static_cast<size_t>(at);
  }
};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/geometry_indices.h b/libs/assimp/contrib/draco/src/draco/attributes/geometry_indices.h
new file mode 100644
index 0000000..80e43e3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/geometry_indices.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
+#define DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
+
+#include <inttypes.h>
+
+#include <limits>
+
+#include "draco/core/draco_index_type.h"
+
+namespace draco {
+
// Index of an attribute value entry stored in a GeometryAttribute.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, AttributeValueIndex)
// Index of a point in a PointCloud.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, PointIndex)
// Vertex index in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, VertexIndex)
// Corner index that identifies a corner in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, CornerIndex)
// Face index for Mesh and CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, FaceIndex)

// Constants denoting invalid indices.
// Each constant holds the maximum uint32_t value, which therefore can never
// be used as a real index.
static constexpr AttributeValueIndex kInvalidAttributeValueIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr PointIndex kInvalidPointIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr VertexIndex kInvalidVertexIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr CornerIndex kInvalidCornerIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr FaceIndex kInvalidFaceIndex(
    std::numeric_limits<uint32_t>::max());
+
+// TODO(ostava): Add strongly typed indices for attribute id and unique
+// attribute id.
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/point_attribute.cc b/libs/assimp/contrib/draco/src/draco/attributes/point_attribute.cc
new file mode 100644
index 0000000..b28f860
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/point_attribute.cc
@@ -0,0 +1,225 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/attributes/point_attribute.h"
+
+#include <unordered_map>
+
+using std::unordered_map;
+
// Shortcut for typed conditionals.
// Local equivalent of std::conditional_t (which is C++14 and newer).
template <bool B, class T, class F>
using conditional_t = typename std::conditional<B, T, F>::type;
+
+namespace draco {
+
// Creates an empty attribute: no unique entries, and explicit (not identity)
// mapping until SetIdentityMapping() or Init() is called.
PointAttribute::PointAttribute()
    : num_unique_entries_(0), identity_mapping_(false) {}
+
// Creates a point attribute whose geometry parameters (type, components,
// data type, ...) are copied from |att|. Point-specific state starts empty.
PointAttribute::PointAttribute(const GeometryAttribute &att)
    : GeometryAttribute(att),
      num_unique_entries_(0),
      identity_mapping_(false) {}
+
+void PointAttribute::Init(Type attribute_type, int8_t num_components,
+ DataType data_type, bool normalized,
+ size_t num_attribute_values) {
+ attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
+ GeometryAttribute::Init(attribute_type, attribute_buffer_.get(),
+ num_components, data_type, normalized,
+ DataTypeLength(data_type) * num_components, 0);
+ Reset(num_attribute_values);
+ SetIdentityMapping();
+}
+
// Copies all data from |src_att| into this attribute, allocating a private
// buffer first if this attribute does not own one yet.
void PointAttribute::CopyFrom(const PointAttribute &src_att) {
  if (buffer() == nullptr) {
    // If the destination attribute doesn't have a valid buffer, create it.
    attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
    ResetBuffer(attribute_buffer_.get(), 0, 0);
  }
  if (!GeometryAttribute::CopyFrom(src_att)) {
    // NOTE(review): the failure is silent because this method returns void;
    // callers cannot detect a partially copied attribute here.
    return;
  }
  identity_mapping_ = src_att.identity_mapping_;
  num_unique_entries_ = src_att.num_unique_entries_;
  indices_map_ = src_att.indices_map_;
  if (src_att.attribute_transform_data_) {
    // Deep-copy the optional attribute transform description.
    attribute_transform_data_ = std::unique_ptr<AttributeTransformData>(
        new AttributeTransformData(*src_att.attribute_transform_data_));
  } else {
    attribute_transform_data_ = nullptr;
  }
}
+
+bool PointAttribute::Reset(size_t num_attribute_values) {
+ if (attribute_buffer_ == nullptr) {
+ attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
+ }
+ const int64_t entry_size = DataTypeLength(data_type()) * num_components();
+ if (!attribute_buffer_->Update(nullptr, num_attribute_values * entry_size)) {
+ return false;
+ }
+ // Assign the new buffer to the parent attribute.
+ ResetBuffer(attribute_buffer_.get(), entry_size, 0);
+ num_unique_entries_ = static_cast<uint32_t>(num_attribute_values);
+ return true;
+}
+
+void PointAttribute::Resize(size_t new_num_unique_entries) {
+ num_unique_entries_ = static_cast<uint32_t>(new_num_unique_entries);
+ attribute_buffer_->Resize(new_num_unique_entries * byte_stride());
+}
+
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
// Deduplicates values starting at the beginning (offset 0) of |in_att|.
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
    const GeometryAttribute &in_att) {
  return DeduplicateValues(in_att, AttributeValueIndex(0));
}
+
+AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
+ const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
+ AttributeValueIndex::ValueType unique_vals = 0;
+ switch (in_att.data_type()) {
+ // Currently we support only float, uint8, and uint16 arguments.
+ case DT_FLOAT32:
+ unique_vals = DeduplicateTypedValues<float>(in_att, in_att_offset);
+ break;
+ case DT_INT8:
+ unique_vals = DeduplicateTypedValues<int8_t>(in_att, in_att_offset);
+ break;
+ case DT_UINT8:
+ case DT_BOOL:
+ unique_vals = DeduplicateTypedValues<uint8_t>(in_att, in_att_offset);
+ break;
+ case DT_UINT16:
+ unique_vals = DeduplicateTypedValues<uint16_t>(in_att, in_att_offset);
+ break;
+ case DT_INT16:
+ unique_vals = DeduplicateTypedValues<int16_t>(in_att, in_att_offset);
+ break;
+ case DT_UINT32:
+ unique_vals = DeduplicateTypedValues<uint32_t>(in_att, in_att_offset);
+ break;
+ case DT_INT32:
+ unique_vals = DeduplicateTypedValues<int32_t>(in_att, in_att_offset);
+ break;
+ default:
+ return -1; // Unsupported data type.
+ }
+ if (unique_vals == 0) {
+ return -1; // Unexpected error.
+ }
+ return unique_vals;
+}
+
+// Helper function for calling UnifyDuplicateAttributes<T,num_components_t>
+// with the correct template arguments.
+// Returns the number of unique attribute values.
+template <typename T>
+AttributeValueIndex::ValueType PointAttribute::DeduplicateTypedValues(
+ const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
+ // Select the correct method to call based on the number of attribute
+ // components.
+ switch (in_att.num_components()) {
+ case 1:
+ return DeduplicateFormattedValues<T, 1>(in_att, in_att_offset);
+ case 2:
+ return DeduplicateFormattedValues<T, 2>(in_att, in_att_offset);
+ case 3:
+ return DeduplicateFormattedValues<T, 3>(in_att, in_att_offset);
+ case 4:
+ return DeduplicateFormattedValues<T, 4>(in_att, in_att_offset);
+ default:
+ return 0;
+ }
+}
+
// Deduplicates the values of |in_att| (read starting at |in_att_offset|) into
// this attribute, rewriting the point-to-value mapping so every point still
// refers to its original value. Returns the number of unique values.
template <typename T, int num_components_t>
AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  // We want to detect duplicates using a hash map but we cannot hash floating
  // point numbers directly so bit-copy floats to the same sized integers and
  // hash them.

  // First we need to determine which int type to use (1, 2, 4 or 8 bytes).
  // Note, this is done at compile time using std::conditional struct.
  // Conditional is in form <bool-expression, true, false>. If bool-expression
  // is true the "true" branch is used and vice versa. All at compile time.
  typedef conditional_t<sizeof(T) == 1, uint8_t,
                        conditional_t<sizeof(T) == 2, uint16_t,
                                      conditional_t<sizeof(T) == 4, uint32_t,
                                                    /*else*/ uint64_t>>>
      HashType;

  AttributeValueIndex unique_vals(0);
  typedef std::array<T, num_components_t> AttributeValue;
  typedef std::array<HashType, num_components_t> AttributeHashableValue;
  // Hash map storing index of the first attribute with a given value.
  unordered_map<AttributeHashableValue, AttributeValueIndex,
                HashArray<AttributeHashableValue>>
      value_to_index_map;
  AttributeValue att_value;
  AttributeHashableValue hashable_value;
  // Old-value-index -> new-value-index mapping built during the scan.
  IndexTypeVector<AttributeValueIndex, AttributeValueIndex> value_map(
      num_unique_entries_);
  for (AttributeValueIndex i(0); i < num_unique_entries_; ++i) {
    const AttributeValueIndex att_pos = i + in_att_offset;
    att_value = in_att.GetValue<T, num_components_t>(att_pos);
    // Convert the value to hashable type. Bit-copy real attributes to integers.
    memcpy(&(hashable_value[0]), &(att_value[0]), sizeof(att_value));

    // Check if the given attribute value has been used before already.
    auto it = value_to_index_map.find(hashable_value);
    if (it != value_to_index_map.end()) {
      // Duplicated value found. Update index mapping.
      value_map[i] = it->second;
    } else {
      // New unique value.
      // Update the hash map with a new entry pointing to the latest unique
      // vertex index.
      value_to_index_map.insert(
          std::pair<AttributeHashableValue, AttributeValueIndex>(hashable_value,
                                                                 unique_vals));
      // Add the unique value to the mesh builder.
      // Note: this writes back into this attribute's own buffer; unique values
      // are compacted toward the front as they are discovered.
      SetAttributeValue(unique_vals, &att_value);
      // Update index mapping.
      value_map[i] = unique_vals;

      ++unique_vals;
    }
  }
  if (unique_vals == num_unique_entries_) {
    return unique_vals.value();  // Nothing has changed.
  }
  if (is_mapping_identity()) {
    // Change identity mapping to the explicit one.
    // The number of points is equal to the number of old unique values.
    SetExplicitMapping(num_unique_entries_);
    // Update the explicit map.
    for (uint32_t i = 0; i < num_unique_entries_; ++i) {
      SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]);
    }
  } else {
    // Update point to value map using the mapping between old and new values.
    for (PointIndex i(0); i < static_cast<uint32_t>(indices_map_.size()); ++i) {
      SetPointMapEntry(i, value_map[indices_map_[i]]);
    }
  }
  num_unique_entries_ = unique_vals.value();
  return num_unique_entries_;
}
+#endif
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/point_attribute.h b/libs/assimp/contrib/draco/src/draco/attributes/point_attribute.h
new file mode 100644
index 0000000..ee36620
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/point_attribute.h
@@ -0,0 +1,190 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
+#define DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
+
+#include <memory>
+
+#include "draco/attributes/attribute_transform_data.h"
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/core/draco_index_type_vector.h"
+#include "draco/core/hash_utils.h"
+#include "draco/core/macros.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Class for storing point specific data about each attribute. In general,
+// multiple points stored in a point cloud can share the same attribute value
+// and this class provides the necessary mapping between point ids and attribute
+// value ids.
class PointAttribute : public GeometryAttribute {
 public:
  PointAttribute();
  explicit PointAttribute(const GeometryAttribute &att);

  // Make sure the move constructor is defined (needed for better performance
  // when new attributes are added to PointCloud).
  PointAttribute(PointAttribute &&attribute) = default;
  PointAttribute &operator=(PointAttribute &&attribute) = default;

  // Initializes a point attribute. By default the attribute will be set to
  // identity mapping between point indices and attribute values. To set custom
  // mapping use SetExplicitMapping() function.
  void Init(Type attribute_type, int8_t num_components, DataType data_type,
            bool normalized, size_t num_attribute_values);

  // Copies attribute data from the provided |src_att| attribute.
  void CopyFrom(const PointAttribute &src_att);

  // Prepares the attribute storage for the specified number of entries.
  bool Reset(size_t num_attribute_values);

  // Returns the number of unique attribute values stored in the attribute.
  size_t size() const { return num_unique_entries_; }
  // Maps a point index to the index of its attribute value. With identity
  // mapping the point index value is used directly.
  AttributeValueIndex mapped_index(PointIndex point_index) const {
    if (identity_mapping_) {
      return AttributeValueIndex(point_index.value());
    }
    return indices_map_[point_index];
  }
  // Returns the buffer owned by this attribute. Note that this hides (and
  // relaxes the constness of) GeometryAttribute::buffer().
  DataBuffer *buffer() const { return attribute_buffer_.get(); }
  bool is_mapping_identity() const { return identity_mapping_; }
  // Size of the explicit mapping table (0 when identity mapping is used).
  size_t indices_map_size() const {
    if (is_mapping_identity()) {
      return 0;
    }
    return indices_map_.size();
  }

  const uint8_t *GetAddressOfMappedIndex(PointIndex point_index) const {
    return GetAddress(mapped_index(point_index));
  }

  // Sets the new number of unique attribute entries for the attribute. The
  // function resizes the attribute storage to hold |num_attribute_values|
  // entries.
  // All previous entries with AttributeValueIndex < |num_attribute_values|
  // are preserved. Caller needs to ensure that the PointAttribute is still
  // valid after the resizing operation (that is, each point is mapped to a
  // valid attribute value).
  void Resize(size_t new_num_unique_entries);

  // Functions for setting the type of mapping between point indices and
  // attribute entry ids.
  // This function sets the mapping to implicit, where point indices are equal
  // to attribute entry indices.
  void SetIdentityMapping() {
    identity_mapping_ = true;
    indices_map_.clear();
  }
  // This function sets the mapping to be explicitly using the indices_map_
  // array that needs to be initialized by the caller.
  void SetExplicitMapping(size_t num_points) {
    identity_mapping_ = false;
    // Unset entries are marked invalid until the caller fills them in.
    indices_map_.resize(num_points, kInvalidAttributeValueIndex);
  }

  // Set an explicit map entry for a specific point index.
  void SetPointMapEntry(PointIndex point_index,
                        AttributeValueIndex entry_index) {
    DRACO_DCHECK(!identity_mapping_);
    indices_map_[point_index] = entry_index;
  }

  // Same as GeometryAttribute::GetValue(), but using point id as the input.
  // Mapping to attribute value index is performed automatically.
  void GetMappedValue(PointIndex point_index, void *out_data) const {
    return GetValue(mapped_index(point_index), out_data);
  }

#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
  // Deduplicate |in_att| values into |this| attribute. |in_att| can be equal
  // to |this|.
  // Returns -1 if the deduplication failed.
  AttributeValueIndex::ValueType DeduplicateValues(
      const GeometryAttribute &in_att);

  // Same as above but the values read from |in_att| are sampled with the
  // provided offset |in_att_offset|.
  AttributeValueIndex::ValueType DeduplicateValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif

  // Set attribute transform data for the attribute. The data is used to store
  // the type and parameters of the transform that is applied on the attribute
  // data (optional).
  void SetAttributeTransformData(
      std::unique_ptr<AttributeTransformData> transform_data) {
    attribute_transform_data_ = std::move(transform_data);
  }
  const AttributeTransformData *GetAttributeTransformData() const {
    return attribute_transform_data_.get();
  }

 private:
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
  template <typename T>
  AttributeValueIndex::ValueType DeduplicateTypedValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
  template <typename T, int COMPONENTS_COUNT>
  AttributeValueIndex::ValueType DeduplicateFormattedValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif

  // Data storage for attribute values. GeometryAttribute itself doesn't own its
  // buffer so we need to allocate it here.
  std::unique_ptr<DataBuffer> attribute_buffer_;

  // Mapping between point ids and attribute value ids.
  IndexTypeVector<PointIndex, AttributeValueIndex> indices_map_;
  AttributeValueIndex::ValueType num_unique_entries_;
  // Flag when the mapping between point ids and attribute values is identity.
  bool identity_mapping_;

  // If an attribute contains transformed data (e.g. quantized), we can specify
  // the attribute transform here and use it to transform the attribute back to
  // its original format.
  std::unique_ptr<AttributeTransformData> attribute_transform_data_;

  friend struct PointAttributeHasher;
};
+
+// Hash functor for the PointAttribute class.
+struct PointAttributeHasher {
+ size_t operator()(const PointAttribute &attribute) const {
+ GeometryAttributeHasher base_hasher;
+ size_t hash = base_hasher(attribute);
+ hash = HashCombine(attribute.identity_mapping_, hash);
+ hash = HashCombine(attribute.num_unique_entries_, hash);
+ hash = HashCombine(attribute.indices_map_.size(), hash);
+ if (!attribute.indices_map_.empty()) {
+ const uint64_t indices_hash = FingerprintString(
+ reinterpret_cast<const char *>(attribute.indices_map_.data()),
+ attribute.indices_map_.size());
+ hash = HashCombine(indices_hash, hash);
+ }
+ if (attribute.attribute_buffer_ != nullptr) {
+ const uint64_t buffer_hash = FingerprintString(
+ reinterpret_cast<const char *>(attribute.attribute_buffer_->data()),
+ attribute.attribute_buffer_->data_size());
+ hash = HashCombine(buffer_hash, hash);
+ }
+ return hash;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/attributes/point_attribute_test.cc b/libs/assimp/contrib/draco/src/draco/attributes/point_attribute_test.cc
new file mode 100644
index 0000000..4ae23fb
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/attributes/point_attribute_test.cc
@@ -0,0 +1,128 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/attributes/point_attribute.h"
+
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
+class PointAttributeTest : public ::testing::Test {
+ protected:
+ PointAttributeTest() {}
+};
+
+TEST_F(PointAttributeTest, TestCopy) {
+ // This test verifies that PointAttribute can copy data from another point
+ // attribute.
+ draco::PointAttribute pa;
+ pa.Init(draco::GeometryAttribute::POSITION, 1, draco::DT_INT32, false, 10);
+
+ for (int32_t i = 0; i < 10; ++i) {
+ pa.SetAttributeValue(draco::AttributeValueIndex(i), &i);
+ }
+
+ pa.set_unique_id(12);
+
+ draco::PointAttribute other_pa;
+ other_pa.CopyFrom(pa);
+
+ draco::PointAttributeHasher hasher;
+ ASSERT_EQ(hasher(pa), hasher(other_pa));
+ ASSERT_EQ(pa.unique_id(), other_pa.unique_id());
+
+ // The hash function does not actually compute the hash from attribute values,
+ // so ensure the data got copied correctly as well.
+ for (int32_t i = 0; i < 10; ++i) {
+ int32_t data;
+ other_pa.GetValue(draco::AttributeValueIndex(i), &data);
+ ASSERT_EQ(data, i);
+ }
+}
+
+TEST_F(PointAttributeTest, TestGetValueFloat) {
+ draco::PointAttribute pa;
+ pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
+ float points[3];
+ for (int32_t i = 0; i < 5; ++i) {
+ points[0] = i * 3.0;
+ points[1] = (i * 3.0) + 1.0;
+ points[2] = (i * 3.0) + 2.0;
+ pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
+ }
+
+ for (int32_t i = 0; i < 5; ++i) {
+ pa.GetValue(draco::AttributeValueIndex(i), &points);
+ ASSERT_FLOAT_EQ(points[0], i * 3.0);
+ ASSERT_FLOAT_EQ(points[1], (i * 3.0) + 1.0);
+ ASSERT_FLOAT_EQ(points[2], (i * 3.0) + 2.0);
+ }
+}
+
+TEST_F(PointAttributeTest, TestGetArray) {
+ draco::PointAttribute pa;
+ pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
+ float points[3];
+ for (int32_t i = 0; i < 5; ++i) {
+ points[0] = i * 3.0;
+ points[1] = (i * 3.0) + 1.0;
+ points[2] = (i * 3.0) + 2.0;
+ pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
+ }
+
+ for (int32_t i = 0; i < 5; ++i) {
+ std::array<float, 3> att_value;
+ att_value = pa.GetValue<float, 3>(draco::AttributeValueIndex(i));
+ ASSERT_FLOAT_EQ(att_value[0], i * 3.0);
+ ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0);
+ ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0);
+ }
+ for (int32_t i = 0; i < 5; ++i) {
+ std::array<float, 3> att_value;
+ EXPECT_TRUE(
+ (pa.GetValue<float, 3>(draco::AttributeValueIndex(i), &att_value)));
+ ASSERT_FLOAT_EQ(att_value[0], i * 3.0);
+ ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0);
+ ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0);
+ }
+}
+
+TEST_F(PointAttributeTest, TestArrayReadError) {
+ draco::PointAttribute pa;
+ pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
+ float points[3];
+ for (int32_t i = 0; i < 5; ++i) {
+ points[0] = i * 3.0;
+ points[1] = (i * 3.0) + 1.0;
+ points[2] = (i * 3.0) + 2.0;
+ pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
+ }
+
+ std::array<float, 3> att_value;
+ EXPECT_FALSE(
+ (pa.GetValue<float, 3>(draco::AttributeValueIndex(5), &att_value)));
+}
+
+TEST_F(PointAttributeTest, TestResize) {
+ draco::PointAttribute pa;
+ pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
+ ASSERT_EQ(pa.size(), 5);
+ ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 5);
+
+ pa.Resize(10);
+ ASSERT_EQ(pa.size(), 10);
+ ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 10);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc
new file mode 100644
index 0000000..007dd2f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc
@@ -0,0 +1,127 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/attributes_decoder.h"
+
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
+AttributesDecoder::AttributesDecoder()
+ : point_cloud_decoder_(nullptr), point_cloud_(nullptr) {}
+
+bool AttributesDecoder::Init(PointCloudDecoder *decoder, PointCloud *pc) {
+ point_cloud_decoder_ = decoder;
+ point_cloud_ = pc;
+ return true;
+}
+
+bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
+ // Decode and create attributes.
+ uint32_t num_attributes;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (point_cloud_decoder_->bitstream_version() <
+ DRACO_BITSTREAM_VERSION(2, 0)) {
+ if (!in_buffer->Decode(&num_attributes)) {
+ return false;
+ }
+ } else
+#endif
+ {
+ if (!DecodeVarint(&num_attributes, in_buffer)) {
+ return false;
+ }
+ }
+
+ // Check that decoded number of attributes is valid.
+ if (num_attributes == 0) {
+ return false;
+ }
+ if (num_attributes > 5 * in_buffer->remaining_size()) {
+ // The decoded number of attributes is unreasonably high, because at least
+ // five bytes of attribute descriptor data per attribute are expected.
+ return false;
+ }
+
+ // Decode attribute descriptor data.
+ point_attribute_ids_.resize(num_attributes);
+ PointCloud *pc = point_cloud_;
+ for (uint32_t i = 0; i < num_attributes; ++i) {
+ // Decode attribute descriptor data.
+ uint8_t att_type, data_type, num_components, normalized;
+ if (!in_buffer->Decode(&att_type)) {
+ return false;
+ }
+ if (!in_buffer->Decode(&data_type)) {
+ return false;
+ }
+ if (!in_buffer->Decode(&num_components)) {
+ return false;
+ }
+ if (!in_buffer->Decode(&normalized)) {
+ return false;
+ }
+ if (att_type >= GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {
+ return false;
+ }
+ if (data_type == DT_INVALID || data_type >= DT_TYPES_COUNT) {
+ return false;
+ }
+
+ // Check decoded attribute descriptor data.
+ if (num_components == 0) {
+ return false;
+ }
+
+ // Add the attribute to the point cloud.
+ const DataType draco_dt = static_cast<DataType>(data_type);
+ GeometryAttribute ga;
+ ga.Init(static_cast<GeometryAttribute::Type>(att_type), nullptr,
+ num_components, draco_dt, normalized > 0,
+ DataTypeLength(draco_dt) * num_components, 0);
+ uint32_t unique_id;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (point_cloud_decoder_->bitstream_version() <
+ DRACO_BITSTREAM_VERSION(1, 3)) {
+ uint16_t custom_id;
+ if (!in_buffer->Decode(&custom_id)) {
+ return false;
+ }
+ // TODO(draco-eng): Add "custom_id" to attribute metadata.
+ unique_id = static_cast<uint32_t>(custom_id);
+ ga.set_unique_id(unique_id);
+ } else
+#endif
+ {
+ if (!DecodeVarint(&unique_id, in_buffer)) {
+ return false;
+ }
+ ga.set_unique_id(unique_id);
+ }
+ const int att_id = pc->AddAttribute(
+ std::unique_ptr<PointAttribute>(new PointAttribute(ga)));
+ pc->attribute(att_id)->set_unique_id(unique_id);
+ point_attribute_ids_[i] = att_id;
+
+ // Update the inverse map.
+ if (att_id >=
+ static_cast<int32_t>(point_attribute_to_local_id_map_.size())) {
+ point_attribute_to_local_id_map_.resize(att_id + 1, -1);
+ }
+ point_attribute_to_local_id_map_[att_id] = i;
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.h
new file mode 100644
index 0000000..5b2bb2c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder.h
@@ -0,0 +1,97 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
+
+#include <vector>
+
+#include "draco/compression/attributes/attributes_decoder_interface.h"
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/draco_features.h"
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+// Base class for decoding one or more attributes that were encoded with a
+// matching AttributesEncoder. It is a basic implementation of
+// AttributesDecoderInterface that provides functionality that is shared between
+// all AttributesDecoders.
+class AttributesDecoder : public AttributesDecoderInterface {
+ public:
+ AttributesDecoder();
+ virtual ~AttributesDecoder() = default;
+
+ // Called after all attribute decoders are created. It can be used to perform
+ // any custom initialization.
+ bool Init(PointCloudDecoder *decoder, PointCloud *pc) override;
+
+ // Decodes any attribute decoder specific data from the |in_buffer|.
+ bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) override;
+
+ int32_t GetAttributeId(int i) const override {
+ return point_attribute_ids_[i];
+ }
+ int32_t GetNumAttributes() const override {
+ return static_cast<int32_t>(point_attribute_ids_.size());
+ }
+ PointCloudDecoder *GetDecoder() const override {
+ return point_cloud_decoder_;
+ }
+
+ // Decodes attribute data from the source buffer.
+ bool DecodeAttributes(DecoderBuffer *in_buffer) override {
+ if (!DecodePortableAttributes(in_buffer)) {
+ return false;
+ }
+ if (!DecodeDataNeededByPortableTransforms(in_buffer)) {
+ return false;
+ }
+ if (!TransformAttributesToOriginalFormat()) {
+ return false;
+ }
+ return true;
+ }
+
+ protected:
+ int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
+ const int id_map_size =
+ static_cast<int>(point_attribute_to_local_id_map_.size());
+ if (point_attribute_id >= id_map_size) {
+ return -1;
+ }
+ return point_attribute_to_local_id_map_[point_attribute_id];
+ }
+ virtual bool DecodePortableAttributes(DecoderBuffer *in_buffer) = 0;
+ virtual bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) {
+ return true;
+ }
+ virtual bool TransformAttributesToOriginalFormat() { return true; }
+
+ private:
+ // List of attribute ids that need to be decoded with this decoder.
+ std::vector<int32_t> point_attribute_ids_;
+
+ // Map between point attribute id and the local id (i.e., the inverse of the
+ // |point_attribute_ids_|.
+ std::vector<int32_t> point_attribute_to_local_id_map_;
+
+ PointCloudDecoder *point_cloud_decoder_;
+ PointCloud *point_cloud_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h
new file mode 100644
index 0000000..8e5cf52
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
+
+#include <vector>
+
+#include "draco/core/decoder_buffer.h"
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+class PointCloudDecoder;
+
+// Interface class for decoding one or more attributes that were encoded with a
+// matching AttributesEncoder. It provides only the basic interface
+// that is used by the PointCloudDecoder. The actual decoding must be
+// implemented in derived classes using the DecodeAttributes() method.
+class AttributesDecoderInterface {
+ public:
+ AttributesDecoderInterface() = default;
+ virtual ~AttributesDecoderInterface() = default;
+
+ // Called after all attribute decoders are created. It can be used to perform
+ // any custom initialization.
+ virtual bool Init(PointCloudDecoder *decoder, PointCloud *pc) = 0;
+
+ // Decodes any attribute decoder specific data from the |in_buffer|.
+ virtual bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) = 0;
+
+ // Decode attribute data from the source buffer. Needs to be implemented by
+ // the derived classes.
+ virtual bool DecodeAttributes(DecoderBuffer *in_buffer) = 0;
+
+ virtual int32_t GetAttributeId(int i) const = 0;
+ virtual int32_t GetNumAttributes() const = 0;
+ virtual PointCloudDecoder *GetDecoder() const = 0;
+
+ // Returns an attribute containing data processed by the attribute transform.
+ // (see TransformToPortableFormat() method). This data is guaranteed to be
+ // same for encoder and decoder and it can be used by predictors.
+ virtual const PointAttribute *GetPortableAttribute(
+ int32_t /* point_attribute_id */) {
+ return nullptr;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc
new file mode 100644
index 0000000..797c62f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc
@@ -0,0 +1,49 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/attributes_encoder.h"
+
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+AttributesEncoder::AttributesEncoder()
+ : point_cloud_encoder_(nullptr), point_cloud_(nullptr) {}
+
+AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() {
+ AddAttributeId(att_id);
+}
+
+bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) {
+ point_cloud_encoder_ = encoder;
+ point_cloud_ = pc;
+ return true;
+}
+
+bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
+ // Encode data about all attributes.
+ EncodeVarint(num_attributes(), out_buffer);
+ for (uint32_t i = 0; i < num_attributes(); ++i) {
+ const int32_t att_id = point_attribute_ids_[i];
+ const PointAttribute *const pa = point_cloud_->attribute(att_id);
+ out_buffer->Encode(static_cast<uint8_t>(pa->attribute_type()));
+ out_buffer->Encode(static_cast<uint8_t>(pa->data_type()));
+ out_buffer->Encode(static_cast<uint8_t>(pa->num_components()));
+ out_buffer->Encode(static_cast<uint8_t>(pa->normalized()));
+ EncodeVarint(pa->unique_id(), out_buffer);
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.h
new file mode 100644
index 0000000..9de846a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/attributes_encoder.h
@@ -0,0 +1,154 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
+
+#include "draco/attributes/point_attribute.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+class PointCloudEncoder;
+
+// Base class for encoding one or more attributes of a PointCloud (or other
+// geometry). This base class provides only the basic interface that is used
+// by the PointCloudEncoder.
+class AttributesEncoder {
+ public:
+ AttributesEncoder();
+ // Constructs an attribute encoder associated with a given point attribute.
+ explicit AttributesEncoder(int point_attrib_id);
+ virtual ~AttributesEncoder() = default;
+
+ // Called after all attribute encoders are created. It can be used to perform
+ // any custom initialization, including setting up attribute dependencies.
+ // Note: no data should be encoded in this function, because the decoder may
+ // process encoders in a different order from the decoder.
+ virtual bool Init(PointCloudEncoder *encoder, const PointCloud *pc);
+
+ // Encodes data needed by the target attribute decoder.
+ virtual bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer);
+
+ // Returns a unique identifier of the given encoder type, that is used during
+ // decoding to construct the corresponding attribute decoder.
+ virtual uint8_t GetUniqueId() const = 0;
+
+ // Encode attribute data to the target buffer.
+ virtual bool EncodeAttributes(EncoderBuffer *out_buffer) {
+ if (!TransformAttributesToPortableFormat()) {
+ return false;
+ }
+ if (!EncodePortableAttributes(out_buffer)) {
+ return false;
+ }
+ // Encode data needed by portable transforms after the attribute is encoded.
+ // This corresponds to the order in which the data is going to be decoded by
+ // the decoder.
+ if (!EncodeDataNeededByPortableTransforms(out_buffer)) {
+ return false;
+ }
+ return true;
+ }
+
+ // Returns the number of attributes that need to be encoded before the
+ // specified attribute is encoded.
+ // Note that the attribute is specified by its point attribute id.
+ virtual int NumParentAttributes(int32_t /* point_attribute_id */) const {
+ return 0;
+ }
+
+ virtual int GetParentAttributeId(int32_t /* point_attribute_id */,
+ int32_t /* parent_i */) const {
+ return -1;
+ }
+
+ // Marks a given attribute as a parent of another attribute.
+ virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) {
+ return false;
+ }
+
+ // Returns an attribute containing data processed by the attribute transform.
+ // (see TransformToPortableFormat() method). This data is guaranteed to be
+ // encoded losslessly and it can be safely used for predictors.
+ virtual const PointAttribute *GetPortableAttribute(
+ int32_t /* point_attribute_id */) {
+ return nullptr;
+ }
+
+ void AddAttributeId(int32_t id) {
+ point_attribute_ids_.push_back(id);
+ if (id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size())) {
+ point_attribute_to_local_id_map_.resize(id + 1, -1);
+ }
+ point_attribute_to_local_id_map_[id] =
+ static_cast<int32_t>(point_attribute_ids_.size()) - 1;
+ }
+
+ // Sets new attribute point ids (replacing the existing ones).
+ void SetAttributeIds(const std::vector<int32_t> &point_attribute_ids) {
+ point_attribute_ids_.clear();
+ point_attribute_to_local_id_map_.clear();
+ for (int32_t att_id : point_attribute_ids) {
+ AddAttributeId(att_id);
+ }
+ }
+
+ int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; }
+ uint32_t num_attributes() const {
+ return static_cast<uint32_t>(point_attribute_ids_.size());
+ }
+ PointCloudEncoder *encoder() const { return point_cloud_encoder_; }
+
+ protected:
+ // Transforms the input attribute data into a form that should be losslessly
+ // encoded (transform itself can be lossy).
+ virtual bool TransformAttributesToPortableFormat() { return true; }
+
+ // Losslessly encodes data of all portable attributes.
+ // Precondition: All attributes must have been transformed into portable
+ // format at this point (see TransformAttributesToPortableFormat() method).
+ virtual bool EncodePortableAttributes(EncoderBuffer *out_buffer) = 0;
+
+ // Encodes any data needed to revert the transform to portable format for each
+ // attribute (e.g. data needed for dequantization of quantized values).
+ virtual bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) {
+ return true;
+ }
+
+ int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
+ const int id_map_size =
+ static_cast<int>(point_attribute_to_local_id_map_.size());
+ if (point_attribute_id >= id_map_size) {
+ return -1;
+ }
+ return point_attribute_to_local_id_map_[point_attribute_id];
+ }
+
+ private:
+ // List of attribute ids that need to be encoded with this encoder.
+ std::vector<int32_t> point_attribute_ids_;
+
+ // Map between point attribute id and the local id (i.e., the inverse of the
+ // |point_attribute_ids_|.
+ std::vector<int32_t> point_attribute_to_local_id_map_;
+
+ PointCloudEncoder *point_cloud_encoder_;
+ const PointCloud *point_cloud_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
new file mode 100644
index 0000000..e4d5348
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
@@ -0,0 +1,556 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/kd_tree_attributes_decoder.h"
+
+#include "draco/compression/attributes/kd_tree_attributes_shared.h"
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
+#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h"
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+#include "draco/core/draco_types.h"
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
+// attribute, offset_dimensionality, data_type, data_size, num_components
+using AttributeTuple =
+ std::tuple<PointAttribute *, uint32_t, DataType, uint32_t, uint32_t>;
+
+// Output iterator that is used to decode values directly into the data buffer
+// of the modified PointAttribute.
+// The extension of this iterator beyond the DT_UINT32 concerns itself only with
+// the size of the data for efficiency, not the type. DataType is conveyed in
+// but is an unused field populated for any future logic/special casing.
+// DT_UINT32 and all other 4-byte types are naturally supported from the size of
+// data in the kd tree encoder. DT_UINT16 and DT_UINT8 are supported by way
+// of byte copies into a temporary memory buffer.
+template <class CoeffT>
+class PointAttributeVectorOutputIterator {
+ typedef PointAttributeVectorOutputIterator<CoeffT> Self;
+
+ public:
+ PointAttributeVectorOutputIterator(
+ PointAttributeVectorOutputIterator &&that) = default;
+
+ explicit PointAttributeVectorOutputIterator(
+ const std::vector<AttributeTuple> &atts)
+ : attributes_(atts), point_id_(0) {
+ DRACO_DCHECK_GE(atts.size(), 1);
+ uint32_t required_decode_bytes = 0;
+ for (auto index = 0; index < attributes_.size(); index++) {
+ const AttributeTuple &att = attributes_[index];
+ required_decode_bytes = (std::max)(required_decode_bytes,
+ std::get<3>(att) * std::get<4>(att));
+ }
+ memory_.resize(required_decode_bytes);
+ data_ = memory_.data();
+ }
+
+ const Self &operator++() {
+ ++point_id_;
+ return *this;
+ }
+
+ // We do not want to do ANY copying of this constructor so this particular
+ // operator is disabled for performance reasons.
+ // Self operator++(int) {
+ // Self copy = *this;
+ // ++point_id_;
+ // return copy;
+ // }
+
+ Self &operator*() { return *this; }
+ // Still needed in some cases.
+ // TODO(hemmer): remove.
+ // hardcoded to 3 based on legacy usage.
+ const Self &operator=(const VectorD<CoeffT, 3> &val) {
+ DRACO_DCHECK_EQ(attributes_.size(), 1); // Expect only ONE attribute.
+ AttributeTuple &att = attributes_[0];
+ PointAttribute *attribute = std::get<0>(att);
+ const uint32_t &offset = std::get<1>(att);
+ DRACO_DCHECK_EQ(offset, 0); // expected to be zero
+ attribute->SetAttributeValue(attribute->mapped_index(point_id_),
+ &val[0] + offset);
+ return *this;
+ }
+ // Additional operator taking std::vector as argument.
+ const Self &operator=(const std::vector<CoeffT> &val) {
+ for (auto index = 0; index < attributes_.size(); index++) {
+ AttributeTuple &att = attributes_[index];
+ PointAttribute *attribute = std::get<0>(att);
+ const uint32_t &offset = std::get<1>(att);
+ const uint32_t &data_size = std::get<3>(att);
+ const uint32_t &num_components = std::get<4>(att);
+ const uint32_t *data_source = val.data() + offset;
+ if (data_size < 4) { // handle uint16_t, uint8_t
+ // selectively copy data bytes
+ uint8_t *data_counter = data_;
+ for (uint32_t index = 0; index < num_components;
+ index += 1, data_counter += data_size) {
+ std::memcpy(data_counter, data_source + index, data_size);
+ }
+ // redirect to copied data
+ data_source = reinterpret_cast<uint32_t *>(data_);
+ }
+ const AttributeValueIndex avi = attribute->mapped_index(point_id_);
+ if (avi >= static_cast<uint32_t>(attribute->size())) {
+ return *this;
+ }
+ attribute->SetAttributeValue(avi, data_source);
+ }
+ return *this;
+ }
+
+ private:
+ // preallocated memory for buffering different data sizes. Never reallocated.
+ std::vector<uint8_t> memory_;
+ uint8_t *data_;
+ std::vector<AttributeTuple> attributes_;
+ PointIndex point_id_;
+
+ // NO COPY
+ PointAttributeVectorOutputIterator(
+ const PointAttributeVectorOutputIterator &that) = delete;
+ PointAttributeVectorOutputIterator &operator=(
+ PointAttributeVectorOutputIterator const &) = delete;
+};
+
+KdTreeAttributesDecoder::KdTreeAttributesDecoder() {}
+
+bool KdTreeAttributesDecoder::DecodePortableAttributes(
+ DecoderBuffer *in_buffer) {
+ if (in_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 3)) {
+ // Old bitstream does everything in the
+ // DecodeDataNeededByPortableTransforms() method.
+ return true;
+ }
+ uint8_t compression_level = 0;
+ if (!in_buffer->Decode(&compression_level)) {
+ return false;
+ }
+ const int32_t num_points = GetDecoder()->point_cloud()->num_points();
+
+ // Decode data using the kd tree decoding into integer (portable) attributes.
+ // We first need to go over all attributes and create a new portable storage
+ // for those attributes that need it (floating point attributes that have to
+ // be dequantized after decoding).
+
+ const int num_attributes = GetNumAttributes();
+ uint32_t total_dimensionality = 0; // position is a required dimension
+ std::vector<AttributeTuple> atts(num_attributes);
+
+ for (int i = 0; i < GetNumAttributes(); ++i) {
+ const int att_id = GetAttributeId(i);
+ PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
+ // All attributes have the same number of values and identity mapping
+ // between PointIndex and AttributeValueIndex.
+ att->Reset(num_points);
+ att->SetIdentityMapping();
+
+ PointAttribute *target_att = nullptr;
+ if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
+ att->data_type() == DT_UINT8) {
+ // We can decode to these attributes directly.
+ target_att = att;
+ } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
+ att->data_type() == DT_INT8) {
+ // Prepare storage for data that is used to convert unsigned values back
+ // to the signed ones.
+ for (int c = 0; c < att->num_components(); ++c) {
+ min_signed_values_.push_back(0);
+ }
+ target_att = att;
+ } else if (att->data_type() == DT_FLOAT32) {
+ // Create a portable attribute that will hold the decoded data. We will
+ // dequantize the decoded data to the final attribute later on.
+ const int num_components = att->num_components();
+ GeometryAttribute va;
+ va.Init(att->attribute_type(), nullptr, num_components, DT_UINT32, false,
+ num_components * DataTypeLength(DT_UINT32), 0);
+ std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
+ port_att->SetIdentityMapping();
+ port_att->Reset(num_points);
+ quantized_portable_attributes_.push_back(std::move(port_att));
+ target_att = quantized_portable_attributes_.back().get();
+ } else {
+ // Unsupported type.
+ return false;
+ }
+ // Add attribute to the output iterator used by the core algorithm.
+ const DataType data_type = target_att->data_type();
+ const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
+ const uint32_t num_components = target_att->num_components();
+ atts[i] = std::make_tuple(target_att, total_dimensionality, data_type,
+ data_size, num_components);
+ total_dimensionality += num_components;
+ }
+ PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
+
+ switch (compression_level) {
+ case 0: {
+ DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 1: {
+ DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 2: {
+ DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 3: {
+ DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 4: {
+ DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 5: {
+ DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 6: {
+ DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
+ DecoderBuffer *in_buffer) {
+ if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) {
+ // Decode quantization data for each attribute that need it.
+ // TODO(ostava): This should be moved to AttributeQuantizationTransform.
+ std::vector<float> min_value;
+ for (int i = 0; i < GetNumAttributes(); ++i) {
+ const int att_id = GetAttributeId(i);
+ const PointAttribute *const att =
+ GetDecoder()->point_cloud()->attribute(att_id);
+ if (att->data_type() == DT_FLOAT32) {
+ const int num_components = att->num_components();
+ min_value.resize(num_components);
+ if (!in_buffer->Decode(&min_value[0], sizeof(float) * num_components)) {
+ return false;
+ }
+ float max_value_dif;
+ if (!in_buffer->Decode(&max_value_dif)) {
+ return false;
+ }
+ uint8_t quantization_bits;
+ if (!in_buffer->Decode(&quantization_bits) || quantization_bits > 31) {
+ return false;
+ }
+ AttributeQuantizationTransform transform;
+ if (!transform.SetParameters(quantization_bits, min_value.data(),
+ num_components, max_value_dif)) {
+ return false;
+ }
+ const int num_transforms =
+ static_cast<int>(attribute_quantization_transforms_.size());
+ if (!transform.TransferToAttribute(
+ quantized_portable_attributes_[num_transforms].get())) {
+ return false;
+ }
+ attribute_quantization_transforms_.push_back(transform);
+ }
+ }
+
+ // Decode transform data for signed integer attributes.
+ for (int i = 0; i < min_signed_values_.size(); ++i) {
+ int32_t val;
+ if (!DecodeVarint(&val, in_buffer)) {
+ return false;
+ }
+ min_signed_values_[i] = val;
+ }
+ return true;
+ }
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ // Handle old bitstream
+ // Figure out the total dimensionality of the point cloud
+ const uint32_t attribute_count = GetNumAttributes();
+ uint32_t total_dimensionality = 0; // position is a required dimension
+ std::vector<AttributeTuple> atts(attribute_count);
+ for (auto attribute_index = 0;
+ static_cast<uint32_t>(attribute_index) < attribute_count;
+ attribute_index += 1) // increment the dimensionality as needed...
+ {
+ const int att_id = GetAttributeId(attribute_index);
+ PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
+ const DataType data_type = att->data_type();
+ const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
+ const uint32_t num_components = att->num_components();
+ if (data_size > 4) {
+ return false;
+ }
+
+ atts[attribute_index] = std::make_tuple(
+ att, total_dimensionality, data_type, data_size, num_components);
+ // everything is treated as 32bit in the encoder.
+ total_dimensionality += num_components;
+ }
+
+ const int att_id = GetAttributeId(0);
+ PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
+ att->SetIdentityMapping();
+ // Decode method
+ uint8_t method;
+ if (!in_buffer->Decode(&method)) {
+ return false;
+ }
+ if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) {
+ uint8_t compression_level = 0;
+ if (!in_buffer->Decode(&compression_level)) {
+ return false;
+ }
+ uint32_t num_points = 0;
+ if (!in_buffer->Decode(&num_points)) {
+ return false;
+ }
+ att->Reset(num_points);
+ FloatPointsTreeDecoder decoder;
+ decoder.set_num_points_from_header(num_points);
+ PointAttributeVectorOutputIterator<float> out_it(atts);
+ if (!decoder.DecodePointCloud(in_buffer, out_it)) {
+ return false;
+ }
+ } else if (method == KdTreeAttributesEncodingMethod::kKdTreeIntegerEncoding) {
+ uint8_t compression_level = 0;
+ if (!in_buffer->Decode(&compression_level)) {
+ return false;
+ }
+ if (6 < compression_level) {
+ DRACO_LOGE(
+ "KdTreeAttributesDecoder: compression level %i not supported.\n",
+ compression_level);
+ return false;
+ }
+
+ uint32_t num_points;
+ if (!in_buffer->Decode(&num_points)) {
+ return false;
+ }
+
+ for (auto attribute_index = 0;
+ static_cast<uint32_t>(attribute_index) < attribute_count;
+ attribute_index += 1) {
+ const int att_id = GetAttributeId(attribute_index);
+ PointAttribute *const attr =
+ GetDecoder()->point_cloud()->attribute(att_id);
+ attr->Reset(num_points);
+ attr->SetIdentityMapping();
+ };
+
+ PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
+
+ switch (compression_level) {
+ case 0: {
+ DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 1: {
+ DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 2: {
+ DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 3: {
+ DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 4: {
+ DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 5: {
+ DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ case 6: {
+ DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, out_it)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ } else {
+ // Invalid method.
+ return false;
+ }
+ return true;
+#else
+ return false;
+#endif
+}
+
+template <typename SignedDataTypeT>
+bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType(
+ PointAttribute *att, int num_processed_signed_components) {
+ typedef typename std::make_unsigned<SignedDataTypeT>::type UnsignedType;
+ std::vector<UnsignedType> unsigned_val(att->num_components());
+ std::vector<SignedDataTypeT> signed_val(att->num_components());
+
+ for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
+ ++avi) {
+ att->GetValue(avi, &unsigned_val[0]);
+ for (int c = 0; c < att->num_components(); ++c) {
+ // Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for
+ // smaller data types.
+ signed_val[c] = static_cast<SignedDataTypeT>(
+ static_cast<int32_t>(unsigned_val[c]) +
+ min_signed_values_[num_processed_signed_components + c]);
+ }
+ att->SetAttributeValue(avi, &signed_val[0]);
+ }
+ return true;
+}
+
+bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
+ if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) {
+ return true;
+ }
+ int num_processed_quantized_attributes = 0;
+ int num_processed_signed_components = 0;
+ // Dequantize attributes that needed it.
+ for (int i = 0; i < GetNumAttributes(); ++i) {
+ const int att_id = GetAttributeId(i);
+ PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
+ if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
+ att->data_type() == DT_INT8) {
+ std::vector<uint32_t> unsigned_val(att->num_components());
+ std::vector<int32_t> signed_val(att->num_components());
+ // Values are stored as unsigned in the attribute, make them signed again.
+ if (att->data_type() == DT_INT32) {
+ if (!TransformAttributeBackToSignedType<int32_t>(
+ att, num_processed_signed_components)) {
+ return false;
+ }
+ } else if (att->data_type() == DT_INT16) {
+ if (!TransformAttributeBackToSignedType<int16_t>(
+ att, num_processed_signed_components)) {
+ return false;
+ }
+ } else if (att->data_type() == DT_INT8) {
+ if (!TransformAttributeBackToSignedType<int8_t>(
+ att, num_processed_signed_components)) {
+ return false;
+ }
+ }
+ num_processed_signed_components += att->num_components();
+ } else if (att->data_type() == DT_FLOAT32) {
+ // TODO(ostava): This code should be probably moved out to attribute
+ // transform and shared with the SequentialQuantizationAttributeDecoder.
+
+ const PointAttribute *const src_att =
+ quantized_portable_attributes_[num_processed_quantized_attributes]
+ .get();
+
+ const AttributeQuantizationTransform &transform =
+ attribute_quantization_transforms_
+ [num_processed_quantized_attributes];
+
+ num_processed_quantized_attributes++;
+
+ if (GetDecoder()->options()->GetAttributeBool(
+ att->attribute_type(), "skip_attribute_transform", false)) {
+ // Attribute transform should not be performed. In this case, we replace
+ // the output geometry attribute with the portable attribute.
+ // TODO(ostava): We can potentially avoid this copy by introducing a new
+ // mechanism that would allow to use the final attributes as portable
+ // attributes for predictors that may need them.
+ att->CopyFrom(*src_att);
+ continue;
+ }
+
+ // Convert all quantized values back to floats.
+ const int32_t max_quantized_value =
+ (1u << static_cast<uint32_t>(transform.quantization_bits())) - 1;
+ const int num_components = att->num_components();
+ const int entry_size = sizeof(float) * num_components;
+ const std::unique_ptr<float[]> att_val(new float[num_components]);
+ int quant_val_id = 0;
+ int out_byte_pos = 0;
+ Dequantizer dequantizer;
+ if (!dequantizer.Init(transform.range(), max_quantized_value)) {
+ return false;
+ }
+ const uint32_t *const portable_attribute_data =
+ reinterpret_cast<const uint32_t *>(
+ src_att->GetAddress(AttributeValueIndex(0)));
+ for (uint32_t i = 0; i < src_att->size(); ++i) {
+ for (int c = 0; c < num_components; ++c) {
+ float value = dequantizer.DequantizeFloat(
+ portable_attribute_data[quant_val_id++]);
+ value = value + transform.min_value(c);
+ att_val[c] = value;
+ }
+ // Store the floating point value into the attribute buffer.
+ att->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
+ out_byte_pos += entry_size;
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h
new file mode 100644
index 0000000..87338d6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
+
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/compression/attributes/attributes_decoder.h"
+
+namespace draco {
+
+// Decodes attributes encoded with the KdTreeAttributesEncoder.
+class KdTreeAttributesDecoder : public AttributesDecoder {
+ public:
+ KdTreeAttributesDecoder();
+
+ protected:
+ bool DecodePortableAttributes(DecoderBuffer *in_buffer) override;
+ bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override;
+ bool TransformAttributesToOriginalFormat() override;
+
+ private:
+ template <typename SignedDataTypeT>
+ bool TransformAttributeBackToSignedType(PointAttribute *att,
+ int num_processed_signed_components);
+
+ std::vector<AttributeQuantizationTransform>
+ attribute_quantization_transforms_;
+ std::vector<int32_t> min_signed_values_;
+ std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc
new file mode 100644
index 0000000..b70deb9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc
@@ -0,0 +1,305 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/kd_tree_attributes_encoder.h"
+
+#include "draco/compression/attributes/kd_tree_attributes_shared.h"
+#include "draco/compression/attributes/point_d_vector.h"
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
+#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h"
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+KdTreeAttributesEncoder::KdTreeAttributesEncoder() : num_components_(0) {}
+
+KdTreeAttributesEncoder::KdTreeAttributesEncoder(int att_id)
+ : AttributesEncoder(att_id), num_components_(0) {}
+
+bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
+ // Convert any of the input attributes into a format that can be processed by
+ // the kd tree encoder (quantization of floating attributes for now).
+ const size_t num_points = encoder()->point_cloud()->num_points();
+ int num_components = 0;
+ for (uint32_t i = 0; i < num_attributes(); ++i) {
+ const int att_id = GetAttributeId(i);
+ const PointAttribute *const att =
+ encoder()->point_cloud()->attribute(att_id);
+ num_components += att->num_components();
+ }
+ num_components_ = num_components;
+
+ // Go over all attributes and quantize them if needed.
+ for (uint32_t i = 0; i < num_attributes(); ++i) {
+ const int att_id = GetAttributeId(i);
+ const PointAttribute *const att =
+ encoder()->point_cloud()->attribute(att_id);
+ if (att->data_type() == DT_FLOAT32) {
+ // Quantization path.
+ AttributeQuantizationTransform attribute_quantization_transform;
+ const int quantization_bits = encoder()->options()->GetAttributeInt(
+ att_id, "quantization_bits", -1);
+ if (quantization_bits < 1) {
+ return false;
+ }
+ if (encoder()->options()->IsAttributeOptionSet(att_id,
+ "quantization_origin") &&
+ encoder()->options()->IsAttributeOptionSet(att_id,
+ "quantization_range")) {
+ // Quantization settings are explicitly specified in the provided
+ // options.
+ std::vector<float> quantization_origin(att->num_components());
+ encoder()->options()->GetAttributeVector(att_id, "quantization_origin",
+ att->num_components(),
+ &quantization_origin[0]);
+ const float range = encoder()->options()->GetAttributeFloat(
+ att_id, "quantization_range", 1.f);
+ attribute_quantization_transform.SetParameters(
+ quantization_bits, quantization_origin.data(),
+ att->num_components(), range);
+ } else {
+ // Compute quantization settings from the attribute values.
+ if (!attribute_quantization_transform.ComputeParameters(
+ *att, quantization_bits)) {
+ return false;
+ }
+ }
+ attribute_quantization_transforms_.push_back(
+ attribute_quantization_transform);
+ // Store the quantized attribute in an array that will be used when we do
+ // the actual encoding of the data.
+ auto portable_att =
+ attribute_quantization_transform.InitTransformedAttribute(*att,
+ num_points);
+ attribute_quantization_transform.TransformAttribute(*att, {},
+ portable_att.get());
+ quantized_portable_attributes_.push_back(std::move(portable_att));
+ } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
+ att->data_type() == DT_INT8) {
+ // For signed types, find the minimum value for each component. These
+ // values are going to be used to transform the attribute values to
+ // unsigned integers that can be processed by the core kd tree algorithm.
+ std::vector<int32_t> min_value(att->num_components(),
+ std::numeric_limits<int32_t>::max());
+ std::vector<int32_t> act_value(att->num_components());
+ for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
+ ++avi) {
+ att->ConvertValue<int32_t>(avi, &act_value[0]);
+ for (int c = 0; c < att->num_components(); ++c) {
+ if (min_value[c] > act_value[c]) {
+ min_value[c] = act_value[c];
+ }
+ }
+ }
+ for (int c = 0; c < att->num_components(); ++c) {
+ min_signed_values_.push_back(min_value[c]);
+ }
+ }
+ }
+ return true;
+}
+
+bool KdTreeAttributesEncoder::EncodeDataNeededByPortableTransforms(
+ EncoderBuffer *out_buffer) {
+ // Store quantization settings for all attributes that need it.
+ for (int i = 0; i < attribute_quantization_transforms_.size(); ++i) {
+ attribute_quantization_transforms_[i].EncodeParameters(out_buffer);
+ }
+
+ // Encode data needed for transforming signed integers to unsigned ones.
+ for (int i = 0; i < min_signed_values_.size(); ++i) {
+ EncodeVarint<int32_t>(min_signed_values_[i], out_buffer);
+ }
+ return true;
+}
+
+bool KdTreeAttributesEncoder::EncodePortableAttributes(
+ EncoderBuffer *out_buffer) {
+ // Encode the data using the kd tree encoder algorithm. The data is first
+ // copied to a PointDVector that provides all the API expected by the core
+ // encoding algorithm.
+
+ // We limit the maximum value of compression_level to 6 as we don't currently
+ // have viable algorithms for higher compression levels.
+ uint8_t compression_level =
+ std::min(10 - encoder()->options()->GetSpeed(), 6);
+ DRACO_DCHECK_LE(compression_level, 6);
+
+ if (compression_level == 6 && num_components_ > 15) {
+ // Don't use compression level for CL >= 6. Axis selection is currently
+ // encoded using 4 bits.
+ compression_level = 5;
+ }
+
+ out_buffer->Encode(compression_level);
+
+ // Init PointDVector. The number of dimensions is equal to the total number
+ // of dimensions across all attributes.
+ const int num_points = encoder()->point_cloud()->num_points();
+ PointDVector<uint32_t> point_vector(num_points, num_components_);
+
+ int num_processed_components = 0;
+ int num_processed_quantized_attributes = 0;
+ int num_processed_signed_components = 0;
+ // Copy data to the point vector.
+ for (uint32_t i = 0; i < num_attributes(); ++i) {
+ const int att_id = GetAttributeId(i);
+ const PointAttribute *const att =
+ encoder()->point_cloud()->attribute(att_id);
+ const PointAttribute *source_att = nullptr;
+ if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
+ att->data_type() == DT_UINT8 || att->data_type() == DT_INT32 ||
+ att->data_type() == DT_INT16 || att->data_type() == DT_INT8) {
+ // Use the original attribute.
+ source_att = att;
+ } else if (att->data_type() == DT_FLOAT32) {
+ // Use the portable (quantized) attribute instead.
+ source_att =
+ quantized_portable_attributes_[num_processed_quantized_attributes]
+ .get();
+ num_processed_quantized_attributes++;
+ } else {
+ // Unsupported data type.
+ return false;
+ }
+
+ if (source_att == nullptr) {
+ return false;
+ }
+
+ // Copy source_att to the vector.
+ if (source_att->data_type() == DT_UINT32) {
+ // If the data type is the same as the one used by the point vector, we
+ // can directly copy individual elements.
+ for (PointIndex pi(0); pi < num_points; ++pi) {
+ const AttributeValueIndex avi = source_att->mapped_index(pi);
+ const uint8_t *const att_value_address = source_att->GetAddress(avi);
+ point_vector.CopyAttribute(source_att->num_components(),
+ num_processed_components, pi.value(),
+ att_value_address);
+ }
+ } else if (source_att->data_type() == DT_INT32 ||
+ source_att->data_type() == DT_INT16 ||
+ source_att->data_type() == DT_INT8) {
+ // Signed values need to be converted to unsigned before they are stored
+ // in the point vector.
+ std::vector<int32_t> signed_point(source_att->num_components());
+ std::vector<uint32_t> unsigned_point(source_att->num_components());
+ for (PointIndex pi(0); pi < num_points; ++pi) {
+ const AttributeValueIndex avi = source_att->mapped_index(pi);
+ source_att->ConvertValue<int32_t>(avi, &signed_point[0]);
+ for (int c = 0; c < source_att->num_components(); ++c) {
+ unsigned_point[c] =
+ signed_point[c] -
+ min_signed_values_[num_processed_signed_components + c];
+ }
+
+ point_vector.CopyAttribute(source_att->num_components(),
+ num_processed_components, pi.value(),
+ &unsigned_point[0]);
+ }
+ num_processed_signed_components += source_att->num_components();
+ } else {
+ // If the data type of the attribute is different, we have to convert the
+ // value before we put it to the point vector.
+ std::vector<uint32_t> point(source_att->num_components());
+ for (PointIndex pi(0); pi < num_points; ++pi) {
+ const AttributeValueIndex avi = source_att->mapped_index(pi);
+ source_att->ConvertValue<uint32_t>(avi, &point[0]);
+ point_vector.CopyAttribute(source_att->num_components(),
+ num_processed_components, pi.value(),
+ point.data());
+ }
+ }
+ num_processed_components += source_att->num_components();
+ }
+
+ // Compute the maximum bit length needed for the kd tree encoding.
+ int num_bits = 0;
+ const uint32_t *data = point_vector[0];
+ for (int i = 0; i < num_points * num_components_; ++i) {
+ if (data[i] > 0) {
+ const int msb = MostSignificantBit(data[i]) + 1;
+ if (msb > num_bits) {
+ num_bits = msb;
+ }
+ }
+ }
+
+ switch (compression_level) {
+ case 6: {
+ DynamicIntegerPointsKdTreeEncoder<6> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ case 5: {
+ DynamicIntegerPointsKdTreeEncoder<5> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ case 4: {
+ DynamicIntegerPointsKdTreeEncoder<4> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ case 3: {
+ DynamicIntegerPointsKdTreeEncoder<3> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ case 2: {
+ DynamicIntegerPointsKdTreeEncoder<2> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ case 1: {
+ DynamicIntegerPointsKdTreeEncoder<1> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ case 0: {
+ DynamicIntegerPointsKdTreeEncoder<0> points_encoder(num_components_);
+ if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
+ num_bits, out_buffer)) {
+ return false;
+ }
+ break;
+ }
+ // Compression level and/or encoding speed seem wrong.
+ default:
+ return false;
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h
new file mode 100644
index 0000000..80748e0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
+
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/compression/attributes/attributes_encoder.h"
+#include "draco/compression/config/compression_shared.h"
+
+namespace draco {
+
+// Encodes all attributes of a given PointCloud using one of the available
+// Kd-tree compression methods.
+// See compression/point_cloud/point_cloud_kd_tree_encoder.h for more details.
+class KdTreeAttributesEncoder : public AttributesEncoder {
+ public:
+ KdTreeAttributesEncoder();
+ explicit KdTreeAttributesEncoder(int att_id);
+
+ uint8_t GetUniqueId() const override { return KD_TREE_ATTRIBUTE_ENCODER; }
+
+ protected:
+ bool TransformAttributesToPortableFormat() override;
+ bool EncodePortableAttributes(EncoderBuffer *out_buffer) override;
+ bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override;
+
+ private:
+ std::vector<AttributeQuantizationTransform>
+ attribute_quantization_transforms_;
+ // Min signed values are used to transform signed integers into unsigned ones
+ // (by subtracting the min signed value for each component).
+ std::vector<int32_t> min_signed_values_;
+ std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
+ int num_components_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h
new file mode 100644
index 0000000..94841a9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h
@@ -0,0 +1,28 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
+
+namespace draco {
+
+// Defines types of kD-tree compression
+enum KdTreeAttributesEncodingMethod {
+ kKdTreeQuantizationEncoding = 0,
+ kKdTreeIntegerEncoding
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/linear_sequencer.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/linear_sequencer.h
new file mode 100644
index 0000000..7d9b526
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/linear_sequencer.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
+
+#include "draco/compression/attributes/points_sequencer.h"
+
+namespace draco {
+
+// A simple sequencer that generates a linear sequence [0, num_points - 1].
+// I.e., the order of the points is preserved for the input data.
+class LinearSequencer : public PointsSequencer {
+ public:
+ explicit LinearSequencer(int32_t num_points) : num_points_(num_points) {}
+
+ bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override {
+ attribute->SetIdentityMapping();
+ return true;
+ }
+
+ protected:
+ bool GenerateSequenceInternal() override {
+ if (num_points_ < 0) {
+ return false;
+ }
+ out_point_ids()->resize(num_points_);
+ for (int i = 0; i < num_points_; ++i) {
+ out_point_ids()->at(i) = PointIndex(i);
+ }
+ return true;
+ }
+
+ private:
+ int32_t num_points_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h
new file mode 100644
index 0000000..9a358e4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h
@@ -0,0 +1,58 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
+
+#include <inttypes.h>
+
+#include <vector>
+
+#include "draco/attributes/geometry_indices.h"
+
+namespace draco {
+
+// Data used for encoding and decoding of mesh attributes.
+struct MeshAttributeIndicesEncodingData {
+ MeshAttributeIndicesEncodingData() : num_values(0) {}
+
+ void Init(int num_vertices) {
+ vertex_to_encoded_attribute_value_index_map.resize(num_vertices);
+
+ // We expect to store one value for each vertex.
+ encoded_attribute_value_index_to_corner_map.reserve(num_vertices);
+ }
+
+ // Array for storing the corner ids in the order their associated attribute
+ // entries were encoded/decoded. For every encoded attribute value entry we
+ // store exactly one corner. I.e., this is the mapping between an encoded
+ // attribute entry ids and corner ids. This map is needed for example by
+ // prediction schemes. Note that not all corners are included in this map,
+ // e.g., if multiple corners share the same attribute value, only one of these
+ // corners will be usually included.
+ std::vector<CornerIndex> encoded_attribute_value_index_to_corner_map;
+
+ // Map for storing encoding order of attribute entries for each vertex.
+ // i.e. Mapping between vertices and their corresponding attribute entry ids
+ // that are going to be used by the decoder.
+ // -1 if an attribute entry hasn't been encoded/decoded yet.
+ std::vector<int32_t> vertex_to_encoded_attribute_value_index_map;
+
+ // Total number of encoded/decoded attribute entries.
+ int num_values;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h
new file mode 100644
index 0000000..8a6f25b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h
@@ -0,0 +1,360 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Utilities for converting unit vectors to octahedral coordinates and back.
+// For more details about octahedral coordinates, see for example Cigolle
+// et al.'14 “A Survey of Efficient Representations for Independent Unit
+// Vectors”.
+//
+// In short this is motivated by an octahedron inscribed into a sphere. The
+// direction of the normal vector can be defined by a point on the octahedron.
+// On the right hemisphere (x > 0) this point is projected onto the x = 0 plane,
+// that is, the right side of the octahedron forms a diamond like shape. The
+// left side of the octahedron is also projected onto the x = 0 plane, however,
+// in this case we flap the triangles of the diamond outward. Afterwards we
+// shift the resulting square such that all values are positive.
+//
+// Important values in this file:
+// * q: number of quantization bits
+// * max_quantized_value: the max value representable with q bits (odd)
+// * max_value: max value of the diamond = max_quantized_value - 1 (even)
+// * center_value: center of the diamond after shift
+//
+// Note that the parameter space is somewhat periodic, e.g. (0, 0) ==
+// (max_value, max_value), which is also why the diamond is one smaller than the
+// maximal representable value in order to have an odd range of values.
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// Helper for converting unit vectors to quantized octahedral coordinates
+// (s, t) and back. SetQuantizationBits() must succeed before any conversion
+// method is used; see the file comment for the meaning of the derived values.
+class OctahedronToolBox {
+ public:
+  OctahedronToolBox()
+      : quantization_bits_(-1),
+        max_quantized_value_(-1),
+        max_value_(-1),
+        dequantization_scale_(1.f),
+        center_value_(-1) {}
+
+  // Sets the number of quantization bits |q| and derives the dependent
+  // constants. Valid range is [2, 30]; returns false otherwise.
+  bool SetQuantizationBits(int32_t q) {
+    if (q < 2 || q > 30) {
+      return false;
+    }
+    quantization_bits_ = q;
+    max_quantized_value_ = (1 << quantization_bits_) - 1;
+    max_value_ = max_quantized_value_ - 1;
+    dequantization_scale_ = 2.f / max_value_;
+    center_value_ = max_value_ / 2;
+    return true;
+  }
+  bool IsInitialized() const { return quantization_bits_ != -1; }
+
+  // Convert all edge points in the top left and bottom right quadrants to
+  // their corresponding position in the bottom left and top right quadrants.
+  // Convert all corner edge points to the top right corner.
+  inline void CanonicalizeOctahedralCoords(int32_t s, int32_t t, int32_t *out_s,
+                                           int32_t *out_t) const {
+    if ((s == 0 && t == 0) || (s == 0 && t == max_value_) ||
+        (s == max_value_ && t == 0)) {
+      s = max_value_;
+      t = max_value_;
+    } else if (s == 0 && t > center_value_) {
+      t = center_value_ - (t - center_value_);
+    } else if (s == max_value_ && t < center_value_) {
+      t = center_value_ + (center_value_ - t);
+    } else if (t == max_value_ && s < center_value_) {
+      s = center_value_ + (center_value_ - s);
+    } else if (t == 0 && s > center_value_) {
+      s = center_value_ - (s - center_value_);
+    }
+
+    *out_s = s;
+    *out_t = t;
+  }
+
+  // Converts an integer vector to octahedral coordinates.
+  // Precondition: |int_vec| abs sum must equal center value.
+  inline void IntegerVectorToQuantizedOctahedralCoords(const int32_t *int_vec,
+                                                       int32_t *out_s,
+                                                       int32_t *out_t) const {
+    DRACO_DCHECK_EQ(
+        std::abs(int_vec[0]) + std::abs(int_vec[1]) + std::abs(int_vec[2]),
+        center_value_);
+    int32_t s, t;
+    if (int_vec[0] >= 0) {
+      // Right hemisphere.
+      s = (int_vec[1] + center_value_);
+      t = (int_vec[2] + center_value_);
+    } else {
+      // Left hemisphere.
+      if (int_vec[1] < 0) {
+        s = std::abs(int_vec[2]);
+      } else {
+        s = (max_value_ - std::abs(int_vec[2]));
+      }
+      if (int_vec[2] < 0) {
+        t = std::abs(int_vec[1]);
+      } else {
+        t = (max_value_ - std::abs(int_vec[1]));
+      }
+    }
+    CanonicalizeOctahedralCoords(s, t, out_s, out_t);
+  }
+
+  // Projects a floating point vector onto the octahedron and quantizes the
+  // result into octahedral coordinates |out_s|, |out_t|.
+  template <class T>
+  void FloatVectorToQuantizedOctahedralCoords(const T *vector, int32_t *out_s,
+                                              int32_t *out_t) const {
+    const double abs_sum = std::abs(static_cast<double>(vector[0])) +
+                           std::abs(static_cast<double>(vector[1])) +
+                           std::abs(static_cast<double>(vector[2]));
+
+    // Adjust values such that abs sum equals 1.
+    double scaled_vector[3];
+    if (abs_sum > 1e-6) {
+      // Scale needed to project the vector to the surface of an octahedron.
+      const double scale = 1.0 / abs_sum;
+      scaled_vector[0] = vector[0] * scale;
+      scaled_vector[1] = vector[1] * scale;
+      scaled_vector[2] = vector[2] * scale;
+    } else {
+      // Degenerate (near-zero) input; fall back to the +x axis.
+      scaled_vector[0] = 1.0;
+      scaled_vector[1] = 0;
+      scaled_vector[2] = 0;
+    }
+
+    // Scale vector such that the sum equals the center value.
+    int32_t int_vec[3];
+    // Note: std::floor is used (rather than unqualified floor) so the
+    // floating point overload is always selected.
+    int_vec[0] =
+        static_cast<int32_t>(std::floor(scaled_vector[0] * center_value_ + 0.5));
+    int_vec[1] =
+        static_cast<int32_t>(std::floor(scaled_vector[1] * center_value_ + 0.5));
+    // Make sure the sum is exactly the center value.
+    int_vec[2] = center_value_ - std::abs(int_vec[0]) - std::abs(int_vec[1]);
+    if (int_vec[2] < 0) {
+      // If the sum of first two coordinates is too large, we need to decrease
+      // the length of one of the coordinates.
+      if (int_vec[1] > 0) {
+        int_vec[1] += int_vec[2];
+      } else {
+        int_vec[1] -= int_vec[2];
+      }
+      int_vec[2] = 0;
+    }
+    // Take care of the sign.
+    if (scaled_vector[2] < 0) {
+      int_vec[2] *= -1;
+    }
+
+    IntegerVectorToQuantizedOctahedralCoords(int_vec, out_s, out_t);
+  }
+
+  // Normalize |vec| such that its abs sum is equal to the center value;
+  template <class T>
+  void CanonicalizeIntegerVector(T *vec) const {
+    static_assert(std::is_integral<T>::value, "T must be an integral type.");
+    static_assert(std::is_signed<T>::value, "T must be a signed type.");
+    const int64_t abs_sum = static_cast<int64_t>(std::abs(vec[0])) +
+                            static_cast<int64_t>(std::abs(vec[1])) +
+                            static_cast<int64_t>(std::abs(vec[2]));
+
+    if (abs_sum == 0) {
+      vec[0] = center_value_;  // vec[1] == v[2] == 0
+    } else {
+      vec[0] =
+          (static_cast<int64_t>(vec[0]) * static_cast<int64_t>(center_value_)) /
+          abs_sum;
+      vec[1] =
+          (static_cast<int64_t>(vec[1]) * static_cast<int64_t>(center_value_)) /
+          abs_sum;
+      // The third component absorbs the rounding error so the abs sum is
+      // exactly |center_value_|; its sign is preserved.
+      if (vec[2] >= 0) {
+        vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]);
+      } else {
+        vec[2] = -(center_value_ - std::abs(vec[0]) - std::abs(vec[1]));
+      }
+    }
+  }
+
+  // Dequantizes |in_s|, |in_t| to the <-1, 1> range and decodes the unit
+  // vector into |out_vector| (3 floats).
+  inline void QuantizedOctahedralCoordsToUnitVector(int32_t in_s, int32_t in_t,
+                                                    float *out_vector) const {
+    OctahedralCoordsToUnitVector(in_s * dequantization_scale_ - 1.f,
+                                 in_t * dequantization_scale_ - 1.f,
+                                 out_vector);
+  }
+
+  // |s| and |t| are expected to be signed values.
+  inline bool IsInDiamond(const int32_t &s, const int32_t &t) const {
+    // Expect center already at origin.
+    DRACO_DCHECK_LE(s, center_value_);
+    DRACO_DCHECK_LE(t, center_value_);
+    DRACO_DCHECK_GE(s, -center_value_);
+    DRACO_DCHECK_GE(t, -center_value_);
+    return std::abs(s) + std::abs(t) <= center_value_;
+  }
+
+  // Maps points outside the diamond into it (and vice versa) by mirroring
+  // them about the nearest diamond edge. Involution: applying it twice
+  // restores the input.
+  void InvertDiamond(int32_t *s, int32_t *t) const {
+    // Expect center already at origin.
+    DRACO_DCHECK_LE(*s, center_value_);
+    DRACO_DCHECK_LE(*t, center_value_);
+    DRACO_DCHECK_GE(*s, -center_value_);
+    DRACO_DCHECK_GE(*t, -center_value_);
+    int32_t sign_s = 0;
+    int32_t sign_t = 0;
+    if (*s >= 0 && *t >= 0) {
+      sign_s = 1;
+      sign_t = 1;
+    } else if (*s <= 0 && *t <= 0) {
+      sign_s = -1;
+      sign_t = -1;
+    } else {
+      sign_s = (*s > 0) ? 1 : -1;
+      sign_t = (*t > 0) ? 1 : -1;
+    }
+
+    const int32_t corner_point_s = sign_s * center_value_;
+    const int32_t corner_point_t = sign_t * center_value_;
+    *s = 2 * *s - corner_point_s;
+    *t = 2 * *t - corner_point_t;
+    if (sign_s * sign_t >= 0) {
+      int32_t temp = *s;
+      *s = -*t;
+      *t = -temp;
+    } else {
+      std::swap(*s, *t);
+    }
+    *s = (*s + corner_point_s) / 2;
+    *t = (*t + corner_point_t) / 2;
+  }
+
+  // Negates the encoded direction (origin-centered coordinates).
+  void InvertDirection(int32_t *s, int32_t *t) const {
+    // Expect center already at origin.
+    DRACO_DCHECK_LE(*s, center_value_);
+    DRACO_DCHECK_LE(*t, center_value_);
+    DRACO_DCHECK_GE(*s, -center_value_);
+    DRACO_DCHECK_GE(*t, -center_value_);
+    *s *= -1;
+    *t *= -1;
+    this->InvertDiamond(s, t);
+  }
+
+  // For correction values. Wraps |x| into the periodic
+  // [-center_value_, center_value_] range.
+  int32_t ModMax(int32_t x) const {
+    if (x > this->center_value()) {
+      return x - this->max_quantized_value();
+    }
+    if (x < -this->center_value()) {
+      return x + this->max_quantized_value();
+    }
+    return x;
+  }
+
+  // For correction values. Maps a negative correction to its positive
+  // periodic equivalent.
+  int32_t MakePositive(int32_t x) const {
+    DRACO_DCHECK_LE(x, this->center_value() * 2);
+    if (x < 0) {
+      return x + this->max_quantized_value();
+    }
+    return x;
+  }
+
+  int32_t quantization_bits() const { return quantization_bits_; }
+  int32_t max_quantized_value() const { return max_quantized_value_; }
+  int32_t max_value() const { return max_value_; }
+  int32_t center_value() const { return center_value_; }
+
+ private:
+  inline void OctahedralCoordsToUnitVector(float in_s_scaled, float in_t_scaled,
+                                           float *out_vector) const {
+    // Background about the encoding:
+    // A normal is encoded in a normalized space <s, t> depicted below. The
+    // encoding correponds to an octahedron that is unwrapped to a 2D plane.
+    // During encoding, a normal is projected to the surface of the octahedron
+    // and the projection is then unwrapped to the 2D plane. Decoding is the
+    // reverse of this process.
+    // All points in the central diamond are located on triangles on the
+    // right "hemisphere" of the octahedron while all points outside of the
+    // diamond are on the left hemisphere (basically, they would have to be
+    // wrapped along the diagonal edges to form the octahedron). The central
+    // point corresponds to the right most vertex of the octahedron and all
+    // corners of the plane correspond to the left most vertex of the
+    // octahedron.
+    //
+    // t
+    // ^ *-----*-----*
+    // |  \    |    /
+    // |   \   |   /
+    // |    \  |  /
+    // |     \ | /
+    // *      \|/
+    // |      /|\
+    // |     / | \
+    // |    /  |  \
+    // |   /   |   \
+    // *-----*-----* --> s
+
+    // Note that the input |in_s_scaled| and |in_t_scaled| are already scaled to
+    // <-1, 1> range. This way, the central point is at coordinate (0, 0).
+    float y = in_s_scaled;
+    float z = in_t_scaled;
+
+    // Remaining coordinate can be computed by projecting the (y, z) values onto
+    // the surface of the octahedron. std::abs must be used here: unqualified
+    // abs() may select the C integer overload and truncate the operands.
+    const float x = 1.f - std::abs(y) - std::abs(z);
+
+    // |x| is essentially a signed distance from the diagonal edges of the
+    // diamond shown on the figure above. It is positive for all points in the
+    // diamond (right hemisphere) and negative for all points outside the
+    // diamond (left hemisphere). For all points on the left hemisphere we need
+    // to update their (y, z) coordinates to account for the wrapping along
+    // the edges of the diamond.
+    float x_offset = -x;
+    x_offset = x_offset < 0 ? 0 : x_offset;
+
+    // This will do nothing for the points on the right hemisphere but it will
+    // mirror the (y, z) location along the nearest diagonal edge of the
+    // diamond.
+    y += y < 0 ? x_offset : -x_offset;
+    z += z < 0 ? x_offset : -x_offset;
+
+    // Normalize the computed vector.
+    const float norm_squared = x * x + y * y + z * z;
+    if (norm_squared < 1e-6) {
+      out_vector[0] = 0;
+      out_vector[1] = 0;
+      out_vector[2] = 0;
+    } else {
+      const float d = 1.0f / std::sqrt(norm_squared);
+      out_vector[0] = x * d;
+      out_vector[1] = y * d;
+      out_vector[2] = z * d;
+    }
+  }
+
+  int32_t quantization_bits_;
+  int32_t max_quantized_value_;
+  int32_t max_value_;
+  float dequantization_scale_;
+  int32_t center_value_;
+};
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector.h
new file mode 100644
index 0000000..3b115d5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector.h
@@ -0,0 +1,279 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// The main class of this file is PointDVector providing an interface similar to
+// std::vector<PointD> for arbitrary number of dimensions (without a template
+// argument). PointDVectorIterator is a random access iterator, which allows for
+// compatibility with existing algorithms. PseudoPointD provides for a view on
+// the individual items in a contiguous block of memory, which is compatible
+// with the swap function and is returned by a dereference of
+// PointDVectorIterator. Swap functions provide for compatibility/specialization
+// that allows these classes to work with currently utilized STL functions.
+
+// This class allows for swap functionality from the RandomIterator
+// It seems problematic to bring this inside PointDVector due to templating.
+template <typename internal_t>
+class PseudoPointD {
+ public:
+  // A non-owning view over |dimension| contiguous values starting at |mem|.
+  PseudoPointD(internal_t *mem, internal_t dimension)
+      : data_(mem), dim_(dimension) {}
+
+  // Copying the view aliases the same underlying memory.
+  PseudoPointD(const PseudoPointD &other)
+      : data_(other.data_), dim_(other.dim_) {}
+
+  // Exchanges the *referenced* values element by element (not the pointers).
+  void swap(PseudoPointD &other) noexcept {
+    for (internal_t i = 0; i < dim_; ++i) {
+      const internal_t tmp = data_[i];
+      data_[i] = other.data_[i];
+      other.data_[i] = tmp;
+    }
+  }
+
+  const internal_t &operator[](const size_t &n) const {
+    DRACO_DCHECK_LT(n, dim_);
+    return data_[n];
+  }
+  internal_t &operator[](const size_t &n) {
+    DRACO_DCHECK_LT(n, dim_);
+    return data_[n];
+  }
+
+  // Element-wise comparison of the referenced values.
+  bool operator==(const PseudoPointD &other) const {
+    auto i = 0;
+    while (i < dim_) {
+      if (data_[i] != other.data_[i]) {
+        return false;
+      }
+      ++i;
+    }
+    return true;
+  }
+  bool operator!=(const PseudoPointD &other) const {
+    return !(*this == other);
+  }
+
+ private:
+  internal_t *const data_;  // referenced storage; never owned
+  const internal_t dim_;    // number of components in the view
+};
+
+// It seems problematic to bring this inside PointDVector due to templating.
+// These overloads let std:: algorithms (e.g. std::iter_swap inside
+// std::partition) exchange the memory referenced by two PseudoPointD views.
+// The rvalue overload is required because dereferencing a PointDVectorIterator
+// yields a temporary proxy object. Note: the stray ';' after each function
+// body has been removed (extra semicolons at namespace scope are non-standard
+// and trigger pedantic warnings).
+template <typename internal_t>
+void swap(draco::PseudoPointD<internal_t> &&a,
+          draco::PseudoPointD<internal_t> &&b) noexcept {
+  a.swap(b);
+}
+template <typename internal_t>
+void swap(draco::PseudoPointD<internal_t> &a,
+          draco::PseudoPointD<internal_t> &b) noexcept {
+  a.swap(b);
+}
+
+// std::vector-like container of fixed-dimensionality points where the
+// dimensionality is chosen at runtime. Storage is one contiguous buffer of
+// n_items * dimensionality values; it is allocated once and never resized.
+template <typename internal_t>
+class PointDVector {
+ public:
+  PointDVector(const uint32_t n_items, const uint32_t dimensionality)
+      : n_items_(n_items),
+        dimensionality_(dimensionality),
+        item_size_bytes_(dimensionality * sizeof(internal_t)),
+        data_(n_items * dimensionality),
+        data0_(data_.data()) {}
+  // random access iterator
+  class PointDVectorIterator {
+    friend class PointDVector;
+
+   public:
+    // Iterator trait typedefs. These replace inheritance from std::iterator
+    // (deprecated since C++17) with the identical member types it provided.
+    using iterator_category = std::random_access_iterator_tag;
+    using value_type = size_t;
+    using difference_type = size_t;
+    using pointer = size_t *;
+    using reference = size_t &;
+
+    // std::iter_swap is called inside of std::partition and needs this
+    // specialized support
+    PseudoPointD<internal_t> operator*() const {
+      return PseudoPointD<internal_t>(vec_->data0_ + item_ * dimensionality_,
+                                      dimensionality_);
+    }
+    const PointDVectorIterator &operator++() {
+      item_ += 1;
+      return *this;
+    }
+    const PointDVectorIterator &operator--() {
+      item_ -= 1;
+      return *this;
+    }
+    PointDVectorIterator operator++(int32_t) {
+      PointDVectorIterator copy(*this);
+      item_ += 1;
+      return copy;
+    }
+    PointDVectorIterator operator--(int32_t) {
+      PointDVectorIterator copy(*this);
+      item_ -= 1;
+      return copy;
+    }
+    // NOTE(review): only |item_| is assigned; |vec_| is const and must already
+    // refer to the same vector — assigning across different vectors would be
+    // incorrect. Confirm callers only assign iterators of the same container.
+    PointDVectorIterator &operator=(const PointDVectorIterator &other) {
+      this->item_ = other.item_;
+      return *this;
+    }
+
+    bool operator==(const PointDVectorIterator &ref) const {
+      return item_ == ref.item_;
+    }
+    bool operator!=(const PointDVectorIterator &ref) const {
+      return item_ != ref.item_;
+    }
+    bool operator<(const PointDVectorIterator &ref) const {
+      return item_ < ref.item_;
+    }
+    bool operator>(const PointDVectorIterator &ref) const {
+      return item_ > ref.item_;
+    }
+    bool operator<=(const PointDVectorIterator &ref) const {
+      return item_ <= ref.item_;
+    }
+    bool operator>=(const PointDVectorIterator &ref) const {
+      return item_ >= ref.item_;
+    }
+
+    PointDVectorIterator operator+(const int32_t &add) const {
+      PointDVectorIterator copy(vec_, item_ + add);
+      return copy;
+    }
+    PointDVectorIterator &operator+=(const int32_t &add) {
+      item_ += add;
+      return *this;
+    }
+    PointDVectorIterator operator-(const int32_t &sub) const {
+      PointDVectorIterator copy(vec_, item_ - sub);
+      return copy;
+    }
+    size_t operator-(const PointDVectorIterator &sub) const {
+      return (item_ - sub.item_);
+    }
+
+    PointDVectorIterator &operator-=(const int32_t &sub) {
+      item_ -= sub;
+      return *this;
+    }
+
+    // Returns a raw pointer to the (item_ + n)-th point's first component.
+    internal_t *operator[](const size_t &n) const {
+      return vec_->data0_ + (item_ + n) * dimensionality_;
+    }
+
+   protected:
+    explicit PointDVectorIterator(PointDVector *vec, size_t start_item)
+        : item_(start_item), vec_(vec), dimensionality_(vec->dimensionality_) {}
+
+   private:
+    size_t item_;  // this counts the item that should be referenced.
+    PointDVector *const vec_;        // the thing that we're iterating on
+    const uint32_t dimensionality_;  // local copy from vec_
+  };
+
+  PointDVectorIterator begin() { return PointDVectorIterator(this, 0); }
+  PointDVectorIterator end() { return PointDVectorIterator(this, n_items_); }
+
+  // operator[] allows for unprotected user-side usage of operator[] on the
+  // return value AS IF it were a natively indexable type like Point3*
+  internal_t *operator[](const uint32_t index) {
+    DRACO_DCHECK_LT(index, n_items_);
+    return data0_ + index * dimensionality_;
+  }
+  const internal_t *operator[](const uint32_t index) const {
+    DRACO_DCHECK_LT(index, n_items_);
+    return data0_ + index * dimensionality_;
+  }
+
+  uint32_t size() const { return n_items_; }
+  size_t GetBufferSize() const { return data_.size(); }
+
+  // copy a single contiguous 'item' from one PointDVector into this one.
+  void CopyItem(const PointDVector &source, const internal_t source_index,
+                const internal_t destination_index) {
+    DRACO_DCHECK(&source != this ||
+                 (&source == this && source_index != destination_index));
+    DRACO_DCHECK_LT(destination_index, n_items_);
+    DRACO_DCHECK_LT(source_index, source.n_items_);
+
+    // DRACO_DCHECK_EQ(source.n_items_, n_items_);  // not technically necessary
+    DRACO_DCHECK_EQ(source.dimensionality_, dimensionality_);
+
+    const internal_t *ref = source[source_index];
+    internal_t *const dest = this->operator[](destination_index);
+    std::memcpy(dest, ref, item_size_bytes_);
+  }
+
+  // Copy data directly off of an attribute buffer interleaved into internal
+  // memory.
+  void CopyAttribute(
+      // The dimensionality of the attribute being integrated
+      const internal_t attribute_dimensionality,
+      // The offset in dimensions to insert this attribute.
+      const internal_t offset_dimensionality, const internal_t index,
+      // The direct pointer to the data
+      const void *const attribute_item_data) {
+    // chunk copy
+    const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
+
+    // a multiply and add can be optimized away with an iterator
+    std::memcpy(data0_ + index * dimensionality_ + offset_dimensionality,
+                attribute_item_data, copy_size);
+  }
+  // Copy data off of a contiguous buffer interleaved into internal memory
+  void CopyAttribute(
+      // The dimensionality of the attribute being integrated
+      const internal_t attribute_dimensionality,
+      // The offset in dimensions to insert this attribute.
+      const internal_t offset_dimensionality,
+      const internal_t *const attribute_mem) {
+    DRACO_DCHECK_LT(offset_dimensionality,
+                    dimensionality_ - attribute_dimensionality);
+    // degenerate case block copy the whole buffer.
+    if (dimensionality_ == attribute_dimensionality) {
+      DRACO_DCHECK_EQ(offset_dimensionality, 0);
+      const size_t copy_size =
+          sizeof(internal_t) * attribute_dimensionality * n_items_;
+      std::memcpy(data0_, attribute_mem, copy_size);
+    } else {  // chunk copy
+      const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
+      internal_t *internal_data;
+      const internal_t *attribute_data;
+      internal_t item;
+      for (internal_data = data0_ + offset_dimensionality,
+          attribute_data = attribute_mem, item = 0;
+           item < n_items_; internal_data += dimensionality_,
+          attribute_data += attribute_dimensionality, item += 1) {
+        std::memcpy(internal_data, attribute_data, copy_size);
+      }
+    }
+  }
+
+ private:
+  // internal parameters.
+  const uint32_t n_items_;
+  const uint32_t dimensionality_;  // The dimension of the points in the buffer
+  const uint32_t item_size_bytes_;
+  std::vector<internal_t> data_;  // contiguously stored data. Never resized.
+  internal_t *const data0_;       // raw pointer to base data.
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc
new file mode 100644
index 0000000..59f28f8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc
@@ -0,0 +1,360 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/point_d_vector.h"
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/core/draco_test_base.h"
+
+namespace draco {
+
+// Test fixture covering PointDVector (size, content copies, iterators) and
+// the PseudoPointD proxy (swap, equality, inequality).
+class PointDVectorTest : public ::testing::Test {
+ protected:
+  // NOTE(review): placeholder — never invoked by the TEST_F cases below.
+  template <typename PT>
+  void TestIntegrity() {}
+  // Verifies size() and GetBufferSize() across item counts/dimensionalities.
+  template <typename PT>
+  void TestSize() {
+    for (uint32_t n_items = 0; n_items <= 10; ++n_items) {
+      for (uint32_t dimensionality = 1; dimensionality <= 10;
+           ++dimensionality) {
+        draco::PointDVector<PT> var(n_items, dimensionality);
+        ASSERT_EQ(n_items, var.size());
+        ASSERT_EQ(n_items * dimensionality, var.GetBufferSize());
+      }
+    }
+  }
+  // Exercises the whole-buffer CopyAttribute overload.
+  template <typename PT>
+  void TestContentsContiguous() {
+    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
+      for (uint32_t dimensionality = 1; dimensionality < 10;
+           dimensionality += 2) {
+        for (uint32_t att_dimensionality = 1;
+             att_dimensionality <= dimensionality; att_dimensionality += 2) {
+          for (uint32_t offset_dimensionality = 0;
+               offset_dimensionality < dimensionality - att_dimensionality;
+               ++offset_dimensionality) {
+            PointDVector<PT> var(n_items, dimensionality);
+
+            std::vector<PT> att(n_items * att_dimensionality);
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                att[val * att_dimensionality + att_dim] = val;
+              }
+            }
+            const PT *const attribute_data = att.data();
+
+            var.CopyAttribute(att_dimensionality, offset_dimensionality,
+                              attribute_data);
+
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  // Exercises the per-item CopyAttribute overload.
+  template <typename PT>
+  void TestContentsDiscrete() {
+    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
+      for (uint32_t dimensionality = 1; dimensionality < 10;
+           dimensionality += 2) {
+        for (uint32_t att_dimensionality = 1;
+             att_dimensionality <= dimensionality; att_dimensionality += 2) {
+          for (uint32_t offset_dimensionality = 0;
+               offset_dimensionality < dimensionality - att_dimensionality;
+               ++offset_dimensionality) {
+            PointDVector<PT> var(n_items, dimensionality);
+
+            std::vector<PT> att(n_items * att_dimensionality);
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                att[val * att_dimensionality + att_dim] = val;
+              }
+            }
+            const PT *const attribute_data = att.data();
+
+            for (PT item = 0; item < n_items; item += 1) {
+              var.CopyAttribute(att_dimensionality, offset_dimensionality, item,
+                                attribute_data + item * att_dimensionality);
+            }
+
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  // Exercises CopyItem between two vectors and checks both copies.
+  template <typename PT>
+  void TestContentsCopy() {
+    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
+      for (uint32_t dimensionality = 1; dimensionality < 10;
+           dimensionality += 2) {
+        for (uint32_t att_dimensionality = 1;
+             att_dimensionality <= dimensionality; att_dimensionality += 2) {
+          for (uint32_t offset_dimensionality = 0;
+               offset_dimensionality < dimensionality - att_dimensionality;
+               ++offset_dimensionality) {
+            PointDVector<PT> var(n_items, dimensionality);
+            PointDVector<PT> dest(n_items, dimensionality);
+
+            std::vector<PT> att(n_items * att_dimensionality);
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                att[val * att_dimensionality + att_dim] = val;
+              }
+            }
+            const PT *const attribute_data = att.data();
+
+            var.CopyAttribute(att_dimensionality, offset_dimensionality,
+                              attribute_data);
+
+            for (PT item = 0; item < n_items; item += 1) {
+              dest.CopyItem(var, item, item);
+            }
+
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
+                ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  // Walks both containers with PointDVectorIterator and compares proxies
+  // as well as individual elements.
+  template <typename PT>
+  void TestIterator() {
+    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
+      for (uint32_t dimensionality = 1; dimensionality < 10;
+           dimensionality += 2) {
+        for (uint32_t att_dimensionality = 1;
+             att_dimensionality <= dimensionality; att_dimensionality += 2) {
+          for (uint32_t offset_dimensionality = 0;
+               offset_dimensionality < dimensionality - att_dimensionality;
+               ++offset_dimensionality) {
+            PointDVector<PT> var(n_items, dimensionality);
+            PointDVector<PT> dest(n_items, dimensionality);
+
+            std::vector<PT> att(n_items * att_dimensionality);
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                att[val * att_dimensionality + att_dim] = val;
+              }
+            }
+            const PT *const attribute_data = att.data();
+
+            var.CopyAttribute(att_dimensionality, offset_dimensionality,
+                              attribute_data);
+
+            for (PT item = 0; item < n_items; item += 1) {
+              dest.CopyItem(var, item, item);
+            }
+
+            auto V0 = var.begin();
+            auto VE = var.end();
+            auto D0 = dest.begin();
+            auto DE = dest.end();
+
+            while (V0 != VE && D0 != DE) {
+              ASSERT_EQ(*D0, *V0);  // compare PseudoPointD
+              // verify elemental values
+              for (auto index = 0; index < dimensionality; index += 1) {
+                ASSERT_EQ((*D0)[index], (*V0)[index]);
+              }
+              ++V0;
+              ++D0;
+            }
+
+            for (PT val = 0; val < n_items; val += 1) {
+              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
+                ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  // Compares iteration against a std::vector<Point3ui> reference.
+  template <typename PT>
+  void TestPoint3Iterator() {
+    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
+      const uint32_t dimensionality = 3;
+      // for (uint32_t dimensionality = 1; dimensionality < 10;
+      // dimensionality += 2) {
+      const uint32_t att_dimensionality = 3;
+      // for (uint32_t att_dimensionality = 1;
+      //      att_dimensionality <= dimensionality; att_dimensionality += 2) {
+      // NOTE(review): dimensionality - att_dimensionality == 0 here, so this
+      // loop bound is 0 and the body below never executes — the iterator
+      // comparison is effectively not tested. Confirm whether the bound
+      // should be '<='.
+      for (uint32_t offset_dimensionality = 0;
+           offset_dimensionality < dimensionality - att_dimensionality;
+           ++offset_dimensionality) {
+        PointDVector<PT> var(n_items, dimensionality);
+        PointDVector<PT> dest(n_items, dimensionality);
+
+        std::vector<PT> att(n_items * att_dimensionality);
+        std::vector<draco::Point3ui> att3(n_items);
+        for (PT val = 0; val < n_items; val += 1) {
+          att3[val][0] = val;
+          att3[val][1] = val;
+          att3[val][2] = val;
+          for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+            att[val * att_dimensionality + att_dim] = val;
+          }
+        }
+        const PT *const attribute_data = att.data();
+
+        var.CopyAttribute(att_dimensionality, offset_dimensionality,
+                          attribute_data);
+
+        for (PT item = 0; item < n_items; item += 1) {
+          dest.CopyItem(var, item, item);
+        }
+
+        auto aV0 = att3.begin();
+        auto aVE = att3.end();
+        auto V0 = var.begin();
+        auto VE = var.end();
+        auto D0 = dest.begin();
+        auto DE = dest.end();
+
+        while (aV0 != aVE && V0 != VE && D0 != DE) {
+          ASSERT_EQ(*D0, *V0);  // compare PseudoPointD
+          // verify elemental values
+          for (auto index = 0; index < dimensionality; index += 1) {
+            ASSERT_EQ((*D0)[index], (*V0)[index]);
+            ASSERT_EQ((*D0)[index], (*aV0)[index]);
+            ASSERT_EQ((*aV0)[index], (*V0)[index]);
+          }
+          ++aV0;
+          ++V0;
+          ++D0;
+        }
+
+        for (PT val = 0; val < n_items; val += 1) {
+          for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
+            ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
+            ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
+          }
+        }
+      }
+    }
+  }
+
+  // Verifies that swap() exchanges referenced values, not the views.
+  void TestPseudoPointDSwap() {
+    draco::Point3ui val = {0, 1, 2};
+    draco::Point3ui dest = {10, 11, 12};
+    draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
+    draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
+
+    ASSERT_EQ(val_src1[0], 0);
+    ASSERT_EQ(val_src1[1], 1);
+    ASSERT_EQ(val_src1[2], 2);
+    ASSERT_EQ(dest_src1[0], 10);
+    ASSERT_EQ(dest_src1[1], 11);
+    ASSERT_EQ(dest_src1[2], 12);
+
+    ASSERT_NE(val_src1, dest_src1);
+
+    swap(val_src1, dest_src1);
+
+    ASSERT_EQ(dest_src1[0], 0);
+    ASSERT_EQ(dest_src1[1], 1);
+    ASSERT_EQ(dest_src1[2], 2);
+    ASSERT_EQ(val_src1[0], 10);
+    ASSERT_EQ(val_src1[1], 11);
+    ASSERT_EQ(val_src1[2], 12);
+
+    ASSERT_NE(val_src1, dest_src1);
+  }
+  // Equality is element-wise over referenced memory, so views over different
+  // buffers with equal contents compare equal.
+  void TestPseudoPointDEquality() {
+    draco::Point3ui val = {0, 1, 2};
+    draco::Point3ui dest = {0, 1, 2};
+    draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
+    draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
+    draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
+    draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
+
+    ASSERT_EQ(val_src1, val_src1);
+    ASSERT_EQ(val_src1, val_src2);
+    ASSERT_EQ(dest_src1, val_src1);
+    ASSERT_EQ(dest_src1, val_src2);
+    ASSERT_EQ(val_src2, val_src1);
+    ASSERT_EQ(val_src2, val_src2);
+    ASSERT_EQ(dest_src2, val_src1);
+    ASSERT_EQ(dest_src2, val_src2);
+
+    for (auto i = 0; i < 3; i++) {
+      ASSERT_EQ(val_src1[i], val_src1[i]);
+      ASSERT_EQ(val_src1[i], val_src2[i]);
+      ASSERT_EQ(dest_src1[i], val_src1[i]);
+      ASSERT_EQ(dest_src1[i], val_src2[i]);
+      ASSERT_EQ(val_src2[i], val_src1[i]);
+      ASSERT_EQ(val_src2[i], val_src2[i]);
+      ASSERT_EQ(dest_src2[i], val_src1[i]);
+      ASSERT_EQ(dest_src2[i], val_src2[i]);
+    }
+  }
+  // Views over buffers with differing contents compare unequal.
+  void TestPseudoPointDInequality() {
+    draco::Point3ui val = {0, 1, 2};
+    draco::Point3ui dest = {1, 2, 3};
+    draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
+    draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
+    draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
+    draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
+
+    ASSERT_EQ(val_src1, val_src1);
+    ASSERT_EQ(val_src1, val_src2);
+    ASSERT_NE(dest_src1, val_src1);
+    ASSERT_NE(dest_src1, val_src2);
+    ASSERT_EQ(val_src2, val_src1);
+    ASSERT_EQ(val_src2, val_src2);
+    ASSERT_NE(dest_src2, val_src1);
+    ASSERT_NE(dest_src2, val_src2);
+
+    for (auto i = 0; i < 3; i++) {
+      ASSERT_EQ(val_src1[i], val_src1[i]);
+      ASSERT_EQ(val_src1[i], val_src2[i]);
+      ASSERT_NE(dest_src1[i], val_src1[i]);
+      ASSERT_NE(dest_src1[i], val_src2[i]);
+      ASSERT_EQ(val_src2[i], val_src1[i]);
+      ASSERT_EQ(val_src2[i], val_src2[i]);
+      ASSERT_NE(dest_src2[i], val_src1[i]);
+      ASSERT_NE(dest_src2[i], val_src2[i]);
+    }
+  }
+};
+
+// Runs all PointDVector checks with uint32_t elements: sizes, discrete and
+// contiguous attribute copies, item copies, and both iterator walks.
+TEST_F(PointDVectorTest, VectorTest) {
+  TestSize<uint32_t>();
+  TestContentsDiscrete<uint32_t>();
+  TestContentsContiguous<uint32_t>();
+  TestContentsCopy<uint32_t>();
+  TestIterator<uint32_t>();
+  TestPoint3Iterator<uint32_t>();
+}
+// Runs the PseudoPointD proxy checks: swap, equality, and inequality.
+TEST_F(PointDVectorTest, PseudoPointDTest) {
+  TestPseudoPointDSwap();
+  TestPseudoPointDEquality();
+  TestPseudoPointDInequality();
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/points_sequencer.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/points_sequencer.h
new file mode 100644
index 0000000..2f4f7e1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/points_sequencer.h
@@ -0,0 +1,63 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
+
+#include <vector>
+
+#include "draco/attributes/point_attribute.h"
+
+namespace draco {
+
+// Class for generating a sequence of point ids that can be used to encode
+// or decode attribute values in a specific order.
+// See sequential_attribute_encoders/decoders_controller.h for more details.
+class PointsSequencer {
+ public:
+ PointsSequencer() : out_point_ids_(nullptr) {}
+ virtual ~PointsSequencer() = default;
+
+ // Fills the |out_point_ids| with the generated sequence of point ids.
+ // The pointer is retained internally (see out_point_ids()), so the vector
+ // must outlive any subsequent AddPointId() calls made by the sequencer.
+ bool GenerateSequence(std::vector<PointIndex> *out_point_ids) {
+ out_point_ids_ = out_point_ids;
+ return GenerateSequenceInternal();
+ }
+
+ // Appends a point to the sequence. Must only be called after
+ // GenerateSequence() has set the output vector.
+ void AddPointId(PointIndex point_id) { out_point_ids_->push_back(point_id); }
+
+ // Sets the correct mapping between point ids and value ids. I.e., the inverse
+ // of the |out_point_ids|. In general, |out_point_ids_| does not contain
+ // sufficient information to compute the inverse map, because not all point
+ // ids are necessarily contained within the map.
+ // Must be implemented for sequencers that are used by attribute decoders.
+ // The default implementation reports failure.
+ virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) {
+ return false;
+ }
+
+ protected:
+ // Method that needs to be implemented by the derived classes. The
+ // implementation is responsible for filling |out_point_ids_| with the valid
+ // sequence of point ids.
+ virtual bool GenerateSequenceInternal() = 0;
+ // Accessor for derived classes; the target vector set by GenerateSequence().
+ std::vector<PointIndex> *out_point_ids() const { return out_point_ids_; }
+
+ private:
+ std::vector<PointIndex> *out_point_ids_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
new file mode 100644
index 0000000..36c124b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
@@ -0,0 +1,231 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/core/varint_decoding.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for predictions encoded with the constrained multi-parallelogram
+// encoder. See the corresponding encoder for more details about the prediction
+// method.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeConstrainedMultiParallelogramDecoder
+ : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+
+ explicit MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
+ const PointAttribute *attribute)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute),
+ selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
+ MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
+ const PointAttribute *attribute, const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data),
+ selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
+
+ bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+
+ bool DecodePredictionData(DecoderBuffer *buffer) override;
+
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+
+ private:
+ typedef constrained_multi_parallelogram::Mode Mode;
+ static constexpr int kMaxNumParallelograms =
+ constrained_multi_parallelogram::kMaxNumParallelograms;
+ // Crease edges are used to store whether any given edge should be used for
+ // parallelogram prediction or not. New values are added in the order in which
+ // the edges are processed. For better compression, the flags are stored
+ // in separate contexts based on the number of available parallelograms at a
+ // given vertex.
+ std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
+ // Currently the only supported mode is OPTIMAL_MULTI_PARALLELOGRAM;
+ // DecodePredictionData() rejects any other mode on legacy bitstreams.
+ Mode selected_mode_;
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
+ DataTypeT, TransformT, MeshDataT>::
+ ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int /* size */, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().Init(num_components);
+
+ // Predicted values for all simple parallelograms encountered at any given
+ // vertex.
+ std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ pred_vals[i].resize(num_components, 0);
+ }
+ // The very first entry cannot be predicted; decode it against a zero
+ // prediction (|pred_vals[0]| is still all zeros here).
+ this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr,
+ out_data);
+
+ const CornerTable *const table = this->mesh_data().corner_table();
+ const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ // Current position in the |is_crease_edge_| array for each context.
+ std::vector<int> is_crease_edge_pos(kMaxNumParallelograms, 0);
+
+ // Used to store predicted value for multi-parallelogram prediction.
+ std::vector<DataTypeT> multi_pred_vals(num_components);
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+ for (int p = 1; p < corner_map_size; ++p) {
+ const CornerIndex start_corner_id =
+ this->mesh_data().data_to_corner_map()->at(p);
+
+ // Gather up to |kMaxNumParallelograms| parallelogram predictions by
+ // visiting the corners around the vertex.
+ CornerIndex corner_id(start_corner_id);
+ int num_parallelograms = 0;
+ bool first_pass = true;
+ while (corner_id != kInvalidCornerIndex) {
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, out_data,
+ num_components, &(pred_vals[num_parallelograms][0]))) {
+ // Parallelogram prediction applied and stored in
+ // |pred_vals[num_parallelograms]|
+ ++num_parallelograms;
+ // Stop processing when we reach the maximum number of allowed
+ // parallelograms.
+ if (num_parallelograms == kMaxNumParallelograms) {
+ break;
+ }
+ }
+
+ // Proceed to the next corner attached to the vertex. First swing left
+ // and if we reach a boundary, swing right from the start corner.
+ if (first_pass) {
+ corner_id = table->SwingLeft(corner_id);
+ } else {
+ corner_id = table->SwingRight(corner_id);
+ }
+ if (corner_id == start_corner_id) {
+ break;
+ }
+ if (corner_id == kInvalidCornerIndex && first_pass) {
+ first_pass = false;
+ corner_id = table->SwingRight(start_corner_id);
+ }
+ }
+
+ // Check which of the available parallelograms are actually used and compute
+ // the final predicted value.
+ int num_used_parallelograms = 0;
+ if (num_parallelograms > 0) {
+ for (int i = 0; i < num_components; ++i) {
+ multi_pred_vals[i] = 0;
+ }
+ // Check which parallelograms are actually used.
+ for (int i = 0; i < num_parallelograms; ++i) {
+ const int context = num_parallelograms - 1;
+ const int pos = is_crease_edge_pos[context]++;
+ if (is_crease_edge_[context].size() <= pos) {
+ // Bitstream did not provide enough crease flags for this context.
+ return false;
+ }
+ const bool is_crease = is_crease_edge_[context][pos];
+ if (!is_crease) {
+ ++num_used_parallelograms;
+ for (int j = 0; j < num_components; ++j) {
+ multi_pred_vals[j] += pred_vals[i][j];
+ }
+ }
+ }
+ }
+ const int dst_offset = p * num_components;
+ if (num_used_parallelograms == 0) {
+ // No parallelogram was valid.
+ // We use the last decoded point as a reference.
+ const int src_offset = (p - 1) * num_components;
+ this->transform().ComputeOriginalValue(
+ out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
+ } else {
+ // Compute the correction from the predicted value.
+ // The prediction is the average of the used parallelogram predictions.
+ for (int c = 0; c < num_components; ++c) {
+ multi_pred_vals[c] /= num_used_parallelograms;
+ }
+ this->transform().ComputeOriginalValue(
+ multi_pred_vals.data(), in_corr + dst_offset, out_data + dst_offset);
+ }
+ }
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
+ DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
+ *buffer) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ // Decode prediction mode.
+ uint8_t mode;
+ if (!buffer->Decode(&mode)) {
+ return false;
+ }
+
+ if (mode != Mode::OPTIMAL_MULTI_PARALLELOGRAM) {
+ // Unsupported mode.
+ return false;
+ }
+ }
+#endif
+
+ // Decode selected edges using separate rans bit decoder for each context.
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ uint32_t num_flags;
+ if (!DecodeVarint<uint32_t>(&num_flags, buffer)) {
+ return false;
+ }
+ if (num_flags > 0) {
+ is_crease_edge_[i].resize(num_flags);
+ RAnsBitDecoder decoder;
+ if (!decoder.StartDecoding(buffer)) {
+ return false;
+ }
+ for (uint32_t j = 0; j < num_flags; ++j) {
+ is_crease_edge_[i][j] = decoder.DecodeNextBit();
+ }
+ decoder.EndDecoding();
+ }
+ }
+ // Delegate decoding of the transform data to the base class.
+ return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
+ MeshDataT>::DecodePredictionData(buffer);
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h
new file mode 100644
index 0000000..77df8ee
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h
@@ -0,0 +1,414 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/compression/entropy/shannon_entropy.h"
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+// Compared to standard multi-parallelogram, constrained multi-parallelogram can
+// explicitly select which of the available parallelograms are going to be used
+// for the prediction by marking crease edges between two triangles. This
+// requires storing extra data, but it allows the predictor to avoid using
+// parallelograms that would lead to poor predictions. For improved efficiency,
+// our current implementation limits the maximum number of used parallelograms
+// to four, which covers >95% of the cases (on average, there are only two
+// parallelograms available for any given vertex).
+// All bits of the explicitly chosen configuration are stored together in a
+// single context chosen by the total number of parallelograms available to
+// choose from.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeConstrainedMultiParallelogramEncoder
+ : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+
+ explicit MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
+ const PointAttribute *attribute)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute),
+ selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
+ MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
+ const PointAttribute *attribute, const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data),
+ selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
+
+ bool ComputeCorrectionValues(
+ const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) override;
+
+ bool EncodePredictionData(EncoderBuffer *buffer) override;
+
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+
+ private:
+ // Function used to compute number of bits needed to store overhead of the
+ // predictor. In this case, we consider overhead to be all bits that mark
+ // whether a parallelogram should be used for prediction or not. The input
+ // to this method is the total number of parallelograms that were evaluated so
+ // far (total_parallelogram), and the number of parallelograms we decided to
+ // use for prediction (total_used_parallelograms).
+ // Returns number of bits required to store the overhead.
+ int64_t ComputeOverheadBits(int64_t total_used_parallelograms,
+ int64_t total_parallelogram) const {
+ // For now we assume RAns coding for the bits where the total required size
+ // is directly correlated to the binary entropy of the input stream.
+ // TODO(ostava): This should be generalized in case we use other binary
+ // coding scheme.
+ const double entropy = ComputeBinaryShannonEntropy(
+ static_cast<uint32_t>(total_parallelogram),
+ static_cast<uint32_t>(total_used_parallelograms));
+
+ // Round up to the nearest full bit.
+ return static_cast<int64_t>(
+ ceil(static_cast<double>(total_parallelogram) * entropy));
+ }
+
+ // Struct that contains data used for measuring the error of each available
+ // parallelogram configuration.
+ struct Error {
+ Error() : num_bits(0), residual_error(0) {}
+
+ // Primary metric: number of bits required to store the data as a result of
+ // the selected prediction configuration.
+ int num_bits;
+ // Secondary metric: absolute difference of residuals for the given
+ // configuration.
+ int residual_error;
+
+ // Lexicographic comparison: fewer bits wins; residual error breaks ties.
+ bool operator<(const Error &e) const {
+ if (num_bits < e.num_bits) {
+ return true;
+ }
+ if (num_bits > e.num_bits) {
+ return false;
+ }
+ return residual_error < e.residual_error;
+ }
+ };
+
+ // Computes error for predicting |predicted_val| instead of |actual_val|.
+ // Error is computed as the number of bits needed to encode the difference
+ // between the values.
+ Error ComputeError(const DataTypeT *predicted_val,
+ const DataTypeT *actual_val, int *out_residuals,
+ int num_components) {
+ Error error;
+
+ for (int i = 0; i < num_components; ++i) {
+ const int dif = (predicted_val[i] - actual_val[i]);
+ error.residual_error += std::abs(dif);
+ out_residuals[i] = dif;
+ // Entropy needs unsigned symbols, so convert the signed difference to an
+ // unsigned symbol.
+ entropy_symbols_[i] = ConvertSignedIntToSymbol(dif);
+ }
+
+ // Generate entropy data for case that this configuration was used.
+ // Note that the entropy stream is NOT updated in this case.
+ const auto entropy_data =
+ entropy_tracker_.Peek(entropy_symbols_.data(), num_components);
+
+ error.num_bits = entropy_tracker_.GetNumberOfDataBits(entropy_data) +
+ entropy_tracker_.GetNumberOfRAnsTableBits(entropy_data);
+ return error;
+ }
+
+ typedef constrained_multi_parallelogram::Mode Mode;
+ static constexpr int kMaxNumParallelograms =
+ constrained_multi_parallelogram::kMaxNumParallelograms;
+ // Crease edges are used to store whether any given edge should be used for
+ // parallelogram prediction or not. New values are added in the order in which
+ // the edges are processed. For better compression, the flags are stored
+ // in separate contexts based on the number of available parallelograms at a
+ // given vertex.
+ // TODO(draco-eng) reconsider std::vector<bool> (performance/space).
+ std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
+ Mode selected_mode_;
+
+ ShannonEntropyTracker entropy_tracker_;
+
+ // Temporary storage for symbols that are fed into the |entropy_stream|.
+ // Always contains only |num_components| entries.
+ std::vector<uint32_t> entropy_symbols_;
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
+ DataTypeT, TransformT, MeshDataT>::
+ ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
+ int size, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().Init(in_data, size, num_components);
+ const CornerTable *const table = this->mesh_data().corner_table();
+ const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ // Predicted values for all simple parallelograms encountered at any given
+ // vertex.
+ std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ pred_vals[i].resize(num_components);
+ }
+ // Used to store predicted value for various multi-parallelogram predictions
+ // (combinations of simple parallelogram predictions).
+ std::vector<DataTypeT> multi_pred_vals(num_components);
+ entropy_symbols_.resize(num_components);
+
+ // Struct for holding data about prediction configuration for different sets
+ // of used parallelograms.
+ struct PredictionConfiguration {
+ PredictionConfiguration()
+ : error(), configuration(0), num_used_parallelograms(0) {}
+ Error error;
+ uint8_t configuration; // Bitfield, 1 use parallelogram, 0 don't use it.
+ int num_used_parallelograms;
+ std::vector<DataTypeT> predicted_value;
+ std::vector<int32_t> residuals;
+ };
+
+ // Bit-field used for computing permutations of excluded edges
+ // (parallelograms). NOTE: "exluded" misspelling is kept from upstream.
+ bool exluded_parallelograms[kMaxNumParallelograms];
+
+ // Data about the number of used parallelogram and total number of available
+ // parallelogram for each context. Used to compute overhead needed for storing
+ // the parallelogram choices made by the encoder.
+ int64_t total_used_parallelograms[kMaxNumParallelograms] = {0};
+ int64_t total_parallelograms[kMaxNumParallelograms] = {0};
+
+ std::vector<int> current_residuals(num_components);
+
+ // We start processing the vertices from the end because this prediction uses
+ // data from previous entries that could be overwritten when an entry is
+ // processed.
+ for (int p =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1;
+ p > 0; --p) {
+ const CornerIndex start_corner_id =
+ this->mesh_data().data_to_corner_map()->at(p);
+
+ // Go over all corners attached to the vertex and compute the predicted
+ // value from the parallelograms defined by their opposite faces.
+ CornerIndex corner_id(start_corner_id);
+ int num_parallelograms = 0;
+ bool first_pass = true;
+ while (corner_id != kInvalidCornerIndex) {
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, in_data, num_components,
+ &(pred_vals[num_parallelograms][0]))) {
+ // Parallelogram prediction applied and stored in
+ // |pred_vals[num_parallelograms]|
+ ++num_parallelograms;
+ // Stop processing when we reach the maximum number of allowed
+ // parallelograms.
+ if (num_parallelograms == kMaxNumParallelograms) {
+ break;
+ }
+ }
+
+ // Proceed to the next corner attached to the vertex. First swing left
+ // and if we reach a boundary, swing right from the start corner.
+ if (first_pass) {
+ corner_id = table->SwingLeft(corner_id);
+ } else {
+ corner_id = table->SwingRight(corner_id);
+ }
+ if (corner_id == start_corner_id) {
+ break;
+ }
+ if (corner_id == kInvalidCornerIndex && first_pass) {
+ first_pass = false;
+ corner_id = table->SwingRight(start_corner_id);
+ }
+ }
+
+ // Offset to the target (destination) vertex.
+ const int dst_offset = p * num_components;
+ Error error;
+
+ // Compute all prediction errors for all possible configurations of
+ // available parallelograms.
+
+ // Variable for holding the best configuration that has been found so far.
+ PredictionConfiguration best_prediction;
+
+ // Compute delta coding error (configuration when no parallelogram is
+ // selected).
+ const int src_offset = (p - 1) * num_components;
+ error = ComputeError(in_data + src_offset, in_data + dst_offset,
+ &current_residuals[0], num_components);
+
+ if (num_parallelograms > 0) {
+ total_parallelograms[num_parallelograms - 1] += num_parallelograms;
+ const int64_t new_overhead_bits =
+ ComputeOverheadBits(total_used_parallelograms[num_parallelograms - 1],
+ total_parallelograms[num_parallelograms - 1]);
+ error.num_bits += new_overhead_bits;
+ }
+
+ // Seed the best configuration with the delta-coding fallback.
+ best_prediction.error = error;
+ best_prediction.configuration = 0;
+ best_prediction.num_used_parallelograms = 0;
+ best_prediction.predicted_value.assign(
+ in_data + src_offset, in_data + src_offset + num_components);
+ best_prediction.residuals.assign(current_residuals.begin(),
+ current_residuals.end());
+
+ // Compute prediction error for different cases of used parallelograms.
+ for (int num_used_parallelograms = 1;
+ num_used_parallelograms <= num_parallelograms;
+ ++num_used_parallelograms) {
+ // Mark all parallelograms as excluded.
+ std::fill(exluded_parallelograms,
+ exluded_parallelograms + num_parallelograms, true);
+ // TODO(draco-eng) maybe this should be another std::fill.
+ // Mark the first |num_used_parallelograms| as not excluded.
+ for (int j = 0; j < num_used_parallelograms; ++j) {
+ exluded_parallelograms[j] = false;
+ }
+ // Permute over the excluded edges and compute error for each
+ // configuration (permutation of excluded parallelograms).
+ do {
+ // Reset the multi-parallelogram predicted values.
+ for (int j = 0; j < num_components; ++j) {
+ multi_pred_vals[j] = 0;
+ }
+ uint8_t configuration = 0;
+ for (int j = 0; j < num_parallelograms; ++j) {
+ if (exluded_parallelograms[j]) {
+ continue;
+ }
+ for (int c = 0; c < num_components; ++c) {
+ multi_pred_vals[c] += pred_vals[j][c];
+ }
+ // Set jth bit of the configuration.
+ configuration |= (1 << j);
+ }
+
+ // Average the included parallelogram predictions.
+ for (int j = 0; j < num_components; ++j) {
+ multi_pred_vals[j] /= num_used_parallelograms;
+ }
+ error = ComputeError(multi_pred_vals.data(), in_data + dst_offset,
+ &current_residuals[0], num_components);
+ if (num_parallelograms > 0) {
+ const int64_t new_overhead_bits = ComputeOverheadBits(
+ total_used_parallelograms[num_parallelograms - 1] +
+ num_used_parallelograms,
+ total_parallelograms[num_parallelograms - 1]);
+
+ // Add overhead bits to the total error.
+ error.num_bits += new_overhead_bits;
+ }
+ if (error < best_prediction.error) {
+ best_prediction.error = error;
+ best_prediction.configuration = configuration;
+ best_prediction.num_used_parallelograms = num_used_parallelograms;
+ best_prediction.predicted_value.assign(multi_pred_vals.begin(),
+ multi_pred_vals.end());
+ best_prediction.residuals.assign(current_residuals.begin(),
+ current_residuals.end());
+ }
+ } while (std::next_permutation(
+ exluded_parallelograms, exluded_parallelograms + num_parallelograms));
+ }
+ if (num_parallelograms > 0) {
+ total_used_parallelograms[num_parallelograms - 1] +=
+ best_prediction.num_used_parallelograms;
+ }
+
+ // Update the entropy stream by adding selected residuals as symbols to the
+ // stream.
+ for (int i = 0; i < num_components; ++i) {
+ entropy_symbols_[i] =
+ ConvertSignedIntToSymbol(best_prediction.residuals[i]);
+ }
+ entropy_tracker_.Push(entropy_symbols_.data(), num_components);
+
+ for (int i = 0; i < num_parallelograms; ++i) {
+ if ((best_prediction.configuration & (1 << i)) == 0) {
+ // Parallelogram not used, mark the edge as crease.
+ is_crease_edge_[num_parallelograms - 1].push_back(true);
+ } else {
+ // Parallelogram used. Add it to the predicted value and mark the
+ // edge as not a crease.
+ is_crease_edge_[num_parallelograms - 1].push_back(false);
+ }
+ }
+ this->transform().ComputeCorrection(in_data + dst_offset,
+ best_prediction.predicted_value.data(),
+ out_corr + dst_offset);
+ }
+ // First element is always fixed because it cannot be predicted.
+ for (int i = 0; i < num_components; ++i) {
+ pred_vals[0][i] = static_cast<DataTypeT>(0);
+ }
+ this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr);
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
+ DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
+ *buffer) {
+ // Encode selected edges using separate rans bit coder for each context.
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ // |i| is the context based on the number of available parallelograms, which
+ // is always equal to |i + 1|.
+ const int num_used_parallelograms = i + 1;
+ EncodeVarint<uint32_t>(is_crease_edge_[i].size(), buffer);
+ if (is_crease_edge_[i].size()) {
+ RAnsBitEncoder encoder;
+ encoder.StartEncoding();
+ // Encode the crease edge flags in the reverse vertex order that is needed
+ // by the decoder. Note that for the currently supported mode, each vertex
+ // has exactly |num_used_parallelograms| edges that need to be encoded.
+ for (int j = static_cast<int>(is_crease_edge_[i].size()) -
+ num_used_parallelograms;
+ j >= 0; j -= num_used_parallelograms) {
+ // Go over all edges of the current vertex.
+ for (int k = 0; k < num_used_parallelograms; ++k) {
+ encoder.EncodeBit(is_crease_edge_[i][j + k]);
+ }
+ }
+ encoder.EndEncoding(buffer);
+ }
+ }
+ // Delegate encoding of the transform data to the base class.
+ return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
+ MeshDataT>::EncodePredictionData(buffer);
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h
new file mode 100644
index 0000000..c7a4e35
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
+
+namespace draco {
+
+// Data shared between constrained multi-parallelogram encoder and decoder.
+namespace constrained_multi_parallelogram {
+
+enum Mode {
+ // Selects the optimal multi-parallelogram from up to 4 available
+ // parallelograms.
+ OPTIMAL_MULTI_PARALLELOGRAM = 0,
+};
+
+// Maximum number of parallelograms that can be combined into a single
+// prediction; also the number of crease-edge flag contexts.
+static constexpr int kMaxNumParallelograms = 4;
+
+} // namespace constrained_multi_parallelogram
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h
new file mode 100644
index 0000000..2960a5e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h
@@ -0,0 +1,72 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
+
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Class stores data about the connectivity data of the mesh and information
+// about how the connectivity was encoded/decoded.
+template <class CornerTableT>
+class MeshPredictionSchemeData {
+ public:
+ typedef CornerTableT CornerTable;
+ MeshPredictionSchemeData()
+ : mesh_(nullptr),
+ corner_table_(nullptr),
+ vertex_to_data_map_(nullptr),
+ data_to_corner_map_(nullptr) {}
+
+ void Set(const Mesh *mesh, const CornerTable *table,
+ const std::vector<CornerIndex> *data_to_corner_map,
+ const std::vector<int32_t> *vertex_to_data_map) {
+ mesh_ = mesh;
+ corner_table_ = table;
+ data_to_corner_map_ = data_to_corner_map;
+ vertex_to_data_map_ = vertex_to_data_map;
+ }
+
+ const Mesh *mesh() const { return mesh_; }
+ const CornerTable *corner_table() const { return corner_table_; }
+ const std::vector<int32_t> *vertex_to_data_map() const {
+ return vertex_to_data_map_;
+ }
+ const std::vector<CornerIndex> *data_to_corner_map() const {
+ return data_to_corner_map_;
+ }
+ bool IsInitialized() const {
+ return mesh_ != nullptr && corner_table_ != nullptr &&
+ vertex_to_data_map_ != nullptr && data_to_corner_map_ != nullptr;
+ }
+
+ private:
+ const Mesh *mesh_;
+ const CornerTable *corner_table_;
+
+ // Mapping between vertices and their encoding order. I.e. when an attribute
+ // entry on a given vertex was encoded.
+ const std::vector<int32_t> *vertex_to_data_map_;
+
+ // Array that stores which corner was processed when a given attribute entry
+ // was encoded or decoded.
+ const std::vector<CornerIndex> *data_to_corner_map_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h
new file mode 100644
index 0000000..6694a98
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
+
+namespace draco {
+
+// Base class for all mesh prediction scheme decoders that use the mesh
+// connectivity data. |MeshDataT| can be any class that provides the same
+// interface as the PredictionSchemeMeshData class.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeDecoder
+ : public PredictionSchemeDecoder<DataTypeT, TransformT> {
+ public:
+ typedef MeshDataT MeshData;
+ MeshPredictionSchemeDecoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : PredictionSchemeDecoder<DataTypeT, TransformT>(attribute, transform),
+ mesh_data_(mesh_data) {}
+
+ protected:
+ const MeshData &mesh_data() const { return mesh_data_; }
+
+ private:
+ MeshData mesh_data_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h
new file mode 100644
index 0000000..ab3c81a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
+
+namespace draco {
+
+// Base class for all mesh prediction scheme encoders that use the mesh
+// connectivity data. |MeshDataT| can be any class that provides the same
+// interface as the PredictionSchemeMeshData class.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeEncoder
+ : public PredictionSchemeEncoder<DataTypeT, TransformT> {
+ public:
+ typedef MeshDataT MeshData;
+ MeshPredictionSchemeEncoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : PredictionSchemeEncoder<DataTypeT, TransformT>(attribute, transform),
+ mesh_data_(mesh_data) {}
+
+ protected:
+ const MeshData &mesh_data() const { return mesh_data_; }
+
+ private:
+ MeshData mesh_data_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h
new file mode 100644
index 0000000..da1387a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h
@@ -0,0 +1,172 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// See MeshPredictionSchemeGeometricNormalEncoder for documentation.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeGeometricNormalDecoder
+ : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
+ MeshDataT>::CorrType;
+ MeshPredictionSchemeGeometricNormalDecoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data),
+ predictor_(mesh_data) {}
+
+ private:
+ MeshPredictionSchemeGeometricNormalDecoder() {}
+
+ public:
+ bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+
+ bool DecodePredictionData(DecoderBuffer *buffer) override;
+
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_GEOMETRIC_NORMAL;
+ }
+
+ bool IsInitialized() const override {
+ if (!predictor_.IsInitialized()) {
+ return false;
+ }
+ if (!this->mesh_data().IsInitialized()) {
+ return false;
+ }
+ if (!octahedron_tool_box_.IsInitialized()) {
+ return false;
+ }
+ return true;
+ }
+
+ int GetNumParentAttributes() const override { return 1; }
+
+ GeometryAttribute::Type GetParentAttributeType(int i) const override {
+ DRACO_DCHECK_EQ(i, 0);
+ (void)i;
+ return GeometryAttribute::POSITION;
+ }
+
+ bool SetParentAttribute(const PointAttribute *att) override {
+ if (att->attribute_type() != GeometryAttribute::POSITION) {
+ return false; // Invalid attribute type.
+ }
+ if (att->num_components() != 3) {
+ return false; // Currently works only for 3 component positions.
+ }
+ predictor_.SetPositionAttribute(*att);
+ return true;
+ }
+ void SetQuantizationBits(int q) {
+ octahedron_tool_box_.SetQuantizationBits(q);
+ }
+
+ private:
+ MeshPredictionSchemeGeometricNormalPredictorArea<DataTypeT, TransformT,
+ MeshDataT>
+ predictor_;
+ OctahedronToolBox octahedron_tool_box_;
+ RAnsBitDecoder flip_normal_bit_decoder_;
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeGeometricNormalDecoder<
+ DataTypeT, TransformT,
+ MeshDataT>::ComputeOriginalValues(const CorrType *in_corr,
+ DataTypeT *out_data, int /* size */,
+ int num_components,
+ const PointIndex *entry_to_point_id_map) {
+ this->SetQuantizationBits(this->transform().quantization_bits());
+ predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
+ DRACO_DCHECK(this->IsInitialized());
+
+ // Expecting in_data in octahedral coordinates, i.e., portable attribute.
+ DRACO_DCHECK_EQ(num_components, 2);
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+
+ VectorD<int32_t, 3> pred_normal_3d;
+ int32_t pred_normal_oct[2];
+
+ for (int data_id = 0; data_id < corner_map_size; ++data_id) {
+ const CornerIndex corner_id =
+ this->mesh_data().data_to_corner_map()->at(data_id);
+ predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data());
+
+ // Compute predicted octahedral coordinates.
+ octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data());
+ DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(),
+ octahedron_tool_box_.center_value());
+ if (flip_normal_bit_decoder_.DecodeNextBit()) {
+ pred_normal_3d = -pred_normal_3d;
+ }
+ octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
+ pred_normal_3d.data(), pred_normal_oct, pred_normal_oct + 1);
+
+ const int data_offset = data_id * 2;
+ this->transform().ComputeOriginalValue(
+ pred_normal_oct, in_corr + data_offset, out_data + data_offset);
+ }
+ flip_normal_bit_decoder_.EndDecoding();
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeGeometricNormalDecoder<
+ DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
+ *buffer) {
+ // Get data needed for transform
+ if (!this->transform().DecodeTransformData(buffer)) {
+ return false;
+ }
+
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ uint8_t prediction_mode;
+ if (!buffer->Decode(&prediction_mode)) {
+ return false;
+ }
+
+ if (!predictor_.SetNormalPredictionMode(
+ NormalPredictionMode(prediction_mode))) {
+ return false;
+ }
+ }
+#endif
+
+ // Init normal flips.
+ if (!flip_normal_bit_decoder_.StartDecoding(buffer)) {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h
new file mode 100644
index 0000000..cf146f8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h
@@ -0,0 +1,180 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/compression/config/compression_shared.h"
+
+namespace draco {
+
+// Prediction scheme for normals based on the underlying geometry.
+// At a smooth vertices normals are computed by weighting the normals of
+// adjacent faces with the area of these faces. At seams, the same approach
+// applies for seam corners.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeGeometricNormalEncoder
+ : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
+ MeshDataT>::CorrType;
+ MeshPredictionSchemeGeometricNormalEncoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data),
+ predictor_(mesh_data) {}
+
+ bool ComputeCorrectionValues(
+ const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) override;
+
+ bool EncodePredictionData(EncoderBuffer *buffer) override;
+
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_GEOMETRIC_NORMAL;
+ }
+
+ bool IsInitialized() const override {
+ if (!predictor_.IsInitialized()) {
+ return false;
+ }
+ if (!this->mesh_data().IsInitialized()) {
+ return false;
+ }
+ return true;
+ }
+
+ int GetNumParentAttributes() const override { return 1; }
+
+ GeometryAttribute::Type GetParentAttributeType(int i) const override {
+ DRACO_DCHECK_EQ(i, 0);
+ (void)i;
+ return GeometryAttribute::POSITION;
+ }
+
+ bool SetParentAttribute(const PointAttribute *att) override {
+ if (att->attribute_type() != GeometryAttribute::POSITION) {
+ return false; // Invalid attribute type.
+ }
+ if (att->num_components() != 3) {
+ return false; // Currently works only for 3 component positions.
+ }
+ predictor_.SetPositionAttribute(*att);
+ return true;
+ }
+
+ private:
+ void SetQuantizationBits(int q) {
+ DRACO_DCHECK_GE(q, 2);
+ DRACO_DCHECK_LE(q, 30);
+ octahedron_tool_box_.SetQuantizationBits(q);
+ }
+ MeshPredictionSchemeGeometricNormalPredictorArea<DataTypeT, TransformT,
+ MeshDataT>
+ predictor_;
+
+ OctahedronToolBox octahedron_tool_box_;
+ RAnsBitEncoder flip_normal_bit_encoder_;
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeGeometricNormalEncoder<DataTypeT, TransformT,
+ MeshDataT>::
+ ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) {
+ this->SetQuantizationBits(this->transform().quantization_bits());
+ predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
+ DRACO_DCHECK(this->IsInitialized());
+ // Expecting in_data in octahedral coordinates, i.e., portable attribute.
+ DRACO_DCHECK_EQ(num_components, 2);
+
+ flip_normal_bit_encoder_.StartEncoding();
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+
+ VectorD<int32_t, 3> pred_normal_3d;
+ VectorD<int32_t, 2> pos_pred_normal_oct;
+ VectorD<int32_t, 2> neg_pred_normal_oct;
+ VectorD<int32_t, 2> pos_correction;
+ VectorD<int32_t, 2> neg_correction;
+ for (int data_id = 0; data_id < corner_map_size; ++data_id) {
+ const CornerIndex corner_id =
+ this->mesh_data().data_to_corner_map()->at(data_id);
+ predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data());
+
+ // Compute predicted octahedral coordinates.
+ octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data());
+ DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(),
+ octahedron_tool_box_.center_value());
+
+ // Compute octahedral coordinates for both possible directions.
+ octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
+ pred_normal_3d.data(), pos_pred_normal_oct.data(),
+ pos_pred_normal_oct.data() + 1);
+ pred_normal_3d = -pred_normal_3d;
+ octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
+ pred_normal_3d.data(), neg_pred_normal_oct.data(),
+ neg_pred_normal_oct.data() + 1);
+
+ // Choose the one with the best correction value.
+ const int data_offset = data_id * 2;
+ this->transform().ComputeCorrection(in_data + data_offset,
+ pos_pred_normal_oct.data(),
+ pos_correction.data());
+ this->transform().ComputeCorrection(in_data + data_offset,
+ neg_pred_normal_oct.data(),
+ neg_correction.data());
+ pos_correction[0] = octahedron_tool_box_.ModMax(pos_correction[0]);
+ pos_correction[1] = octahedron_tool_box_.ModMax(pos_correction[1]);
+ neg_correction[0] = octahedron_tool_box_.ModMax(neg_correction[0]);
+ neg_correction[1] = octahedron_tool_box_.ModMax(neg_correction[1]);
+ if (pos_correction.AbsSum() < neg_correction.AbsSum()) {
+ flip_normal_bit_encoder_.EncodeBit(false);
+ (out_corr + data_offset)[0] =
+ octahedron_tool_box_.MakePositive(pos_correction[0]);
+ (out_corr + data_offset)[1] =
+ octahedron_tool_box_.MakePositive(pos_correction[1]);
+ } else {
+ flip_normal_bit_encoder_.EncodeBit(true);
+ (out_corr + data_offset)[0] =
+ octahedron_tool_box_.MakePositive(neg_correction[0]);
+ (out_corr + data_offset)[1] =
+ octahedron_tool_box_.MakePositive(neg_correction[1]);
+ }
+ }
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeGeometricNormalEncoder<
+ DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
+ *buffer) {
+ if (!this->transform().EncodeTransformData(buffer)) {
+ return false;
+ }
+
+ // Encode normal flips.
+ flip_normal_bit_encoder_.EndEncoding(buffer);
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h
new file mode 100644
index 0000000..775eded
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h
@@ -0,0 +1,117 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h"
+
+namespace draco {
+
+// This predictor estimates the normal via the surrounding triangles of the
+// given corner. Triangles are weighted according to their area.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeGeometricNormalPredictorArea
+ : public MeshPredictionSchemeGeometricNormalPredictorBase<
+ DataTypeT, TransformT, MeshDataT> {
+ typedef MeshPredictionSchemeGeometricNormalPredictorBase<
+ DataTypeT, TransformT, MeshDataT>
+ Base;
+
+ public:
+ explicit MeshPredictionSchemeGeometricNormalPredictorArea(const MeshDataT &md)
+ : Base(md) {
+ this->SetNormalPredictionMode(TRIANGLE_AREA);
+ };
+ virtual ~MeshPredictionSchemeGeometricNormalPredictorArea() {}
+
+ // Computes predicted octahedral coordinates on a given corner.
+ void ComputePredictedValue(CornerIndex corner_id,
+ DataTypeT *prediction) override {
+ DRACO_DCHECK(this->IsInitialized());
+ typedef typename MeshDataT::CornerTable CornerTable;
+ const CornerTable *const corner_table = this->mesh_data_.corner_table();
+ // Going to compute the predicted normal from the surrounding triangles
+ // according to the connectivity of the given corner table.
+ VertexCornersIterator<CornerTable> cit(corner_table, corner_id);
+ // Position of central vertex does not change in loop.
+ const VectorD<int64_t, 3> pos_cent = this->GetPositionForCorner(corner_id);
+ // Computing normals for triangles and adding them up.
+
+ VectorD<int64_t, 3> normal;
+ CornerIndex c_next, c_prev;
+ while (!cit.End()) {
+ // Getting corners.
+ if (this->normal_prediction_mode_ == ONE_TRIANGLE) {
+ c_next = corner_table->Next(corner_id);
+ c_prev = corner_table->Previous(corner_id);
+ } else {
+ c_next = corner_table->Next(cit.Corner());
+ c_prev = corner_table->Previous(cit.Corner());
+ }
+ const VectorD<int64_t, 3> pos_next = this->GetPositionForCorner(c_next);
+ const VectorD<int64_t, 3> pos_prev = this->GetPositionForCorner(c_prev);
+
+ // Computing delta vectors to next and prev.
+ const VectorD<int64_t, 3> delta_next = pos_next - pos_cent;
+ const VectorD<int64_t, 3> delta_prev = pos_prev - pos_cent;
+
+ // Computing cross product.
+ const VectorD<int64_t, 3> cross = CrossProduct(delta_next, delta_prev);
+
+ // Prevent signed integer overflows by doing math as unsigned.
+ auto normal_data = reinterpret_cast<uint64_t *>(normal.data());
+ auto cross_data = reinterpret_cast<const uint64_t *>(cross.data());
+ normal_data[0] = normal_data[0] + cross_data[0];
+ normal_data[1] = normal_data[1] + cross_data[1];
+ normal_data[2] = normal_data[2] + cross_data[2];
+
+ cit.Next();
+ }
+
+ // Convert to int32_t, make sure entries are not too large.
+ constexpr int64_t upper_bound = 1 << 29;
+ if (this->normal_prediction_mode_ == ONE_TRIANGLE) {
+ const int32_t abs_sum = static_cast<int32_t>(normal.AbsSum());
+ if (abs_sum > upper_bound) {
+ const int64_t quotient = abs_sum / upper_bound;
+ normal = normal / quotient;
+ }
+ } else {
+ const int64_t abs_sum = normal.AbsSum();
+ if (abs_sum > upper_bound) {
+ const int64_t quotient = abs_sum / upper_bound;
+ normal = normal / quotient;
+ }
+ }
+ DRACO_DCHECK_LE(normal.AbsSum(), upper_bound);
+ prediction[0] = static_cast<int32_t>(normal[0]);
+ prediction[1] = static_cast<int32_t>(normal[1]);
+ prediction[2] = static_cast<int32_t>(normal[2]);
+ }
+ bool SetNormalPredictionMode(NormalPredictionMode mode) override {
+ if (mode == ONE_TRIANGLE) {
+ this->normal_prediction_mode_ = mode;
+ return true;
+ } else if (mode == TRIANGLE_AREA) {
+ this->normal_prediction_mode_ = mode;
+ return true;
+ }
+ return false;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h
new file mode 100644
index 0000000..a554dda
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h
@@ -0,0 +1,96 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_
+
+#include <math.h>
+
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/math_utils.h"
+#include "draco/core/vector_d.h"
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/corner_table_iterators.h"
+
+namespace draco {
+
+// Base class for geometric normal predictors using position attribute.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeGeometricNormalPredictorBase {
+ protected:
+ explicit MeshPredictionSchemeGeometricNormalPredictorBase(const MeshDataT &md)
+ : pos_attribute_(nullptr),
+ entry_to_point_id_map_(nullptr),
+ mesh_data_(md) {}
+ virtual ~MeshPredictionSchemeGeometricNormalPredictorBase() {}
+
+ public:
+ void SetPositionAttribute(const PointAttribute &position_attribute) {
+ pos_attribute_ = &position_attribute;
+ }
+ void SetEntryToPointIdMap(const PointIndex *map) {
+ entry_to_point_id_map_ = map;
+ }
+ bool IsInitialized() const {
+ if (pos_attribute_ == nullptr) {
+ return false;
+ }
+ if (entry_to_point_id_map_ == nullptr) {
+ return false;
+ }
+ return true;
+ }
+
+ virtual bool SetNormalPredictionMode(NormalPredictionMode mode) = 0;
+ virtual NormalPredictionMode GetNormalPredictionMode() const {
+ return normal_prediction_mode_;
+ }
+
+ protected:
+ VectorD<int64_t, 3> GetPositionForDataId(int data_id) const {
+ DRACO_DCHECK(this->IsInitialized());
+ const auto point_id = entry_to_point_id_map_[data_id];
+ const auto pos_val_id = pos_attribute_->mapped_index(point_id);
+ VectorD<int64_t, 3> pos;
+ pos_attribute_->ConvertValue(pos_val_id, &pos[0]);
+ return pos;
+ }
+ VectorD<int64_t, 3> GetPositionForCorner(CornerIndex ci) const {
+ DRACO_DCHECK(this->IsInitialized());
+ const auto corner_table = mesh_data_.corner_table();
+ const auto vert_id = corner_table->Vertex(ci).value();
+ const auto data_id = mesh_data_.vertex_to_data_map()->at(vert_id);
+ return GetPositionForDataId(data_id);
+ }
+ VectorD<int32_t, 2> GetOctahedralCoordForDataId(int data_id,
+ const DataTypeT *data) const {
+ DRACO_DCHECK(this->IsInitialized());
+ const int data_offset = data_id * 2;
+ return VectorD<int32_t, 2>(data[data_offset], data[data_offset + 1]);
+ }
+ // Computes predicted octahedral coordinates on a given corner.
+ virtual void ComputePredictedValue(CornerIndex corner_id,
+ DataTypeT *prediction) = 0;
+
+ const PointAttribute *pos_attribute_;
+ const PointIndex *entry_to_point_id_map_;
+ MeshDataT mesh_data_;
+ NormalPredictionMode normal_prediction_mode_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h
new file mode 100644
index 0000000..fc82e0a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h
@@ -0,0 +1,126 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for predictions encoded by multi-parallelogram encoding scheme.
+// See the corresponding encoder for method description.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeMultiParallelogramDecoder
+ : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+
+ explicit MeshPredictionSchemeMultiParallelogramDecoder(
+ const PointAttribute *attribute)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute) {}
+ MeshPredictionSchemeMultiParallelogramDecoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data) {}
+
+ bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_MULTI_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeMultiParallelogramDecoder<DataTypeT, TransformT,
+ MeshDataT>::
+ ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int /* size */, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().Init(num_components);
+
+ // For storage of prediction values (already initialized to zero).
+ std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
+ std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
+ new DataTypeT[num_components]());
+
+ this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data);
+
+ const CornerTable *const table = this->mesh_data().corner_table();
+ const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+ for (int p = 1; p < corner_map_size; ++p) {
+ const CornerIndex start_corner_id =
+ this->mesh_data().data_to_corner_map()->at(p);
+
+ CornerIndex corner_id(start_corner_id);
+ int num_parallelograms = 0;
+ for (int i = 0; i < num_components; ++i) {
+ pred_vals[i] = static_cast<DataTypeT>(0);
+ }
+ while (corner_id != kInvalidCornerIndex) {
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, out_data,
+ num_components, parallelogram_pred_vals.get())) {
+ for (int c = 0; c < num_components; ++c) {
+ pred_vals[c] += parallelogram_pred_vals[c];
+ }
+ ++num_parallelograms;
+ }
+
+ // Proceed to the next corner attached to the vertex.
+ corner_id = table->SwingRight(corner_id);
+ if (corner_id == start_corner_id) {
+ corner_id = kInvalidCornerIndex;
+ }
+ }
+
+ const int dst_offset = p * num_components;
+ if (num_parallelograms == 0) {
+ // No parallelogram was valid.
+ // We use the last decoded point as a reference.
+ const int src_offset = (p - 1) * num_components;
+ this->transform().ComputeOriginalValue(
+ out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
+ } else {
+ // Compute the correction from the predicted value.
+ for (int c = 0; c < num_components; ++c) {
+ pred_vals[c] /= num_parallelograms;
+ }
+ this->transform().ComputeOriginalValue(
+ pred_vals.get(), in_corr + dst_offset, out_data + dst_offset);
+ }
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_
+#endif
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h
new file mode 100644
index 0000000..301b357
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h
@@ -0,0 +1,133 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+
+namespace draco {
+
+// Multi parallelogram prediction predicts attribute values using information
+// from all opposite faces to the predicted vertex, compared to the standard
+// prediction scheme, where only one opposite face is used (see
+// prediction_scheme_parallelogram.h). This approach is generally slower than
+// the standard parallelogram prediction, but it usually results in better
+// prediction (5 - 20% based on the quantization level. Better gains can be
+// achieved when more aggressive quantization is used).
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeMultiParallelogramEncoder
+ : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+
+ explicit MeshPredictionSchemeMultiParallelogramEncoder(
+ const PointAttribute *attribute)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute) {}
+ MeshPredictionSchemeMultiParallelogramEncoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data) {}
+
+ bool ComputeCorrectionValues(
+ const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) override;
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_MULTI_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeMultiParallelogramEncoder<DataTypeT, TransformT,
+ MeshDataT>::
+ ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
+ int size, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().Init(in_data, size, num_components);
+ const CornerTable *const table = this->mesh_data().corner_table();
+ const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ // For storage of prediction values (already initialized to zero).
+ std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
+ std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
+ new DataTypeT[num_components]());
+
+ // We start processing from the end because this prediction uses data from
+ // previous entries that could be overwritten when an entry is processed.
+ for (int p =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1);
+ p > 0; --p) {
+ const CornerIndex start_corner_id =
+ this->mesh_data().data_to_corner_map()->at(p);
+
+ // Go over all corners attached to the vertex and compute the predicted
+ // value from the parallelograms defined by their opposite faces.
+ CornerIndex corner_id(start_corner_id);
+ int num_parallelograms = 0;
+ for (int i = 0; i < num_components; ++i) {
+ pred_vals[i] = static_cast<DataTypeT>(0);
+ }
+ while (corner_id != kInvalidCornerIndex) {
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, in_data, num_components,
+ parallelogram_pred_vals.get())) {
+ for (int c = 0; c < num_components; ++c) {
+ pred_vals[c] += parallelogram_pred_vals[c];
+ }
+ ++num_parallelograms;
+ }
+
+ // Proceed to the next corner attached to the vertex.
+ corner_id = table->SwingRight(corner_id);
+ if (corner_id == start_corner_id) {
+ corner_id = kInvalidCornerIndex;
+ }
+ }
+ const int dst_offset = p * num_components;
+ if (num_parallelograms == 0) {
+ // No parallelogram was valid.
+ // We use the last encoded point as a reference.
+ const int src_offset = (p - 1) * num_components;
+ this->transform().ComputeCorrection(
+ in_data + dst_offset, in_data + src_offset, out_corr + dst_offset);
+ } else {
+ // Compute the correction from the predicted value.
+ for (int c = 0; c < num_components; ++c) {
+ pred_vals[c] /= num_parallelograms;
+ }
+ this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(),
+ out_corr + dst_offset);
+ }
+ }
+ // First element is always fixed because it cannot be predicted.
+ for (int i = 0; i < num_components; ++i) {
+ pred_vals[i] = static_cast<DataTypeT>(0);
+ }
+ this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr);
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h
new file mode 100644
index 0000000..4d47ddf
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h
@@ -0,0 +1,98 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+
+namespace draco {
+
+// Decoder for attribute values encoded with the standard parallelogram
+// prediction. See the description of the corresponding encoder for more
+// details.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeParallelogramDecoder
+ : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+ explicit MeshPredictionSchemeParallelogramDecoder(
+ const PointAttribute *attribute)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute) {}
+ MeshPredictionSchemeParallelogramDecoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data) {}
+
+ bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeParallelogramDecoder<DataTypeT, TransformT,
+ MeshDataT>::
+ ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int /* size */, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().Init(num_components);
+
+ const CornerTable *const table = this->mesh_data().corner_table();
+ const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ // For storage of prediction values (already initialized to zero).
+ std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
+
+ // Restore the first value.
+ this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data);
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+ for (int p = 1; p < corner_map_size; ++p) {
+ const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
+ const int dst_offset = p * num_components;
+ if (!ComputeParallelogramPrediction(p, corner_id, table,
+ *vertex_to_data_map, out_data,
+ num_components, pred_vals.get())) {
+      // Parallelogram could not be computed, possibly because some of the
+ // vertices are not valid (not encoded yet).
+      // We use the last decoded point as a reference (delta coding).
+ const int src_offset = (p - 1) * num_components;
+ this->transform().ComputeOriginalValue(
+ out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
+ } else {
+ // Apply the parallelogram prediction.
+ this->transform().ComputeOriginalValue(
+ pred_vals.get(), in_corr + dst_offset, out_data + dst_offset);
+ }
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h
new file mode 100644
index 0000000..f008019
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h
@@ -0,0 +1,111 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+
+namespace draco {
+
+// Parallelogram prediction predicts an attribute value V from three vertices
+// on the opposite face to the predicted vertex. The values on the three
+// vertices are used to construct a parallelogram V' = O - A - B, where O is the
+// value on the opposite vertex, and A, B are values on the shared vertices:
+// V
+// / \
+// / \
+// / \
+// A-------B
+// \ /
+// \ /
+// \ /
+// O
+//
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeParallelogramEncoder
+ : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+ explicit MeshPredictionSchemeParallelogramEncoder(
+ const PointAttribute *attribute)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute) {}
+ MeshPredictionSchemeParallelogramEncoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data)
+ : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data) {}
+
+ bool ComputeCorrectionValues(
+ const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) override;
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeParallelogramEncoder<DataTypeT, TransformT,
+ MeshDataT>::
+ ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
+ int size, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().Init(in_data, size, num_components);
+ // For storage of prediction values (already initialized to zero).
+ std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
+
+ // We start processing from the end because this prediction uses data from
+ // previous entries that could be overwritten when an entry is processed.
+ const CornerTable *const table = this->mesh_data().corner_table();
+ const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+ for (int p =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1);
+ p > 0; --p) {
+ const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
+ const int dst_offset = p * num_components;
+ if (!ComputeParallelogramPrediction(p, corner_id, table,
+ *vertex_to_data_map, in_data,
+ num_components, pred_vals.get())) {
+      // Parallelogram could not be computed, possibly because some of the
+ // vertices are not valid (not encoded yet).
+ // We use the last encoded point as a reference (delta coding).
+ const int src_offset = (p - 1) * num_components;
+ this->transform().ComputeCorrection(
+ in_data + dst_offset, in_data + src_offset, out_corr + dst_offset);
+ } else {
+ // Apply the parallelogram prediction.
+ this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(),
+ out_corr + dst_offset);
+ }
+ }
+ // First element is always fixed because it cannot be predicted.
+ for (int i = 0; i < num_components; ++i) {
+ pred_vals[i] = static_cast<DataTypeT>(0);
+ }
+ this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr);
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h
new file mode 100644
index 0000000..fd10fb5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h
@@ -0,0 +1,78 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared functionality for different parallelogram prediction schemes.
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
+
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// TODO(draco-eng) consolidate Vertex/next/previous queries to one call
+// (performance).
+template <class CornerTableT>
+inline void GetParallelogramEntries(
+ const CornerIndex ci, const CornerTableT *table,
+ const std::vector<int32_t> &vertex_to_data_map, int *opp_entry,
+ int *next_entry, int *prev_entry) {
+  // One vertex of the input |table| corresponds to exactly one attribute value
+ // entry. The |table| can be either CornerTable for per-vertex attributes,
+ // or MeshAttributeCornerTable for attributes with interior seams.
+ *opp_entry = vertex_to_data_map[table->Vertex(ci).value()];
+ *next_entry = vertex_to_data_map[table->Vertex(table->Next(ci)).value()];
+ *prev_entry = vertex_to_data_map[table->Vertex(table->Previous(ci)).value()];
+}
+
+// Computes parallelogram prediction for a given corner and data entry id.
+// The prediction is stored in |out_prediction|.
+// Function returns false when the prediction couldn't be computed, e.g. because
+// not all entry points were available.
+template <class CornerTableT, typename DataTypeT>
+inline bool ComputeParallelogramPrediction(
+ int data_entry_id, const CornerIndex ci, const CornerTableT *table,
+ const std::vector<int32_t> &vertex_to_data_map, const DataTypeT *in_data,
+ int num_components, DataTypeT *out_prediction) {
+ const CornerIndex oci = table->Opposite(ci);
+ if (oci == kInvalidCornerIndex) {
+ return false;
+ }
+ int vert_opp, vert_next, vert_prev;
+ GetParallelogramEntries<CornerTableT>(oci, table, vertex_to_data_map,
+ &vert_opp, &vert_next, &vert_prev);
+ if (vert_opp < data_entry_id && vert_next < data_entry_id &&
+ vert_prev < data_entry_id) {
+ // Apply the parallelogram prediction.
+ const int v_opp_off = vert_opp * num_components;
+ const int v_next_off = vert_next * num_components;
+ const int v_prev_off = vert_prev * num_components;
+ for (int c = 0; c < num_components; ++c) {
+ const int64_t in_data_next_off = in_data[v_next_off + c];
+ const int64_t in_data_prev_off = in_data[v_prev_off + c];
+ const int64_t in_data_opp_off = in_data[v_opp_off + c];
+ const int64_t result =
+ (in_data_next_off + in_data_prev_off) - in_data_opp_off;
+
+ out_prediction[c] = static_cast<DataTypeT>(result);
+ }
+ return true;
+ }
+ return false; // Not all data is available for prediction
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h
new file mode 100644
index 0000000..02cf7e6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h
@@ -0,0 +1,344 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
+
+#include <math.h>
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/core/varint_decoding.h"
+#include "draco/core/vector_d.h"
+#include "draco/draco_features.h"
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// Decoder for predictions of UV coordinates encoded by our specialized texture
+// coordinate predictor. See the corresponding encoder for more details. Note
+// that this predictor is not portable and should not be used anymore. See
+// MeshPredictionSchemeTexCoordsPortableEncoder/Decoder for a portable version
+// of this prediction scheme.
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeTexCoordsDecoder
+ : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
+ public:
+ using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
+ MeshDataT>::CorrType;
+ MeshPredictionSchemeTexCoordsDecoder(const PointAttribute *attribute,
+ const TransformT &transform,
+ const MeshDataT &mesh_data, int version)
+ : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data),
+ pos_attribute_(nullptr),
+ entry_to_point_id_map_(nullptr),
+ num_components_(0),
+ version_(version) {}
+
+ bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+
+ bool DecodePredictionData(DecoderBuffer *buffer) override;
+
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_TEX_COORDS_DEPRECATED;
+ }
+
+ bool IsInitialized() const override {
+ if (pos_attribute_ == nullptr) {
+ return false;
+ }
+ if (!this->mesh_data().IsInitialized()) {
+ return false;
+ }
+ return true;
+ }
+
+ int GetNumParentAttributes() const override { return 1; }
+
+ GeometryAttribute::Type GetParentAttributeType(int i) const override {
+ DRACO_DCHECK_EQ(i, 0);
+ (void)i;
+ return GeometryAttribute::POSITION;
+ }
+
+ bool SetParentAttribute(const PointAttribute *att) override {
+ if (att == nullptr) {
+ return false;
+ }
+ if (att->attribute_type() != GeometryAttribute::POSITION) {
+ return false; // Invalid attribute type.
+ }
+ if (att->num_components() != 3) {
+ return false; // Currently works only for 3 component positions.
+ }
+ pos_attribute_ = att;
+ return true;
+ }
+
+ protected:
+ Vector3f GetPositionForEntryId(int entry_id) const {
+ const PointIndex point_id = entry_to_point_id_map_[entry_id];
+ Vector3f pos;
+ pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
+ &pos[0]);
+ return pos;
+ }
+
+ Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const {
+ const int data_offset = entry_id * num_components_;
+ return Vector2f(static_cast<float>(data[data_offset]),
+ static_cast<float>(data[data_offset + 1]));
+ }
+
+ void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
+ int data_id);
+
+ private:
+ const PointAttribute *pos_attribute_;
+ const PointIndex *entry_to_point_id_map_;
+ std::unique_ptr<DataTypeT[]> predicted_value_;
+ int num_components_;
+ // Encoded / decoded array of UV flips.
+ std::vector<bool> orientations_;
+ int version_;
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
+ ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int /* size */, int num_components,
+ const PointIndex *entry_to_point_id_map) {
+ num_components_ = num_components;
+ entry_to_point_id_map_ = entry_to_point_id_map;
+ predicted_value_ =
+ std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
+ this->transform().Init(num_components);
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+ for (int p = 0; p < corner_map_size; ++p) {
+ const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
+ ComputePredictedValue(corner_id, out_data, p);
+
+ const int dst_offset = p * num_components;
+ this->transform().ComputeOriginalValue(
+ predicted_value_.get(), in_corr + dst_offset, out_data + dst_offset);
+ }
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
+ DecodePredictionData(DecoderBuffer *buffer) {
+ // Decode the delta coded orientations.
+ uint32_t num_orientations = 0;
+ if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ if (!buffer->Decode(&num_orientations)) {
+ return false;
+ }
+ } else {
+ if (!DecodeVarint(&num_orientations, buffer)) {
+ return false;
+ }
+ }
+ if (num_orientations == 0) {
+ return false;
+ }
+ orientations_.resize(num_orientations);
+ bool last_orientation = true;
+ RAnsBitDecoder decoder;
+ if (!decoder.StartDecoding(buffer)) {
+ return false;
+ }
+ for (uint32_t i = 0; i < num_orientations; ++i) {
+ if (!decoder.DecodeNextBit()) {
+ last_orientation = !last_orientation;
+ }
+ orientations_[i] = last_orientation;
+ }
+ decoder.EndDecoding();
+ return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
+ MeshDataT>::DecodePredictionData(buffer);
+}
+
template <typename DataTypeT, class TransformT, class MeshDataT>
void MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                          int data_id) {
  // Predicts the UV coordinate for attribute entry |data_id| (the tip corner
  // |corner_id| of the processed triangle) and stores the two predicted
  // components in |predicted_value_|. |data| holds the already decoded UVs.
  //
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id =
      this->mesh_data().corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      this->mesh_data().corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;

  int next_vert_id, prev_vert_id;
  next_vert_id =
      this->mesh_data().corner_table()->Vertex(next_corner_id).value();
  prev_vert_id =
      this->mesh_data().corner_table()->Vertex(prev_corner_id).value();

  next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id);
  prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id);

  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data);
    const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerated UV triangles.
      predicted_value_[0] = static_cast<int>(p_uv[0]);
      predicted_value_[1] = static_cast<int>(p_uv[1]);
      return;
    }

    // Get positions at all corners.
    const Vector3f tip_pos = GetPositionForEntryId(data_id);
    const Vector3f next_pos = GetPositionForEntryId(next_data_id);
    const Vector3f prev_pos = GetPositionForEntryId(prev_data_id);
    // Use the positions of the above triangle to predict the texture coordinate
    // on the tip corner C.
    // Convert the triangle into a new coordinate system defined by orthogonal
    // bases vectors S, T, where S is vector prev_pos - next_pos and T is an
    // perpendicular vector to S in the same plane as vector the
    // tip_pos - next_pos.
    // The transformed triangle in the new coordinate system is then going to
    // be represented as:
    //
    //        1 ^
    //          |
    //          |
    //          |   C
    //          |  /  \
    //          | /      \
    //          |/          \
    //          N--------------P
    //          0              1
    //
    // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is
    // at (1, 0). Our goal is to compute the position of the tip_pos point (C)
    // in this new coordinate space (s, t).
    //
    const Vector3f pn = prev_pos - next_pos;
    const Vector3f cn = tip_pos - next_pos;
    const float pn_norm2_squared = pn.SquaredNorm();
    // Coordinate s of the tip corner C is simply the dot product of the
    // normalized vectors |pn| and |cn| (normalized by the length of |pn|).
    // Since both of these vectors are normalized, we don't need to perform the
    // normalization explicitly and instead we can just use the squared norm
    // of |pn| as a denominator of the resulting dot product of non normalized
    // vectors.
    float s, t;
    // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
    // the same positions (e.g. because they were quantized to the same
    // location). Bitstreams older than version 1.2 always take the first
    // branch, even when the norm is 0 — the resulting inf/NaN values are kept
    // for backward compatibility with streams produced by old encoders.
    if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) {
      s = pn.Dot(cn) / pn_norm2_squared;
      // To get the coordinate t, we can use formula:
      //      t = |C-N - (P-N) * s| / |P-N|
      // Do not use std::sqrt to avoid changes in the bitstream.
      t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
    } else {
      s = 0;
      t = 0;
    }

    // Now we need to transform the point (s, t) to the texture coordinate space
    // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
    // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can
    // be used to define transformation from the normalized coordinate system
    // to the texture coordinate system using a 3x3 affine matrix M:
    //
    //  M = | PN_UV[0]  -PN_UV[1]  N_UV[0] |
    //      | PN_UV[1]   PN_UV[0]  N_UV[1] |
    //      | 0          0         1       |
    //
    // The predicted point C_UV in the texture space is then equal to
    // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped
    // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t)
    // as the prediction.
    const Vector2f pn_uv = p_uv - n_uv;
    const float pnus = pn_uv[0] * s + n_uv[0];
    const float pnut = pn_uv[0] * t;
    const float pnvs = pn_uv[1] * s + n_uv[1];
    const float pnvt = pn_uv[1] * t;
    Vector2f predicted_uv;

    // When decoding the data, we already know which orientation to use.
    // The encoder pushed orientations while processing entries in reverse
    // order, so the decoder (processing forward) consumes them from the back.
    const bool orientation = orientations_.back();
    orientations_.pop_back();
    if (orientation)
      predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut);
    else
      predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut);

    if (std::is_integral<DataTypeT>::value) {
      // Round the predicted value for integer types. NaN predictions
      // (possible on legacy streams where the division by zero above is
      // allowed) are mapped to the INT_MIN sentinel so the decode stays
      // deterministic instead of hitting an undefined float->int cast.
      if (std::isnan(predicted_uv[0])) {
        predicted_value_[0] = INT_MIN;
      } else {
        predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
      }
      if (std::isnan(predicted_uv[1])) {
        predicted_value_[1] = INT_MIN;
      } else {
        predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
      }
    } else {
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
    }
    return;
  }
  // Else we don't have available textures on both corners. For such case we
  // can't use positions for predicting the uv value and we resort to delta
  // coding.
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * num_components_;
  }
  // NOTE(review): the branch above is effectively overridden by the if/else
  // below (an `else if` may have been intended), so the previous-corner value
  // is never actually used as the prediction. The encoder mirrors the exact
  // same logic, so both sides stay in sync — this must not be "fixed" on one
  // side only without breaking existing bitstreams.
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * num_components_;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * num_components_;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < num_components_; ++i) {
        predicted_value_[i] = 0;
      }
      return;
    }
  }
  for (int i = 0; i < num_components_; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
+#endif
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h
new file mode 100644
index 0000000..813b72a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h
@@ -0,0 +1,318 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_
+
+#include <math.h>
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/core/varint_encoding.h"
+#include "draco/core/vector_d.h"
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
// Prediction scheme designed for predicting texture coordinates from known
// spatial position of vertices. For good parametrization, the ratios between
// triangle edge lengths should be about the same in both the spatial and UV
// coordinate spaces, which makes the positions a good predictor for the UV
// coordinates. This is the deprecated (non-portable) variant; the prediction
// method it reports is MESH_PREDICTION_TEX_COORDS_DEPRECATED.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsEncoder(const PointAttribute *attribute,
                                       const TransformT &transform,
                                       const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        num_components_(0) {}

  // Computes per-entry prediction corrections for |in_data| into |out_corr|.
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;

  // Writes the orientation flags collected during prediction to |buffer|.
  bool EncodePredictionData(EncoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_DEPRECATED;
  }

  // Ready only after a position parent attribute and mesh data are set.
  bool IsInitialized() const override {
    if (pos_attribute_ == nullptr) {
      return false;
    }
    if (!this->mesh_data().IsInitialized()) {
      return false;
    }
    return true;
  }

  // The scheme predicts UVs from exactly one parent: the POSITION attribute.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  bool SetParentAttribute(const PointAttribute *att) override {
    if (att->attribute_type() != GeometryAttribute::POSITION) {
      return false;  // Invalid attribute type.
    }
    if (att->num_components() != 3) {
      return false;  // Currently works only for 3 component positions.
    }
    pos_attribute_ = att;
    return true;
  }

 protected:
  // Returns the 3D position mapped to attribute entry |entry_id|.
  Vector3f GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    Vector3f pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }

  // Reads the two UV components of entry |entry_id| out of the raw |data|.
  Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const {
    const int data_offset = entry_id * num_components_;
    return Vector2f(static_cast<float>(data[data_offset]),
                    static_cast<float>(data[data_offset + 1]));
  }

  // Fills |predicted_value_| with the prediction for entry |data_id|.
  void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);

 private:
  // Parent position attribute; set via SetParentAttribute(), not owned.
  const PointAttribute *pos_attribute_;
  // Maps attribute entry ids to point ids; owned by the caller of
  // ComputeCorrectionValues().
  const PointIndex *entry_to_point_id_map_;
  // Scratch buffer holding the most recent prediction (|num_components_|
  // values).
  std::unique_ptr<DataTypeT[]> predicted_value_;
  int num_components_;
  // Encoded / decoded array of UV flips.
  std::vector<bool> orientations_;
};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
+ ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) {
+ num_components_ = num_components;
+ entry_to_point_id_map_ = entry_to_point_id_map;
+ predicted_value_ =
+ std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
+ this->transform().Init(in_data, size, num_components);
+ // We start processing from the end because this prediction uses data from
+ // previous entries that could be overwritten when an entry is processed.
+ for (int p =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1;
+ p >= 0; --p) {
+ const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
+ ComputePredictedValue(corner_id, in_data, p);
+
+ const int dst_offset = p * num_components;
+ this->transform().ComputeCorrection(
+ in_data + dst_offset, predicted_value_.get(), out_corr + dst_offset);
+ }
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
+ EncodePredictionData(EncoderBuffer *buffer) {
+ // Encode the delta-coded orientations using arithmetic coding.
+ const uint32_t num_orientations = static_cast<uint32_t>(orientations_.size());
+ EncodeVarint(num_orientations, buffer);
+ bool last_orientation = true;
+ RAnsBitEncoder encoder;
+ encoder.StartEncoding();
+ for (bool orientation : orientations_) {
+ encoder.EncodeBit(orientation == last_orientation);
+ last_orientation = orientation;
+ }
+ encoder.EndEncoding(buffer);
+ return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
+ MeshDataT>::EncodePredictionData(buffer);
+}
+
template <typename DataTypeT, class TransformT, class MeshDataT>
void MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                          int data_id) {
  // Predicts the UV coordinate for attribute entry |data_id| (the tip corner
  // |corner_id| of the processed triangle), stores the two predicted
  // components in |predicted_value_|, and records the chosen orientation in
  // |orientations_|.
  //
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id =
      this->mesh_data().corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      this->mesh_data().corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;

  int next_vert_id, prev_vert_id;
  next_vert_id =
      this->mesh_data().corner_table()->Vertex(next_corner_id).value();
  prev_vert_id =
      this->mesh_data().corner_table()->Vertex(prev_corner_id).value();

  next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id);
  prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id);

  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data);
    const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerated UV triangles.
      predicted_value_[0] = static_cast<int>(p_uv[0]);
      predicted_value_[1] = static_cast<int>(p_uv[1]);
      return;
    }

    // Get positions at all corners.
    const Vector3f tip_pos = GetPositionForEntryId(data_id);
    const Vector3f next_pos = GetPositionForEntryId(next_data_id);
    const Vector3f prev_pos = GetPositionForEntryId(prev_data_id);
    // Use the positions of the above triangle to predict the texture coordinate
    // on the tip corner C.
    // Convert the triangle into a new coordinate system defined by orthogonal
    // bases vectors S, T, where S is vector prev_pos - next_pos and T is an
    // perpendicular vector to S in the same plane as vector the
    // tip_pos - next_pos.
    // The transformed triangle in the new coordinate system is then going to
    // be represented as:
    //
    //        1 ^
    //          |
    //          |
    //          |   C
    //          |  /  \
    //          | /      \
    //          |/          \
    //          N--------------P
    //          0              1
    //
    // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is
    // at (1, 0). Our goal is to compute the position of the tip_pos point (C)
    // in this new coordinate space (s, t).
    //
    const Vector3f pn = prev_pos - next_pos;
    const Vector3f cn = tip_pos - next_pos;
    const float pn_norm2_squared = pn.SquaredNorm();
    // Coordinate s of the tip corner C is simply the dot product of the
    // normalized vectors |pn| and |cn| (normalized by the length of |pn|).
    // Since both of these vectors are normalized, we don't need to perform the
    // normalization explicitly and instead we can just use the squared norm
    // of |pn| as a denominator of the resulting dot product of non normalized
    // vectors.
    float s, t;
    // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
    // the same positions (e.g. because they were quantized to the same
    // location).
    if (pn_norm2_squared > 0) {
      s = pn.Dot(cn) / pn_norm2_squared;
      // To get the coordinate t, we can use formula:
      //      t = |C-N - (P-N) * s| / |P-N|
      // Do not use std::sqrt to avoid changes in the bitstream.
      t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
    } else {
      s = 0;
      t = 0;
    }

    // Now we need to transform the point (s, t) to the texture coordinate space
    // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
    // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can
    // be used to define transformation from the normalized coordinate system
    // to the texture coordinate system using a 3x3 affine matrix M:
    //
    //  M = | PN_UV[0]  -PN_UV[1]  N_UV[0] |
    //      | PN_UV[1]   PN_UV[0]  N_UV[1] |
    //      | 0          0         1       |
    //
    // The predicted point C_UV in the texture space is then equal to
    // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped
    // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t)
    // as the prediction.
    const Vector2f pn_uv = p_uv - n_uv;
    const float pnus = pn_uv[0] * s + n_uv[0];
    const float pnut = pn_uv[0] * t;
    const float pnvs = pn_uv[1] * s + n_uv[1];
    const float pnvt = pn_uv[1] * t;
    Vector2f predicted_uv;

    // When encoding compute both possible vectors and determine which one
    // results in a better prediction. The chosen flag is pushed to
    // |orientations_|; since entries are processed in reverse order, the
    // decoder consumes these flags from the back of its vector.
    const Vector2f predicted_uv_0(pnus - pnvt, pnvs + pnut);
    const Vector2f predicted_uv_1(pnus + pnvt, pnvs - pnut);
    const Vector2f c_uv = GetTexCoordForEntryId(data_id, data);
    if ((c_uv - predicted_uv_0).SquaredNorm() <
        (c_uv - predicted_uv_1).SquaredNorm()) {
      predicted_uv = predicted_uv_0;
      orientations_.push_back(true);
    } else {
      predicted_uv = predicted_uv_1;
      orientations_.push_back(false);
    }
    if (std::is_integral<DataTypeT>::value) {
      // Round the predicted value for integer types.
      // NOTE(review): unlike the decoder, there is no NaN guard here before
      // the float->int cast — presumably NaN cannot occur because the
      // division by zero is excluded above; confirm against the decoder's
      // INT_MIN handling.
      predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
      predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
    } else {
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
    }
    return;
  }
  // Else we don't have available textures on both corners. For such case we
  // can't use positions for predicting the uv value and we resort to delta
  // coding.
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * num_components_;
  }
  // NOTE(review): the branch above is effectively overridden by the if/else
  // below (an `else if` may have been intended), so the previous-corner value
  // is never actually used. The decoder mirrors the exact same logic, so both
  // sides stay in sync — this must not be "fixed" on one side only without
  // breaking bitstream compatibility.
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * num_components_;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * num_components_;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < num_components_; ++i) {
        predicted_value_[i] = 0;
      }
      return;
    }
  }
  for (int i = 0; i < num_components_; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
}
+
+} // namespace draco
+
+#endif  // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h
new file mode 100644
index 0000000..83d4966
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h
@@ -0,0 +1,143 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+
+namespace draco {
+
// Decoder for predictions of UV coordinates encoded by our specialized and
// portable texture coordinate predictor. See the corresponding encoder for
// more details. The geometric work is delegated to
// MeshPredictionSchemeTexCoordsPortablePredictor; this class only drives the
// per-entry loop and decodes the orientation flags.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsPortableDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsPortableDecoder(const PointAttribute *attribute,
                                               const TransformT &transform,
                                               const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}

  // Reconstructs original UV values in |out_data| from the corrections in
  // |in_corr|. Fails if |num_components| is not the predictor's component
  // count (2).
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;

  // Reads the orientation flags written by the encoder from |buffer|.
  bool DecodePredictionData(DecoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_PORTABLE;
  }

  // Ready only after the position parent attribute and mesh data are set.
  bool IsInitialized() const override {
    if (!predictor_.IsInitialized()) {
      return false;
    }
    if (!this->mesh_data().IsInitialized()) {
      return false;
    }
    return true;
  }

  // UVs are predicted from exactly one parent: the POSITION attribute.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  bool SetParentAttribute(const PointAttribute *att) override {
    if (!att || att->attribute_type() != GeometryAttribute::POSITION) {
      return false;  // Invalid attribute type.
    }
    if (att->num_components() != 3) {
      return false;  // Currently works only for 3 component positions.
    }
    predictor_.SetPositionAttribute(*att);
    return true;
  }

 private:
  // Shared encoder/decoder prediction logic (positions -> predicted UVs).
  MeshPredictionSchemeTexCoordsPortablePredictor<DataTypeT, MeshDataT>
      predictor_;
};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsPortableDecoder<
+ DataTypeT, TransformT,
+ MeshDataT>::ComputeOriginalValues(const CorrType *in_corr,
+ DataTypeT *out_data, int /* size */,
+ int num_components,
+ const PointIndex *entry_to_point_id_map) {
+ if (num_components != MeshPredictionSchemeTexCoordsPortablePredictor<
+ DataTypeT, MeshDataT>::kNumComponents) {
+ return false;
+ }
+ predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
+ this->transform().Init(num_components);
+
+ const int corner_map_size =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size());
+ for (int p = 0; p < corner_map_size; ++p) {
+ const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
+ if (!predictor_.template ComputePredictedValue<false>(corner_id, out_data,
+ p)) {
+ return false;
+ }
+
+ const int dst_offset = p * num_components;
+ this->transform().ComputeOriginalValue(predictor_.predicted_value(),
+ in_corr + dst_offset,
+ out_data + dst_offset);
+ }
+ return true;
+}
+
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsPortableDecoder<
    DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
                                                                *buffer) {
  // Decode the delta coded orientations.
  int32_t num_orientations = 0;
  if (!buffer->Decode(&num_orientations) || num_orientations < 0) {
    return false;
  }
  // NOTE(review): |num_orientations| is read from the bitstream and only
  // checked for negativity; a corrupt stream can still request a very large
  // allocation here — confirm this is bounded elsewhere.
  predictor_.ResizeOrientations(num_orientations);
  // A decoded bit of 1 keeps the previous orientation; 0 flips it. The
  // sequence starts implicitly from |true|, matching the encoder.
  bool last_orientation = true;
  RAnsBitDecoder decoder;
  if (!decoder.StartDecoding(buffer)) {
    return false;
  }
  for (int i = 0; i < num_orientations; ++i) {
    if (!decoder.DecodeNextBit()) {
      last_orientation = !last_orientation;
    }
    predictor_.set_orientation(i, last_orientation);
  }
  decoder.EndDecoding();
  // Let the base class decode any remaining (e.g. transform) data.
  return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                     MeshDataT>::DecodePredictionData(buffer);
}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h
new file mode 100644
index 0000000..741ec66
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h
@@ -0,0 +1,133 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+
+namespace draco {
+
// Prediction scheme designed for predicting texture coordinates from known
// spatial position of vertices. For isometric parametrizations, the ratios
// between triangle edge lengths should be about the same in both the spatial
// and UV coordinate spaces, which makes the positions a good predictor for the
// UV coordinates. Note that this may not be the optimal approach for other
// parametrizations such as projective ones. The geometric work is delegated to
// MeshPredictionSchemeTexCoordsPortablePredictor; this class only drives the
// per-entry loop and encodes the orientation flags.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsPortableEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsPortableEncoder(const PointAttribute *attribute,
                                               const TransformT &transform,
                                               const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}

  // Computes per-entry prediction corrections for |in_data| into |out_corr|.
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;

  // Writes the orientation flags collected during prediction to |buffer|.
  bool EncodePredictionData(EncoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_PORTABLE;
  }

  // Ready only after the position parent attribute and mesh data are set.
  bool IsInitialized() const override {
    if (!predictor_.IsInitialized()) {
      return false;
    }
    if (!this->mesh_data().IsInitialized()) {
      return false;
    }
    return true;
  }

  // UVs are predicted from exactly one parent: the POSITION attribute.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  bool SetParentAttribute(const PointAttribute *att) override {
    if (att->attribute_type() != GeometryAttribute::POSITION) {
      return false;  // Invalid attribute type.
    }
    if (att->num_components() != 3) {
      return false;  // Currently works only for 3 component positions.
    }
    predictor_.SetPositionAttribute(*att);
    return true;
  }

 private:
  // Shared encoder/decoder prediction logic (positions -> predicted UVs).
  MeshPredictionSchemeTexCoordsPortablePredictor<DataTypeT, MeshDataT>
      predictor_;
};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsPortableEncoder<DataTypeT, TransformT,
+ MeshDataT>::
+ ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) {
+ predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
+ this->transform().Init(in_data, size, num_components);
+ // We start processing from the end because this prediction uses data from
+ // previous entries that could be overwritten when an entry is processed.
+ for (int p =
+ static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1);
+ p >= 0; --p) {
+ const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
+ predictor_.template ComputePredictedValue<true>(corner_id, in_data, p);
+
+ const int dst_offset = p * num_components;
+ this->transform().ComputeCorrection(in_data + dst_offset,
+ predictor_.predicted_value(),
+ out_corr + dst_offset);
+ }
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeTexCoordsPortableEncoder<
+ DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
+ *buffer) {
+ // Encode the delta-coded orientations using arithmetic coding.
+ const int32_t num_orientations = predictor_.num_orientations();
+ buffer->Encode(num_orientations);
+ bool last_orientation = true;
+ RAnsBitEncoder encoder;
+ encoder.StartEncoding();
+ for (int i = 0; i < num_orientations; ++i) {
+ const bool orientation = predictor_.orientation(i);
+ encoder.EncodeBit(orientation == last_orientation);
+ last_orientation = orientation;
+ }
+ encoder.EndEncoding(buffer);
+ return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
+ MeshDataT>::EncodePredictionData(buffer);
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h
new file mode 100644
index 0000000..f05e5dd
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h
@@ -0,0 +1,263 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_
+
+#include <math.h>
+
+#include "draco/attributes/point_attribute.h"
+#include "draco/core/math_utils.h"
+#include "draco/core/vector_d.h"
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// Predictor functionality used for portable UV prediction by both encoder and
+// decoder.
+template <typename DataTypeT, class MeshDataT>
+class MeshPredictionSchemeTexCoordsPortablePredictor {
+ public:
+ static constexpr int kNumComponents = 2;
+
+ explicit MeshPredictionSchemeTexCoordsPortablePredictor(const MeshDataT &md)
+ : pos_attribute_(nullptr),
+ entry_to_point_id_map_(nullptr),
+ mesh_data_(md) {}
+ void SetPositionAttribute(const PointAttribute &position_attribute) {
+ pos_attribute_ = &position_attribute;
+ }
+ void SetEntryToPointIdMap(const PointIndex *map) {
+ entry_to_point_id_map_ = map;
+ }
+ bool IsInitialized() const { return pos_attribute_ != nullptr; }
+
+ VectorD<int64_t, 3> GetPositionForEntryId(int entry_id) const {
+ const PointIndex point_id = entry_to_point_id_map_[entry_id];
+ VectorD<int64_t, 3> pos;
+ pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
+ &pos[0]);
+ return pos;
+ }
+
+ VectorD<int64_t, 2> GetTexCoordForEntryId(int entry_id,
+ const DataTypeT *data) const {
+ const int data_offset = entry_id * kNumComponents;
+ return VectorD<int64_t, 2>(data[data_offset], data[data_offset + 1]);
+ }
+
+ // Computes predicted UV coordinates on a given corner. The coordinates are
+ // stored in |predicted_value_| member.
+ template <bool is_encoder_t>
+ bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
+ int data_id);
+
+ const DataTypeT *predicted_value() const { return predicted_value_; }
+ bool orientation(int i) const { return orientations_[i]; }
+ void set_orientation(int i, bool v) { orientations_[i] = v; }
+ size_t num_orientations() const { return orientations_.size(); }
+ void ResizeOrientations(int num_orientations) {
+ orientations_.resize(num_orientations);
+ }
+
+ private:
+ const PointAttribute *pos_attribute_;
+ const PointIndex *entry_to_point_id_map_;
+ DataTypeT predicted_value_[kNumComponents];
+ // Encoded / decoded array of UV flips.
+ // TODO(ostava): We should remove this and replace this with in-place encoding
+ // and decoding to avoid unnecessary copy.
+ std::vector<bool> orientations_;
+ MeshDataT mesh_data_;
+};
+
+template <typename DataTypeT, class MeshDataT>
+template <bool is_encoder_t>
+bool MeshPredictionSchemeTexCoordsPortablePredictor<
+ DataTypeT, MeshDataT>::ComputePredictedValue(CornerIndex corner_id,
+ const DataTypeT *data,
+ int data_id) {
+ // Compute the predicted UV coordinate from the positions on all corners
+ // of the processed triangle. For the best prediction, the UV coordinates
+ // on the next/previous corners need to be already encoded/decoded.
+ const CornerIndex next_corner_id = mesh_data_.corner_table()->Next(corner_id);
+ const CornerIndex prev_corner_id =
+ mesh_data_.corner_table()->Previous(corner_id);
+ // Get the encoded data ids from the next and previous corners.
+ // The data id is the encoding order of the UV coordinates.
+ int next_data_id, prev_data_id;
+
+ int next_vert_id, prev_vert_id;
+ next_vert_id = mesh_data_.corner_table()->Vertex(next_corner_id).value();
+ prev_vert_id = mesh_data_.corner_table()->Vertex(prev_corner_id).value();
+
+ next_data_id = mesh_data_.vertex_to_data_map()->at(next_vert_id);
+ prev_data_id = mesh_data_.vertex_to_data_map()->at(prev_vert_id);
+
+ if (prev_data_id < data_id && next_data_id < data_id) {
+ // Both other corners have available UV coordinates for prediction.
+ const VectorD<int64_t, 2> n_uv = GetTexCoordForEntryId(next_data_id, data);
+ const VectorD<int64_t, 2> p_uv = GetTexCoordForEntryId(prev_data_id, data);
+ if (p_uv == n_uv) {
+ // We cannot do a reliable prediction on degenerated UV triangles.
+ predicted_value_[0] = p_uv[0];
+ predicted_value_[1] = p_uv[1];
+ return true;
+ }
+
+ // Get positions at all corners.
+ const VectorD<int64_t, 3> tip_pos = GetPositionForEntryId(data_id);
+ const VectorD<int64_t, 3> next_pos = GetPositionForEntryId(next_data_id);
+ const VectorD<int64_t, 3> prev_pos = GetPositionForEntryId(prev_data_id);
+ // We use the positions of the above triangle to predict the texture
+ // coordinate on the tip corner C.
+ // To convert the triangle into the UV coordinate system we first compute
+ // position X on the vector |prev_pos - next_pos| that is the projection of
+ // point C onto vector |prev_pos - next_pos|:
+ //
+ // C
+ // /. \
+ // / . \
+ // / . \
+ // N---X----------P
+ //
+ // Where next_pos is point (N), prev_pos is point (P) and tip_pos is the
+ // position of predicted coordinate (C).
+ //
+ const VectorD<int64_t, 3> pn = prev_pos - next_pos;
+ const uint64_t pn_norm2_squared = pn.SquaredNorm();
+ if (pn_norm2_squared != 0) {
+ // Compute the projection of C onto PN by computing dot product of CN with
+ // PN and normalizing it by length of PN. This gives us a factor |s| where
+ // |s = PN.Dot(CN) / PN.SquaredNorm2()|. This factor can be used to
+ // compute X in UV space |X_UV| as |X_UV = N_UV + s * PN_UV|.
+ const VectorD<int64_t, 3> cn = tip_pos - next_pos;
+ const int64_t cn_dot_pn = pn.Dot(cn);
+
+ const VectorD<int64_t, 2> pn_uv = p_uv - n_uv;
+ // Because we perform all computations with integers, we don't explicitly
+ // compute the normalized factor |s|, but rather we perform all operations
+ // over UV vectors in a non-normalized coordinate system scaled with a
+ // scaling factor |pn_norm2_squared|:
+ //
+ // x_uv = X_UV * PN.Norm2Squared()
+ //
+ const VectorD<int64_t, 2> x_uv =
+ n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv);
+
+ const int64_t pn_absmax_element =
+ std::max(std::max(std::abs(pn[0]), std::abs(pn[1])), std::abs(pn[2]));
+ if (cn_dot_pn > std::numeric_limits<int64_t>::max() / pn_absmax_element) {
+ // return false if squared length calculation would overflow.
+ return false;
+ }
+
+ // Compute squared length of vector CX in position coordinate system:
+ const VectorD<int64_t, 3> x_pos =
+ next_pos + (cn_dot_pn * pn) / pn_norm2_squared;
+ const uint64_t cx_norm2_squared = (tip_pos - x_pos).SquaredNorm();
+
+ // Compute vector CX_UV in the uv space by rotating vector PN_UV by 90
+ // degrees and scaling it with factor CX.Norm2() / PN.Norm2():
+ //
+ // CX_UV = (CX.Norm2() / PN.Norm2()) * Rot(PN_UV)
+ //
+ // To preserve precision, we perform all operations in scaled space as
+ // explained above, so we want the final vector to be:
+ //
+ // cx_uv = CX_UV * PN.Norm2Squared()
+ //
+ // We can then rewrite the formula as:
+ //
+ // cx_uv = CX.Norm2() * PN.Norm2() * Rot(PN_UV)
+ //
+ VectorD<int64_t, 2> cx_uv(pn_uv[1], -pn_uv[0]); // Rotated PN_UV.
+ // Compute CX.Norm2() * PN.Norm2()
+ const uint64_t norm_squared =
+ IntSqrt(cx_norm2_squared * pn_norm2_squared);
+ // Final cx_uv in the scaled coordinate space.
+ cx_uv = cx_uv * norm_squared;
+
+ // Predicted uv coordinate is then computed by either adding or
+ // subtracting CX_UV to/from X_UV.
+ VectorD<int64_t, 2> predicted_uv;
+ if (is_encoder_t) {
+ // When encoding, compute both possible vectors and determine which one
+ // results in a better prediction.
+ // Both vectors need to be transformed back from the scaled space to
+ // the real UV coordinate space.
+ const VectorD<int64_t, 2> predicted_uv_0((x_uv + cx_uv) /
+ pn_norm2_squared);
+ const VectorD<int64_t, 2> predicted_uv_1((x_uv - cx_uv) /
+ pn_norm2_squared);
+ const VectorD<int64_t, 2> c_uv = GetTexCoordForEntryId(data_id, data);
+ if ((c_uv - predicted_uv_0).SquaredNorm() <
+ (c_uv - predicted_uv_1).SquaredNorm()) {
+ predicted_uv = predicted_uv_0;
+ orientations_.push_back(true);
+ } else {
+ predicted_uv = predicted_uv_1;
+ orientations_.push_back(false);
+ }
+ } else {
+ // When decoding the data, we already know which orientation to use.
+ if (orientations_.empty()) {
+ return false;
+ }
+ const bool orientation = orientations_.back();
+ orientations_.pop_back();
+ if (orientation) {
+ predicted_uv = (x_uv + cx_uv) / pn_norm2_squared;
+ } else {
+ predicted_uv = (x_uv - cx_uv) / pn_norm2_squared;
+ }
+ }
+ predicted_value_[0] = static_cast<int>(predicted_uv[0]);
+ predicted_value_[1] = static_cast<int>(predicted_uv[1]);
+ return true;
+ }
+ }
+ // Else we don't have available textures on both corners or the position data
+ // is invalid. For such cases we can't use positions for predicting the uv
+ // value and we resort to delta coding.
+ int data_offset = 0;
+ if (prev_data_id < data_id) {
+ // Use the value on the previous corner as the prediction.
+ data_offset = prev_data_id * kNumComponents;
+ }
+ if (next_data_id < data_id) {
+ // Use the value on the next corner as the prediction.
+ data_offset = next_data_id * kNumComponents;
+ } else {
+ // None of the other corners have a valid value. Use the last encoded value
+ // as the prediction if possible.
+ if (data_id > 0) {
+ data_offset = (data_id - 1) * kNumComponents;
+ } else {
+ // We are encoding the first value. Predict 0.
+ for (int i = 0; i < kNumComponents; ++i) {
+ predicted_value_[i] = 0;
+ }
+ return true;
+ }
+ }
+ for (int i = 0; i < kNumComponents; ++i) {
+ predicted_value_[i] = data[data_offset + i];
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h
new file mode 100644
index 0000000..064e1b4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h
@@ -0,0 +1,90 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_
+
+#include <type_traits>
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h"
+
+// Prediction schemes can be used during encoding and decoding of vertex
+// attributes to predict attribute values based on the previously
+// encoded/decoded data. The differences between the original and predicted
+// attribute values are used to compute correction values that can be usually
+// encoded with fewer bits compared to the original data.
+namespace draco {
+
+// Abstract base class for typed prediction schemes. It provides basic access
+// to the encoded attribute and to the supplied prediction transform.
+template <typename DataTypeT,
+ class TransformT =
+ PredictionSchemeDecodingTransform<DataTypeT, DataTypeT>>
+class PredictionSchemeDecoder : public PredictionSchemeTypedDecoderInterface<
+ DataTypeT, typename TransformT::CorrType> {
+ public:
+ typedef DataTypeT DataType;
+ typedef TransformT Transform;
+ // Correction type needs to be defined in the prediction transform class.
+ typedef typename Transform::CorrType CorrType;
+ explicit PredictionSchemeDecoder(const PointAttribute *attribute)
+ : PredictionSchemeDecoder(attribute, Transform()) {}
+ PredictionSchemeDecoder(const PointAttribute *attribute,
+ const Transform &transform)
+ : attribute_(attribute), transform_(transform) {}
+
+ bool DecodePredictionData(DecoderBuffer *buffer) override {
+ if (!transform_.DecodeTransformData(buffer)) {
+ return false;
+ }
+ return true;
+ }
+
+ const PointAttribute *GetAttribute() const override { return attribute(); }
+
+ // Returns the number of parent attributes that are needed for the prediction.
+ int GetNumParentAttributes() const override { return 0; }
+
+ // Returns the type of each of the parent attribute.
+ GeometryAttribute::Type GetParentAttributeType(int /* i */) const override {
+ return GeometryAttribute::INVALID;
+ }
+
+ // Sets the required parent attribute.
+ bool SetParentAttribute(const PointAttribute * /* att */) override {
+ return false;
+ }
+
+ bool AreCorrectionsPositive() override {
+ return transform_.AreCorrectionsPositive();
+ }
+
+ PredictionSchemeTransformType GetTransformType() const override {
+ return transform_.GetType();
+ }
+
+ protected:
+ inline const PointAttribute *attribute() const { return attribute_; }
+ inline const Transform &transform() const { return transform_; }
+ inline Transform &transform() { return transform_; }
+
+ private:
+ const PointAttribute *attribute_;
+ Transform transform_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h
new file mode 100644
index 0000000..cf2a6ba
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h
@@ -0,0 +1,194 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for creating prediction schemes for decoders using the provided
+// prediction method id.
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h"
+#include "draco/draco_features.h"
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h"
+#endif
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
+#include "draco/compression/mesh/mesh_decoder.h"
+
+namespace draco {
+
+// Factory class for creating mesh prediction schemes. The factory implements
+// operator() that is used to create an appropriate mesh prediction scheme in
+// CreateMeshPredictionScheme() function in prediction_scheme_factory.h
+template <typename DataTypeT>
+struct MeshPredictionSchemeDecoderFactory {
+ // Operator () specialized for the wrap transform. Wrap transform can be used
+ // for all mesh prediction schemes. The specialization is done in compile time
+ // to prevent instantiations of unneeded combinations of prediction schemes +
+ // prediction transforms.
+ template <class TransformT, class MeshDataT,
+ PredictionSchemeTransformType Method>
+ struct DispatchFunctor {
+ std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
+ PredictionSchemeMethod method, const PointAttribute *attribute,
+ const TransformT &transform, const MeshDataT &mesh_data,
+ uint16_t bitstream_version) {
+ if (method == MESH_PREDICTION_PARALLELOGRAM) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeParallelogramDecoder<DataTypeT, TransformT,
+ MeshDataT>(
+ attribute, transform, mesh_data));
+ }
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ else if (method == MESH_PREDICTION_MULTI_PARALLELOGRAM) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeMultiParallelogramDecoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+#endif
+ else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ else if (method == MESH_PREDICTION_TEX_COORDS_DEPRECATED) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT,
+ MeshDataT>(
+ attribute, transform, mesh_data, bitstream_version));
+ }
+#endif
+ else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeTexCoordsPortableDecoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+ else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeGeometricNormalDecoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+#endif
+ return nullptr;
+ }
+ };
+
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+ // Operator () specialized for normal octahedron transforms. These transforms
+ // are currently used only by the geometric normal prediction scheme (the
+ // transform is also used by delta coding, but delta predictor is not
+ // constructed in this function).
+ template <class TransformT, class MeshDataT>
+ struct DispatchFunctor<TransformT, MeshDataT,
+ PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED> {
+ std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
+ PredictionSchemeMethod method, const PointAttribute *attribute,
+ const TransformT &transform, const MeshDataT &mesh_data,
+ uint16_t bitstream_version) {
+ if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeGeometricNormalDecoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+ return nullptr;
+ }
+ };
+ template <class TransformT, class MeshDataT>
+ struct DispatchFunctor<TransformT, MeshDataT,
+ PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON> {
+ std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
+ PredictionSchemeMethod method, const PointAttribute *attribute,
+ const TransformT &transform, const MeshDataT &mesh_data,
+ uint16_t bitstream_version) {
+ if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeGeometricNormalDecoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+ return nullptr;
+ }
+ };
+#endif
+
+ template <class TransformT, class MeshDataT>
+ std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
+ PredictionSchemeMethod method, const PointAttribute *attribute,
+ const TransformT &transform, const MeshDataT &mesh_data,
+ uint16_t bitstream_version) {
+ return DispatchFunctor<TransformT, MeshDataT, TransformT::GetType()>()(
+ method, attribute, transform, mesh_data, bitstream_version);
+ }
+};
+
+// Creates a prediction scheme for a given decoder and given prediction method.
+// The prediction schemes are automatically initialized with decoder specific
+// data if needed.
+template <typename DataTypeT, class TransformT>
+std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>
+CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id,
+ const PointCloudDecoder *decoder,
+ const TransformT &transform) {
+ if (method == PREDICTION_NONE) {
+ return nullptr;
+ }
+ const PointAttribute *const att = decoder->point_cloud()->attribute(att_id);
+ if (decoder->GetGeometryType() == TRIANGULAR_MESH) {
+    // Cast the decoder to mesh decoder. This is not necessarily safe if some
+    // other decoder decides to use TRIANGULAR_MESH as the return type, but
+    // unfortunately there is no nice workaround for this without using
+    // RTTI (double dispatch and similar concepts will not work because of the
+ // template nature of the prediction schemes).
+ const MeshDecoder *const mesh_decoder =
+ static_cast<const MeshDecoder *>(decoder);
+
+ auto ret = CreateMeshPredictionScheme<
+ MeshDecoder, PredictionSchemeDecoder<DataTypeT, TransformT>,
+ MeshPredictionSchemeDecoderFactory<DataTypeT>>(
+ mesh_decoder, method, att_id, transform, decoder->bitstream_version());
+ if (ret) {
+ return ret;
+ }
+ // Otherwise try to create another prediction scheme.
+ }
+ // Create delta decoder.
+ return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
+ new PredictionSchemeDeltaDecoder<DataTypeT, TransformT>(att, transform));
+}
+
+// Create a prediction scheme using a default transform constructor.
+template <typename DataTypeT, class TransformT>
+std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>
+CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id,
+ const PointCloudDecoder *decoder) {
+ return CreatePredictionSchemeForDecoder<DataTypeT, TransformT>(
+ method, att_id, decoder, TransformT());
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h
new file mode 100644
index 0000000..6f19f7f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
+#include "draco/core/decoder_buffer.h"
+
+// Prediction schemes can be used during encoding and decoding of attributes
+// to predict attribute values based on the previously encoded/decoded data.
+// See prediction_scheme.h for more details.
+namespace draco {
+
+// Abstract interface for all prediction schemes used during attribute decoding.
+class PredictionSchemeDecoderInterface : public PredictionSchemeInterface {
+ public:
+ // Method that can be used to decode any prediction scheme specific data
+ // from the input buffer.
+ virtual bool DecodePredictionData(DecoderBuffer *buffer) = 0;
+};
+
+// A specialized version of the prediction scheme interface for specific
+// input and output data types.
+// |entry_to_point_id_map| is the mapping between value entries to point ids
+// of the associated point cloud, where one entry is defined as |num_components|
+// values of the |in_data|.
+// DataTypeT is the data type of input and predicted values.
+// CorrTypeT is the data type used for storing corrected values.
+template <typename DataTypeT, typename CorrTypeT = DataTypeT>
+class PredictionSchemeTypedDecoderInterface
+ : public PredictionSchemeDecoderInterface {
+ public:
+ // Reverts changes made by the prediction scheme during encoding.
+ virtual bool ComputeOriginalValues(
+ const CorrTypeT *in_corr, DataTypeT *out_data, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h
new file mode 100644
index 0000000..47c1532
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h
@@ -0,0 +1,65 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// PredictionSchemeDecodingTransform is used to transform predicted values and
+// correction values into the final original attribute values.
+// DataTypeT is the data type of predicted values.
+// CorrTypeT is the data type used for storing corrected values. It allows
+// transforms to store corrections into a different type or format compared to
+// the predicted data.
+template <typename DataTypeT, typename CorrTypeT>
+class PredictionSchemeDecodingTransform {
+ public:
+ typedef CorrTypeT CorrType;
+ PredictionSchemeDecodingTransform() : num_components_(0) {}
+
+ void Init(int num_components) { num_components_ = num_components; }
+
+ // Computes the original value from the input predicted value and the decoded
+  // corrections. The default implementation is equal to std::plus.
+ inline void ComputeOriginalValue(const DataTypeT *predicted_vals,
+ const CorrTypeT *corr_vals,
+ DataTypeT *out_original_vals) const {
+ static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
+ "For the default prediction transform, correction and input "
+ "data must be of the same type.");
+ for (int i = 0; i < num_components_; ++i) {
+ out_original_vals[i] = predicted_vals[i] + corr_vals[i];
+ }
+ }
+
+ // Decodes any transform specific data. Called before Init() method.
+ bool DecodeTransformData(DecoderBuffer * /* buffer */) { return true; }
+
+ // Should return true if all corrected values are guaranteed to be positive.
+ bool AreCorrectionsPositive() const { return false; }
+
+ protected:
+ int num_components() const { return num_components_; }
+
+ private:
+ int num_components_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h
new file mode 100644
index 0000000..ae72c71
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h
@@ -0,0 +1,65 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
+
+namespace draco {
+
+// Decoder for values encoded with delta coding. See the corresponding encoder
+// for more details.
+template <typename DataTypeT, class TransformT>
+class PredictionSchemeDeltaDecoder
+ : public PredictionSchemeDecoder<DataTypeT, TransformT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
+  // Initializes the prediction scheme.
+ explicit PredictionSchemeDeltaDecoder(const PointAttribute *attribute)
+ : PredictionSchemeDecoder<DataTypeT, TransformT>(attribute) {}
+ PredictionSchemeDeltaDecoder(const PointAttribute *attribute,
+ const TransformT &transform)
+ : PredictionSchemeDecoder<DataTypeT, TransformT>(attribute, transform) {}
+
+ bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
+ int size, int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return PREDICTION_DIFFERENCE;
+ }
+ bool IsInitialized() const override { return true; }
+};
+
+template <typename DataTypeT, class TransformT>
+bool PredictionSchemeDeltaDecoder<DataTypeT, TransformT>::ComputeOriginalValues(
+ const CorrType *in_corr, DataTypeT *out_data, int size, int num_components,
+ const PointIndex *) {
+ this->transform().Init(num_components);
+ // Decode the original value for the first element.
+ std::unique_ptr<DataTypeT[]> zero_vals(new DataTypeT[num_components]());
+ this->transform().ComputeOriginalValue(zero_vals.get(), in_corr, out_data);
+
+ // Decode data from the front using D(i) = D(i) + D(i - 1).
+ for (int i = num_components; i < size; i += num_components) {
+ this->transform().ComputeOriginalValue(out_data + i - num_components,
+ in_corr + i, out_data + i);
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h
new file mode 100644
index 0000000..324afaf
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h
@@ -0,0 +1,69 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
+
+namespace draco {
+
+// Basic prediction scheme based on computing backward differences between
+// stored attribute values (also known as delta-coding). Usually works better
+// than the reference point prediction scheme, because nearby values are often
+// encoded next to each other.
+template <typename DataTypeT, class TransformT>
+class PredictionSchemeDeltaEncoder
+ : public PredictionSchemeEncoder<DataTypeT, TransformT> {
+ public:
+ using CorrType =
+ typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
+ // Initialized the prediction scheme.
+ explicit PredictionSchemeDeltaEncoder(const PointAttribute *attribute)
+ : PredictionSchemeEncoder<DataTypeT, TransformT>(attribute) {}
+ PredictionSchemeDeltaEncoder(const PointAttribute *attribute,
+ const TransformT &transform)
+ : PredictionSchemeEncoder<DataTypeT, TransformT>(attribute, transform) {}
+
+ bool ComputeCorrectionValues(
+ const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) override;
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return PREDICTION_DIFFERENCE;
+ }
+ bool IsInitialized() const override { return true; }
+};
+
+template <typename DataTypeT, class TransformT>
+bool PredictionSchemeDeltaEncoder<
+ DataTypeT, TransformT>::ComputeCorrectionValues(const DataTypeT *in_data,
+ CorrType *out_corr,
+ int size,
+ int num_components,
+ const PointIndex *) {
+ this->transform().Init(in_data, size, num_components);
+ // Encode data from the back using D(i) = D(i) - D(i - 1).
+ for (int i = size - num_components; i > 0; i -= num_components) {
+ this->transform().ComputeCorrection(
+ in_data + i, in_data + i - num_components, out_corr + i);
+ }
+ // Encode correction for the first element.
+ std::unique_ptr<DataTypeT[]> zero_vals(new DataTypeT[num_components]());
+ this->transform().ComputeCorrection(in_data, zero_vals.get(), out_corr);
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h
new file mode 100644
index 0000000..2a211a9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h
@@ -0,0 +1,90 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_
+
+#include <type_traits>
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h"
+
+// Prediction schemes can be used during encoding and decoding of vertex
+// attributes to predict attribute values based on the previously
+// encoded/decoded data. The differences between the original and predicted
+// attribute values are used to compute correction values that can be usually
+// encoded with fewer bits compared to the original data.
+namespace draco {
+
+// Abstract base class for typed prediction schemes. It provides basic access
+// to the encoded attribute and to the supplied prediction transform.
+template <typename DataTypeT,
+ class TransformT =
+ PredictionSchemeEncodingTransform<DataTypeT, DataTypeT>>
+class PredictionSchemeEncoder : public PredictionSchemeTypedEncoderInterface<
+ DataTypeT, typename TransformT::CorrType> {
+ public:
+ typedef DataTypeT DataType;
+ typedef TransformT Transform;
+ // Correction type needs to be defined in the prediction transform class.
+ typedef typename Transform::CorrType CorrType;
+ explicit PredictionSchemeEncoder(const PointAttribute *attribute)
+ : PredictionSchemeEncoder(attribute, Transform()) {}
+ PredictionSchemeEncoder(const PointAttribute *attribute,
+ const Transform &transform)
+ : attribute_(attribute), transform_(transform) {}
+
+ bool EncodePredictionData(EncoderBuffer *buffer) override {
+ if (!transform_.EncodeTransformData(buffer)) {
+ return false;
+ }
+ return true;
+ }
+
+ const PointAttribute *GetAttribute() const override { return attribute(); }
+
+ // Returns the number of parent attributes that are needed for the prediction.
+ int GetNumParentAttributes() const override { return 0; }
+
+ // Returns the type of each of the parent attribute.
+ GeometryAttribute::Type GetParentAttributeType(int /* i */) const override {
+ return GeometryAttribute::INVALID;
+ }
+
+ // Sets the required parent attribute.
+ bool SetParentAttribute(const PointAttribute * /* att */) override {
+ return false;
+ }
+
+ bool AreCorrectionsPositive() override {
+ return transform_.AreCorrectionsPositive();
+ }
+
+ PredictionSchemeTransformType GetTransformType() const override {
+ return transform_.GetType();
+ }
+
+ protected:
+ inline const PointAttribute *attribute() const { return attribute_; }
+ inline const Transform &transform() const { return transform_; }
+ inline Transform &transform() { return transform_; }
+
+ private:
+ const PointAttribute *attribute_;
+ Transform transform_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
new file mode 100644
index 0000000..f410a6c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
@@ -0,0 +1,85 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
+
+namespace draco {
+
+PredictionSchemeMethod SelectPredictionMethod(
+ int att_id, const PointCloudEncoder *encoder) {
+ if (encoder->options()->GetSpeed() >= 10) {
+ // Selected fastest, though still doing some compression.
+ return PREDICTION_DIFFERENCE;
+ }
+ if (encoder->GetGeometryType() == TRIANGULAR_MESH) {
+ // Use speed setting to select the best encoding method.
+ const PointAttribute *const att = encoder->point_cloud()->attribute(att_id);
+ if (att->attribute_type() == GeometryAttribute::TEX_COORD) {
+ if (encoder->options()->GetSpeed() < 4) {
+ // Use texture coordinate prediction for speeds 0, 1, 2, 3.
+ return MESH_PREDICTION_TEX_COORDS_PORTABLE;
+ }
+ }
+ if (att->attribute_type() == GeometryAttribute::NORMAL) {
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+ if (encoder->options()->GetSpeed() < 4) {
+ // Use geometric normal prediction for speeds 0, 1, 2, 3.
+ // For this prediction, the position attribute needs to be either
+ // integer or quantized as well.
+ const int pos_att_id = encoder->point_cloud()->GetNamedAttributeId(
+ GeometryAttribute::POSITION);
+ const PointAttribute *const pos_att =
+ encoder->point_cloud()->GetNamedAttribute(
+ GeometryAttribute::POSITION);
+ if (pos_att && (IsDataTypeIntegral(pos_att->data_type()) ||
+ encoder->options()->GetAttributeInt(
+ pos_att_id, "quantization_bits", -1) > 0)) {
+ return MESH_PREDICTION_GEOMETRIC_NORMAL;
+ }
+ }
+#endif
+ return PREDICTION_DIFFERENCE; // default
+ }
+ // Handle other attribute types.
+ if (encoder->options()->GetSpeed() >= 8) {
+ return PREDICTION_DIFFERENCE;
+ }
+ if (encoder->options()->GetSpeed() >= 2 ||
+ encoder->point_cloud()->num_points() < 40) {
+ // Parallelogram prediction is used for speeds 2 - 7 or when the overhead
+ // of using constrained multi-parallelogram would be too high.
+ return MESH_PREDICTION_PARALLELOGRAM;
+ }
+ // Multi-parallelogram is used for speeds 0, 1.
+ return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
+ }
+ // Default option is delta coding.
+ return PREDICTION_DIFFERENCE;
+}
+
+// Returns the preferred prediction scheme based on the encoder options.
+PredictionSchemeMethod GetPredictionMethodFromOptions(
+ int att_id, const EncoderOptions &options) {
+ const int pred_type =
+ options.GetAttributeInt(att_id, "prediction_scheme", -1);
+ if (pred_type == -1) {
+ return PREDICTION_UNDEFINED;
+ }
+ if (pred_type < 0 || pred_type >= NUM_PREDICTION_SCHEMES) {
+ return PREDICTION_NONE;
+ }
+ return static_cast<PredictionSchemeMethod>(pred_type);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
new file mode 100644
index 0000000..40a7683
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
@@ -0,0 +1,129 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for creating prediction schemes for encoders using the provided
+// prediction method id.
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h"
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h"
+#endif
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
+#include "draco/compression/mesh/mesh_encoder.h"
+
+namespace draco {
+
+// Selects a prediction method based on the input geometry type and based on the
+// encoder options.
+PredictionSchemeMethod SelectPredictionMethod(int att_id,
+ const PointCloudEncoder *encoder);
+
+// Factory class for creating mesh prediction schemes.
+template <typename DataTypeT>
+struct MeshPredictionSchemeEncoderFactory {
+ template <class TransformT, class MeshDataT>
+ std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>> operator()(
+ PredictionSchemeMethod method, const PointAttribute *attribute,
+ const TransformT &transform, const MeshDataT &mesh_data,
+ uint16_t bitstream_version) {
+ if (method == MESH_PREDICTION_PARALLELOGRAM) {
+ return std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeParallelogramEncoder<DataTypeT, TransformT,
+ MeshDataT>(
+ attribute, transform, mesh_data));
+ } else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) {
+ return std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ } else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) {
+ return std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeTexCoordsPortableEncoder<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform,
+ mesh_data));
+ }
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+ else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
+ return std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>(
+ new MeshPredictionSchemeGeometricNormalEncoder<DataTypeT, TransformT,
+ MeshDataT>(
+ attribute, transform, mesh_data));
+ }
+#endif
+ return nullptr;
+ }
+};
+
+// Creates a prediction scheme for a given encoder and given prediction method.
+// The prediction schemes are automatically initialized with encoder specific
+// data if needed.
+template <typename DataTypeT, class TransformT>
+std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>
+CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
+ const PointCloudEncoder *encoder,
+ const TransformT &transform) {
+ const PointAttribute *const att = encoder->point_cloud()->attribute(att_id);
+ if (method == PREDICTION_UNDEFINED) {
+ method = SelectPredictionMethod(att_id, encoder);
+ }
+ if (method == PREDICTION_NONE) {
+ return nullptr; // No prediction is used.
+ }
+ if (encoder->GetGeometryType() == TRIANGULAR_MESH) {
+ // Cast the encoder to mesh encoder. This is not necessarily safe if there
+ // is some other encoder decides to use TRIANGULAR_MESH as the return type,
+ // but unfortunately there is not nice work around for this without using
+ // RTTI (double dispatch and similar concepts will not work because of the
+ // template nature of the prediction schemes).
+ const MeshEncoder *const mesh_encoder =
+ static_cast<const MeshEncoder *>(encoder);
+ auto ret = CreateMeshPredictionScheme<
+ MeshEncoder, PredictionSchemeEncoder<DataTypeT, TransformT>,
+ MeshPredictionSchemeEncoderFactory<DataTypeT>>(
+ mesh_encoder, method, att_id, transform, kDracoMeshBitstreamVersion);
+ if (ret) {
+ return ret;
+ }
+ // Otherwise try to create another prediction scheme.
+ }
+ // Create delta encoder.
+ return std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>(
+ new PredictionSchemeDeltaEncoder<DataTypeT, TransformT>(att, transform));
+}
+
+// Create a prediction scheme using a default transform constructor.
+template <typename DataTypeT, class TransformT>
+std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>
+CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
+ const PointCloudEncoder *encoder) {
+ return CreatePredictionSchemeForEncoder<DataTypeT, TransformT>(
+ method, att_id, encoder, TransformT());
+}
+
+// Returns the preferred prediction scheme based on the encoder options.
+PredictionSchemeMethod GetPredictionMethodFromOptions(
+ int att_id, const EncoderOptions &options);
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h
new file mode 100644
index 0000000..37aa9f7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h
@@ -0,0 +1,55 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
+#include "draco/core/encoder_buffer.h"
+
+// Prediction schemes can be used during encoding and decoding of attributes
+// to predict attribute values based on the previously encoded/decoded data.
+// See prediction_scheme.h for more details.
+namespace draco {
+
+// Abstract interface for all prediction schemes used during attribute encoding.
+class PredictionSchemeEncoderInterface : public PredictionSchemeInterface {
+ public:
+ // Method that can be used to encode any prediction scheme specific data
+ // into the output buffer.
+ virtual bool EncodePredictionData(EncoderBuffer *buffer) = 0;
+};
+
+// A specialized version of the prediction scheme interface for specific
+// input and output data types.
+// |entry_to_point_id_map| is the mapping between value entries to point ids
+// of the associated point cloud, where one entry is defined as |num_components|
+// values of the |in_data|.
+// DataTypeT is the data type of input and predicted values.
+// CorrTypeT is the data type used for storing corrected values.
+template <typename DataTypeT, typename CorrTypeT = DataTypeT>
+class PredictionSchemeTypedEncoderInterface
+ : public PredictionSchemeEncoderInterface {
+ public:
+ // Applies the prediction scheme when encoding the attribute.
+ // |in_data| contains value entries to be encoded.
+ // |out_corr| is an output array containing the to be encoded corrections.
+ virtual bool ComputeCorrectionValues(
+ const DataTypeT *in_data, CorrTypeT *out_corr, int size,
+ int num_components, const PointIndex *entry_to_point_id_map) = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h
new file mode 100644
index 0000000..0929492
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h
@@ -0,0 +1,77 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// PredictionSchemeEncodingTransform is used to transform predicted values into
+// correction values.
+// CorrTypeT is the data type used for storing corrected values. It allows
+// transforms to store corrections into a different type or format compared to
+// the predicted data.
+template <typename DataTypeT, typename CorrTypeT>
+class PredictionSchemeEncodingTransform {
+ public:
+ typedef CorrTypeT CorrType;
+ PredictionSchemeEncodingTransform() : num_components_(0) {}
+
+ PredictionSchemeTransformType GetType() const {
+ return PREDICTION_TRANSFORM_DELTA;
+ }
+
+ // Performs any custom initialization of the transform for the encoder.
+ // |size| = total number of values in |orig_data| (i.e., number of entries *
+ // number of components).
+ void Init(const DataTypeT * /* orig_data */, int /* size */,
+ int num_components) {
+ num_components_ = num_components;
+ }
+
+ // Computes the corrections based on the input original values and the
+ // predicted values. The correction is always computed for all components
+ // of the input element. |val_id| is the id of the input value
+ // (i.e., element_id * num_components). The default implementation is equal to
+ // std::minus.
+ inline void ComputeCorrection(const DataTypeT *original_vals,
+ const DataTypeT *predicted_vals,
+ CorrTypeT *out_corr_vals) {
+ static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
+ "For the default prediction transform, correction and input "
+ "data must be of the same type.");
+ for (int i = 0; i < num_components_; ++i) {
+ out_corr_vals[i] = original_vals[i] - predicted_vals[i];
+ }
+ }
+
+ // Encode any transform specific data.
+ bool EncodeTransformData(EncoderBuffer * /* buffer */) { return true; }
+
+ // Should return true if all corrected values are guaranteed to be positive.
+ bool AreCorrectionsPositive() const { return false; }
+
+ protected:
+ int num_components() const { return num_components_; }
+
+ private:
+ int num_components_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h
new file mode 100644
index 0000000..b36c4c8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h
@@ -0,0 +1,85 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for creating prediction schemes from a provided prediction method
+// name. The functions in this file can create only basic prediction schemes
+// that don't require any encoder or decoder specific data. To create more
+// sophisticated prediction schemes, use functions from either
+// prediction_scheme_encoder_factory.h or,
+// prediction_scheme_decoder_factory.h.
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
+template <class EncodingDataSourceT, class PredictionSchemeT,
+ class MeshPredictionSchemeFactoryT>
+std::unique_ptr<PredictionSchemeT> CreateMeshPredictionScheme(
+ const EncodingDataSourceT *source, PredictionSchemeMethod method,
+ int att_id, const typename PredictionSchemeT::Transform &transform,
+ uint16_t bitstream_version) {
+ const PointAttribute *const att = source->point_cloud()->attribute(att_id);
+ if (source->GetGeometryType() == TRIANGULAR_MESH &&
+ (method == MESH_PREDICTION_PARALLELOGRAM ||
+ method == MESH_PREDICTION_MULTI_PARALLELOGRAM ||
+ method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM ||
+ method == MESH_PREDICTION_TEX_COORDS_PORTABLE ||
+ method == MESH_PREDICTION_GEOMETRIC_NORMAL ||
+ method == MESH_PREDICTION_TEX_COORDS_DEPRECATED)) {
+ const CornerTable *const ct = source->GetCornerTable();
+ const MeshAttributeIndicesEncodingData *const encoding_data =
+ source->GetAttributeEncodingData(att_id);
+ if (ct == nullptr || encoding_data == nullptr) {
+ // No connectivity data found.
+ return nullptr;
+ }
+ // Connectivity data exists.
+ const MeshAttributeCornerTable *const att_ct =
+ source->GetAttributeCornerTable(att_id);
+ if (att_ct != nullptr) {
+ typedef MeshPredictionSchemeData<MeshAttributeCornerTable> MeshData;
+ MeshData md;
+ md.Set(source->mesh(), att_ct,
+ &encoding_data->encoded_attribute_value_index_to_corner_map,
+ &encoding_data->vertex_to_encoded_attribute_value_index_map);
+ MeshPredictionSchemeFactoryT factory;
+ auto ret = factory(method, att, transform, md, bitstream_version);
+ if (ret) {
+ return ret;
+ }
+ } else {
+ typedef MeshPredictionSchemeData<CornerTable> MeshData;
+ MeshData md;
+ md.Set(source->mesh(), ct,
+ &encoding_data->encoded_attribute_value_index_to_corner_map,
+ &encoding_data->vertex_to_encoded_attribute_value_index_map);
+ MeshPredictionSchemeFactoryT factory;
+ auto ret = factory(method, att, transform, md, bitstream_version);
+ if (ret) {
+ return ret;
+ }
+ }
+ }
+ return nullptr;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h
new file mode 100644
index 0000000..c9b3706
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h
@@ -0,0 +1,60 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_
+
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+
+// Prediction schemes can be used during encoding and decoding of attributes
+// to predict attribute values based on the previously encoded/decoded data.
+// See prediction_scheme.h for more details.
+namespace draco {
+
+// Abstract interface for all prediction schemes used during attribute encoding.
+class PredictionSchemeInterface {
+ public:
+ virtual ~PredictionSchemeInterface() = default;
+ virtual PredictionSchemeMethod GetPredictionMethod() const = 0;
+
+ // Returns the encoded attribute.
+ virtual const PointAttribute *GetAttribute() const = 0;
+
+ // Returns true when the prediction scheme is initialized with all data it
+ // needs.
+ virtual bool IsInitialized() const = 0;
+
+ // Returns the number of parent attributes that are needed for the prediction.
+ virtual int GetNumParentAttributes() const = 0;
+
+ // Returns the type of each of the parent attribute.
+ virtual GeometryAttribute::Type GetParentAttributeType(int i) const = 0;
+
+ // Sets the required parent attribute.
+ // Returns false if the attribute doesn't meet the requirements of the
+ // prediction scheme.
+ virtual bool SetParentAttribute(const PointAttribute *att) = 0;
+
+ // Method should return true if the prediction scheme guarantees that all
+ // correction values are always positive (or at least non-negative).
+ virtual bool AreCorrectionsPositive() = 0;
+
+ // Returns the transform type used by the prediction scheme.
+ virtual PredictionSchemeTransformType GetTransformType() const = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h
new file mode 100644
index 0000000..5a6c7c2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h
@@ -0,0 +1,118 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_
+
+#include <cmath>
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/macros.h"
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// Class for converting correction values transformed by the canonicalized
+// normal octahedron transform back to the original values. See the
+// corresponding encoder for more details.
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform
+ : public PredictionSchemeNormalOctahedronCanonicalizedTransformBase<
+ DataTypeT> {
+ public:
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT CorrType;
+ typedef DataTypeT DataType;
+
+ PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform() {}
+
+ // Dummy to fulfill concept.
+ void Init(int num_components) {}
+
+ bool DecodeTransformData(DecoderBuffer *buffer) {
+ DataTypeT max_quantized_value, center_value;
+ if (!buffer->Decode(&max_quantized_value)) {
+ return false;
+ }
+ if (!buffer->Decode(&center_value)) {
+ return false;
+ }
+ (void)center_value;
+ if (!this->set_max_quantized_value(max_quantized_value)) {
+ return false;
+ }
+ // Account for reading wrong values, e.g., due to fuzzing.
+ if (this->quantization_bits() < 2) {
+ return false;
+ }
+ if (this->quantization_bits() > 30) {
+ return false;
+ }
+ return true;
+ }
+
+ inline void ComputeOriginalValue(const DataType *pred_vals,
+ const CorrType *corr_vals,
+ DataType *out_orig_vals) const {
+ DRACO_DCHECK_LE(pred_vals[0], 2 * this->center_value());
+ DRACO_DCHECK_LE(pred_vals[1], 2 * this->center_value());
+ DRACO_DCHECK_LE(corr_vals[0], 2 * this->center_value());
+ DRACO_DCHECK_LE(corr_vals[1], 2 * this->center_value());
+
+ DRACO_DCHECK_LE(0, pred_vals[0]);
+ DRACO_DCHECK_LE(0, pred_vals[1]);
+ DRACO_DCHECK_LE(0, corr_vals[0]);
+ DRACO_DCHECK_LE(0, corr_vals[1]);
+
+ const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
+ const Point2 corr = Point2(corr_vals[0], corr_vals[1]);
+ const Point2 orig = ComputeOriginalValue(pred, corr);
+
+ out_orig_vals[0] = orig[0];
+ out_orig_vals[1] = orig[1];
+ }
+
+ private:
+ Point2 ComputeOriginalValue(Point2 pred, Point2 corr) const {
+ const Point2 t(this->center_value(), this->center_value());
+ pred = pred - t;
+ const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]);
+ if (!pred_is_in_diamond) {
+ this->InvertDiamond(&pred[0], &pred[1]);
+ }
+ const bool pred_is_in_bottom_left = this->IsInBottomLeft(pred);
+ const int32_t rotation_count = this->GetRotationCount(pred);
+ if (!pred_is_in_bottom_left) {
+ pred = this->RotatePoint(pred, rotation_count);
+ }
+ Point2 orig = pred + corr;
+ orig[0] = this->ModMax(orig[0]);
+ orig[1] = this->ModMax(orig[1]);
+ if (!pred_is_in_bottom_left) {
+ const int32_t reverse_rotation_count = (4 - rotation_count) % 4;
+ orig = this->RotatePoint(orig, reverse_rotation_count);
+ }
+ if (!pred_is_in_diamond) {
+ this->InvertDiamond(&orig[0], &orig[1]);
+ }
+ orig = orig + t;
+ return orig;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h
new file mode 100644
index 0000000..0dc9696
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h
@@ -0,0 +1,116 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_
+
+#include <cmath>
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/macros.h"
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// The transform works on octahedral coordinates for normals. The square is
+// subdivided into four inner triangles (diamond) and four outer triangles. The
+// inner triangles are associated with the upper part of the octahedron and the
+// outer triangles are associated with the lower part.
+// Given a prediction value P and the actual value Q that should be encoded,
+// this transform first checks if P is outside the diamond. If so, the outer
+// triangles are flipped towards the inside and vice versa. Then it checks if p
+// is in the bottom left quadrant. If it is not, it rotates p and q accordingly.
+// The actual correction value is then based on the mapped and rotated P and Q
+// values. The inversion tends to result in shorter correction vectors and the
+// rotation makes it so that all long correction values are positive, reducing
+// the possible value range of the correction values and increasing the
+// occurrences of positive large correction values, which helps the entropy
+// encoder. This is possible since P is also known by the decoder, see also
+// ComputeCorrection and ComputeOriginalValue functions.
+// Note that the tile is not periodic, which implies that the outer edges can
+// not be identified, which requires us to use an odd number of values on each
+// axis.
+// DataTypeT is expected to be some integral type.
+//
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform
+ : public PredictionSchemeNormalOctahedronCanonicalizedTransformBase<
+ DataTypeT> {
+ public:
+ typedef PredictionSchemeNormalOctahedronCanonicalizedTransformBase<DataTypeT>
+ Base;
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT CorrType;
+ typedef DataTypeT DataType;
+
+ // We expect the mod value to be of the form 2^b-1.
+ explicit PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform(
+ DataType max_quantized_value)
+ : Base(max_quantized_value) {}
+
+ // Dummy function to fulfill concept.
+ void Init(const DataTypeT *orig_data, int size, int num_components) {}
+
+ bool EncodeTransformData(EncoderBuffer *buffer) {
+ buffer->Encode(this->max_quantized_value());
+ buffer->Encode(this->center_value());
+ return true;
+ }
+
+ inline void ComputeCorrection(const DataType *orig_vals,
+ const DataType *pred_vals,
+ CorrType *out_corr_vals) const {
+ DRACO_DCHECK_LE(pred_vals[0], this->center_value() * 2);
+ DRACO_DCHECK_LE(pred_vals[1], this->center_value() * 2);
+ DRACO_DCHECK_LE(orig_vals[0], this->center_value() * 2);
+ DRACO_DCHECK_LE(orig_vals[1], this->center_value() * 2);
+ DRACO_DCHECK_LE(0, pred_vals[0]);
+ DRACO_DCHECK_LE(0, pred_vals[1]);
+ DRACO_DCHECK_LE(0, orig_vals[0]);
+ DRACO_DCHECK_LE(0, orig_vals[1]);
+
+ const Point2 orig = Point2(orig_vals[0], orig_vals[1]);
+ const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
+ const Point2 corr = ComputeCorrection(orig, pred);
+
+ out_corr_vals[0] = corr[0];
+ out_corr_vals[1] = corr[1];
+ }
+
+ private:
+ Point2 ComputeCorrection(Point2 orig, Point2 pred) const {
+ const Point2 t(this->center_value(), this->center_value());
+ orig = orig - t;
+ pred = pred - t;
+ if (!this->IsInDiamond(pred[0], pred[1])) {
+ this->InvertDiamond(&orig[0], &orig[1]);
+ this->InvertDiamond(&pred[0], &pred[1]);
+ }
+ if (!this->IsInBottomLeft(pred)) {
+ const int32_t rotation_count = this->GetRotationCount(pred);
+ orig = this->RotatePoint(orig, rotation_count);
+ pred = this->RotatePoint(pred, rotation_count);
+ }
+ Point2 corr = orig - pred;
+ corr[0] = this->MakePositive(corr[0]);
+ corr[1] = this->MakePositive(corr[1]);
+ return corr;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h
new file mode 100644
index 0000000..4a1e3a6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h
@@ -0,0 +1,102 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_
+
+#include <cmath>
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/macros.h"
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// Base class containing shared functionality used by both encoding and decoding
+// canonicalized normal octahedron prediction scheme transforms. See the
+// encoding transform for more details about the method.
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronCanonicalizedTransformBase
+ : public PredictionSchemeNormalOctahedronTransformBase<DataTypeT> {
+ public:
+ typedef PredictionSchemeNormalOctahedronTransformBase<DataTypeT> Base;
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT DataType;
+
+ PredictionSchemeNormalOctahedronCanonicalizedTransformBase() : Base() {}
+ // We expect the mod value to be of the form 2^b-1.
+ explicit PredictionSchemeNormalOctahedronCanonicalizedTransformBase(
+ DataType mod_value)
+ : Base(mod_value) {}
+
+ static constexpr PredictionSchemeTransformType GetType() {
+ return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED;
+ }
+
+ int32_t GetRotationCount(Point2 pred) const {
+ const DataType sign_x = pred[0];
+ const DataType sign_y = pred[1];
+
+ int32_t rotation_count = 0;
+ if (sign_x == 0) {
+ if (sign_y == 0) {
+ rotation_count = 0;
+ } else if (sign_y > 0) {
+ rotation_count = 3;
+ } else {
+ rotation_count = 1;
+ }
+ } else if (sign_x > 0) {
+ if (sign_y >= 0) {
+ rotation_count = 2;
+ } else {
+ rotation_count = 1;
+ }
+ } else {
+ if (sign_y <= 0) {
+ rotation_count = 0;
+ } else {
+ rotation_count = 3;
+ }
+ }
+ return rotation_count;
+ }
+
+ Point2 RotatePoint(Point2 p, int32_t rotation_count) const {
+ switch (rotation_count) {
+ case 1:
+ return Point2(p[1], -p[0]);
+ case 2:
+ return Point2(-p[0], -p[1]);
+ case 3:
+ return Point2(-p[1], p[0]);
+ default:
+ return p;
+ }
+ }
+
+ bool IsInBottomLeft(const Point2 &p) const {
+ if (p[0] == 0 && p[1] == 0) {
+ return true;
+ }
+ return (p[0] < 0 && p[1] <= 0);
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
new file mode 100644
index 0000000..8c8932f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
@@ -0,0 +1,192 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h"
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
+class PredictionSchemeNormalOctahedronCanonicalizedTransformTest
+ : public ::testing::Test {
+ protected:
+ typedef draco::PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform<
+ int32_t>
+ Transform;
+ typedef Transform::Point2 Point2;
+
+ void TestComputeCorrection(const Transform &transform, const int32_t &ox,
+ const int32_t &oy, const int32_t &px,
+ const int32_t &py, const int32_t &cx,
+ const int32_t &cy) {
+ const int32_t o[2] = {ox + 7, oy + 7};
+ const int32_t p[2] = {px + 7, py + 7};
+ int32_t corr[2] = {500, 500};
+ transform.ComputeCorrection(o, p, corr);
+ ASSERT_EQ(corr[0], (cx + 15) % 15);
+ ASSERT_EQ(corr[1], (cy + 15) % 15);
+ }
+
+ void TestGetRotationCount(const Transform &transform, const Point2 &pred,
+ const int32_t &rot_dir) {
+ const int32_t rotation_count = transform.GetRotationCount(pred);
+ ASSERT_EQ(rot_dir, rotation_count);
+ }
+
+ void TestRotateRepresentation(const Transform &transform, const Point2 &org,
+ const Point2 &pred, const Point2 &rot_org,
+ const Point2 &rot_pred) {
+ const int32_t rotation_count = transform.GetRotationCount(pred);
+ const Point2 res_org = transform.RotatePoint(org, rotation_count);
+ const Point2 res_pred = transform.RotatePoint(pred, rotation_count);
+ ASSERT_EQ(rot_org[0], res_org[0]);
+ ASSERT_EQ(rot_org[1], res_org[1]);
+ ASSERT_EQ(rot_pred[0], res_pred[0]);
+ ASSERT_EQ(rot_pred[1], res_pred[1]);
+ }
+};
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Init) {
+ const Transform transform(15);
+ ASSERT_TRUE(transform.AreCorrectionsPositive());
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ IsInBottomLeft) {
+ const Transform transform(15);
+ ASSERT_TRUE(transform.IsInBottomLeft(Point2(0, 0)));
+ ASSERT_TRUE(transform.IsInBottomLeft(Point2(-1, -1)));
+ ASSERT_TRUE(transform.IsInBottomLeft(Point2(-7, -7)));
+
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, 1)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, 7)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(-1, 1)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(-7, 7)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, -1)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, -7)));
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ GetRotationCount) {
+ const Transform transform(15);
+ TestGetRotationCount(transform, Point2(1, 2), 2); // top right
+ TestGetRotationCount(transform, Point2(-1, 2), 3); // top left
+ TestGetRotationCount(transform, Point2(1, -2), 1); // bottom right
+ TestGetRotationCount(transform, Point2(-1, -2), 0); // bottom left
+ TestGetRotationCount(transform, Point2(0, 2), 3); // top left
+ TestGetRotationCount(transform, Point2(0, -2), 1); // bottom right
+ TestGetRotationCount(transform, Point2(2, 0), 2); // top right
+ TestGetRotationCount(transform, Point2(-2, 0), 0); // bottom left
+ TestGetRotationCount(transform, Point2(0, 0), 0); // bottom left
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ RotateRepresentation) {
+ const Transform transform(15);
+ // p top left; shift clockwise by 3
+ TestRotateRepresentation(transform, Point2(1, 2), Point2(-3, 1),
+ Point2(-2, 1), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(-3, 1),
+ Point2(2, -1), Point2(-1, -3)); // q bottom left
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(-3, 1),
+ Point2(2, 1), Point2(-1, -3)); // q bottom right
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(-3, 1),
+ Point2(-2, -1), Point2(-1, -3)); // q top left
+ // p top right; shift clockwise by 2 (flip)
+ TestRotateRepresentation(transform, Point2(1, 1), Point2(1, 3),
+ Point2(-1, -1), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(1, 3),
+ Point2(1, 2), Point2(-1, -3)); // q bottom left
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(1, 3),
+ Point2(1, -2), Point2(-1, -3)); // q top left
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(1, 3),
+ Point2(-1, 2), Point2(-1, -3)); // q bottom right
+ // p bottom right; shift clockwise by 1
+ TestRotateRepresentation(transform, Point2(1, 2), Point2(3, -1),
+ Point2(2, -1), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(3, -1),
+ Point2(-2, -1), Point2(-1, -3)); // q bottom right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(3, -1),
+ Point2(-2, 1), Point2(-1, -3)); // q bottom left
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(3, -1),
+ Point2(2, 1), Point2(-1, -3)); // q top left
+ // p bottom left; no change
+ TestRotateRepresentation(transform, Point2(1, 2), Point2(-1, -3),
+ Point2(1, 2), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(-1, -3),
+ Point2(-1, 2), Point2(-1, -3)); // q top left
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(-1, -3),
+ Point2(1, -2), Point2(-1, -3)); // q bottom right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(-1, -3),
+ Point2(-1, -2), Point2(-1, -3)); // q bottom left
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ ComputeCorrection) {
+ const Transform transform(15);
+ TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0);
+ TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0);
+ // inside diamond; p top right
+ TestComputeCorrection(transform, 3, 4, 1, 2, -2, -2); // q top right
+ TestComputeCorrection(transform, -3, 4, 1, 2, 4, -2); // q top left
+ TestComputeCorrection(transform, 3, -4, 1, 2, -2, 6); // q bottom right
+ TestComputeCorrection(transform, -3, -4, 1, 2, 4, 6); // q bottom left
+ // inside diamond; p top left
+ TestComputeCorrection(transform, 3, 4, -1, 2, -2, 4); // q top right
+ TestComputeCorrection(transform, -3, 4, -1, 2, -2, -2); // q top left
+ TestComputeCorrection(transform, 3, -4, -1, 2, 6, 4); // q bottom right
+ TestComputeCorrection(transform, -3, -4, -1, 2, 6, -2); // q bottom left
+ // inside diamond; p bottom right
+ TestComputeCorrection(transform, 3, 4, 1, -2, 6, -2); // q top right
+ TestComputeCorrection(transform, -3, 4, 1, -2, 6, 4); // q top left
+ TestComputeCorrection(transform, 3, -4, 1, -2, -2, -2); // q bottom right
+ TestComputeCorrection(transform, -3, -4, 1, -2, -2, 4); // q bottom left
+ // inside diamond; p bottom left
+ TestComputeCorrection(transform, 3, 4, -1, -2, 4, 6); // q top right
+ TestComputeCorrection(transform, -3, 4, -1, -2, -2, 6); // q top left
+ TestComputeCorrection(transform, 3, -4, -1, -2, 4, -2); // q bottom right
+ TestComputeCorrection(transform, -3, -4, -1, -2, -2, -2); // q bottom left
+ // outside diamond; p top right
+ TestComputeCorrection(transform, 1, 2, 5, 4, -2, -4); // q top right
+ TestComputeCorrection(transform, -1, 2, 5, 4, -7, -4); // q top left
+ TestComputeCorrection(transform, 1, -2, 5, 4, -2, -7); // q bottom right
+ TestComputeCorrection(transform, -1, -2, 5, 4, -7, -7); // q bottom left
+ // outside diamond; p top left
+ TestComputeCorrection(transform, 1, 2, -5, 4, -4, -7); // q top right
+ TestComputeCorrection(transform, -1, 2, -5, 4, -4, -2); // q top left
+ TestComputeCorrection(transform, 1, -2, -5, 4, -7, -7); // q bottom right
+ TestComputeCorrection(transform, -1, -2, -5, 4, -7, -2); // q bottom left
+ // outside diamond; p bottom right
+ TestComputeCorrection(transform, 1, 2, 5, -4, -7, -2); // q top right
+ TestComputeCorrection(transform, -1, 2, 5, -4, -7, -7); // q top left
+ TestComputeCorrection(transform, 1, -2, 5, -4, -4, -2); // q bottom right
+ TestComputeCorrection(transform, -1, -2, 5, -4, -4, -7); // q bottom left
+ // outside diamond; p bottom left
+ TestComputeCorrection(transform, 1, 2, -5, -4, -7, -7); // q top right
+ TestComputeCorrection(transform, -1, 2, -5, -4, -2, -7); // q top left
+ TestComputeCorrection(transform, 1, -2, -5, -4, -7, -4); // q bottom right
+ TestComputeCorrection(transform, -1, -2, -5, -4, -2, -4); // q bottom left
+
+ TestComputeCorrection(transform, -1, -2, 7, 7, -5, -6);
+ TestComputeCorrection(transform, 0, 0, 7, 7, 7, 7);
+ TestComputeCorrection(transform, -1, -2, 0, -2, 0, 1);
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Interface) {
+ const Transform transform(15);
+ ASSERT_EQ(transform.max_quantized_value(), 15);
+ ASSERT_EQ(transform.center_value(), 7);
+ ASSERT_EQ(transform.quantization_bits(), 4);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h
new file mode 100644
index 0000000..a1bc4a3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h
@@ -0,0 +1,103 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_
+
+#include <cmath>
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/macros.h"
+#include "draco/core/vector_d.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Class for converting correction values transformed by the octahedral normal
+// transform back to the original values. See the corresponding encoder for more
+// details.
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronDecodingTransform
+ : public PredictionSchemeNormalOctahedronTransformBase<DataTypeT> {
+ public:
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT CorrType;
+ typedef DataTypeT DataType;
+
+ PredictionSchemeNormalOctahedronDecodingTransform() {}
+
+ // Dummy function to fulfill concept.
+ void Init(int num_components) {}
+ bool DecodeTransformData(DecoderBuffer *buffer) {
+ DataTypeT max_quantized_value, center_value;
+ if (!buffer->Decode(&max_quantized_value)) {
+ return false;
+ }
+ if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ if (!buffer->Decode(&center_value)) {
+ return false;
+ }
+ }
+ (void)center_value;
+ return this->set_max_quantized_value(max_quantized_value);
+ }
+
+ inline void ComputeOriginalValue(const DataType *pred_vals,
+ const CorrType *corr_vals,
+ DataType *out_orig_vals) const {
+ DRACO_DCHECK_LE(pred_vals[0], 2 * this->center_value());
+ DRACO_DCHECK_LE(pred_vals[1], 2 * this->center_value());
+ DRACO_DCHECK_LE(corr_vals[0], 2 * this->center_value());
+ DRACO_DCHECK_LE(corr_vals[1], 2 * this->center_value());
+
+ DRACO_DCHECK_LE(0, pred_vals[0]);
+ DRACO_DCHECK_LE(0, pred_vals[1]);
+ DRACO_DCHECK_LE(0, corr_vals[0]);
+ DRACO_DCHECK_LE(0, corr_vals[1]);
+
+ const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
+ const Point2 corr = Point2(corr_vals[0], corr_vals[1]);
+ const Point2 orig = ComputeOriginalValue(pred, corr);
+
+ out_orig_vals[0] = orig[0];
+ out_orig_vals[1] = orig[1];
+ }
+
+ private:
+ Point2 ComputeOriginalValue(Point2 pred, const Point2 &corr) const {
+ const Point2 t(this->center_value(), this->center_value());
+ pred = pred - t;
+
+ const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]);
+ if (!pred_is_in_diamond) {
+ this->InvertDiamond(&pred[0], &pred[1]);
+ }
+ Point2 orig = pred + corr;
+ orig[0] = this->ModMax(orig[0]);
+ orig[1] = this->ModMax(orig[1]);
+ if (!pred_is_in_diamond) {
+ this->InvertDiamond(&orig[0], &orig[1]);
+ }
+ orig = orig + t;
+ return orig;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_
+#endif
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h
new file mode 100644
index 0000000..4abfef6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h
@@ -0,0 +1,105 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_
+
+#include <cmath>
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/macros.h"
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// The transform works on octahedral coordinates for normals. The square is
+// subdivided into four inner triangles (diamond) and four outer triangles. The
+// inner triangles are associated with the upper part of the octahedron and the
+// outer triangles are associated with the lower part.
+// Given a prediction value P and the actual value Q that should be encoded,
+// this transform first checks if P is outside the diamond. If so, the outer
+// triangles are flipped towards the inside and vice versa. The actual
+// correction value is then based on the mapped P and Q values. This tends to
+// result in shorter correction vectors.
+// This is possible since the P value is also known by the decoder, see also
+// ComputeCorrection and ComputeOriginalValue functions.
+// Note that the tile is not periodic, which implies that the outer edges can
+// not be identified, which requires us to use an odd number of values on each
+// axis.
+// DataTypeT is expected to be some integral type.
+//
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronEncodingTransform
+ : public PredictionSchemeNormalOctahedronTransformBase<DataTypeT> {
+ public:
+ typedef PredictionSchemeNormalOctahedronTransformBase<DataTypeT> Base;
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT CorrType;
+ typedef DataTypeT DataType;
+
+ // We expect the mod value to be of the form 2^b-1.
+ explicit PredictionSchemeNormalOctahedronEncodingTransform(
+ DataType max_quantized_value)
+ : Base(max_quantized_value) {}
+
+ void Init(const DataTypeT *orig_data, int size, int num_components) {}
+
+ bool EncodeTransformData(EncoderBuffer *buffer) {
+ buffer->Encode(this->max_quantized_value());
+ return true;
+ }
+
+ inline void ComputeCorrection(const DataType *orig_vals,
+ const DataType *pred_vals,
+ CorrType *out_corr_vals) const {
+ DRACO_DCHECK_LE(pred_vals[0], this->center_value() * 2);
+ DRACO_DCHECK_LE(pred_vals[1], this->center_value() * 2);
+ DRACO_DCHECK_LE(orig_vals[0], this->center_value() * 2);
+ DRACO_DCHECK_LE(orig_vals[1], this->center_value() * 2);
+ DRACO_DCHECK_LE(0, pred_vals[0]);
+ DRACO_DCHECK_LE(0, pred_vals[1]);
+ DRACO_DCHECK_LE(0, orig_vals[0]);
+ DRACO_DCHECK_LE(0, orig_vals[1]);
+
+ const Point2 orig = Point2(orig_vals[0], orig_vals[1]);
+ const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
+ const Point2 corr = ComputeCorrection(orig, pred);
+
+ out_corr_vals[0] = corr[0];
+ out_corr_vals[1] = corr[1];
+ }
+
+ private:
+ Point2 ComputeCorrection(Point2 orig, Point2 pred) const {
+ const Point2 t(this->center_value(), this->center_value());
+ orig = orig - t;
+ pred = pred - t;
+
+ if (!this->IsInDiamond(pred[0], pred[1])) {
+ this->InvertDiamond(&orig[0], &orig[1]);
+ this->InvertDiamond(&pred[0], &pred[1]);
+ }
+
+ Point2 corr = orig - pred;
+ corr[0] = this->MakePositive(corr[0]);
+ corr[1] = this->MakePositive(corr[1]);
+ return corr;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h
new file mode 100644
index 0000000..c9dd7d6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h
@@ -0,0 +1,90 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_
+
+#include <cmath>
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/macros.h"
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// Base class containing shared functionality used by both encoding and decoding
+// octahedral normal prediction scheme transforms. See the encoding transform
+// for more details about the method.
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronTransformBase {
+ public:
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT DataType;
+
+ PredictionSchemeNormalOctahedronTransformBase() {}
+ // We expect the mod value to be of the form 2^b-1.
+ explicit PredictionSchemeNormalOctahedronTransformBase(
+ DataType max_quantized_value) {
+ this->set_max_quantized_value(max_quantized_value);
+ }
+
+ static constexpr PredictionSchemeTransformType GetType() {
+ return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON;
+ }
+
+ // We can return true as we keep correction values positive.
+ bool AreCorrectionsPositive() const { return true; }
+
+ inline DataTypeT max_quantized_value() const {
+ return octahedron_tool_box_.max_quantized_value();
+ }
+ inline DataTypeT center_value() const {
+ return octahedron_tool_box_.center_value();
+ }
+ inline int32_t quantization_bits() const {
+ return octahedron_tool_box_.quantization_bits();
+ }
+
+ protected:
+ inline bool set_max_quantized_value(DataTypeT max_quantized_value) {
+ if (max_quantized_value % 2 == 0) {
+ return false;
+ }
+ int q = MostSignificantBit(max_quantized_value) + 1;
+ return octahedron_tool_box_.SetQuantizationBits(q);
+ }
+
+ bool IsInDiamond(DataTypeT s, DataTypeT t) const {
+ return octahedron_tool_box_.IsInDiamond(s, t);
+ }
+ void InvertDiamond(DataTypeT *s, DataTypeT *t) const {
+ return octahedron_tool_box_.InvertDiamond(s, t);
+ }
+
+ int32_t ModMax(int32_t x) const { return octahedron_tool_box_.ModMax(x); }
+
+ // For correction values.
+ int32_t MakePositive(int32_t x) const {
+ return octahedron_tool_box_.MakePositive(x);
+ }
+
+ private:
+ OctahedronToolBox octahedron_tool_box_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc
new file mode 100644
index 0000000..1001b19
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc
@@ -0,0 +1,71 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h"
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
class PredictionSchemeNormalOctahedronTransformTest : public ::testing::Test {
 protected:
  typedef draco::PredictionSchemeNormalOctahedronEncodingTransform<int32_t>
      Transform;
  typedef Transform::Point2 Point2;

  // Checks that |transform| computes the correction (cx, cy) for the original
  // point (ox, oy) and the predicted point (px, py). Inputs are given
  // relative to the center value 7 (the transforms under test use max
  // quantized value 15), and expected corrections are compared modulo 15 so
  // callers can pass small negative deltas.
  void TestComputeCorrection(const Transform &transform, const int32_t &ox,
                             const int32_t &oy, const int32_t &px,
                             const int32_t &py, const int32_t &cx,
                             const int32_t &cy) {
    const int32_t o[2] = {ox + 7, oy + 7};
    const int32_t p[2] = {px + 7, py + 7};
    // Sentinel values that ComputeCorrection must overwrite.
    int32_t corr[2] = {500, 500};
    transform.ComputeCorrection(o, p, corr);
    ASSERT_EQ(corr[0], (cx + 15) % 15);
    ASSERT_EQ(corr[1], (cy + 15) % 15);
  }
};
+
// A freshly constructed encoding transform must report positive corrections.
TEST_F(PredictionSchemeNormalOctahedronTransformTest, Init) {
  const Transform transform(15);
  ASSERT_TRUE(transform.AreCorrectionsPositive());
}
+
// Exercises ComputeCorrection for original/predicted pairs inside the
// octahedron diamond, outside of it (where both points get inverted), and on
// its outer edges.
TEST_F(PredictionSchemeNormalOctahedronTransformTest, ComputeCorrections) {
  const Transform transform(15);
  // checks inside diamond
  TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0);
  TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0);
  TestComputeCorrection(transform, 3, 4, 1, 1, 2, 3);
  TestComputeCorrection(transform, -1, -1, -1, -1, 0, 0);
  TestComputeCorrection(transform, -3, -4, -1, -1, -2, -3);
  // checks outside diamond
  TestComputeCorrection(transform, 4, 4, 4, 4, 0, 0);
  TestComputeCorrection(transform, 5, 6, 4, 4, -2, -1);
  TestComputeCorrection(transform, 3, 2, 4, 4, 2, 1);
  // checks on outer edges
  TestComputeCorrection(transform, 7, 7, 4, 4, -3, -3);
  TestComputeCorrection(transform, 6, 7, 4, 4, -3, -2);
  TestComputeCorrection(transform, -6, 7, 4, 4, -3, -2);
  TestComputeCorrection(transform, 7, 6, 4, 4, -2, -3);
  TestComputeCorrection(transform, 7, -6, 4, 4, -2, -3);
}
+
// Verifies that the accessors report values consistent with a transform
// constructed with max quantized value 15.
TEST_F(PredictionSchemeNormalOctahedronTransformTest, Interface) {
  const Transform transform(15);
  ASSERT_EQ(transform.max_quantized_value(), 15);
  ASSERT_EQ(transform.center_value(), 7);
  ASSERT_EQ(transform.quantization_bits(), 4);
}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h
new file mode 100644
index 0000000..e100c73
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h
@@ -0,0 +1,88 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// PredictionSchemeWrapDecodingTransform unwraps values encoded with the
+// PredictionSchemeWrapEncodingTransform.
+// See prediction_scheme_wrap_transform_base.h for more details about the
+// method.
template <typename DataTypeT, typename CorrTypeT = DataTypeT>
class PredictionSchemeWrapDecodingTransform
    : public PredictionSchemeWrapTransformBase<DataTypeT> {
 public:
  typedef CorrTypeT CorrType;
  PredictionSchemeWrapDecodingTransform() {}

  // Computes the original value from the input predicted value and the decoded
  // corrections. Values out of the bounds of the input values are unwrapped.
  inline void ComputeOriginalValue(const DataTypeT *predicted_vals,
                                   const CorrTypeT *corr_vals,
                                   DataTypeT *out_original_vals) const {
    // For now we assume both |DataTypeT| and |CorrTypeT| are equal.
    static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
                  "Predictions and corrections must have the same type.");

    // The only valid implementation right now is for int32_t.
    static_assert(std::is_same<DataTypeT, int32_t>::value,
                  "Only int32_t is supported for predicted values.");

    // Bring the prediction back into [min_value, max_value] first.
    predicted_vals = this->ClampPredictedValue(predicted_vals);

    // Perform the wrapping using unsigned coordinates to avoid potential signed
    // integer overflows caused by malformed input.
    const uint32_t *const uint_predicted_vals =
        reinterpret_cast<const uint32_t *>(predicted_vals);
    const uint32_t *const uint_corr_vals =
        reinterpret_cast<const uint32_t *>(corr_vals);
    for (int i = 0; i < this->num_components(); ++i) {
      out_original_vals[i] =
          static_cast<DataTypeT>(uint_predicted_vals[i] + uint_corr_vals[i]);
      // Unwrap values that landed outside the original value range.
      if (out_original_vals[i] > this->max_value()) {
        out_original_vals[i] -= this->max_dif();
      } else if (out_original_vals[i] < this->min_value()) {
        out_original_vals[i] += this->max_dif();
      }
    }
  }

  // Decodes the value range (min/max) stored by the encoder's
  // EncodeTransformData() and initializes the derived correction bounds.
  // Returns false on a truncated buffer or an invalid (min > max) range.
  bool DecodeTransformData(DecoderBuffer *buffer) {
    DataTypeT min_value, max_value;
    if (!buffer->Decode(&min_value)) {
      return false;
    }
    if (!buffer->Decode(&max_value)) {
      return false;
    }
    if (min_value > max_value) {
      return false;
    }
    this->set_min_value(min_value);
    this->set_max_value(max_value);
    if (!this->InitCorrectionBounds()) {
      return false;
    }
    return true;
  }
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h
new file mode 100644
index 0000000..1f5e8b1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h
@@ -0,0 +1,81 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// PredictionSchemeWrapEncodingTransform wraps input values using the wrapping
+// scheme described in: prediction_scheme_wrap_transform_base.h .
+template <typename DataTypeT, typename CorrTypeT = DataTypeT>
+class PredictionSchemeWrapEncodingTransform
+ : public PredictionSchemeWrapTransformBase<DataTypeT> {
+ public:
+ typedef CorrTypeT CorrType;
+ PredictionSchemeWrapEncodingTransform() {}
+
+ void Init(const DataTypeT *orig_data, int size, int num_components) {
+ PredictionSchemeWrapTransformBase<DataTypeT>::Init(num_components);
+ // Go over the original values and compute the bounds.
+ if (size == 0) {
+ return;
+ }
+ DataTypeT min_value = orig_data[0];
+ DataTypeT max_value = min_value;
+ for (int i = 1; i < size; ++i) {
+ if (orig_data[i] < min_value) {
+ min_value = orig_data[i];
+ } else if (orig_data[i] > max_value) {
+ max_value = orig_data[i];
+ }
+ }
+ this->set_min_value(min_value);
+ this->set_max_value(max_value);
+ this->InitCorrectionBounds();
+ }
+
+ // Computes the corrections based on the input original value and the
+ // predicted value. Out of bound correction values are wrapped around the max
+ // range of input values.
+ inline void ComputeCorrection(const DataTypeT *original_vals,
+ const DataTypeT *predicted_vals,
+ CorrTypeT *out_corr_vals) const {
+ for (int i = 0; i < this->num_components(); ++i) {
+ predicted_vals = this->ClampPredictedValue(predicted_vals);
+ out_corr_vals[i] = original_vals[i] - predicted_vals[i];
+ // Wrap around if needed.
+ DataTypeT &corr_val = out_corr_vals[i];
+ if (corr_val < this->min_correction()) {
+ corr_val += this->max_dif();
+ } else if (corr_val > this->max_correction()) {
+ corr_val -= this->max_dif();
+ }
+ }
+ }
+
+ bool EncodeTransformData(EncoderBuffer *buffer) {
+ // Store the input value range as it is needed by the decoder.
+ buffer->Encode(this->min_value());
+ buffer->Encode(this->max_value());
+ return true;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h
new file mode 100644
index 0000000..26f61fb
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h
@@ -0,0 +1,120 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_
+
+#include <limits>
+#include <vector>
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// PredictionSchemeWrapTransform uses the min and max bounds of the original
+// data to wrap stored correction values around these bounds centered at 0,
+// i.e., when the range of the original values O is between <MIN, MAX> and
+// N = MAX-MIN, we can then store any correction X = O - P, as:
+// X + N, if X < -N / 2
+// X - N, if X > N / 2
+// X otherwise
+// To unwrap this value, the decoder then simply checks whether the final
+// corrected value F = P + X is out of the bounds of the input values.
+// All out of bounds values are unwrapped using
+// F + N, if F < MIN
+// F - N, if F > MAX
+// This wrapping can reduce the number of unique values, which translates to a
+// better entropy of the stored values and better compression rates.
template <typename DataTypeT>
class PredictionSchemeWrapTransformBase {
 public:
  PredictionSchemeWrapTransformBase()
      : num_components_(0),
        min_value_(0),
        max_value_(0),
        max_dif_(0),
        max_correction_(0),
        min_correction_(0) {}

  static constexpr PredictionSchemeTransformType GetType() {
    return PREDICTION_TRANSFORM_WRAP;
  }

  // Sets the number of components and sizes the scratch buffer used by
  // ClampPredictedValue().
  void Init(int num_components) {
    num_components_ = num_components;
    clamped_value_.resize(num_components);
  }

  // Wrapped corrections are centered at 0 and can be negative.
  bool AreCorrectionsPositive() const { return false; }

  // Clamps every component of |predicted_val| into [min_value_, max_value_].
  // Returns a pointer into an internal scratch buffer, so the result is only
  // valid until the next call on this object.
  inline const DataTypeT *ClampPredictedValue(
      const DataTypeT *predicted_val) const {
    for (int i = 0; i < this->num_components(); ++i) {
      if (predicted_val[i] > max_value_) {
        clamped_value_[i] = max_value_;
      } else if (predicted_val[i] < min_value_) {
        clamped_value_[i] = min_value_;
      } else {
        clamped_value_[i] = predicted_val[i];
      }
    }
    return &clamped_value_[0];
  }

  // TODO(hemmer): Consider refactoring to avoid this dummy.
  // Not meaningful for the wrap transform; present only to satisfy the common
  // transform interface (must not be called).
  int quantization_bits() const {
    DRACO_DCHECK(false);
    return -1;
  }

 protected:
  // Derives max_dif_ and the [min_correction_, max_correction_] interval from
  // the current min/max values. Returns false when the range is invalid
  // (min > max) or too large to be represented in DataTypeT.
  bool InitCorrectionBounds() {
    // Compute the range size in 64 bits to detect overflow of DataTypeT.
    const int64_t dif =
        static_cast<int64_t>(max_value_) - static_cast<int64_t>(min_value_);
    if (dif < 0 || dif >= std::numeric_limits<DataTypeT>::max()) {
      return false;
    }
    max_dif_ = 1 + static_cast<DataTypeT>(dif);
    max_correction_ = max_dif_ / 2;
    min_correction_ = -max_correction_;
    // For an even range size there is one fewer positive correction value
    // than negative ones (e.g. max_dif_ == 4 gives [-2, 1]).
    if ((max_dif_ & 1) == 0) {
      max_correction_ -= 1;
    }
    return true;
  }

  inline int num_components() const { return num_components_; }
  inline DataTypeT min_value() const { return min_value_; }
  inline void set_min_value(const DataTypeT &v) { min_value_ = v; }
  inline DataTypeT max_value() const { return max_value_; }
  inline void set_max_value(const DataTypeT &v) { max_value_ = v; }
  inline DataTypeT max_dif() const { return max_dif_; }
  inline DataTypeT min_correction() const { return min_correction_; }
  inline DataTypeT max_correction() const { return max_correction_; }

 private:
  int num_components_;
  DataTypeT min_value_;
  DataTypeT max_value_;
  // Size of the value range: max_value_ - min_value_ + 1.
  DataTypeT max_dif_;
  DataTypeT max_correction_;
  DataTypeT min_correction_;
  // This is in fact just a tmp variable to avoid reallocation.
  mutable std::vector<DataTypeT> clamped_value_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc
new file mode 100644
index 0000000..b4ba24f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc
@@ -0,0 +1,118 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_attribute_decoder.h"
+
+namespace draco {
+
// Creates a decoder that is not attached to any point cloud decoder or
// attribute yet; Init() or InitializeStandalone() must be called before use.
SequentialAttributeDecoder::SequentialAttributeDecoder()
    : decoder_(nullptr), attribute_(nullptr), attribute_id_(-1) {}
+
// Attaches this sequential decoder to |decoder| and to the attribute with the
// given |attribute_id| within the decoder's point cloud.
bool SequentialAttributeDecoder::Init(PointCloudDecoder *decoder,
                                      int attribute_id) {
  decoder_ = decoder;
  attribute_ = decoder->point_cloud()->attribute(attribute_id);
  attribute_id_ = attribute_id;
  return true;
}
+
// Initializes the decoder for standalone use on |attribute|, without an
// associated PointCloudDecoder (attribute_id_ is set to the invalid id -1).
bool SequentialAttributeDecoder::InitializeStandalone(
    PointAttribute *attribute) {
  attribute_ = attribute;
  attribute_id_ = -1;
  return true;
}
+
+bool SequentialAttributeDecoder::DecodePortableAttribute(
+ const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
+ if (attribute_->num_components() <= 0 ||
+ !attribute_->Reset(point_ids.size())) {
+ return false;
+ }
+ if (!DecodeValues(point_ids, in_buffer)) {
+ return false;
+ }
+ return true;
+}
+
bool SequentialAttributeDecoder::DecodeDataNeededByPortableTransform(
    const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
  // Default implementation does not apply any transform; subclasses with a
  // portable transform override this to decode its parameters.
  return true;
}
+
bool SequentialAttributeDecoder::TransformAttributeToOriginalFormat(
    const std::vector<PointIndex> &point_ids) {
  // Default implementation does not apply any transform; subclasses override
  // this to revert their portable transform.
  return true;
}
+
// Returns the decoded portable attribute, or nullptr when none was set.
const PointAttribute *SequentialAttributeDecoder::GetPortableAttribute() {
  // If needed, copy point to attribute value index mapping from the final
  // attribute to the portable attribute.
  if (!attribute_->is_mapping_identity() && portable_attribute_ &&
      portable_attribute_->is_mapping_identity()) {
    portable_attribute_->SetExplicitMapping(attribute_->indices_map_size());
    for (PointIndex i(0);
         i < static_cast<uint32_t>(attribute_->indices_map_size()); ++i) {
      portable_attribute_->SetPointMapEntry(i, attribute_->mapped_index(i));
    }
  }
  return portable_attribute_.get();
}
+
// Resolves and assigns the parent attributes required by the prediction
// scheme |ps|. Returns false when any requested parent attribute is missing
// or cannot be set on the scheme.
bool SequentialAttributeDecoder::InitPredictionScheme(
    PredictionSchemeInterface *ps) {
  for (int i = 0; i < ps->GetNumParentAttributes(); ++i) {
    const int att_id = decoder_->point_cloud()->GetNamedAttributeId(
        ps->GetParentAttributeType(i));
    if (att_id == -1) {
      return false;  // Requested attribute does not exist.
    }
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
    // Bitstreams older than 2.0 used the final attribute as the parent;
    // newer bitstreams use the portable (losslessly decoded) attribute.
    if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
      if (!ps->SetParentAttribute(decoder_->point_cloud()->attribute(att_id))) {
        return false;
      }
    } else
#endif
    {
      const PointAttribute *const pa = decoder_->GetPortableAttribute(att_id);
      if (pa == nullptr || !ps->SetParentAttribute(pa)) {
        return false;
      }
    }
  }
  return true;
}
+
+bool SequentialAttributeDecoder::DecodeValues(
+ const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
+ const int32_t num_values = static_cast<uint32_t>(point_ids.size());
+ const int entry_size = static_cast<int>(attribute_->byte_stride());
+ std::unique_ptr<uint8_t[]> value_data_ptr(new uint8_t[entry_size]);
+ uint8_t *const value_data = value_data_ptr.get();
+ int out_byte_pos = 0;
+ // Decode raw attribute values in their original format.
+ for (int i = 0; i < num_values; ++i) {
+ if (!in_buffer->Decode(value_data, entry_size)) {
+ return false;
+ }
+ attribute_->buffer()->Write(out_byte_pos, value_data, entry_size);
+ out_byte_pos += entry_size;
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h
new file mode 100644
index 0000000..d481194
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h
@@ -0,0 +1,86 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// A base class for decoding attribute values encoded by the
+// SequentialAttributeEncoder.
class SequentialAttributeDecoder {
 public:
  SequentialAttributeDecoder();
  virtual ~SequentialAttributeDecoder() = default;

  // Attaches the decoder to |decoder| and the attribute with |attribute_id|.
  virtual bool Init(PointCloudDecoder *decoder, int attribute_id);

  // Initialization for a specific attribute. This can be used mostly for
  // standalone decoding of an attribute without an PointCloudDecoder.
  virtual bool InitializeStandalone(PointAttribute *attribute);

  // Performs lossless decoding of the portable attribute data.
  virtual bool DecodePortableAttribute(const std::vector<PointIndex> &point_ids,
                                       DecoderBuffer *in_buffer);

  // Decodes any data needed to revert portable transform of the decoded
  // attribute.
  virtual bool DecodeDataNeededByPortableTransform(
      const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer);

  // Reverts transformation performed by encoder in
  // SequentialAttributeEncoder::TransformAttributeToPortableFormat() method.
  virtual bool TransformAttributeToOriginalFormat(
      const std::vector<PointIndex> &point_ids);

  // Returns the decoded portable attribute (or nullptr when none was set),
  // syncing its point mapping with the final attribute if necessary.
  const PointAttribute *GetPortableAttribute();

  // Accessors for the attached attribute and decoder.
  const PointAttribute *attribute() const { return attribute_; }
  PointAttribute *attribute() { return attribute_; }
  int attribute_id() const { return attribute_id_; }
  PointCloudDecoder *decoder() const { return decoder_; }

 protected:
  // Should be used to initialize newly created prediction scheme.
  // Returns false when the initialization failed (in which case the scheme
  // cannot be used).
  virtual bool InitPredictionScheme(PredictionSchemeInterface *ps);

  // The actual implementation of the attribute decoding. Should be overridden
  // for specialized decoders.
  virtual bool DecodeValues(const std::vector<PointIndex> &point_ids,
                            DecoderBuffer *in_buffer);

  // Takes ownership of the decoded portable attribute.
  void SetPortableAttribute(std::unique_ptr<PointAttribute> att) {
    portable_attribute_ = std::move(att);
  }

  PointAttribute *portable_attribute() { return portable_attribute_.get(); }

 private:
  PointCloudDecoder *decoder_;
  PointAttribute *attribute_;
  // Id of the attribute within the point cloud, or -1 in standalone mode.
  int attribute_id_;

  // Storage for decoded portable attribute (after lossless decoding).
  std::unique_ptr<PointAttribute> portable_attribute_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc
new file mode 100644
index 0000000..0e5e26b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc
@@ -0,0 +1,149 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_attribute_decoders_controller.h"
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+#include "draco/compression/attributes/sequential_normal_attribute_decoder.h"
+#endif
+#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h"
+#include "draco/compression/config/compression_shared.h"
+
+namespace draco {
+
// Takes ownership of |sequencer|, which defines the order in which attribute
// values are decoded.
SequentialAttributeDecodersController::SequentialAttributeDecodersController(
    std::unique_ptr<PointsSequencer> sequencer)
    : sequencer_(std::move(sequencer)) {}
+
// Decodes the shared attribute metadata (via the base class) followed by one
// decoder-type id per attribute, creating and initializing a sequential
// decoder for each.
bool SequentialAttributeDecodersController::DecodeAttributesDecoderData(
    DecoderBuffer *buffer) {
  if (!AttributesDecoder::DecodeAttributesDecoderData(buffer)) {
    return false;
  }
  // Decode unique ids of all sequential encoders and create them.
  const int32_t num_attributes = GetNumAttributes();
  sequential_decoders_.resize(num_attributes);
  for (int i = 0; i < num_attributes; ++i) {
    uint8_t decoder_type;
    if (!buffer->Decode(&decoder_type)) {
      return false;
    }
    // Create the decoder from the id.
    sequential_decoders_[i] = CreateSequentialDecoder(decoder_type);
    if (!sequential_decoders_[i]) {
      return false;
    }
    if (!sequential_decoders_[i]->Init(GetDecoder(), GetAttributeId(i))) {
      return false;
    }
  }
  return true;
}
+
// Generates the point decoding order from the sequencer, updates the
// point-to-value mapping of every attribute accordingly, then delegates the
// actual value decoding to the base class.
bool SequentialAttributeDecodersController::DecodeAttributes(
    DecoderBuffer *buffer) {
  if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) {
    return false;
  }
  // Initialize point to attribute value mapping for all decoded attributes.
  const int32_t num_attributes = GetNumAttributes();
  for (int i = 0; i < num_attributes; ++i) {
    PointAttribute *const pa =
        GetDecoder()->point_cloud()->attribute(GetAttributeId(i));
    if (!sequencer_->UpdatePointToAttributeIndexMapping(pa)) {
      return false;
    }
  }
  return AttributesDecoder::DecodeAttributes(buffer);
}
+
+bool SequentialAttributeDecodersController::DecodePortableAttributes(
+ DecoderBuffer *in_buffer) {
+ const int32_t num_attributes = GetNumAttributes();
+ for (int i = 0; i < num_attributes; ++i) {
+ if (!sequential_decoders_[i]->DecodePortableAttribute(point_ids_,
+ in_buffer)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SequentialAttributeDecodersController::
+ DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) {
+ const int32_t num_attributes = GetNumAttributes();
+ for (int i = 0; i < num_attributes; ++i) {
+ if (!sequential_decoders_[i]->DecodeDataNeededByPortableTransform(
+ point_ids_, in_buffer)) {
+ return false;
+ }
+ }
+ return true;
+}
+
// Converts each decoded portable attribute back to its original format,
// unless the decoder options request skipping the attribute transform for
// that attribute type.
bool SequentialAttributeDecodersController::
    TransformAttributesToOriginalFormat() {
  const int32_t num_attributes = GetNumAttributes();
  for (int i = 0; i < num_attributes; ++i) {
    // Check whether the attribute transform should be skipped.
    if (GetDecoder()->options()) {
      const PointAttribute *const attribute =
          sequential_decoders_[i]->attribute();
      const PointAttribute *const portable_attribute =
          sequential_decoders_[i]->GetPortableAttribute();
      if (portable_attribute &&
          GetDecoder()->options()->GetAttributeBool(
              attribute->attribute_type(), "skip_attribute_transform", false)) {
        // Attribute transform should not be performed. In this case, we replace
        // the output geometry attribute with the portable attribute.
        // TODO(ostava): We can potentially avoid this copy by introducing a new
        // mechanism that would allow to use the final attributes as portable
        // attributes for predictors that may need them.
        sequential_decoders_[i]->attribute()->CopyFrom(*portable_attribute);
        continue;
      }
    }
    if (!sequential_decoders_[i]->TransformAttributeToOriginalFormat(
            point_ids_)) {
      return false;
    }
  }
  return true;
}
+
+std::unique_ptr<SequentialAttributeDecoder>
+SequentialAttributeDecodersController::CreateSequentialDecoder(
+ uint8_t decoder_type) {
+ switch (decoder_type) {
+ case SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC:
+ return std::unique_ptr<SequentialAttributeDecoder>(
+ new SequentialAttributeDecoder());
+ case SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER:
+ return std::unique_ptr<SequentialAttributeDecoder>(
+ new SequentialIntegerAttributeDecoder());
+ case SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION:
+ return std::unique_ptr<SequentialAttributeDecoder>(
+ new SequentialQuantizationAttributeDecoder());
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+ case SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS:
+ return std::unique_ptr<SequentialNormalAttributeDecoder>(
+ new SequentialNormalAttributeDecoder());
+#endif
+ default:
+ break;
+ }
+ // Unknown or unsupported decoder type.
+ return nullptr;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h
new file mode 100644
index 0000000..abc1f36
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h
@@ -0,0 +1,61 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_
+
+#include "draco/compression/attributes/attributes_decoder.h"
+#include "draco/compression/attributes/points_sequencer.h"
+#include "draco/compression/attributes/sequential_attribute_decoder.h"
+
+namespace draco {
+
+// A basic implementation of an attribute decoder that decodes data encoded by
+// the SequentialAttributeEncodersController class. The
+// SequentialAttributeDecodersController creates a single
+// AttributeIndexedValuesDecoder for each of the decoded attribute, where the
+// type of the values decoder is determined by the unique identifier that was
+// encoded by the encoder.
class SequentialAttributeDecodersController : public AttributesDecoder {
 public:
  // Takes ownership of |sequencer|, which defines the order in which point
  // values of all managed attributes are decoded.
  explicit SequentialAttributeDecodersController(
      std::unique_ptr<PointsSequencer> sequencer);

  bool DecodeAttributesDecoderData(DecoderBuffer *buffer) override;
  bool DecodeAttributes(DecoderBuffer *buffer) override;
  // Returns the portable (losslessly decoded) variant of the attribute, or
  // nullptr when |point_attribute_id| is not handled by this controller.
  const PointAttribute *GetPortableAttribute(
      int32_t point_attribute_id) override {
    const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id);
    if (loc_id < 0) {
      return nullptr;
    }
    return sequential_decoders_[loc_id]->GetPortableAttribute();
  }

 protected:
  bool DecodePortableAttributes(DecoderBuffer *in_buffer) override;
  bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override;
  bool TransformAttributesToOriginalFormat() override;
  // Factory creating a sequential decoder for a given encoder type id;
  // derived classes may override it to support additional decoder types.
  virtual std::unique_ptr<SequentialAttributeDecoder> CreateSequentialDecoder(
      uint8_t decoder_type);

 private:
  // One sequential decoder per attribute managed by this controller.
  std::vector<std::unique_ptr<SequentialAttributeDecoder>> sequential_decoders_;
  // Point decoding order generated by |sequencer_|.
  std::vector<PointIndex> point_ids_;
  std::unique_ptr<PointsSequencer> sequencer_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc
new file mode 100644
index 0000000..6bde3ee
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc
@@ -0,0 +1,108 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_attribute_encoder.h"
+
+namespace draco {
+
// Creates an encoder that is not yet bound to any point cloud encoder or
// attribute; Init() or InitializeStandalone() must be called before use.
SequentialAttributeEncoder::SequentialAttributeEncoder()
    : encoder_(nullptr),
      attribute_(nullptr),
      attribute_id_(-1),
      is_parent_encoder_(false) {}
+
// Binds this encoder to the parent |encoder| and to the attribute with
// |attribute_id| within the parent's point cloud.
bool SequentialAttributeEncoder::Init(PointCloudEncoder *encoder,
                                      int attribute_id) {
  encoder_ = encoder;
  attribute_ = encoder_->point_cloud()->attribute(attribute_id);
  attribute_id_ = attribute_id;
  return true;
}
+
// Initializes the encoder for standalone encoding of |attribute|, without a
// parent PointCloudEncoder (attribute_id_ stays -1 and encoder_ stays null).
bool SequentialAttributeEncoder::InitializeStandalone(
    PointAttribute *attribute) {
  attribute_ = attribute;
  attribute_id_ = -1;
  return true;
}
+
// Base-class hook for (possibly lossy) conversion into a portable format.
bool SequentialAttributeEncoder::TransformAttributeToPortableFormat(
    const std::vector<PointIndex> &point_ids) {
  // Default implementation doesn't transform the input data.
  return true;
}
+
+bool SequentialAttributeEncoder::EncodePortableAttribute(
+ const std::vector<PointIndex> &point_ids, EncoderBuffer *out_buffer) {
+ // Lossless encoding of the input values.
+ if (!EncodeValues(point_ids, out_buffer)) {
+ return false;
+ }
+ return true;
+}
+
// Base-class hook for encoding transform metadata (e.g. quantization ranges).
bool SequentialAttributeEncoder::EncodeDataNeededByPortableTransform(
    EncoderBuffer *out_buffer) {
  // Default implementation doesn't transform the input data.
  return true;
}
+
+bool SequentialAttributeEncoder::EncodeValues(
+ const std::vector<PointIndex> &point_ids, EncoderBuffer *out_buffer) {
+ const int entry_size = static_cast<int>(attribute_->byte_stride());
+ const std::unique_ptr<uint8_t[]> value_data_ptr(new uint8_t[entry_size]);
+ uint8_t *const value_data = value_data_ptr.get();
+ // Encode all attribute values in their native raw format.
+ for (uint32_t i = 0; i < point_ids.size(); ++i) {
+ const AttributeValueIndex entry_id = attribute_->mapped_index(point_ids[i]);
+ attribute_->GetValue(entry_id, value_data);
+ out_buffer->Encode(value_data, entry_size);
+ }
+ return true;
+}
+
// Flags this encoder as a parent (dependency) of another attribute encoder.
void SequentialAttributeEncoder::MarkParentAttribute() {
  is_parent_encoder_ = true;
}
+
// Registers all parent attributes required by prediction scheme |ps| and
// marks them as parents on the owning encoder. Returns false when any
// requested parent attribute is missing from the point cloud.
bool SequentialAttributeEncoder::InitPredictionScheme(
    PredictionSchemeInterface *ps) {
  for (int i = 0; i < ps->GetNumParentAttributes(); ++i) {
    const int att_id = encoder_->point_cloud()->GetNamedAttributeId(
        ps->GetParentAttributeType(i));
    if (att_id == -1) {
      return false;  // Requested attribute does not exist.
    }
    parent_attributes_.push_back(att_id);
    encoder_->MarkParentAttribute(att_id);
  }
  return true;
}
+
// Feeds the portable variants of all parent attributes into prediction
// scheme |ps|. Must be called after the parents have been encoded (so their
// portable attributes exist); returns false if a parent is missing or the
// scheme rejects it.
bool SequentialAttributeEncoder::SetPredictionSchemeParentAttributes(
    PredictionSchemeInterface *ps) {
  for (int i = 0; i < ps->GetNumParentAttributes(); ++i) {
    const int att_id = encoder_->point_cloud()->GetNamedAttributeId(
        ps->GetParentAttributeType(i));
    if (att_id == -1) {
      return false;  // Requested attribute does not exist.
    }
    if (!ps->SetParentAttribute(encoder_->GetPortableAttribute(att_id))) {
      return false;
    }
  }
  return true;
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h
new file mode 100644
index 0000000..00f62db
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h
@@ -0,0 +1,134 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+
+namespace draco {
+
+// A base class for encoding attribute values of a single attribute using a
+// given sequence of point ids. The default implementation encodes all attribute
+// values directly to the buffer but derived classes can perform any custom
+// encoding (such as quantization) by overriding the EncodeValues() method.
class SequentialAttributeEncoder {
 public:
  SequentialAttributeEncoder();
  virtual ~SequentialAttributeEncoder() = default;

  // Method that can be used for custom initialization of an attribute encoder,
  // such as creation of prediction schemes and initialization of attribute
  // encoder dependencies.
  // |encoder| is the parent PointCloudEncoder,
  // |attribute_id| is the id of the attribute that is being encoded by this
  // encoder.
  // This method is automatically called by the PointCloudEncoder after all
  // attribute encoders are created and it should not be called explicitly from
  // other places.
  virtual bool Init(PointCloudEncoder *encoder, int attribute_id);

  // Initialization for a specific attribute. This can be used mostly for
  // standalone encoding of an attribute without an PointCloudEncoder.
  virtual bool InitializeStandalone(PointAttribute *attribute);

  // Transforms attribute data into format that is going to be encoded
  // losslessly. The transform itself can be lossy.
  virtual bool TransformAttributeToPortableFormat(
      const std::vector<PointIndex> &point_ids);

  // Performs lossless encoding of the transformed attribute data.
  virtual bool EncodePortableAttribute(const std::vector<PointIndex> &point_ids,
                                       EncoderBuffer *out_buffer);

  // Encodes any data related to the portable attribute transform.
  virtual bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer);

  // Returns true when the encoder may lose information (e.g. quantization).
  virtual bool IsLossyEncoder() const { return false; }

  // Number of attributes that must be encoded before this one.
  int NumParentAttributes() const {
    return static_cast<int>(parent_attributes_.size());
  }
  int GetParentAttributeId(int i) const { return parent_attributes_[i]; }

  // Returns the portable (losslessly encoded) attribute; falls back to the
  // source attribute when no separate portable attribute was created.
  const PointAttribute *GetPortableAttribute() const {
    if (portable_attribute_ != nullptr) {
      return portable_attribute_.get();
    }
    return attribute();
  }

  // Called when this attribute encoder becomes a parent encoder of another
  // encoder.
  void MarkParentAttribute();

  // Identifier written to the bitstream so the decoder can pick the matching
  // sequential decoder type.
  virtual uint8_t GetUniqueId() const {
    return SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC;
  }

  // Accessors for the encoded attribute and the owning encoder.
  const PointAttribute *attribute() const { return attribute_; }
  int attribute_id() const { return attribute_id_; }
  PointCloudEncoder *encoder() const { return encoder_; }

 protected:
  // Should be used to initialize newly created prediction scheme.
  // Returns false when the initialization failed (in which case the scheme
  // cannot be used).
  virtual bool InitPredictionScheme(PredictionSchemeInterface *ps);

  // Sets parent attributes for a given prediction scheme. Must be called
  // after all prediction schemes are initialized, but before the prediction
  // scheme is used.
  virtual bool SetPredictionSchemeParentAttributes(
      PredictionSchemeInterface *ps);

  // Encodes all attribute values in the specified order. Should be overridden
  // for specialized encoders.
  virtual bool EncodeValues(const std::vector<PointIndex> &point_ids,
                            EncoderBuffer *out_buffer);

  bool is_parent_encoder() const { return is_parent_encoder_; }

  // Transfers ownership of the portable attribute to this encoder.
  void SetPortableAttribute(std::unique_ptr<PointAttribute> att) {
    portable_attribute_ = std::move(att);
  }

  // Returns a mutable attribute that should be filled by derived encoders with
  // the transformed version of the attribute data. To get a public const
  // version, use the GetPortableAttribute() method.
  PointAttribute *portable_attribute() { return portable_attribute_.get(); }

 private:
  PointCloudEncoder *encoder_;
  const PointAttribute *attribute_;
  // Attribute id within the point cloud, or -1 in standalone mode.
  int attribute_id_;

  // List of attribute encoders that need to be encoded before this attribute.
  // E.g. The parent attributes may be used to predict values used by this
  // attribute encoder.
  std::vector<int32_t> parent_attributes_;

  bool is_parent_encoder_;

  // Attribute that stores transformed data from the source attribute after it
  // is processed through the ApplyTransform() method. Attribute data stored
  // within this attribute is guaranteed to be encoded losslessly and it can be
  // safely used for prediction of other attributes.
  std::unique_ptr<PointAttribute> portable_attribute_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc
new file mode 100644
index 0000000..7d5d1ee
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc
@@ -0,0 +1,159 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_attribute_encoders_controller.h"
+#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
+#include "draco/compression/attributes/sequential_normal_attribute_encoder.h"
+#endif
+#include "draco/compression/attributes/sequential_quantization_attribute_encoder.h"
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+
+namespace draco {
+
// Creates a controller that encodes attribute values in the point order
// produced by |sequencer|.
SequentialAttributeEncodersController::SequentialAttributeEncodersController(
    std::unique_ptr<PointsSequencer> sequencer)
    : sequencer_(std::move(sequencer)) {}

// Same as above, but the controller initially manages the single attribute
// identified by |point_attrib_id|.
SequentialAttributeEncodersController::SequentialAttributeEncodersController(
    std::unique_ptr<PointsSequencer> sequencer, int point_attrib_id)
    : AttributesEncoder(point_attrib_id), sequencer_(std::move(sequencer)) {}
+
// Initializes the base class, creates one sequential encoder per managed
// attribute, and initializes each of them against the parent |encoder|.
bool SequentialAttributeEncodersController::Init(PointCloudEncoder *encoder,
                                                 const PointCloud *pc) {
  if (!AttributesEncoder::Init(encoder, pc)) {
    return false;
  }
  if (!CreateSequentialEncoders()) {
    return false;
  }
  // Initialize all value encoders.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int32_t att_id = GetAttributeId(i);
    if (!sequential_encoders_[i]->Init(encoder, att_id)) {
      return false;
    }
  }
  return true;
}
+
+bool SequentialAttributeEncodersController::EncodeAttributesEncoderData(
+ EncoderBuffer *out_buffer) {
+ if (!AttributesEncoder::EncodeAttributesEncoderData(out_buffer)) {
+ return false;
+ }
+ // Encode a unique id of every sequential encoder.
+ for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) {
+ out_buffer->Encode(sequential_encoders_[i]->GetUniqueId());
+ }
+ return true;
+}
+
// Generates the point encoding order via the sequencer and delegates the
// actual attribute encoding to the base class.
bool SequentialAttributeEncodersController::EncodeAttributes(
    EncoderBuffer *buffer) {
  if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) {
    return false;
  }
  return AttributesEncoder::EncodeAttributes(buffer);
}
+
+bool SequentialAttributeEncodersController::
+ TransformAttributesToPortableFormat() {
+ for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) {
+ if (!sequential_encoders_[i]->TransformAttributeToPortableFormat(
+ point_ids_)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SequentialAttributeEncodersController::EncodePortableAttributes(
+ EncoderBuffer *out_buffer) {
+ for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) {
+ if (!sequential_encoders_[i]->EncodePortableAttribute(point_ids_,
+ out_buffer)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SequentialAttributeEncodersController::
+ EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) {
+ for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) {
+ if (!sequential_encoders_[i]->EncodeDataNeededByPortableTransform(
+ out_buffer)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SequentialAttributeEncodersController::CreateSequentialEncoders() {
+ sequential_encoders_.resize(num_attributes());
+ for (uint32_t i = 0; i < num_attributes(); ++i) {
+ sequential_encoders_[i] = CreateSequentialEncoder(i);
+ if (sequential_encoders_[i] == nullptr) {
+ return false;
+ }
+ if (i < sequential_encoder_marked_as_parent_.size()) {
+ if (sequential_encoder_marked_as_parent_[i]) {
+ sequential_encoders_[i]->MarkParentAttribute();
+ }
+ }
+ }
+ return true;
+}
+
// Picks a sequential encoder implementation for attribute |i| based on its
// data type and the encoder options. Note the braces below are interleaved
// with #ifdef blocks, so the if/else structure depends on whether
// DRACO_NORMAL_ENCODING_SUPPORTED is defined.
std::unique_ptr<SequentialAttributeEncoder>
SequentialAttributeEncodersController::CreateSequentialEncoder(int i) {
  const int32_t att_id = GetAttributeId(i);
  const PointAttribute *const att = encoder()->point_cloud()->attribute(att_id);

  switch (att->data_type()) {
    case DT_UINT8:
    case DT_INT8:
    case DT_UINT16:
    case DT_INT16:
    case DT_UINT32:
    case DT_INT32:
      // All integer types share the integer encoder.
      return std::unique_ptr<SequentialAttributeEncoder>(
          new SequentialIntegerAttributeEncoder());
    case DT_FLOAT32:
      // Floats are only handled specially when quantization is requested;
      // otherwise they fall through to the generic (raw) encoder below.
      if (encoder()->options()->GetAttributeInt(att_id, "quantization_bits",
                                                -1) > 0) {
#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
        if (att->attribute_type() == GeometryAttribute::NORMAL) {
          // We currently only support normals with float coordinates
          // and must be quantized.
          return std::unique_ptr<SequentialAttributeEncoder>(
              new SequentialNormalAttributeEncoder());
        } else {
#endif
          return std::unique_ptr<SequentialAttributeEncoder>(
              new SequentialQuantizationAttributeEncoder());
#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
        }
#endif
      }
      break;
    default:
      break;
  }
  // Return the default attribute encoder.
  return std::unique_ptr<SequentialAttributeEncoder>(
      new SequentialAttributeEncoder());
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h
new file mode 100644
index 0000000..13c2704
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h
@@ -0,0 +1,115 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_
+
+#include "draco/compression/attributes/attributes_encoder.h"
+#include "draco/compression/attributes/points_sequencer.h"
+#include "draco/compression/attributes/sequential_attribute_encoder.h"
+
+namespace draco {
+
+// A basic implementation of an attribute encoder that can be used to encode
+// an arbitrary set of attributes. The encoder creates a sequential attribute
+// encoder for each encoded attribute (see sequential_attribute_encoder.h) and
+// then it encodes all attribute values in an order defined by a point sequence
+// generated in the GeneratePointSequence() method. The default implementation
+// generates a linear sequence of all points, but derived classes can generate
+// any custom sequence.
class SequentialAttributeEncodersController : public AttributesEncoder {
 public:
  explicit SequentialAttributeEncodersController(
      std::unique_ptr<PointsSequencer> sequencer);
  SequentialAttributeEncodersController(
      std::unique_ptr<PointsSequencer> sequencer, int point_attrib_id);

  bool Init(PointCloudEncoder *encoder, const PointCloud *pc) override;
  bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer) override;
  bool EncodeAttributes(EncoderBuffer *buffer) override;
  uint8_t GetUniqueId() const override { return BASIC_ATTRIBUTE_ENCODER; }

  // Number of attributes that must be encoded before |point_attribute_id|
  // (0 when the attribute is not managed by this controller).
  int NumParentAttributes(int32_t point_attribute_id) const override {
    const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id);
    if (loc_id < 0) {
      return 0;
    }
    return sequential_encoders_[loc_id]->NumParentAttributes();
  }

  int GetParentAttributeId(int32_t point_attribute_id,
                           int32_t parent_i) const override {
    const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id);
    if (loc_id < 0) {
      return -1;
    }
    return sequential_encoders_[loc_id]->GetParentAttributeId(parent_i);
  }

  // Records that |point_attribute_id| is a parent of another attribute. The
  // mark is remembered in |sequential_encoder_marked_as_parent_| so it can be
  // applied later if the sequential encoders have not been created yet.
  bool MarkParentAttribute(int32_t point_attribute_id) override {
    const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id);
    if (loc_id < 0) {
      return false;
    }
    // Mark the attribute encoder as parent (even when if it is not created
    // yet).
    if (sequential_encoder_marked_as_parent_.size() <= loc_id) {
      sequential_encoder_marked_as_parent_.resize(loc_id + 1, false);
    }
    sequential_encoder_marked_as_parent_[loc_id] = true;

    if (sequential_encoders_.size() <= loc_id) {
      return true;  // Sequential encoders not generated yet.
    }
    sequential_encoders_[loc_id]->MarkParentAttribute();
    return true;
  }

  // Returns the portable (losslessly encoded) variant of the attribute, or
  // nullptr when the attribute is not managed by this controller.
  const PointAttribute *GetPortableAttribute(
      int32_t point_attribute_id) override {
    const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id);
    if (loc_id < 0) {
      return nullptr;
    }
    return sequential_encoders_[loc_id]->GetPortableAttribute();
  }

 protected:
  bool TransformAttributesToPortableFormat() override;
  bool EncodePortableAttributes(EncoderBuffer *out_buffer) override;
  bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override;

  // Creates all sequential encoders (one for each attribute associated with the
  // encoder).
  virtual bool CreateSequentialEncoders();

  // Create a sequential encoder for a given attribute based on the attribute
  // type
  // and the provided encoder options.
  virtual std::unique_ptr<SequentialAttributeEncoder> CreateSequentialEncoder(
      int i);

 private:
  std::vector<std::unique_ptr<SequentialAttributeEncoder>> sequential_encoders_;

  // Flag for each sequential attribute encoder indicating whether it was marked
  // as parent attribute or not.
  std::vector<bool> sequential_encoder_marked_as_parent_;
  // Point encoding order generated by |sequencer_|.
  std::vector<PointIndex> point_ids_;
  std::unique_ptr<PointsSequencer> sequencer_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc
new file mode 100644
index 0000000..83f4212
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc
@@ -0,0 +1,240 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_integer_attribute_decoder.h"
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h"
+#include "draco/compression/entropy/symbol_decoding.h"
+
+namespace draco {
+
+SequentialIntegerAttributeDecoder::SequentialIntegerAttributeDecoder() {}
+
+bool SequentialIntegerAttributeDecoder::Init(PointCloudDecoder *decoder,
+ int attribute_id) {
+ if (!SequentialAttributeDecoder::Init(decoder, attribute_id)) {
+ return false;
+ }
+ return true;
+}
+
// Converts the decoded portable (int32) values back into the attribute's
// native integer type. For pre-2.0 bitstreams the conversion already happened
// inside DecodeValues(), so this is a no-op there.
bool SequentialIntegerAttributeDecoder::TransformAttributeToOriginalFormat(
    const std::vector<PointIndex> &point_ids) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  if (decoder() &&
      decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
    return true;  // Don't revert the transform here for older files.
  }
#endif
  return StoreValues(static_cast<uint32_t>(point_ids.size()));
}
+
// Decodes the prediction scheme configuration from the bitstream, then the
// integer values themselves. For pre-2.0 bitstreams the values are also
// converted to the attribute's native format right away.
bool SequentialIntegerAttributeDecoder::DecodeValues(
    const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
  // Decode prediction scheme.
  int8_t prediction_scheme_method;
  if (!in_buffer->Decode(&prediction_scheme_method)) {
    return false;
  }
  if (prediction_scheme_method != PREDICTION_NONE) {
    int8_t prediction_transform_type;
    if (!in_buffer->Decode(&prediction_transform_type)) {
      return false;
    }
    // Check that decoded prediction scheme transform type is valid.
    if (prediction_transform_type < PREDICTION_TRANSFORM_NONE ||
        prediction_transform_type >= NUM_PREDICTION_SCHEME_TRANSFORM_TYPES) {
      return false;
    }
    prediction_scheme_ = CreateIntPredictionScheme(
        static_cast<PredictionSchemeMethod>(prediction_scheme_method),
        static_cast<PredictionSchemeTransformType>(prediction_transform_type));
  }

  // Note: prediction_scheme_ may legitimately be null here (PREDICTION_NONE
  // or an unsupported scheme/transform combination).
  if (prediction_scheme_) {
    if (!InitPredictionScheme(prediction_scheme_.get())) {
      return false;
    }
  }

  if (!DecodeIntegerValues(point_ids, in_buffer)) {
    return false;
  }

#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  const int32_t num_values = static_cast<uint32_t>(point_ids.size());
  if (decoder() &&
      decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
    // For older files, revert the transform right after we decode the data.
    if (!StoreValues(num_values)) {
      return false;
    }
  }
#endif
  return true;
}
+
// Creates the integer prediction scheme for the given method/transform pair.
// Returns nullptr for any transform other than wrap, which is the only one
// supported by this decoder.
std::unique_ptr<PredictionSchemeTypedDecoderInterface<int32_t>>
SequentialIntegerAttributeDecoder::CreateIntPredictionScheme(
    PredictionSchemeMethod method,
    PredictionSchemeTransformType transform_type) {
  if (transform_type != PREDICTION_TRANSFORM_WRAP) {
    return nullptr;  // For now we support only wrap transform.
  }
  return CreatePredictionSchemeForDecoder<
      int32_t, PredictionSchemeWrapDecodingTransform<int32_t>>(
      method, attribute_id(), decoder());
}
+
// Decodes the raw integer values into the portable attribute, either from the
// entropy-coded symbol stream or as raw fixed-width integers, then reverts
// the sign conversion and the prediction scheme.
bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
    const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
  const int num_components = GetNumValueComponents();
  if (num_components <= 0) {
    return false;
  }
  const size_t num_entries = point_ids.size();
  const size_t num_values = num_entries * num_components;
  PreparePortableAttribute(static_cast<int>(num_entries), num_components);
  int32_t *const portable_attribute_data = GetPortableAttributeData();
  if (portable_attribute_data == nullptr) {
    return false;
  }
  // One-byte flag selecting between entropy-coded and raw storage.
  uint8_t compressed;
  if (!in_buffer->Decode(&compressed)) {
    return false;
  }
  if (compressed > 0) {
    // Decode compressed values.
    if (!DecodeSymbols(static_cast<uint32_t>(num_values), num_components,
                       in_buffer,
                       reinterpret_cast<uint32_t *>(portable_attribute_data))) {
      return false;
    }
  } else {
    // Decode the integer data directly.
    // Get the number of bytes for a given entry.
    uint8_t num_bytes;
    if (!in_buffer->Decode(&num_bytes)) {
      return false;
    }
    if (num_bytes == DataTypeLength(DT_INT32)) {
      // Full-width values: decode the whole buffer in one call after a
      // defensive size check.
      if (portable_attribute()->buffer()->data_size() <
          sizeof(int32_t) * num_values) {
        return false;
      }
      if (!in_buffer->Decode(portable_attribute_data,
                             sizeof(int32_t) * num_values)) {
        return false;
      }
    } else {
      // Narrower values: decode |num_bytes| bytes into each int32 slot.
      // NOTE(review): this appears to assume little-endian layout and that
      // the remaining bytes of each slot are zero — confirm against the
      // portable attribute's Reset() semantics.
      if (portable_attribute()->buffer()->data_size() <
          num_bytes * num_values) {
        return false;
      }
      if (in_buffer->remaining_size() <
          static_cast<int64_t>(num_bytes) * static_cast<int64_t>(num_values)) {
        return false;
      }
      for (size_t i = 0; i < num_values; ++i) {
        if (!in_buffer->Decode(portable_attribute_data + i, num_bytes))
          return false;
      }
    }
  }

  if (num_values > 0 && (prediction_scheme_ == nullptr ||
                         !prediction_scheme_->AreCorrectionsPositive())) {
    // Convert the values back to the original signed format.
    ConvertSymbolsToSignedInts(
        reinterpret_cast<const uint32_t *>(portable_attribute_data),
        static_cast<int>(num_values), portable_attribute_data);
  }

  // If the data was encoded with a prediction scheme, we must revert it.
  if (prediction_scheme_) {
    if (!prediction_scheme_->DecodePredictionData(in_buffer)) {
      return false;
    }

    if (num_values > 0) {
      if (!prediction_scheme_->ComputeOriginalValues(
              portable_attribute_data, portable_attribute_data,
              static_cast<int>(num_values), num_components, point_ids.data())) {
        return false;
      }
    }
  }
  return true;
}
+
// Dispatches to the typed store routine matching the attribute's native
// integer data type. Returns false for non-integer types.
bool SequentialIntegerAttributeDecoder::StoreValues(uint32_t num_values) {
  switch (attribute()->data_type()) {
    case DT_UINT8:
      StoreTypedValues<uint8_t>(num_values);
      break;
    case DT_INT8:
      StoreTypedValues<int8_t>(num_values);
      break;
    case DT_UINT16:
      StoreTypedValues<uint16_t>(num_values);
      break;
    case DT_INT16:
      StoreTypedValues<int16_t>(num_values);
      break;
    case DT_UINT32:
      StoreTypedValues<uint32_t>(num_values);
      break;
    case DT_INT32:
      StoreTypedValues<int32_t>(num_values);
      break;
    default:
      return false;
  }
  return true;
}
+
+template <typename AttributeTypeT>
+void SequentialIntegerAttributeDecoder::StoreTypedValues(uint32_t num_values) {
+ const int num_components = attribute()->num_components();
+ const int entry_size = sizeof(AttributeTypeT) * num_components;
+ const std::unique_ptr<AttributeTypeT[]> att_val(
+ new AttributeTypeT[num_components]);
+ const int32_t *const portable_attribute_data = GetPortableAttributeData();
+ int val_id = 0;
+ int out_byte_pos = 0;
+ for (uint32_t i = 0; i < num_values; ++i) {
+ for (int c = 0; c < num_components; ++c) {
+ const AttributeTypeT value =
+ static_cast<AttributeTypeT>(portable_attribute_data[val_id++]);
+ att_val[c] = value;
+ }
+ // Store the integer value into the attribute buffer.
+ attribute()->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
+ out_byte_pos += entry_size;
+ }
+}
+
// Allocates an int32 portable attribute with |num_entries| entries of
// |num_components| components each, using an identity point-to-value mapping,
// and installs it on this decoder.
void SequentialIntegerAttributeDecoder::PreparePortableAttribute(
    int num_entries, int num_components) {
  GeometryAttribute va;
  va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32,
          false, num_components * DataTypeLength(DT_INT32), 0);
  std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
  port_att->SetIdentityMapping();
  port_att->Reset(num_entries);
  SetPortableAttribute(std::move(port_att));
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h
new file mode 100644
index 0000000..ef48ed8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h
@@ -0,0 +1,76 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
+#include "draco/compression/attributes/sequential_attribute_decoder.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for attributes encoded with the SequentialIntegerAttributeEncoder.
+class SequentialIntegerAttributeDecoder : public SequentialAttributeDecoder {
+ public:
+ SequentialIntegerAttributeDecoder();
+ bool Init(PointCloudDecoder *decoder, int attribute_id) override;
+
+ bool TransformAttributeToOriginalFormat(
+ const std::vector<PointIndex> &point_ids) override;
+
+ protected:
+ bool DecodeValues(const std::vector<PointIndex> &point_ids,
+ DecoderBuffer *in_buffer) override;
+ virtual bool DecodeIntegerValues(const std::vector<PointIndex> &point_ids,
+ DecoderBuffer *in_buffer);
+
+ // Returns a prediction scheme that should be used for decoding of the
+ // integer values.
+ virtual std::unique_ptr<PredictionSchemeTypedDecoderInterface<int32_t>>
+ CreateIntPredictionScheme(PredictionSchemeMethod method,
+ PredictionSchemeTransformType transform_type);
+
+ // Returns the number of integer attribute components. In general, this
+ // can be different from the number of components of the input attribute.
+ virtual int32_t GetNumValueComponents() const {
+ return attribute()->num_components();
+ }
+
+ // Called after all integer values are decoded. The implementation should
+ // use this method to store the values into the attribute.
+ virtual bool StoreValues(uint32_t num_values);
+
+ void PreparePortableAttribute(int num_entries, int num_components);
+
+ int32_t *GetPortableAttributeData() {
+ if (portable_attribute()->size() == 0) {
+ return nullptr;
+ }
+ return reinterpret_cast<int32_t *>(
+ portable_attribute()->GetAddress(AttributeValueIndex(0)));
+ }
+
+ private:
+ // Stores decoded values into the attribute with a data type AttributeTypeT.
+ template <typename AttributeTypeT>
+ void StoreTypedValues(uint32_t num_values);
+
+ std::unique_ptr<PredictionSchemeTypedDecoderInterface<int32_t>>
+ prediction_scheme_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc
new file mode 100644
index 0000000..e66a0a8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc
@@ -0,0 +1,233 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_integer_attribute_encoder.h"
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h"
+#include "draco/compression/entropy/symbol_encoding.h"
+#include "draco/core/bit_utils.h"
+
+namespace draco {
+
+SequentialIntegerAttributeEncoder::SequentialIntegerAttributeEncoder() {}
+
+bool SequentialIntegerAttributeEncoder::Init(PointCloudEncoder *encoder,
+ int attribute_id) {
+ if (!SequentialAttributeEncoder::Init(encoder, attribute_id)) {
+ return false;
+ }
+ if (GetUniqueId() == SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER) {
+ // When encoding integers, this encoder currently works only for integer
+ // attributes up to 32 bits.
+ switch (attribute()->data_type()) {
+ case DT_INT8:
+ case DT_UINT8:
+ case DT_INT16:
+ case DT_UINT16:
+ case DT_INT32:
+ case DT_UINT32:
+ break;
+ default:
+ return false;
+ }
+ }
+ // Init prediction scheme.
+ const PredictionSchemeMethod prediction_scheme_method =
+ GetPredictionMethodFromOptions(attribute_id, *encoder->options());
+
+ prediction_scheme_ = CreateIntPredictionScheme(prediction_scheme_method);
+
+ if (prediction_scheme_ && !InitPredictionScheme(prediction_scheme_.get())) {
+ prediction_scheme_ = nullptr;
+ }
+
+ return true;
+}
+
+bool SequentialIntegerAttributeEncoder::TransformAttributeToPortableFormat(
+ const std::vector<PointIndex> &point_ids) {
+ if (encoder()) {
+ if (!PrepareValues(point_ids, encoder()->point_cloud()->num_points())) {
+ return false;
+ }
+ } else {
+ if (!PrepareValues(point_ids, 0)) {
+ return false;
+ }
+ }
+
+ // Update point to attribute mapping with the portable attribute if the
+ // attribute is a parent attribute (for now, we can skip it otherwise).
+ if (is_parent_encoder()) {
+ // First create map between original attribute value indices and new ones
+ // (determined by the encoding order).
+ const PointAttribute *const orig_att = attribute();
+ PointAttribute *const portable_att = portable_attribute();
+ IndexTypeVector<AttributeValueIndex, AttributeValueIndex>
+ value_to_value_map(orig_att->size());
+ for (int i = 0; i < point_ids.size(); ++i) {
+ value_to_value_map[orig_att->mapped_index(point_ids[i])] =
+ AttributeValueIndex(i);
+ }
+ if (portable_att->is_mapping_identity()) {
+ portable_att->SetExplicitMapping(encoder()->point_cloud()->num_points());
+ }
+ // Go over all points of the original attribute and update the mapping in
+ // the portable attribute.
+ for (PointIndex i(0); i < encoder()->point_cloud()->num_points(); ++i) {
+ portable_att->SetPointMapEntry(
+ i, value_to_value_map[orig_att->mapped_index(i)]);
+ }
+ }
+ return true;
+}
+
+std::unique_ptr<PredictionSchemeTypedEncoderInterface<int32_t>>
+SequentialIntegerAttributeEncoder::CreateIntPredictionScheme(
+ PredictionSchemeMethod method) {
+ return CreatePredictionSchemeForEncoder<
+ int32_t, PredictionSchemeWrapEncodingTransform<int32_t>>(
+ method, attribute_id(), encoder());
+}
+
+bool SequentialIntegerAttributeEncoder::EncodeValues(
+ const std::vector<PointIndex> &point_ids, EncoderBuffer *out_buffer) {
+ // Initialize general quantization data.
+ const PointAttribute *const attrib = attribute();
+ if (attrib->size() == 0) {
+ return true;
+ }
+
+ int8_t prediction_scheme_method = PREDICTION_NONE;
+ if (prediction_scheme_) {
+ if (!SetPredictionSchemeParentAttributes(prediction_scheme_.get())) {
+ return false;
+ }
+ prediction_scheme_method =
+ static_cast<int8_t>(prediction_scheme_->GetPredictionMethod());
+ }
+ out_buffer->Encode(prediction_scheme_method);
+ if (prediction_scheme_) {
+ out_buffer->Encode(
+ static_cast<int8_t>(prediction_scheme_->GetTransformType()));
+ }
+
+ const int num_components = portable_attribute()->num_components();
+ const int num_values =
+ static_cast<int>(num_components * portable_attribute()->size());
+ const int32_t *const portable_attribute_data = GetPortableAttributeData();
+
+ // We need to keep the portable data intact, but several encoding steps can
+ // result in changes of this data, e.g., by applying prediction schemes that
+ // change the data in place. To preserve the portable data we store and
+ // process all encoded data in a separate array.
+ std::vector<int32_t> encoded_data(num_values);
+
+ // All integer values are initialized. Process them using the prediction
+ // scheme if we have one.
+ if (prediction_scheme_) {
+ prediction_scheme_->ComputeCorrectionValues(
+ portable_attribute_data, &encoded_data[0], num_values, num_components,
+ point_ids.data());
+ }
+
+ if (prediction_scheme_ == nullptr ||
+ !prediction_scheme_->AreCorrectionsPositive()) {
+ const int32_t *const input =
+ prediction_scheme_ ? encoded_data.data() : portable_attribute_data;
+ ConvertSignedIntsToSymbols(input, num_values,
+ reinterpret_cast<uint32_t *>(&encoded_data[0]));
+ }
+
+ if (encoder() == nullptr || encoder()->options()->GetGlobalBool(
+ "use_built_in_attribute_compression", true)) {
+ out_buffer->Encode(static_cast<uint8_t>(1));
+ Options symbol_encoding_options;
+ if (encoder() != nullptr) {
+ SetSymbolEncodingCompressionLevel(&symbol_encoding_options,
+ 10 - encoder()->options()->GetSpeed());
+ }
+ if (!EncodeSymbols(reinterpret_cast<uint32_t *>(encoded_data.data()),
+ static_cast<int>(point_ids.size()) * num_components,
+ num_components, &symbol_encoding_options, out_buffer)) {
+ return false;
+ }
+ } else {
+ // No compression. Just store the raw integer values, using the number of
+ // bytes as needed.
+
+ // To compute the maximum bit-length, first OR all values.
+ uint32_t masked_value = 0;
+ for (uint32_t i = 0; i < static_cast<uint32_t>(num_values); ++i) {
+ masked_value |= encoded_data[i];
+ }
+ // Compute the msb of the ORed value.
+ int value_msb_pos = 0;
+ if (masked_value != 0) {
+ value_msb_pos = MostSignificantBit(masked_value);
+ }
+ const int num_bytes = 1 + value_msb_pos / 8;
+
+ out_buffer->Encode(static_cast<uint8_t>(0));
+ out_buffer->Encode(static_cast<uint8_t>(num_bytes));
+
+ if (num_bytes == DataTypeLength(DT_INT32)) {
+ out_buffer->Encode(encoded_data.data(), sizeof(int32_t) * num_values);
+ } else {
+ for (uint32_t i = 0; i < static_cast<uint32_t>(num_values); ++i) {
+ out_buffer->Encode(encoded_data.data() + i, num_bytes);
+ }
+ }
+ }
+ if (prediction_scheme_) {
+ prediction_scheme_->EncodePredictionData(out_buffer);
+ }
+ return true;
+}
+
+bool SequentialIntegerAttributeEncoder::PrepareValues(
+ const std::vector<PointIndex> &point_ids, int num_points) {
+ // Convert all values to int32_t format.
+ const PointAttribute *const attrib = attribute();
+ const int num_components = attrib->num_components();
+ const int num_entries = static_cast<int>(point_ids.size());
+ PreparePortableAttribute(num_entries, num_components, num_points);
+ int32_t dst_index = 0;
+ int32_t *const portable_attribute_data = GetPortableAttributeData();
+ for (PointIndex pi : point_ids) {
+ const AttributeValueIndex att_id = attrib->mapped_index(pi);
+ if (!attrib->ConvertValue<int32_t>(att_id,
+ portable_attribute_data + dst_index)) {
+ return false;
+ }
+ dst_index += num_components;
+ }
+ return true;
+}
+
+void SequentialIntegerAttributeEncoder::PreparePortableAttribute(
+ int num_entries, int num_components, int num_points) {
+ GeometryAttribute va;
+ va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32,
+ false, num_components * DataTypeLength(DT_INT32), 0);
+ std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
+ port_att->Reset(num_entries);
+ SetPortableAttribute(std::move(port_att));
+ if (num_points) {
+ portable_attribute()->SetExplicitMapping(num_points);
+ }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h
new file mode 100644
index 0000000..c1d6222
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h
@@ -0,0 +1,67 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
+#include "draco/compression/attributes/sequential_attribute_encoder.h"
+
+namespace draco {
+
+// Attribute encoder designed for lossless encoding of integer attributes. The
+// attribute values can be pre-processed by a prediction scheme and compressed
+// with a built-in entropy coder.
+class SequentialIntegerAttributeEncoder : public SequentialAttributeEncoder {
+ public:
+ SequentialIntegerAttributeEncoder();
+ uint8_t GetUniqueId() const override {
+ return SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER;
+ }
+
+ bool Init(PointCloudEncoder *encoder, int attribute_id) override;
+ bool TransformAttributeToPortableFormat(
+ const std::vector<PointIndex> &point_ids) override;
+
+ protected:
+ bool EncodeValues(const std::vector<PointIndex> &point_ids,
+ EncoderBuffer *out_buffer) override;
+
+ // Returns a prediction scheme that should be used for encoding of the
+ // integer values.
+ virtual std::unique_ptr<PredictionSchemeTypedEncoderInterface<int32_t>>
+ CreateIntPredictionScheme(PredictionSchemeMethod method);
+
+ // Prepares the integer values that are going to be encoded.
+ virtual bool PrepareValues(const std::vector<PointIndex> &point_ids,
+ int num_points);
+
+ void PreparePortableAttribute(int num_entries, int num_components,
+ int num_points);
+
+ int32_t *GetPortableAttributeData() {
+ return reinterpret_cast<int32_t *>(
+ portable_attribute()->GetAddress(AttributeValueIndex(0)));
+ }
+
+ private:
+ // Optional prediction scheme can be used to modify the integer values in
+ // order to make them easier to compress.
+ std::unique_ptr<PredictionSchemeTypedEncoderInterface<int32_t>>
+ prediction_scheme_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc
new file mode 100644
index 0000000..44485e6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc
@@ -0,0 +1,64 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <numeric>
+
+#include "draco/compression/attributes/sequential_integer_attribute_decoder.h"
+#include "draco/compression/attributes/sequential_integer_attribute_encoder.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/draco_test_base.h"
+
+namespace draco {
+
+class SequentialIntegerAttributeEncodingTest : public ::testing::Test {
+ protected:
+};
+
+TEST_F(SequentialIntegerAttributeEncodingTest, DoesCompress) {
+ // This test verifies that IntegerEncoding encodes and decodes the given data.
+ const std::vector<int32_t> values{1, 8, 7, 5, 5, 5, 9,
+ 155, -6, -9, 9, 125, 1, 0};
+ PointAttribute pa;
+ pa.Init(GeometryAttribute::GENERIC, 1, DT_INT32, false, values.size());
+ for (uint32_t i = 0; i < values.size(); ++i) {
+ pa.SetAttributeValue(AttributeValueIndex(i), &values[i]);
+ }
+ // List of point ids from 0 to point_ids.size() - 1.
+ std::vector<PointIndex> point_ids(values.size());
+ std::iota(point_ids.begin(), point_ids.end(), 0);
+
+ EncoderBuffer out_buf;
+ SequentialIntegerAttributeEncoder ie;
+ ASSERT_TRUE(ie.InitializeStandalone(&pa));
+ ASSERT_TRUE(ie.TransformAttributeToPortableFormat(point_ids));
+ ASSERT_TRUE(ie.EncodePortableAttribute(point_ids, &out_buf));
+ ASSERT_TRUE(ie.EncodeDataNeededByPortableTransform(&out_buf));
+
+ DecoderBuffer in_buf;
+ in_buf.Init(out_buf.data(), out_buf.size());
+ in_buf.set_bitstream_version(kDracoMeshBitstreamVersion);
+ SequentialIntegerAttributeDecoder id;
+ ASSERT_TRUE(id.InitializeStandalone(&pa));
+ ASSERT_TRUE(id.DecodePortableAttribute(point_ids, &in_buf));
+ ASSERT_TRUE(id.DecodeDataNeededByPortableTransform(point_ids, &in_buf));
+ ASSERT_TRUE(id.TransformAttributeToOriginalFormat(point_ids));
+
+ for (uint32_t i = 0; i < values.size(); ++i) {
+ int32_t entry_val;
+ pa.GetValue(AttributeValueIndex(i), &entry_val);
+ ASSERT_EQ(entry_val, values[i]);
+ }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc
new file mode 100644
index 0000000..de36c1c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_normal_attribute_decoder.h"
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+
+namespace draco {
+
+SequentialNormalAttributeDecoder::SequentialNormalAttributeDecoder() {}
+
+bool SequentialNormalAttributeDecoder::Init(PointCloudDecoder *decoder,
+ int attribute_id) {
+ if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) {
+ return false;
+ }
+ // Currently, this encoder works only for 3-component normal vectors.
+ if (attribute()->num_components() != 3) {
+ return false;
+ }
+ // Also the data type must be DT_FLOAT32.
+ if (attribute()->data_type() != DT_FLOAT32) {
+ return false;
+ }
+ return true;
+}
+
+bool SequentialNormalAttributeDecoder::DecodeIntegerValues(
+ const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+ // Note: in older bitstreams, we do not have a PortableAttribute() decoded
+ // at this stage so we cannot pass it down to the DecodeParameters() call.
+ // It still works fine for octahedral transform because it does not need to
+ // use any data from the attribute.
+ if (!octahedral_transform_.DecodeParameters(*attribute(), in_buffer)) {
+ return false;
+ }
+ }
+#endif
+ return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids,
+ in_buffer);
+}
+
+bool SequentialNormalAttributeDecoder::DecodeDataNeededByPortableTransform(
+ const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
+ if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) {
+ // For newer file version, decode attribute transform data here.
+ if (!octahedral_transform_.DecodeParameters(*GetPortableAttribute(),
+ in_buffer)) {
+ return false;
+ }
+ }
+
+ // Store the decoded transform data in portable attribute.
+ return octahedral_transform_.TransferToAttribute(portable_attribute());
+}
+
+bool SequentialNormalAttributeDecoder::StoreValues(uint32_t num_points) {
+ // Convert all quantized values back to floats.
+ return octahedral_transform_.InverseTransformAttribute(
+ *GetPortableAttribute(), attribute());
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h
new file mode 100644
index 0000000..8c2d801
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h
@@ -0,0 +1,83 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
+
+#include "draco/attributes/attribute_octahedron_transform.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h"
+#include "draco/compression/attributes/sequential_integer_attribute_decoder.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for attributes encoded with SequentialNormalAttributeEncoder.
+class SequentialNormalAttributeDecoder
+ : public SequentialIntegerAttributeDecoder {
+ public:
+ SequentialNormalAttributeDecoder();
+ bool Init(PointCloudDecoder *decoder, int attribute_id) override;
+
+ protected:
+ int32_t GetNumValueComponents() const override {
+ return 2; // We quantize everything into two components.
+ }
+ bool DecodeIntegerValues(const std::vector<PointIndex> &point_ids,
+ DecoderBuffer *in_buffer) override;
+ bool DecodeDataNeededByPortableTransform(
+ const std::vector<PointIndex> &point_ids,
+ DecoderBuffer *in_buffer) override;
+ bool StoreValues(uint32_t num_points) override;
+
+ private:
+ AttributeOctahedronTransform octahedral_transform_;
+
+ std::unique_ptr<PredictionSchemeTypedDecoderInterface<int32_t>>
+ CreateIntPredictionScheme(
+ PredictionSchemeMethod method,
+ PredictionSchemeTransformType transform_type) override {
+ switch (transform_type) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON: {
+ typedef PredictionSchemeNormalOctahedronDecodingTransform<int32_t>
+ Transform;
+ // At this point the decoder has not read the quantization bits,
+ // which is why we must construct the transform by default.
+ // See Transform.DecodeTransformData for more details.
+ return CreatePredictionSchemeForDecoder<int32_t, Transform>(
+ method, attribute_id(), decoder());
+ }
+#endif
+ case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED: {
+ typedef PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform<
+ int32_t>
+ Transform;
+ // At this point the decoder has not read the quantization bits,
+ // which is why we must construct the transform by default.
+ // See Transform.DecodeTransformData for more details.
+ return CreatePredictionSchemeForDecoder<int32_t, Transform>(
+ method, attribute_id(), decoder());
+ }
+ default:
+ return nullptr; // Currently, we support only octahedron transform and
+ // octahedron transform canonicalized.
+ }
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc
new file mode 100644
index 0000000..2e20e89
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc
@@ -0,0 +1,57 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_normal_attribute_encoder.h"
+
+#include "draco/compression/attributes/normal_compression_utils.h"
+
+namespace draco {
+
+bool SequentialNormalAttributeEncoder::Init(PointCloudEncoder *encoder,
+ int attribute_id) {
+ if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id))
+ return false;
+ // Currently this encoder works only for 3-component normal vectors.
+ if (attribute()->num_components() != 3) {
+ return false;
+ }
+
+ // Initialize AttributeOctahedronTransform.
+ const int quantization_bits = encoder->options()->GetAttributeInt(
+ attribute_id, "quantization_bits", -1);
+ if (quantization_bits < 1) {
+ return false;
+ }
+ attribute_octahedron_transform_.SetParameters(quantization_bits);
+ return true;
+}
+
+bool SequentialNormalAttributeEncoder::EncodeDataNeededByPortableTransform(
+ EncoderBuffer *out_buffer) {
+ return attribute_octahedron_transform_.EncodeParameters(out_buffer);
+}
+
+bool SequentialNormalAttributeEncoder::PrepareValues(
+ const std::vector<PointIndex> &point_ids, int num_points) {
+ auto portable_att = attribute_octahedron_transform_.InitTransformedAttribute(
+ *(attribute()), point_ids.size());
+ if (!attribute_octahedron_transform_.TransformAttribute(
+ *(attribute()), point_ids, portable_att.get())) {
+ return false;
+ }
+ SetPortableAttribute(std::move(portable_att));
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h
new file mode 100644
index 0000000..53705c5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h
@@ -0,0 +1,82 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
+
+#include "draco/attributes/attribute_octahedron_transform.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h"
+#include "draco/compression/attributes/sequential_integer_attribute_encoder.h"
+#include "draco/compression/config/compression_shared.h"
+
+namespace draco {
+
+// Class for encoding normal vectors using an octahedral encoding, see Cigolle
+// et al.'14 “A Survey of Efficient Representations for Independent Unit
+// Vectors”. Compared to the basic quantization encoder, this encoder results
+// in a better compression rate under the same accuracy settings. Note that this
+// encoder doesn't preserve the lengths of input vectors, therefore it will not
+// work correctly when the input values are not normalized.
+class SequentialNormalAttributeEncoder
+ : public SequentialIntegerAttributeEncoder {
+ public:
+ uint8_t GetUniqueId() const override {
+ return SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS;
+ }
+ bool IsLossyEncoder() const override { return true; }
+
+ bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) override;
+
+ protected:
+ bool Init(PointCloudEncoder *encoder, int attribute_id) override;
+
+ // Put quantized values in portable attribute for sequential encoding.
+ bool PrepareValues(const std::vector<PointIndex> &point_ids,
+ int num_points) override;
+
+ std::unique_ptr<PredictionSchemeTypedEncoderInterface<int32_t>>
+ CreateIntPredictionScheme(PredictionSchemeMethod /* method */) override {
+ typedef PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform<
+ int32_t>
+ Transform;
+ const int32_t quantization_bits = encoder()->options()->GetAttributeInt(
+ attribute_id(), "quantization_bits", -1);
+ const int32_t max_value = (1 << quantization_bits) - 1;
+ const Transform transform(max_value);
+ const PredictionSchemeMethod default_prediction_method =
+ SelectPredictionMethod(attribute_id(), encoder());
+ const int32_t prediction_method = encoder()->options()->GetAttributeInt(
+ attribute_id(), "prediction_scheme", default_prediction_method);
+
+ if (prediction_method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
+ return CreatePredictionSchemeForEncoder<int32_t, Transform>(
+ MESH_PREDICTION_GEOMETRIC_NORMAL, attribute_id(), encoder(),
+ transform);
+ }
+ if (prediction_method == PREDICTION_DIFFERENCE) {
+ return CreatePredictionSchemeForEncoder<int32_t, Transform>(
+ PREDICTION_DIFFERENCE, attribute_id(), encoder(), transform);
+ }
+ DRACO_DCHECK(false); // Should never be reached.
+ return nullptr;
+ }
+
+ // Used for the conversion to quantized normals in octahedral format.
+ AttributeOctahedronTransform attribute_octahedron_transform_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc
new file mode 100644
index 0000000..3d306e7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h"
+
+#include "draco/core/quantization_utils.h"
+
+namespace draco {
+
+SequentialQuantizationAttributeDecoder::
+ SequentialQuantizationAttributeDecoder() {}
+
+bool SequentialQuantizationAttributeDecoder::Init(PointCloudDecoder *decoder,
+ int attribute_id) {
+ if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) {
+ return false;
+ }
+ const PointAttribute *const attribute =
+ decoder->point_cloud()->attribute(attribute_id);
+ // Currently we can quantize only floating point arguments.
+ if (attribute->data_type() != DT_FLOAT32) {
+ return false;
+ }
+ return true;
+}
+
bool SequentialQuantizationAttributeDecoder::DecodeIntegerValues(
    const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Bitstreams older than 2.0 store the quantization parameters before the
  // integer values, so they must be decoded here first.
  if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0) &&
      !DecodeQuantizedDataInfo()) {
    return false;
  }
#endif
  // The actual integer payload is handled entirely by the base class.
  return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids,
                                                                in_buffer);
}
+
+bool SequentialQuantizationAttributeDecoder::
+ DecodeDataNeededByPortableTransform(
+ const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
+ if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) {
+ // Decode quantization data here only for files with bitstream version 2.0+
+ if (!DecodeQuantizedDataInfo()) {
+ return false;
+ }
+ }
+
+ // Store the decoded transform data in portable attribute;
+ return quantization_transform_.TransferToAttribute(portable_attribute());
+}
+
+bool SequentialQuantizationAttributeDecoder::StoreValues(uint32_t num_points) {
+ return DequantizeValues(num_points);
+}
+
+bool SequentialQuantizationAttributeDecoder::DecodeQuantizedDataInfo() {
+ // Get attribute used as source for decoding.
+ auto att = GetPortableAttribute();
+ if (att == nullptr) {
+ // This should happen only in the backward compatibility mode. It will still
+ // work fine for this case because the only thing the quantization transform
+ // cares about is the number of components that is the same for both source
+ // and target attributes.
+ att = attribute();
+ }
+ return quantization_transform_.DecodeParameters(*att, decoder()->buffer());
+}
+
bool SequentialQuantizationAttributeDecoder::DequantizeValues(
    uint32_t num_values) {
  // Convert all quantized values back to floats. The value count is implied
  // by the portable attribute, so |num_values| is not used by the transform.
  return quantization_transform_.InverseTransformAttribute(
      *GetPortableAttribute(), attribute());
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h
new file mode 100644
index 0000000..ad372dc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_
+
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/compression/attributes/sequential_integer_attribute_decoder.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
// Decoder for attribute values encoded with the
// SequentialQuantizationAttributeEncoder. The integer payload is decoded by
// the base class; this class decodes the quantization parameters and maps the
// integers back to 32-bit floats.
class SequentialQuantizationAttributeDecoder
    : public SequentialIntegerAttributeDecoder {
 public:
  SequentialQuantizationAttributeDecoder();
  // Fails if the attribute's data type is not DT_FLOAT32.
  bool Init(PointCloudDecoder *decoder, int attribute_id) override;

 protected:
  bool DecodeIntegerValues(const std::vector<PointIndex> &point_ids,
                           DecoderBuffer *in_buffer) override;
  bool DecodeDataNeededByPortableTransform(
      const std::vector<PointIndex> &point_ids,
      DecoderBuffer *in_buffer) override;
  bool StoreValues(uint32_t num_points) override;

  // Decodes data necessary for dequantizing the encoded values.
  virtual bool DecodeQuantizedDataInfo();

  // Dequantizes all values and stores them into the output attribute.
  virtual bool DequantizeValues(uint32_t num_values);

 private:
  // Holds the decoded quantization parameters (origin, range, bits).
  AttributeQuantizationTransform quantization_transform_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc
new file mode 100644
index 0000000..d3666f7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc
@@ -0,0 +1,86 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/attributes/sequential_quantization_attribute_encoder.h"
+
+#include "draco/core/quantization_utils.h"
+
+namespace draco {
+
+SequentialQuantizationAttributeEncoder::
+ SequentialQuantizationAttributeEncoder() {}
+
+bool SequentialQuantizationAttributeEncoder::Init(PointCloudEncoder *encoder,
+ int attribute_id) {
+ if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) {
+ return false;
+ }
+ // This encoder currently works only for floating point attributes.
+ const PointAttribute *const attribute =
+ encoder->point_cloud()->attribute(attribute_id);
+ if (attribute->data_type() != DT_FLOAT32) {
+ return false;
+ }
+
+ // Initialize AttributeQuantizationTransform.
+ const int quantization_bits = encoder->options()->GetAttributeInt(
+ attribute_id, "quantization_bits", -1);
+ if (quantization_bits < 1) {
+ return false;
+ }
+ if (encoder->options()->IsAttributeOptionSet(attribute_id,
+ "quantization_origin") &&
+ encoder->options()->IsAttributeOptionSet(attribute_id,
+ "quantization_range")) {
+ // Quantization settings are explicitly specified in the provided options.
+ std::vector<float> quantization_origin(attribute->num_components());
+ encoder->options()->GetAttributeVector(attribute_id, "quantization_origin",
+ attribute->num_components(),
+ &quantization_origin[0]);
+ const float range = encoder->options()->GetAttributeFloat(
+ attribute_id, "quantization_range", 1.f);
+ if (!attribute_quantization_transform_.SetParameters(
+ quantization_bits, quantization_origin.data(),
+ attribute->num_components(), range)) {
+ return false;
+ }
+ } else {
+ // Compute quantization settings from the attribute values.
+ if (!attribute_quantization_transform_.ComputeParameters(
+ *attribute, quantization_bits)) {
+ return false;
+ }
+ }
+ return true;
+}
+
// Writes the quantization parameters (origin, range, number of bits) into
// |out_buffer| so the decoder can undo the quantization.
bool SequentialQuantizationAttributeEncoder::
    EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) {
  return attribute_quantization_transform_.EncodeParameters(out_buffer);
}
+
+bool SequentialQuantizationAttributeEncoder::PrepareValues(
+ const std::vector<PointIndex> &point_ids, int num_points) {
+ auto portable_attribute =
+ attribute_quantization_transform_.InitTransformedAttribute(
+ *attribute(), point_ids.size());
+ if (!attribute_quantization_transform_.TransformAttribute(
+ *(attribute()), point_ids, portable_attribute.get())) {
+ return false;
+ }
+ SetPortableAttribute(std::move(portable_attribute));
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h
new file mode 100644
index 0000000..e9762bd
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_
+
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/compression/attributes/sequential_integer_attribute_encoder.h"
+
+namespace draco {
+
+class MeshEncoder;
+
// Attribute encoder that quantizes floating point attribute values. The
// quantized values can be optionally compressed using an entropy coding.
class SequentialQuantizationAttributeEncoder
    : public SequentialIntegerAttributeEncoder {
 public:
  SequentialQuantizationAttributeEncoder();
  uint8_t GetUniqueId() const override {
    return SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION;
  }
  // Fails if the attribute is not DT_FLOAT32 or if no valid
  // "quantization_bits" option is set for it.
  bool Init(PointCloudEncoder *encoder, int attribute_id) override;

  // Quantization discards precision, so this encoder is always lossy.
  bool IsLossyEncoder() const override { return true; }

  // Writes the quantization parameters needed by the decoder.
  bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) override;

 protected:
  // Put quantized values in portable attribute for sequential encoding.
  bool PrepareValues(const std::vector<PointIndex> &point_ids,
                     int num_points) override;

 private:
  // Used for the quantization.
  AttributeQuantizationTransform attribute_quantization_transform_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h
new file mode 100644
index 0000000..faacbd5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides shared functions for adaptive rANS bit coding.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
+#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
+
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// Clamp the probability p to a uint8_t in the range [1,255].
+inline uint8_t clamp_probability(double p) {
+ DRACO_DCHECK_LE(p, 1.0);
+ DRACO_DCHECK_LE(0.0, p);
+ uint32_t p_int = static_cast<uint32_t>((p * 256) + 0.5);
+ p_int -= (p_int == 256);
+ p_int += (p_int == 0);
+ return static_cast<uint8_t>(p_int);
+}
+
// Update the zero-probability estimate after observing one bit. This is an
// exponential moving average with window w = 128: the old estimate decays by
// (w - 1) / w, and a zero bit contributes an extra 1 / w.
inline double update_probability(double old_p, bool bit) {
  static constexpr double w = 128.0;
  static constexpr double w0 = (w - 1.0) / w;
  static constexpr double w1 = 1.0 / w;
  if (bit) {
    return old_p * w0;
  }
  return old_p * w0 + w1;
}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc
new file mode 100644
index 0000000..056842c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc
@@ -0,0 +1,70 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h"
+
+#include "draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h"
+
+namespace draco {
+
// Start with an unbiased estimate: P(bit == 0) = 0.5.
AdaptiveRAnsBitDecoder::AdaptiveRAnsBitDecoder() : p0_f_(0.5) {}

// Clear() releases the rANS decoder state acquired in StartDecoding().
AdaptiveRAnsBitDecoder::~AdaptiveRAnsBitDecoder() { Clear(); }
+
+bool AdaptiveRAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
+ Clear();
+
+ uint32_t size_in_bytes;
+ if (!source_buffer->Decode(&size_in_bytes)) {
+ return false;
+ }
+ if (size_in_bytes > source_buffer->remaining_size()) {
+ return false;
+ }
+ if (ans_read_init(&ans_decoder_,
+ reinterpret_cast<uint8_t *>(
+ const_cast<char *>(source_buffer->data_head())),
+ size_in_bytes) != 0) {
+ return false;
+ }
+ source_buffer->Advance(size_in_bytes);
+ return true;
+}
+
+bool AdaptiveRAnsBitDecoder::DecodeNextBit() {
+ const uint8_t p0 = clamp_probability(p0_f_);
+ const bool bit = static_cast<bool>(rabs_read(&ans_decoder_, p0));
+ p0_f_ = update_probability(p0_f_, bit);
+ return bit;
+}
+
+void AdaptiveRAnsBitDecoder::DecodeLeastSignificantBits32(int nbits,
+ uint32_t *value) {
+ DRACO_DCHECK_EQ(true, nbits <= 32);
+ DRACO_DCHECK_EQ(true, nbits > 0);
+
+ uint32_t result = 0;
+ while (nbits) {
+ result = (result << 1) + DecodeNextBit();
+ --nbits;
+ }
+ *value = result;
+}
+
void AdaptiveRAnsBitDecoder::Clear() {
  // Release the rANS reader state, then reset the probability estimate to
  // its initial unbiased value so the decoder can be reused.
  ans_read_end(&ans_decoder_);
  p0_f_ = 0.5;
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h
new file mode 100644
index 0000000..a1ea011
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides basic classes and functions for rANS bit decoding.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_
+
+#include <vector>
+
+#include "draco/compression/entropy/ans.h"
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
// Class for decoding a sequence of bits that were encoded with
// AdaptiveRAnsBitEncoder.
class AdaptiveRAnsBitDecoder {
 public:
  AdaptiveRAnsBitDecoder();
  ~AdaptiveRAnsBitDecoder();

  // Sets |source_buffer| as the buffer to decode bits from.
  bool StartDecoding(DecoderBuffer *source_buffer);

  // Decode one bit. Returns true if the bit is a 1, otherwise false.
  bool DecodeNextBit();

  // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
  // > 0 and <= 32.
  void DecodeLeastSignificantBits32(int nbits, uint32_t *value);

  // No-op; present for interface compatibility with the other bit decoders.
  void EndDecoding() {}

 private:
  void Clear();

  // State of the underlying rANS decoder.
  AnsDecoder ans_decoder_;
  // Adaptive estimate of P(bit == 0), updated after every decoded bit.
  double p0_f_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc
new file mode 100644
index 0000000..5ce9dc3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc
@@ -0,0 +1,59 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h"
+
+#include "draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h"
+
+namespace draco {
+
AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {}

// Drop any bits buffered by an unfinished encoding session.
AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); }

// Bits are only buffered by EncodeBit(); actual coding happens in
// EndEncoding(), so starting merely resets the buffer.
void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); }
+
+void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+ // Buffer for ans to write.
+ std::vector<uint8_t> buffer(bits_.size() + 16);
+ AnsCoder ans_coder;
+ ans_write_init(&ans_coder, buffer.data());
+
+ // Unfortunately we have to encode the bits in reversed order, while the
+ // probabilities that should be given are those of the forward sequence.
+ double p0_f = 0.5;
+ std::vector<uint8_t> p0s;
+ p0s.reserve(bits_.size());
+ for (bool b : bits_) {
+ p0s.push_back(clamp_probability(p0_f));
+ p0_f = update_probability(p0_f, b);
+ }
+ auto bit = bits_.rbegin();
+ auto pit = p0s.rbegin();
+ while (bit != bits_.rend()) {
+ rabs_write(&ans_coder, *bit, *pit);
+ ++bit;
+ ++pit;
+ }
+
+ const uint32_t size_in_bytes = ans_write_end(&ans_coder);
+ target_buffer->Encode(size_in_bytes);
+ target_buffer->Encode(buffer.data(), size_in_bytes);
+
+ Clear();
+}
+
// Drops all buffered bits.
void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); }
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h
new file mode 100644
index 0000000..9b18328
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h
@@ -0,0 +1,61 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides basic classes and functions for rANS bit encoding.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_
+
+#include <vector>
+
+#include "draco/compression/entropy/ans.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Class for adaptive encoding a sequence of bits using rANS.
+class AdaptiveRAnsBitEncoder {
+ public:
+ AdaptiveRAnsBitEncoder();
+ ~AdaptiveRAnsBitEncoder();
+
+ // Must be called before any Encode* function is called.
+ void StartEncoding();
+
+ // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0.
+ void EncodeBit(bool bit) { bits_.push_back(bit); }
+
+ // Encode |nbits| of |value|, starting from the least significant bit.
+ // |nbits| must be > 0 and <= 32.
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value) {
+ DRACO_DCHECK_EQ(true, nbits <= 32);
+ DRACO_DCHECK_EQ(true, nbits > 0);
+ uint32_t selector = (1 << (nbits - 1));
+ while (selector) {
+ EncodeBit(value & selector);
+ selector = selector >> 1;
+ }
+ }
+
+ // Ends the bit encoding and stores the result into the target_buffer.
+ void EndEncoding(EncoderBuffer *target_buffer);
+
+ private:
+ void Clear();
+
+ std::vector<bool> bits_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc
new file mode 100644
index 0000000..2abe338
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/direct_bit_decoder.h"
+
+namespace draco {
+
// Start in the "no data" state: the read position is the (empty) buffer's
// end and no bits of the current word have been consumed.
DirectBitDecoder::DirectBitDecoder() : pos_(bits_.end()), num_used_bits_(0) {}

DirectBitDecoder::~DirectBitDecoder() { Clear(); }
+
+bool DirectBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
+ Clear();
+ uint32_t size_in_bytes;
+ if (!source_buffer->Decode(&size_in_bytes)) {
+ return false;
+ }
+
+ // Check that size_in_bytes is > 0 and a multiple of 4 as the encoder always
+ // encodes 32 bit elements.
+ if (size_in_bytes == 0 || size_in_bytes & 0x3) {
+ return false;
+ }
+ if (size_in_bytes > source_buffer->remaining_size()) {
+ return false;
+ }
+ const uint32_t num_32bit_elements = size_in_bytes / 4;
+ bits_.resize(num_32bit_elements);
+ if (!source_buffer->Decode(bits_.data(), size_in_bytes)) {
+ return false;
+ }
+ pos_ = bits_.begin();
+ num_used_bits_ = 0;
+ return true;
+}
+
void DirectBitDecoder::Clear() {
  bits_.clear();
  num_used_bits_ = 0;
  // Must be reset after clear(): clearing invalidates the old iterator.
  pos_ = bits_.end();
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h
new file mode 100644
index 0000000..b9fbc2d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h
@@ -0,0 +1,90 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct encoding of bits with arithmetic encoder interface.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_
+
+#include <vector>
+
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+class DirectBitDecoder {
+ public:
+ DirectBitDecoder();
+ ~DirectBitDecoder();
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ bool StartDecoding(DecoderBuffer *source_buffer);
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit() {
+ const uint32_t selector = 1 << (31 - num_used_bits_);
+ if (pos_ == bits_.end()) {
+ return false;
+ }
+ const bool bit = *pos_ & selector;
+ ++num_used_bits_;
+ if (num_used_bits_ == 32) {
+ ++pos_;
+ num_used_bits_ = 0;
+ }
+ return bit;
+ }
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ DRACO_DCHECK_EQ(true, nbits <= 32);
+ DRACO_DCHECK_EQ(true, nbits > 0);
+ const int remaining = 32 - num_used_bits_;
+ if (nbits <= remaining) {
+ if (pos_ == bits_.end()) {
+ *value = 0;
+ return;
+ }
+ *value = (*pos_ << num_used_bits_) >> (32 - nbits);
+ num_used_bits_ += nbits;
+ if (num_used_bits_ == 32) {
+ ++pos_;
+ num_used_bits_ = 0;
+ }
+ } else {
+ if (pos_ + 1 == bits_.end()) {
+ *value = 0;
+ return;
+ }
+ const uint32_t value_l = ((*pos_) << num_used_bits_);
+ num_used_bits_ = nbits - remaining;
+ ++pos_;
+ const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
+ *value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
+ }
+ }
+
+ void EndDecoding() {}
+
+ private:
+ void Clear();
+
+ std::vector<uint32_t> bits_;
+ std::vector<uint32_t>::const_iterator pos_;
+ uint32_t num_used_bits_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc
new file mode 100644
index 0000000..d39143c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/direct_bit_encoder.h"
+
+namespace draco {
+
// Start with an empty working word and no completed words.
DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {}

DirectBitEncoder::~DirectBitEncoder() { Clear(); }

// Encoding only buffers words; starting merely resets the buffers.
void DirectBitEncoder::StartEncoding() { Clear(); }
+
+void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+ bits_.push_back(local_bits_);
+ const uint32_t size_in_byte = static_cast<uint32_t>(bits_.size()) * 4;
+ target_buffer->Encode(size_in_byte);
+ target_buffer->Encode(bits_.data(), size_in_byte);
+ Clear();
+}
+
void DirectBitEncoder::Clear() {
  // Reset both the completed words and the partially filled working word.
  bits_.clear();
  local_bits_ = 0;
  num_local_bits_ = 0;
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h
new file mode 100644
index 0000000..705b2ca
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h
@@ -0,0 +1,89 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct encoding of bits with arithmetic encoder interface.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_
+
+#include <vector>
+
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+class DirectBitEncoder {
+ public:
+ DirectBitEncoder();
+ ~DirectBitEncoder();
+
+ // Must be called before any Encode* function is called.
+ void StartEncoding();
+
+ // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0.
+ void EncodeBit(bool bit) {
+ if (bit) {
+ local_bits_ |= 1 << (31 - num_local_bits_);
+ }
+ num_local_bits_++;
+ if (num_local_bits_ == 32) {
+ bits_.push_back(local_bits_);
+ num_local_bits_ = 0;
+ local_bits_ = 0;
+ }
+ }
+
+ // Encode |nbits| of |value|, starting from the least significant bit.
+ // |nbits| must be > 0 and <= 32.
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value) {
+ DRACO_DCHECK_EQ(true, nbits <= 32);
+ DRACO_DCHECK_EQ(true, nbits > 0);
+
+ const int remaining = 32 - num_local_bits_;
+
+ // Make sure there are no leading bits that should not be encoded and
+ // start from here.
+ value = value << (32 - nbits);
+ if (nbits <= remaining) {
+ value = value >> num_local_bits_;
+ local_bits_ = local_bits_ | value;
+ num_local_bits_ += nbits;
+ if (num_local_bits_ == 32) {
+ bits_.push_back(local_bits_);
+ local_bits_ = 0;
+ num_local_bits_ = 0;
+ }
+ } else {
+ value = value >> (32 - nbits);
+ num_local_bits_ = nbits - remaining;
+ const uint32_t value_l = value >> num_local_bits_;
+ local_bits_ = local_bits_ | value_l;
+ bits_.push_back(local_bits_);
+ local_bits_ = value << (32 - num_local_bits_);
+ }
+ }
+
+ // Ends the bit encoding and stores the result into the target_buffer.
+ void EndEncoding(EncoderBuffer *target_buffer);
+
+ private:
+ void Clear();
+
+ std::vector<uint32_t> bits_;
+ uint32_t local_bits_;
+ uint32_t num_local_bits_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h
new file mode 100644
index 0000000..c14058b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h
@@ -0,0 +1,77 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct decoding of bits with arithmetic decoder interface.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_
+
+#include <array>
+
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// See FoldedBit32Encoder for more details.
+template <class BitDecoderT>
+class FoldedBit32Decoder {
+ public:
+ FoldedBit32Decoder() {}
+ ~FoldedBit32Decoder() {}
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ bool StartDecoding(DecoderBuffer *source_buffer) {
+ for (int i = 0; i < 32; i++) {
+ if (!folded_number_decoders_[i].StartDecoding(source_buffer)) {
+ return false;
+ }
+ }
+ return bit_decoder_.StartDecoding(source_buffer);
+ }
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); }
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ uint32_t result = 0;
+ for (int i = 0; i < nbits; ++i) {
+ const bool bit = folded_number_decoders_[i].DecodeNextBit();
+ result = (result << 1) + bit;
+ }
+ *value = result;
+ }
+
+ void EndDecoding() {
+ for (int i = 0; i < 32; i++) {
+ folded_number_decoders_[i].EndDecoding();
+ }
+ bit_decoder_.EndDecoding();
+ }
+
+ private:
+ void Clear() {
+ for (int i = 0; i < 32; i++) {
+ folded_number_decoders_[i].Clear();
+ }
+ bit_decoder_.Clear();
+ }
+
+ std::array<BitDecoderT, 32> folded_number_decoders_;
+ BitDecoderT bit_decoder_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h
new file mode 100644
index 0000000..375b38a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h
@@ -0,0 +1,82 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct encoding of bits with arithmetic encoder interface.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_
+
+#include <array>
+
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// This coding scheme considers every bit of an (up to) 32bit integer as a
+// separate context. This can be a significant advantage when encoding numbers
+// where it is more likely that the front bits are zero.
+// The behavior is essentially the same as other arithmetic encoding schemes,
+// the only difference is that encoding and decoding of bits must be absolutely
+// symmetric: bits handed in by EncodeLeastSignificantBits32 must be decoded
+// This is the FoldedBit32Encoder, see also FoldedBit32Decoder.
+template <class BitEncoderT>
+class FoldedBit32Encoder {
+ public:
+ FoldedBit32Encoder() {}
+ ~FoldedBit32Encoder() {}
+
+ // Must be called before any Encode* function is called.
+ void StartEncoding() {
+ for (int i = 0; i < 32; i++) {
+ folded_number_encoders_[i].StartEncoding();
+ }
+ bit_encoder_.StartEncoding();
+ }
+
+ // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0.
+ void EncodeBit(bool bit) { bit_encoder_.EncodeBit(bit); }
+
+ // Encode |nbits| of |value|, starting from the least significant bit.
+ // |nbits| must be > 0 and <= 32.
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value) {
+ uint32_t selector = 1 << (nbits - 1);
+ for (int i = 0; i < nbits; i++) {
+ const bool bit = (value & selector);
+ folded_number_encoders_[i].EncodeBit(bit);
+ selector = selector >> 1;
+ }
+ }
+
+ // Ends the bit encoding and stores the result into the target_buffer.
+ void EndEncoding(EncoderBuffer *target_buffer) {
+ for (int i = 0; i < 32; i++) {
+ folded_number_encoders_[i].EndEncoding(target_buffer);
+ }
+ bit_encoder_.EndEncoding(target_buffer);
+ }
+
+ private:
+ void Clear() {
+ for (int i = 0; i < 32; i++) {
+ folded_number_encoders_[i].Clear();
+ }
+ bit_encoder_.Clear();
+ }
+
+ std::array<BitEncoderT, 32> folded_number_encoders_;
+ BitEncoderT bit_encoder_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc
new file mode 100644
index 0000000..a9b8fb9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc
@@ -0,0 +1,82 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
+RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {}
+
+RAnsBitDecoder::~RAnsBitDecoder() { Clear(); }
+
+bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
+ Clear();
+
+ if (!source_buffer->Decode(&prob_zero_)) {
+ return false;
+ }
+
+ uint32_t size_in_bytes;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (source_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ if (!source_buffer->Decode(&size_in_bytes)) {
+ return false;
+ }
+
+ } else
+#endif
+ {
+ if (!DecodeVarint(&size_in_bytes, source_buffer)) {
+ return false;
+ }
+ }
+
+ if (size_in_bytes > source_buffer->remaining_size()) {
+ return false;
+ }
+
+ if (ans_read_init(&ans_decoder_,
+ reinterpret_cast<uint8_t *>(
+ const_cast<char *>(source_buffer->data_head())),
+ size_in_bytes) != 0) {
+ return false;
+ }
+ source_buffer->Advance(size_in_bytes);
+ return true;
+}
+
+bool RAnsBitDecoder::DecodeNextBit() {
+ const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_);
+ return bit > 0;
+}
+
+void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ DRACO_DCHECK_EQ(true, nbits <= 32);
+ DRACO_DCHECK_EQ(true, nbits > 0);
+
+ uint32_t result = 0;
+ while (nbits) {
+ result = (result << 1) + DecodeNextBit();
+ --nbits;
+ }
+ *value = result;
+}
+
+void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); }
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h
new file mode 100644
index 0000000..25d243e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h
@@ -0,0 +1,55 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides basic classes and functions for rANS coding.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_
+
+#include <vector>
+
+#include "draco/compression/entropy/ans.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder.
+class RAnsBitDecoder {
+ public:
+ RAnsBitDecoder();
+ ~RAnsBitDecoder();
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ // Returns false when the data is invalid.
+ bool StartDecoding(DecoderBuffer *source_buffer);
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit();
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
+
+ void EndDecoding() {}
+
+ private:
+ void Clear();
+
+ AnsDecoder ans_decoder_;
+ uint8_t prob_zero_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc
new file mode 100644
index 0000000..8d00ea3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc
@@ -0,0 +1,125 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+
+#include "draco/compression/entropy/ans.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+RAnsBitEncoder::RAnsBitEncoder() : local_bits_(0), num_local_bits_(0) {}
+
+RAnsBitEncoder::~RAnsBitEncoder() { Clear(); }
+
+void RAnsBitEncoder::StartEncoding() { Clear(); }
+
+void RAnsBitEncoder::EncodeBit(bool bit) {
+ if (bit) {
+ bit_counts_[1]++;
+ local_bits_ |= 1 << num_local_bits_;
+ } else {
+ bit_counts_[0]++;
+ }
+ num_local_bits_++;
+
+ if (num_local_bits_ == 32) {
+ bits_.push_back(local_bits_);
+ num_local_bits_ = 0;
+ local_bits_ = 0;
+ }
+}
+
+void RAnsBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) {
+ DRACO_DCHECK_EQ(true, nbits <= 32);
+ DRACO_DCHECK_EQ(true, nbits > 0);
+
+ const uint32_t reversed = ReverseBits32(value) >> (32 - nbits);
+ const int ones = CountOneBits32(reversed);
+ bit_counts_[0] += (nbits - ones);
+ bit_counts_[1] += ones;
+
+ const int remaining = 32 - num_local_bits_;
+
+ if (nbits <= remaining) {
+ CopyBits32(&local_bits_, num_local_bits_, reversed, 0, nbits);
+ num_local_bits_ += nbits;
+ if (num_local_bits_ == 32) {
+ bits_.push_back(local_bits_);
+ local_bits_ = 0;
+ num_local_bits_ = 0;
+ }
+ } else {
+ CopyBits32(&local_bits_, num_local_bits_, reversed, 0, remaining);
+ bits_.push_back(local_bits_);
+ local_bits_ = 0;
+ CopyBits32(&local_bits_, 0, reversed, remaining, nbits - remaining);
+ num_local_bits_ = nbits - remaining;
+ }
+}
+
+void RAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+ uint64_t total = bit_counts_[1] + bit_counts_[0];
+ if (total == 0) {
+ total++;
+ }
+
+ // The probability interval [0,1] is mapped to values of [0, 256]. However,
+ // the coding scheme can not deal with probabilities of 0 or 1, which is why
+ // we must clamp the values to interval [1, 255]. Specifically 128
+ // corresponds to 0.5 exactly. And the value can be given as uint8_t.
+ const uint32_t zero_prob_raw = static_cast<uint32_t>(
+ ((bit_counts_[0] / static_cast<double>(total)) * 256.0) + 0.5);
+
+ uint8_t zero_prob = 255;
+ if (zero_prob_raw < 255) {
+ zero_prob = static_cast<uint8_t>(zero_prob_raw);
+ }
+
+ zero_prob += (zero_prob == 0);
+
+ // Space for 32 bit integer and some extra space.
+ std::vector<uint8_t> buffer((bits_.size() + 8) * 8);
+ AnsCoder ans_coder;
+ ans_write_init(&ans_coder, buffer.data());
+
+ for (int i = num_local_bits_ - 1; i >= 0; --i) {
+ const uint8_t bit = (local_bits_ >> i) & 1;
+ rabs_write(&ans_coder, bit, zero_prob);
+ }
+ for (auto it = bits_.rbegin(); it != bits_.rend(); ++it) {
+ const uint32_t bits = *it;
+ for (int i = 31; i >= 0; --i) {
+ const uint8_t bit = (bits >> i) & 1;
+ rabs_write(&ans_coder, bit, zero_prob);
+ }
+ }
+
+ const int size_in_bytes = ans_write_end(&ans_coder);
+ target_buffer->Encode(zero_prob);
+ EncodeVarint(static_cast<uint32_t>(size_in_bytes), target_buffer);
+ target_buffer->Encode(buffer.data(), size_in_bytes);
+
+ Clear();
+}
+
+void RAnsBitEncoder::Clear() {
+ bit_counts_.assign(2, 0);
+ bits_.clear();
+ local_bits_ = 0;
+ num_local_bits_ = 0;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h
new file mode 100644
index 0000000..1993dd3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides basic classes and functions for rANS coding.
+#ifndef DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_
+
+#include <vector>
+
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Class for encoding a sequence of bits using rANS. The probability table used
+// to encode the bits is based off the total counts of bits.
+// TODO(fgalligan): Investigate using an adaptive table for more compression.
+class RAnsBitEncoder {
+ public:
+ RAnsBitEncoder();
+ ~RAnsBitEncoder();
+
+ // Must be called before any Encode* function is called.
+ void StartEncoding();
+
+ // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0.
+ void EncodeBit(bool bit);
+
+ // Encode |nbits| of |value|, starting from the least significant bit.
+ // |nbits| must be > 0 and <= 32.
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value);
+
+ // Ends the bit encoding and stores the result into the target_buffer.
+ void EndEncoding(EncoderBuffer *target_buffer);
+
+ private:
+ void Clear();
+
+ std::vector<uint64_t> bit_counts_;
+ std::vector<uint32_t> bits_;
+ uint32_t local_bits_;
+ uint32_t num_local_bits_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc
new file mode 100644
index 0000000..9509ad9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc
@@ -0,0 +1,9 @@
+#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h"
+#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/core/draco_test_base.h"
+
+// Just including rans_coding.h and adaptive_rans_coding.h gets an asan error
+// when compiling (blaze test :rans_coding_test --config=asan)
+TEST(RansCodingTest, LinkerTest) {}
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc
new file mode 100644
index 0000000..8ed50ef
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc
@@ -0,0 +1,49 @@
+#include "draco/compression/bit_coders/symbol_bit_decoder.h"
+
+#include "draco/compression/entropy/symbol_decoding.h"
+
+namespace draco {
+
+bool SymbolBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
+ uint32_t size;
+ if (!source_buffer->Decode(&size)) {
+ return false;
+ }
+
+ symbols_.resize(size);
+ if (!DecodeSymbols(size, 1, source_buffer, symbols_.data())) {
+ return false;
+ }
+ std::reverse(symbols_.begin(), symbols_.end());
+ return true;
+}
+
+bool SymbolBitDecoder::DecodeNextBit() {
+ uint32_t symbol;
+ DecodeLeastSignificantBits32(1, &symbol);
+ DRACO_DCHECK(symbol == 0 || symbol == 1);
+ return symbol == 1;
+}
+
+void SymbolBitDecoder::DecodeLeastSignificantBits32(int nbits,
+ uint32_t *value) {
+ DRACO_DCHECK_LE(1, nbits);
+ DRACO_DCHECK_LE(nbits, 32);
+ DRACO_DCHECK_NE(value, nullptr);
+ // Testing: check to make sure there is something to decode.
+ DRACO_DCHECK_GT(symbols_.size(), 0);
+
+ (*value) = symbols_.back();
+ symbols_.pop_back();
+
+ const int discarded_bits = 32 - nbits;
+ (*value) <<= discarded_bits;
+ (*value) >>= discarded_bits;
+}
+
+void SymbolBitDecoder::Clear() {
+ symbols_.clear();
+ symbols_.shrink_to_fit();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h
new file mode 100644
index 0000000..909d717
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h
@@ -0,0 +1,36 @@
+#ifndef DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// Class for decoding bits using the symbol entropy encoding. Wraps
+// |DecodeSymbols|. Note that this uses a symbol-based encoding scheme for
+// encoding bits.
+class SymbolBitDecoder {
+ public:
+ // Sets |source_buffer| as the buffer to decode bits from.
+ bool StartDecoding(DecoderBuffer *source_buffer);
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit();
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
+
+ void EndDecoding() { Clear(); }
+
+ private:
+ void Clear();
+
+ std::vector<uint32_t> symbols_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc
new file mode 100644
index 0000000..8383423
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc
@@ -0,0 +1,30 @@
+#include "draco/compression/bit_coders/symbol_bit_encoder.h"
+
+#include "draco/compression/entropy/symbol_encoding.h"
+
+namespace draco {
+
+void SymbolBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) {
+ DRACO_DCHECK_LE(1, nbits);
+ DRACO_DCHECK_LE(nbits, 32);
+
+ const int discarded_bits = 32 - nbits;
+ value <<= discarded_bits;
+ value >>= discarded_bits;
+
+ symbols_.push_back(value);
+}
+
+void SymbolBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+ target_buffer->Encode(static_cast<uint32_t>(symbols_.size()));
+ EncodeSymbols(symbols_.data(), static_cast<int>(symbols_.size()), 1, nullptr,
+ target_buffer);
+ Clear();
+}
+
+void SymbolBitEncoder::Clear() {
+ symbols_.clear();
+ symbols_.shrink_to_fit();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h
new file mode 100644
index 0000000..7f1570c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h
@@ -0,0 +1,36 @@
+#ifndef DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_
+#define DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Class for encoding bits using the symbol entropy encoding. Wraps
+// |EncodeSymbols|. Note that this uses a symbol-based encoding scheme for
+// encoding bits.
+class SymbolBitEncoder {
+ public:
+ // Must be called before any Encode* function is called.
+ void StartEncoding() { Clear(); }
+
+ // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0.
+ void EncodeBit(bool bit) { EncodeLeastSignificantBits32(1, bit ? 1 : 0); }
+
+ // Encode |nbits| LSBs of |value| as a symbol. |nbits| must be > 0 and <= 32.
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value);
+
+ // Ends the bit encoding and stores the result into the target_buffer.
+ void EndEncoding(EncoderBuffer *target_buffer);
+
+ private:
+ void Clear();
+
+ std::vector<uint32_t> symbols_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/config/compression_shared.h b/libs/assimp/contrib/draco/src/draco/compression/config/compression_shared.h
new file mode 100644
index 0000000..c43f303
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/config/compression_shared.h
@@ -0,0 +1,155 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
+#define DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
+
+#include <stdint.h>
+
+#include "draco/core/macros.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Latest Draco bit-stream version.
+static constexpr uint8_t kDracoPointCloudBitstreamVersionMajor = 2;
+static constexpr uint8_t kDracoPointCloudBitstreamVersionMinor = 3;
+static constexpr uint8_t kDracoMeshBitstreamVersionMajor = 2;
+static constexpr uint8_t kDracoMeshBitstreamVersionMinor = 2;
+
+// Concatenated latest bit-stream version.
+static constexpr uint16_t kDracoPointCloudBitstreamVersion =
+ DRACO_BITSTREAM_VERSION(kDracoPointCloudBitstreamVersionMajor,
+ kDracoPointCloudBitstreamVersionMinor);
+
+static constexpr uint16_t kDracoMeshBitstreamVersion = DRACO_BITSTREAM_VERSION(
+ kDracoMeshBitstreamVersionMajor, kDracoMeshBitstreamVersionMinor);
+
+// Currently, we support point cloud and triangular mesh encoding.
+// TODO(draco-eng) Convert enum to enum class (safety, not performance).
+enum EncodedGeometryType {
+ INVALID_GEOMETRY_TYPE = -1,
+ POINT_CLOUD = 0,
+ TRIANGULAR_MESH,
+ NUM_ENCODED_GEOMETRY_TYPES
+};
+
+// List of encoding methods for point clouds.
+enum PointCloudEncodingMethod {
+ POINT_CLOUD_SEQUENTIAL_ENCODING = 0,
+ POINT_CLOUD_KD_TREE_ENCODING
+};
+
+// List of encoding methods for meshes.
+enum MeshEncoderMethod {
+ MESH_SEQUENTIAL_ENCODING = 0,
+ MESH_EDGEBREAKER_ENCODING,
+};
+
+// List of various attribute encoders supported by our framework. The entries
+// are used as unique identifiers of the encoders and their values should not
+// be changed!
+enum AttributeEncoderType {
+ BASIC_ATTRIBUTE_ENCODER = 0,
+ MESH_TRAVERSAL_ATTRIBUTE_ENCODER,
+ KD_TREE_ATTRIBUTE_ENCODER,
+};
+
+// List of various sequential attribute encoder/decoders that can be used in our
+// pipeline. The values represent unique identifiers used by the decoder and
+// they should not be changed.
+enum SequentialAttributeEncoderType {
+ SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC = 0,
+ SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER,
+ SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION,
+ SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS,
+};
+
+// List of all prediction methods currently supported by our framework.
+enum PredictionSchemeMethod {
+ // Special value indicating that no prediction scheme was used.
+ PREDICTION_NONE = -2,
+ // Used when no specific prediction scheme is required.
+ PREDICTION_UNDEFINED = -1,
+ PREDICTION_DIFFERENCE = 0,
+ MESH_PREDICTION_PARALLELOGRAM = 1,
+ MESH_PREDICTION_MULTI_PARALLELOGRAM = 2,
+ MESH_PREDICTION_TEX_COORDS_DEPRECATED = 3,
+ MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM = 4,
+ MESH_PREDICTION_TEX_COORDS_PORTABLE = 5,
+ MESH_PREDICTION_GEOMETRIC_NORMAL = 6,
+ NUM_PREDICTION_SCHEMES
+};
+
+// List of all prediction scheme transforms used by our framework.
+enum PredictionSchemeTransformType {
+ PREDICTION_TRANSFORM_NONE = -1,
+  // Basic delta transform where the prediction is computed as the difference
+  // between the predicted and original value.
+ PREDICTION_TRANSFORM_DELTA = 0,
+ // An improved delta transform where all computed delta values are wrapped
+ // around a fixed interval which lowers the entropy.
+ PREDICTION_TRANSFORM_WRAP = 1,
+ // Specialized transform for normal coordinates using inverted tiles.
+ PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON = 2,
+ // Specialized transform for normal coordinates using canonicalized inverted
+ // tiles.
+ PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED = 3,
+ // The number of valid (non-negative) prediction scheme transform types.
+ NUM_PREDICTION_SCHEME_TRANSFORM_TYPES
+};
+
+// List of all mesh traversal methods supported by Draco framework.
+enum MeshTraversalMethod {
+ MESH_TRAVERSAL_DEPTH_FIRST = 0,
+ MESH_TRAVERSAL_PREDICTION_DEGREE = 1,
+ NUM_TRAVERSAL_METHODS
+};
+
+// List of all variant of the edgebreaker method that is used for compression
+// of mesh connectivity.
+enum MeshEdgebreakerConnectivityEncodingMethod {
+ MESH_EDGEBREAKER_STANDARD_ENCODING = 0,
+ MESH_EDGEBREAKER_PREDICTIVE_ENCODING = 1, // Deprecated.
+ MESH_EDGEBREAKER_VALENCE_ENCODING = 2,
+};
+
+// Draco header V1
+struct DracoHeader {
+ int8_t draco_string[5];
+ uint8_t version_major;
+ uint8_t version_minor;
+ uint8_t encoder_type;
+ uint8_t encoder_method;
+ uint16_t flags;
+};
+
+enum NormalPredictionMode {
+ ONE_TRIANGLE = 0, // To be deprecated.
+ TRIANGLE_AREA = 1,
+};
+
+// Different methods used for symbol entropy encoding.
+enum SymbolCodingMethod {
+ SYMBOL_CODING_TAGGED = 0,
+ SYMBOL_CODING_RAW = 1,
+ NUM_SYMBOL_CODING_METHODS,
+};
+
+// Mask for setting and getting the bit for metadata in |flags| of header.
+#define METADATA_FLAG_MASK 0x8000
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/config/decoder_options.h b/libs/assimp/contrib/draco/src/draco/compression/config/decoder_options.h
new file mode 100644
index 0000000..3b38899
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/config/decoder_options.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_
+#define DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_
+
+#include <map>
+#include <memory>
+
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/compression/config/draco_options.h"
+
+namespace draco {
+
+// Class containing options that can be passed to PointCloudDecoder to control
+// decoding of the input geometry. The options can be specified either for the
+// whole geometry or for a specific attribute type. Each option is identified
+// by a unique name stored as an std::string.
+typedef DracoOptions<GeometryAttribute::Type> DecoderOptions;
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/config/decoder_options_test.cc b/libs/assimp/contrib/draco/src/draco/compression/config/decoder_options_test.cc
new file mode 100644
index 0000000..a5cd7f1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/config/decoder_options_test.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/config/decoder_options.h"
+
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
+class DecoderOptionsTest : public ::testing::Test {
+ protected:
+ DecoderOptionsTest() {}
+};
+
+TEST_F(DecoderOptionsTest, TestOptions) {
+ // This test verifies that we can update global and attribute options of the
+ // DecoderOptions class instance.
+ draco::DecoderOptions options;
+ options.SetGlobalInt("test", 3);
+ ASSERT_EQ(options.GetGlobalInt("test", -1), 3);
+
+ options.SetAttributeInt(draco::GeometryAttribute::POSITION, "test", 1);
+ options.SetAttributeInt(draco::GeometryAttribute::GENERIC, "test", 2);
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", -1),
+ 3);
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test", -1),
+ 1);
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::GENERIC, "test", -1),
+ 2);
+}
+
+TEST_F(DecoderOptionsTest, TestAttributeOptionsAccessors) {
+ // This test verifies that we can query options stored in DecoderOptions
+ // class instance.
+ draco::DecoderOptions options;
+ options.SetGlobalInt("test", 1);
+ options.SetAttributeInt(draco::GeometryAttribute::POSITION, "test", 2);
+ options.SetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", 3);
+
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test", -1),
+ 2);
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test2", -1),
+ -1);
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", -1),
+ 3);
+ ASSERT_EQ(
+ options.GetAttributeInt(draco::GeometryAttribute::NORMAL, "test", -1), 1);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/config/draco_options.h b/libs/assimp/contrib/draco/src/draco/compression/config/draco_options.h
new file mode 100644
index 0000000..2bd4a3b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/config/draco_options.h
@@ -0,0 +1,249 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
+#define DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
+
+#include <map>
+#include <memory>
+
+#include "draco/core/options.h"
+
+namespace draco {
+
+// Base option class used to control encoding and decoding. The geometry coding
+// can be controlled through the following options:
+// 1. Global options - Options specific to overall geometry or options common
+// for all attributes
+// 2. Per attribute options - Options specific to a given attribute.
+// Each attribute is identified by the template
+// argument AttributeKeyT that can be for example
+// the attribute type or the attribute id.
+//
+// Example:
+//
+// DracoOptions<AttributeKey> options;
+//
+// // Set an option common for all attributes.
+// options.SetGlobalInt("some_option_name", 2);
+//
+// // Geometry with two attributes.
+// AttributeKey att_key0 = in_key0;
+// AttributeKey att_key1 = in_key1;
+//
+// options.SetAttributeInt(att_key0, "some_option_name", 3);
+//
+// options.GetAttributeInt(att_key0, "some_option_name"); // Returns 3
+// options.GetAttributeInt(att_key1, "some_option_name"); // Returns 2
+// options.GetGlobalInt("some_option_name"); // Returns 2
+//
+template <typename AttributeKeyT>
+class DracoOptions {
+ public:
+ typedef AttributeKeyT AttributeKey;
+
+ // Get an option for a specific attribute key. If the option is not found in
+ // an attribute specific storage, the implementation will return a global
+ // option of the given name (if available). If the option is not found, the
+ // provided default value |default_val| is returned instead.
+ int GetAttributeInt(const AttributeKey &att_key, const std::string &name,
+ int default_val) const;
+
+ // Sets an option for a specific attribute key.
+ void SetAttributeInt(const AttributeKey &att_key, const std::string &name,
+ int val);
+
+ float GetAttributeFloat(const AttributeKey &att_key, const std::string &name,
+ float default_val) const;
+ void SetAttributeFloat(const AttributeKey &att_key, const std::string &name,
+ float val);
+ bool GetAttributeBool(const AttributeKey &att_key, const std::string &name,
+ bool default_val) const;
+ void SetAttributeBool(const AttributeKey &att_key, const std::string &name,
+ bool val);
+ template <typename DataTypeT>
+ bool GetAttributeVector(const AttributeKey &att_key, const std::string &name,
+ int num_dims, DataTypeT *val) const;
+ template <typename DataTypeT>
+ void SetAttributeVector(const AttributeKey &att_key, const std::string &name,
+ int num_dims, const DataTypeT *val);
+
+ bool IsAttributeOptionSet(const AttributeKey &att_key,
+ const std::string &name) const;
+
+ // Gets/sets a global option that is not specific to any attribute.
+ int GetGlobalInt(const std::string &name, int default_val) const {
+ return global_options_.GetInt(name, default_val);
+ }
+ void SetGlobalInt(const std::string &name, int val) {
+ global_options_.SetInt(name, val);
+ }
+ float GetGlobalFloat(const std::string &name, float default_val) const {
+ return global_options_.GetFloat(name, default_val);
+ }
+ void SetGlobalFloat(const std::string &name, float val) {
+ global_options_.SetFloat(name, val);
+ }
+ bool GetGlobalBool(const std::string &name, bool default_val) const {
+ return global_options_.GetBool(name, default_val);
+ }
+ void SetGlobalBool(const std::string &name, bool val) {
+ global_options_.SetBool(name, val);
+ }
+ template <typename DataTypeT>
+ bool GetGlobalVector(const std::string &name, int num_dims,
+ DataTypeT *val) const {
+ return global_options_.GetVector(name, num_dims, val);
+ }
+ template <typename DataTypeT>
+ void SetGlobalVector(const std::string &name, int num_dims,
+ const DataTypeT *val) {
+ global_options_.SetVector(name, val, num_dims);
+ }
+ bool IsGlobalOptionSet(const std::string &name) const {
+ return global_options_.IsOptionSet(name);
+ }
+
+ // Sets or replaces attribute options with the provided |options|.
+ void SetAttributeOptions(const AttributeKey &att_key, const Options &options);
+ void SetGlobalOptions(const Options &options) { global_options_ = options; }
+
+ // Returns |Options| instance for the specified options class if it exists.
+ const Options *FindAttributeOptions(const AttributeKeyT &att_key) const;
+ const Options &GetGlobalOptions() const { return global_options_; }
+
+ private:
+ Options *GetAttributeOptions(const AttributeKeyT &att_key);
+
+ Options global_options_;
+
+ // Storage for options related to geometry attributes.
+ std::map<AttributeKey, Options> attribute_options_;
+};
+
+template <typename AttributeKeyT>
+const Options *DracoOptions<AttributeKeyT>::FindAttributeOptions(
+ const AttributeKeyT &att_key) const {
+ auto it = attribute_options_.find(att_key);
+ if (it == attribute_options_.end()) {
+ return nullptr;
+ }
+ return &it->second;
+}
+
+template <typename AttributeKeyT>
+Options *DracoOptions<AttributeKeyT>::GetAttributeOptions(
+ const AttributeKeyT &att_key) {
+ auto it = attribute_options_.find(att_key);
+ if (it != attribute_options_.end()) {
+ return &it->second;
+ }
+ Options new_options;
+ it = attribute_options_.insert(std::make_pair(att_key, new_options)).first;
+ return &it->second;
+}
+
+template <typename AttributeKeyT>
+int DracoOptions<AttributeKeyT>::GetAttributeInt(const AttributeKeyT &att_key,
+ const std::string &name,
+ int default_val) const {
+ const Options *const att_options = FindAttributeOptions(att_key);
+ if (att_options && att_options->IsOptionSet(name)) {
+ return att_options->GetInt(name, default_val);
+ }
+ return global_options_.GetInt(name, default_val);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeInt(const AttributeKeyT &att_key,
+ const std::string &name,
+ int val) {
+ GetAttributeOptions(att_key)->SetInt(name, val);
+}
+
+template <typename AttributeKeyT>
+float DracoOptions<AttributeKeyT>::GetAttributeFloat(
+ const AttributeKeyT &att_key, const std::string &name,
+ float default_val) const {
+ const Options *const att_options = FindAttributeOptions(att_key);
+ if (att_options && att_options->IsOptionSet(name)) {
+ return att_options->GetFloat(name, default_val);
+ }
+ return global_options_.GetFloat(name, default_val);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeFloat(
+ const AttributeKeyT &att_key, const std::string &name, float val) {
+ GetAttributeOptions(att_key)->SetFloat(name, val);
+}
+
+template <typename AttributeKeyT>
+bool DracoOptions<AttributeKeyT>::GetAttributeBool(const AttributeKeyT &att_key,
+ const std::string &name,
+ bool default_val) const {
+ const Options *const att_options = FindAttributeOptions(att_key);
+ if (att_options && att_options->IsOptionSet(name)) {
+ return att_options->GetBool(name, default_val);
+ }
+ return global_options_.GetBool(name, default_val);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeBool(const AttributeKeyT &att_key,
+ const std::string &name,
+ bool val) {
+ GetAttributeOptions(att_key)->SetBool(name, val);
+}
+
+template <typename AttributeKeyT>
+template <typename DataTypeT>
+bool DracoOptions<AttributeKeyT>::GetAttributeVector(
+ const AttributeKey &att_key, const std::string &name, int num_dims,
+ DataTypeT *val) const {
+ const Options *const att_options = FindAttributeOptions(att_key);
+ if (att_options && att_options->IsOptionSet(name)) {
+ return att_options->GetVector(name, num_dims, val);
+ }
+ return global_options_.GetVector(name, num_dims, val);
+}
+
+template <typename AttributeKeyT>
+template <typename DataTypeT>
+void DracoOptions<AttributeKeyT>::SetAttributeVector(
+ const AttributeKey &att_key, const std::string &name, int num_dims,
+ const DataTypeT *val) {
+ GetAttributeOptions(att_key)->SetVector(name, val, num_dims);
+}
+
+template <typename AttributeKeyT>
+bool DracoOptions<AttributeKeyT>::IsAttributeOptionSet(
+ const AttributeKey &att_key, const std::string &name) const {
+ const Options *const att_options = FindAttributeOptions(att_key);
+ if (att_options) {
+ return att_options->IsOptionSet(name);
+ }
+ return global_options_.IsOptionSet(name);
+}
+
+template <typename AttributeKeyT>
+void DracoOptions<AttributeKeyT>::SetAttributeOptions(
+ const AttributeKey &att_key, const Options &options) {
+ Options *att_options = GetAttributeOptions(att_key);
+ *att_options = options;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/config/encoder_options.h b/libs/assimp/contrib/draco/src/draco/compression/config/encoder_options.h
new file mode 100644
index 0000000..ed1b020
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/config/encoder_options.h
@@ -0,0 +1,97 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_
+#define DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_
+
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/compression/config/draco_options.h"
+#include "draco/compression/config/encoding_features.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// EncoderOptions allow users to specify so called feature options that are used
+// to inform the encoder which encoding features can be used (i.e. which
+// features are going to be available to the decoder).
+template <typename AttributeKeyT>
+class EncoderOptionsBase : public DracoOptions<AttributeKeyT> {
+ public:
+ static EncoderOptionsBase CreateDefaultOptions() {
+ EncoderOptionsBase options;
+#ifdef DRACO_STANDARD_EDGEBREAKER_SUPPORTED
+ options.SetSupportedFeature(features::kEdgebreaker, true);
+#endif
+#ifdef DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED
+ options.SetSupportedFeature(features::kPredictiveEdgebreaker, true);
+#endif
+ return options;
+ }
+ static EncoderOptionsBase CreateEmptyOptions() {
+ return EncoderOptionsBase();
+ }
+
+ // Returns speed options with default value of 5.
+ int GetEncodingSpeed() const {
+ return this->GetGlobalInt("encoding_speed", 5);
+ }
+ int GetDecodingSpeed() const {
+ return this->GetGlobalInt("decoding_speed", 5);
+ }
+
+ // Returns the maximum speed for both encoding/decoding.
+ int GetSpeed() const {
+ const int encoding_speed = this->GetGlobalInt("encoding_speed", -1);
+ const int decoding_speed = this->GetGlobalInt("decoding_speed", -1);
+ const int max_speed = std::max(encoding_speed, decoding_speed);
+ if (max_speed == -1) {
+ return 5; // Default value.
+ }
+ return max_speed;
+ }
+
+ void SetSpeed(int encoding_speed, int decoding_speed) {
+ this->SetGlobalInt("encoding_speed", encoding_speed);
+ this->SetGlobalInt("decoding_speed", decoding_speed);
+ }
+
+ // Sets a given feature as supported or unsupported by the target decoder.
+ // Encoder will always use only supported features when encoding the input
+ // geometry.
+ void SetSupportedFeature(const std::string &name, bool supported) {
+ feature_options_.SetBool(name, supported);
+ }
+ bool IsFeatureSupported(const std::string &name) const {
+ return feature_options_.GetBool(name);
+ }
+
+ void SetFeatureOptions(const Options &options) { feature_options_ = options; }
+ const Options &GetFeaturelOptions() const { return feature_options_; }
+
+ private:
+ // Use helper methods to construct the encoder options.
+ // See CreateDefaultOptions();
+ EncoderOptionsBase() {}
+
+ // List of supported/unsupported features that can be used by the encoder.
+ Options feature_options_;
+};
+
+// Encoder options where attributes are identified by their attribute id.
+// Used to set options that are specific to a given geometry.
+typedef EncoderOptionsBase<int32_t> EncoderOptions;
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/config/encoding_features.h b/libs/assimp/contrib/draco/src/draco/compression/config/encoding_features.h
new file mode 100644
index 0000000..d6a8b71
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/config/encoding_features.h
@@ -0,0 +1,39 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides helpful macros that define features available for encoding
+// the input geometry. These macros can be used as an input in
+// the EncoderOptions::SetSupportedFeature() method instead of the text.
+// The most recent set of features supported
+// by the default implementation is:
+//
+// kEdgebreaker
+// - edgebreaker method for encoding meshes.
+// kPredictiveEdgebreaker
+// - advanced version of the edgebreaker method (slower but better
+// compression).
+//
+#ifndef DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_
+#define DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_
+
+namespace draco {
+namespace features {
+
+constexpr const char *kEdgebreaker = "standard_edgebreaker";
+constexpr const char *kPredictiveEdgebreaker = "predictive_edgebreaker";
+
+} // namespace features
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/decode.cc b/libs/assimp/contrib/draco/src/draco/compression/decode.cc
new file mode 100644
index 0000000..92ae4ff
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/decode.cc
@@ -0,0 +1,135 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/decode.h"
+
+#include "draco/compression/config/compression_shared.h"
+
+#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
+#include "draco/compression/mesh/mesh_edgebreaker_decoder.h"
+#include "draco/compression/mesh/mesh_sequential_decoder.h"
+#endif
+
+#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
+#include "draco/compression/point_cloud/point_cloud_kd_tree_decoder.h"
+#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
+#endif
+
+namespace draco {
+
+#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
+StatusOr<std::unique_ptr<PointCloudDecoder>> CreatePointCloudDecoder(
+ int8_t method) {
+ if (method == POINT_CLOUD_SEQUENTIAL_ENCODING) {
+ return std::unique_ptr<PointCloudDecoder>(
+ new PointCloudSequentialDecoder());
+ } else if (method == POINT_CLOUD_KD_TREE_ENCODING) {
+ return std::unique_ptr<PointCloudDecoder>(new PointCloudKdTreeDecoder());
+ }
+ return Status(Status::DRACO_ERROR, "Unsupported encoding method.");
+}
+#endif
+
+#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
+StatusOr<std::unique_ptr<MeshDecoder>> CreateMeshDecoder(uint8_t method) {
+ if (method == MESH_SEQUENTIAL_ENCODING) {
+ return std::unique_ptr<MeshDecoder>(new MeshSequentialDecoder());
+ } else if (method == MESH_EDGEBREAKER_ENCODING) {
+ return std::unique_ptr<MeshDecoder>(new MeshEdgebreakerDecoder());
+ }
+ return Status(Status::DRACO_ERROR, "Unsupported encoding method.");
+}
+#endif
+
+StatusOr<EncodedGeometryType> Decoder::GetEncodedGeometryType(
+ DecoderBuffer *in_buffer) {
+ DecoderBuffer temp_buffer(*in_buffer);
+ DracoHeader header;
+ DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header));
+ if (header.encoder_type >= NUM_ENCODED_GEOMETRY_TYPES) {
+ return Status(Status::DRACO_ERROR, "Unsupported geometry type.");
+ }
+ return static_cast<EncodedGeometryType>(header.encoder_type);
+}
+
+StatusOr<std::unique_ptr<PointCloud>> Decoder::DecodePointCloudFromBuffer(
+ DecoderBuffer *in_buffer) {
+ DRACO_ASSIGN_OR_RETURN(EncodedGeometryType type,
+ GetEncodedGeometryType(in_buffer))
+ if (type == POINT_CLOUD) {
+#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
+ std::unique_ptr<PointCloud> point_cloud(new PointCloud());
+ DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, point_cloud.get()))
+ return std::move(point_cloud);
+#endif
+ } else if (type == TRIANGULAR_MESH) {
+#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
+ std::unique_ptr<Mesh> mesh(new Mesh());
+ DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, mesh.get()))
+ return static_cast<std::unique_ptr<PointCloud>>(std::move(mesh));
+#endif
+ }
+ return Status(Status::DRACO_ERROR, "Unsupported geometry type.");
+}
+
+StatusOr<std::unique_ptr<Mesh>> Decoder::DecodeMeshFromBuffer(
+ DecoderBuffer *in_buffer) {
+ std::unique_ptr<Mesh> mesh(new Mesh());
+ DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, mesh.get()))
+ return std::move(mesh);
+}
+
+Status Decoder::DecodeBufferToGeometry(DecoderBuffer *in_buffer,
+ PointCloud *out_geometry) {
+#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
+ DecoderBuffer temp_buffer(*in_buffer);
+ DracoHeader header;
+ DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
+ if (header.encoder_type != POINT_CLOUD) {
+ return Status(Status::DRACO_ERROR, "Input is not a point cloud.");
+ }
+ DRACO_ASSIGN_OR_RETURN(std::unique_ptr<PointCloudDecoder> decoder,
+ CreatePointCloudDecoder(header.encoder_method))
+
+ DRACO_RETURN_IF_ERROR(decoder->Decode(options_, in_buffer, out_geometry))
+ return OkStatus();
+#else
+ return Status(Status::DRACO_ERROR, "Unsupported geometry type.");
+#endif
+}
+
+Status Decoder::DecodeBufferToGeometry(DecoderBuffer *in_buffer,
+ Mesh *out_geometry) {
+#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
+ DecoderBuffer temp_buffer(*in_buffer);
+ DracoHeader header;
+ DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
+ if (header.encoder_type != TRIANGULAR_MESH) {
+ return Status(Status::DRACO_ERROR, "Input is not a mesh.");
+ }
+ DRACO_ASSIGN_OR_RETURN(std::unique_ptr<MeshDecoder> decoder,
+ CreateMeshDecoder(header.encoder_method))
+
+ DRACO_RETURN_IF_ERROR(decoder->Decode(options_, in_buffer, out_geometry))
+ return OkStatus();
+#else
+ return Status(Status::DRACO_ERROR, "Unsupported geometry type.");
+#endif
+}
+
+void Decoder::SetSkipAttributeTransform(GeometryAttribute::Type att_type) {
+ options_.SetAttributeBool(att_type, "skip_attribute_transform", true);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/decode.h b/libs/assimp/contrib/draco/src/draco/compression/decode.h
new file mode 100644
index 0000000..5f3fad2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/decode.h
@@ -0,0 +1,80 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_DECODE_H_
+#define DRACO_COMPRESSION_DECODE_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/decoder_options.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/status_or.h"
+#include "draco/draco_features.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Class responsible for decoding of meshes and point clouds that were
+// compressed by a Draco encoder.
+class Decoder {
+ public:
+ // Returns the geometry type encoded in the input |in_buffer|.
+ // The return value is one of POINT_CLOUD, MESH or INVALID_GEOMETRY in case
+ // the input data is invalid.
+ // The decoded geometry type can be used to choose an appropriate decoding
+ // function for a given geometry type (see below).
+ static StatusOr<EncodedGeometryType> GetEncodedGeometryType(
+ DecoderBuffer *in_buffer);
+
+ // Decodes point cloud from the provided buffer. The buffer must be filled
+ // with data that was encoded with either the EncodePointCloudToBuffer or
+ // EncodeMeshToBuffer methods in encode.h. In case the input buffer contains
+ // mesh, the returned instance can be down-casted to Mesh.
+ StatusOr<std::unique_ptr<PointCloud>> DecodePointCloudFromBuffer(
+ DecoderBuffer *in_buffer);
+
+  // Decodes a triangular mesh from the provided buffer. The buffer must be
+  // filled with data that was encoded using the EncodeMeshToBuffer method in
+  // encode.h. The function returns an error status in case the input is
+  // invalid or if it was encoded with the EncodePointCloudToBuffer method.
+ StatusOr<std::unique_ptr<Mesh>> DecodeMeshFromBuffer(
+ DecoderBuffer *in_buffer);
+
+  // Decodes the buffer into a provided geometry. If the geometry is
+  // incompatible with the encoded data -- for example, when |out_geometry| is
+  // draco::Mesh while the data contains a point cloud -- the function returns
+  // an error status.
+ Status DecodeBufferToGeometry(DecoderBuffer *in_buffer,
+ PointCloud *out_geometry);
+ Status DecodeBufferToGeometry(DecoderBuffer *in_buffer, Mesh *out_geometry);
+
+ // When set, the decoder is going to skip attribute transform for a given
+ // attribute type. For example for quantized attributes, the decoder would
+ // skip the dequantization step and the returned geometry would contain an
+ // attribute with quantized values. The attribute would also contain an
+ // instance of AttributeTransform class that is used to describe the skipped
+ // transform, including all parameters that are needed to perform the
+ // transform manually.
+ void SetSkipAttributeTransform(GeometryAttribute::Type att_type);
+
+ // Returns the options instance used by the decoder that can be used by users
+ // to control the decoding process.
+ DecoderOptions *options() { return &options_; }
+
+ private:
+ DecoderOptions options_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_DECODE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/decode_test.cc b/libs/assimp/contrib/draco/src/draco/compression/decode_test.cc
new file mode 100644
index 0000000..1987146
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/decode_test.cc
@@ -0,0 +1,169 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/decode.h"
+
+#include <cinttypes>
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/file_utils.h"
+
+namespace {
+
+class DecodeTest : public ::testing::Test {
+ protected:
+ DecodeTest() {}
+};
+
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+TEST_F(DecodeTest, TestSkipAttributeTransform) {
+ const std::string file_name = "test_nm_quant.0.9.0.drc";
+ // Tests that decoders can successfully skip attribute transform.
+ std::vector<char> data;
+ ASSERT_TRUE(
+ draco::ReadFileToBuffer(draco::GetTestFileFullPath(file_name), &data));
+ ASSERT_FALSE(data.empty());
+
+ // Create a draco decoding buffer. Note that no data is copied in this step.
+ draco::DecoderBuffer buffer;
+ buffer.Init(data.data(), data.size());
+
+ draco::Decoder decoder;
+ // Make sure we skip dequantization for the position attribute.
+ decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+
+ // Decode the input data into a geometry.
+ std::unique_ptr<draco::PointCloud> pc =
+ decoder.DecodePointCloudFromBuffer(&buffer).value();
+ ASSERT_NE(pc, nullptr);
+
+ const draco::PointAttribute *const pos_att =
+ pc->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+ ASSERT_NE(pos_att, nullptr);
+
+ // Ensure the position attribute is of type int32_t and that it has a valid
+ // attribute transform.
+ ASSERT_EQ(pos_att->data_type(), draco::DT_INT32);
+ ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr);
+
+ // Normal attribute should be left transformed.
+ const draco::PointAttribute *const norm_att =
+ pc->GetNamedAttribute(draco::GeometryAttribute::NORMAL);
+ ASSERT_EQ(norm_att->data_type(), draco::DT_FLOAT32);
+ ASSERT_EQ(norm_att->GetAttributeTransformData(), nullptr);
+}
+#endif
+
+void TestSkipAttributeTransformOnPointCloudWithColor(const std::string &file) {
+ std::vector<char> data;
+ ASSERT_TRUE(draco::ReadFileToBuffer(draco::GetTestFileFullPath(file), &data));
+ ASSERT_FALSE(data.empty());
+
+ // Create a draco decoding buffer. Note that no data is copied in this step.
+ draco::DecoderBuffer buffer;
+ buffer.Init(data.data(), data.size());
+
+ draco::Decoder decoder;
+ // Make sure we skip dequantization for the position attribute.
+ decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+
+ // Decode the input data into a geometry.
+ std::unique_ptr<draco::PointCloud> pc =
+ decoder.DecodePointCloudFromBuffer(&buffer).value();
+ ASSERT_NE(pc, nullptr);
+
+ const draco::PointAttribute *const pos_att =
+ pc->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+ ASSERT_NE(pos_att, nullptr);
+
+ // Ensure the position attribute is of type int32_t or uint32_t and that it
+ // has a valid attribute transform.
+ ASSERT_TRUE(pos_att->data_type() == draco::DT_INT32 ||
+ pos_att->data_type() == draco::DT_UINT32);
+ ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr);
+
+ const draco::PointAttribute *const clr_att =
+ pc->GetNamedAttribute(draco::GeometryAttribute::COLOR);
+ ASSERT_EQ(clr_att->data_type(), draco::DT_UINT8);
+
+ // Ensure the color attribute was decoded correctly. Perform the decoding
+ // again without skipping the position dequantization and compare the
+ // attribute values.
+
+ draco::DecoderBuffer buffer_2;
+ buffer_2.Init(data.data(), data.size());
+
+ draco::Decoder decoder_2;
+
+ // Decode the input data into a geometry.
+ std::unique_ptr<draco::PointCloud> pc_2 =
+ decoder_2.DecodePointCloudFromBuffer(&buffer_2).value();
+ ASSERT_NE(pc_2, nullptr);
+
+ const draco::PointAttribute *const clr_att_2 =
+ pc_2->GetNamedAttribute(draco::GeometryAttribute::COLOR);
+ ASSERT_NE(clr_att_2, nullptr);
+ for (draco::PointIndex pi(0); pi < pc_2->num_points(); ++pi) {
+ // Colors should be exactly the same for both cases.
+ ASSERT_EQ(std::memcmp(clr_att->GetAddress(clr_att->mapped_index(pi)),
+ clr_att_2->GetAddress(clr_att_2->mapped_index(pi)),
+ clr_att->byte_stride()),
+ 0);
+ }
+}
+
+TEST_F(DecodeTest, TestSkipAttributeTransformOnPointCloud) {
+ // Tests that decoders can successfully skip attribute transform on a point
+ // cloud with multiple attributes encoded with one attributes encoder.
+ TestSkipAttributeTransformOnPointCloudWithColor("pc_color.drc");
+ TestSkipAttributeTransformOnPointCloudWithColor("pc_kd_color.drc");
+}
+
+TEST_F(DecodeTest, TestSkipAttributeTransformWithNoQuantization) {
+ // Tests that decoders can successfully skip attribute transform even though
+ // the input model was not quantized (it has no attribute transform).
+ const std::string file_name = "point_cloud_no_qp.drc";
+ std::vector<char> data;
+ ASSERT_TRUE(
+ draco::ReadFileToBuffer(draco::GetTestFileFullPath(file_name), &data));
+ ASSERT_FALSE(data.empty());
+
+ // Create a draco decoding buffer. Note that no data is copied in this step.
+ draco::DecoderBuffer buffer;
+ buffer.Init(data.data(), data.size());
+
+ draco::Decoder decoder;
+ // Make sure we skip dequantization for the position attribute.
+ decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+
+ // Decode the input data into a geometry.
+ std::unique_ptr<draco::PointCloud> pc =
+ decoder.DecodePointCloudFromBuffer(&buffer).value();
+ ASSERT_NE(pc, nullptr);
+
+ const draco::PointAttribute *const pos_att =
+ pc->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+ ASSERT_NE(pos_att, nullptr);
+
+ // Ensure the position attribute is of type float32 since the attribute was
+ // not quantized.
+ ASSERT_EQ(pos_att->data_type(), draco::DT_FLOAT32);
+
+ // Make sure there is no attribute transform available for the attribute.
+ ASSERT_EQ(pos_att->GetAttributeTransformData(), nullptr);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/encode.cc b/libs/assimp/contrib/draco/src/draco/compression/encode.cc
new file mode 100644
index 0000000..f380aec
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/encode.cc
@@ -0,0 +1,96 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/encode.h"
+
+#include "draco/compression/expert_encode.h"
+
+namespace draco {
+
+Encoder::Encoder() {}
+
+Status Encoder::EncodePointCloudToBuffer(const PointCloud &pc,
+ EncoderBuffer *out_buffer) {
+ ExpertEncoder encoder(pc);
+ encoder.Reset(CreateExpertEncoderOptions(pc));
+ return encoder.EncodeToBuffer(out_buffer);
+}
+
+Status Encoder::EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer) {
+ ExpertEncoder encoder(m);
+ encoder.Reset(CreateExpertEncoderOptions(m));
+ DRACO_RETURN_IF_ERROR(encoder.EncodeToBuffer(out_buffer));
+ set_num_encoded_points(encoder.num_encoded_points());
+ set_num_encoded_faces(encoder.num_encoded_faces());
+ return OkStatus();
+}
+
+EncoderOptions Encoder::CreateExpertEncoderOptions(const PointCloud &pc) const {
+ EncoderOptions ret_options = EncoderOptions::CreateEmptyOptions();
+ ret_options.SetGlobalOptions(options().GetGlobalOptions());
+ ret_options.SetFeatureOptions(options().GetFeaturelOptions());
+ // Convert type-based attribute options to specific attributes in the provided
+ // point cloud.
+ for (int i = 0; i < pc.num_attributes(); ++i) {
+ const Options *att_options =
+ options().FindAttributeOptions(pc.attribute(i)->attribute_type());
+ if (att_options) {
+ ret_options.SetAttributeOptions(i, *att_options);
+ }
+ }
+ return ret_options;
+}
+
+void Encoder::Reset(
+ const EncoderOptionsBase<GeometryAttribute::Type> &options) {
+ Base::Reset(options);
+}
+
+void Encoder::Reset() { Base::Reset(); }
+
+void Encoder::SetSpeedOptions(int encoding_speed, int decoding_speed) {
+ Base::SetSpeedOptions(encoding_speed, decoding_speed);
+}
+
+void Encoder::SetAttributeQuantization(GeometryAttribute::Type type,
+ int quantization_bits) {
+ options().SetAttributeInt(type, "quantization_bits", quantization_bits);
+}
+
+void Encoder::SetAttributeExplicitQuantization(GeometryAttribute::Type type,
+ int quantization_bits,
+ int num_dims,
+ const float *origin,
+ float range) {
+ options().SetAttributeInt(type, "quantization_bits", quantization_bits);
+ options().SetAttributeVector(type, "quantization_origin", num_dims, origin);
+ options().SetAttributeFloat(type, "quantization_range", range);
+}
+
+void Encoder::SetEncodingMethod(int encoding_method) {
+ Base::SetEncodingMethod(encoding_method);
+}
+
+Status Encoder::SetAttributePredictionScheme(GeometryAttribute::Type type,
+ int prediction_scheme_method) {
+ Status status = CheckPredictionScheme(type, prediction_scheme_method);
+ if (!status.ok()) {
+ return status;
+ }
+ options().SetAttributeInt(type, "prediction_scheme",
+ prediction_scheme_method);
+ return status;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/encode.h b/libs/assimp/contrib/draco/src/draco/compression/encode.h
new file mode 100644
index 0000000..bce8b34
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/encode.h
@@ -0,0 +1,140 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENCODE_H_
+#define DRACO_COMPRESSION_ENCODE_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/encoder_options.h"
+#include "draco/compression/encode_base.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/status.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Basic helper class for encoding geometry using the Draco compression library.
+// The class provides various methods that can be used to control several common
+// options used during the encoding, such as the number of quantization bits for
+// a given attribute. All these options are defined per attribute type, i.e.,
+// if there are more attributes of the same type (such as multiple texture
+// coordinate attributes), the same options are going to be used for all of the
+// attributes of this type. If different attributes of the same type need to
+// use different options, use ExpertEncoder in expert_encode.h.
+class Encoder
+ : public EncoderBase<EncoderOptionsBase<GeometryAttribute::Type>> {
+ public:
+ typedef EncoderBase<EncoderOptionsBase<GeometryAttribute::Type>> Base;
+
+ Encoder();
+ virtual ~Encoder() {}
+
+ // Encodes a point cloud to the provided buffer.
+ virtual Status EncodePointCloudToBuffer(const PointCloud &pc,
+ EncoderBuffer *out_buffer);
+
+ // Encodes a mesh to the provided buffer.
+ virtual Status EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer);
+
+ // Set encoder options used during the geometry encoding. Note that this call
+ // overwrites any modifications to the options done with the functions below,
+ // i.e., it resets the encoder.
+ void Reset(const EncoderOptionsBase<GeometryAttribute::Type> &options);
+ void Reset();
+
+ // Sets the desired encoding and decoding speed for the given options.
+ //
+ // 0 = slowest speed, but the best compression.
+ // 10 = fastest, but the worst compression.
+ // -1 = undefined.
+ //
+ // Note that both speed options affect the encoder choice of used methods and
+ // algorithms. For example, a requirement for fast decoding may prevent the
+ // encoder from using the best compression methods even if the encoding speed
+ // is set to 0. In general, the faster of the two options limits the choice of
+ // features that can be used by the encoder. Additionally, setting
+ // |decoding_speed| to be faster than the |encoding_speed| may allow the
+ // encoder to choose the optimal method out of the available features for the
+ // given |decoding_speed|.
+ void SetSpeedOptions(int encoding_speed, int decoding_speed);
+
+ // Sets the quantization compression options for a named attribute. The
+ // attribute values will be quantized in a box defined by the maximum extent
+ // of the attribute values. I.e., the actual precision of this option depends
+ // on the scale of the attribute values.
+ void SetAttributeQuantization(GeometryAttribute::Type type,
+ int quantization_bits);
+
+ // Sets the explicit quantization compression for a named attribute. The
+ // attribute values will be quantized in a coordinate system defined by the
+ // provided origin and range (the input values should be within interval:
+ // <origin, origin + range>).
+ void SetAttributeExplicitQuantization(GeometryAttribute::Type type,
+ int quantization_bits, int num_dims,
+ const float *origin, float range);
+
+ // Sets the desired prediction method for a given attribute. By default,
+ // prediction scheme is selected automatically by the encoder using other
+ // provided options (such as speed) and input geometry type (mesh, point
+ // cloud). This function should be called only when a specific prediction is
+ // preferred (e.g., when it is known that the encoder would select a less
+ // optimal prediction for the given input data).
+ //
+ // |prediction_scheme_method| should be one of the entries defined in
+ // compression/config/compression_shared.h :
+ //
+ // PREDICTION_NONE - use no prediction.
+ // PREDICTION_DIFFERENCE - delta coding
+ // MESH_PREDICTION_PARALLELOGRAM - parallelogram prediction for meshes.
+ // MESH_PREDICTION_CONSTRAINED_PARALLELOGRAM
+ // - better and more costly version of the parallelogram prediction.
+ // MESH_PREDICTION_TEX_COORDS_PORTABLE
+ // - specialized predictor for tex coordinates.
+ // MESH_PREDICTION_GEOMETRIC_NORMAL
+ // - specialized predictor for normal coordinates.
+ //
+ // Note that in case the desired prediction cannot be used, the default
+ // prediction will be automatically used instead.
+ Status SetAttributePredictionScheme(GeometryAttribute::Type type,
+ int prediction_scheme_method);
+
+ // Sets the desired encoding method for a given geometry. By default, encoding
+ // method is selected based on the properties of the input geometry and based
+ // on the other options selected in the used EncoderOptions (such as desired
+ // encoding and decoding speed). This function should be called only when a
+ // specific method is required.
+ //
+ // |encoding_method| can be one of the values defined in
+ // compression/config/compression_shared.h based on the type of the input
+ // geometry that is going to be encoded. For point clouds, allowed entries are
+ // POINT_CLOUD_SEQUENTIAL_ENCODING
+ // POINT_CLOUD_KD_TREE_ENCODING
+ //
+ // For meshes the input can be
+ // MESH_SEQUENTIAL_ENCODING
+ // MESH_EDGEBREAKER_ENCODING
+ //
+ // If the selected method cannot be used for the given input, the subsequent
+ // call of EncodePointCloudToBuffer or EncodeMeshToBuffer is going to fail.
+ void SetEncodingMethod(int encoding_method);
+
+ protected:
+ // Creates encoder options for the expert encoder used during the actual
+ // encoding.
+ EncoderOptions CreateExpertEncoderOptions(const PointCloud &pc) const;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENCODE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/encode_base.h b/libs/assimp/contrib/draco/src/draco/compression/encode_base.h
new file mode 100644
index 0000000..c501bc4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/encode_base.h
@@ -0,0 +1,131 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENCODE_BASE_H_
+#define DRACO_COMPRESSION_ENCODE_BASE_H_
+
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/status.h"
+
+namespace draco {
+
+// Base class for our geometry encoder classes. |EncoderOptionsT| specifies
+// options class used by the encoder. Please, see encode.h and expert_encode.h
+// for more details and method descriptions.
+template <class EncoderOptionsT>
+class EncoderBase {
+ public:
+ typedef EncoderOptionsT OptionsType;
+
+ EncoderBase()
+ : options_(EncoderOptionsT::CreateDefaultOptions()),
+ num_encoded_points_(0),
+ num_encoded_faces_(0) {}
+ virtual ~EncoderBase() {}
+
+ const EncoderOptionsT &options() const { return options_; }
+ EncoderOptionsT &options() { return options_; }
+
+ // If enabled, it tells the encoder to keep track of the number of encoded
+ // points and faces (default = false).
+ // Note that this can slow down encoding for certain encoders.
+ void SetTrackEncodedProperties(bool flag);
+
+ // Returns the number of encoded points and faces during the last encoding
+ // operation. Returns 0 if SetTrackEncodedProperties() was not set.
+ size_t num_encoded_points() const { return num_encoded_points_; }
+ size_t num_encoded_faces() const { return num_encoded_faces_; }
+
+ protected:
+ void Reset(const EncoderOptionsT &options) { options_ = options; }
+
+ void Reset() { options_ = EncoderOptionsT::CreateDefaultOptions(); }
+
+ void SetSpeedOptions(int encoding_speed, int decoding_speed) {
+ options_.SetSpeed(encoding_speed, decoding_speed);
+ }
+
+ void SetEncodingMethod(int encoding_method) {
+ options_.SetGlobalInt("encoding_method", encoding_method);
+ }
+
+ void SetEncodingSubmethod(int encoding_submethod) {
+ options_.SetGlobalInt("encoding_submethod", encoding_submethod);
+ }
+
+ Status CheckPredictionScheme(GeometryAttribute::Type att_type,
+ int prediction_scheme) const {
+ // Out of bound checks:
+ if (prediction_scheme < PREDICTION_NONE) {
+ return Status(Status::DRACO_ERROR,
+ "Invalid prediction scheme requested.");
+ }
+ if (prediction_scheme >= NUM_PREDICTION_SCHEMES) {
+ return Status(Status::DRACO_ERROR,
+ "Invalid prediction scheme requested.");
+ }
+ // Deprecated prediction schemes:
+ if (prediction_scheme == MESH_PREDICTION_TEX_COORDS_DEPRECATED) {
+ return Status(Status::DRACO_ERROR,
+ "MESH_PREDICTION_TEX_COORDS_DEPRECATED is deprecated.");
+ }
+ if (prediction_scheme == MESH_PREDICTION_MULTI_PARALLELOGRAM) {
+ return Status(Status::DRACO_ERROR,
+ "MESH_PREDICTION_MULTI_PARALLELOGRAM is deprecated.");
+ }
+ // Attribute specific checks:
+ if (prediction_scheme == MESH_PREDICTION_TEX_COORDS_PORTABLE) {
+ if (att_type != GeometryAttribute::TEX_COORD) {
+ return Status(Status::DRACO_ERROR,
+ "Invalid prediction scheme for attribute type.");
+ }
+ }
+ if (prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL) {
+ if (att_type != GeometryAttribute::NORMAL) {
+ return Status(Status::DRACO_ERROR,
+ "Invalid prediction scheme for attribute type.");
+ }
+ }
+ // TODO(hemmer): Try to enable more prediction schemes for normals.
+ if (att_type == GeometryAttribute::NORMAL) {
+ if (!(prediction_scheme == PREDICTION_DIFFERENCE ||
+ prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL)) {
+ return Status(Status::DRACO_ERROR,
+ "Invalid prediction scheme for attribute type.");
+ }
+ }
+ return OkStatus();
+ }
+
+ protected:
+ void set_num_encoded_points(size_t num) { num_encoded_points_ = num; }
+ void set_num_encoded_faces(size_t num) { num_encoded_faces_ = num; }
+
+ private:
+ EncoderOptionsT options_;
+
+ size_t num_encoded_points_;
+ size_t num_encoded_faces_;
+};
+
+template <class EncoderOptionsT>
+void EncoderBase<EncoderOptionsT>::SetTrackEncodedProperties(bool flag) {
+ options_.SetGlobalBool("store_number_of_encoded_points", flag);
+ options_.SetGlobalBool("store_number_of_encoded_faces", flag);
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENCODE_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/encode_test.cc b/libs/assimp/contrib/draco/src/draco/compression/encode_test.cc
new file mode 100644
index 0000000..fde4f6f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/encode_test.cc
@@ -0,0 +1,407 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "draco/compression/encode.h"
+
+#include <cinttypes>
+#include <fstream>
+#include <sstream>
+
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/decode.h"
+#include "draco/compression/expert_encode.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/core/vector_d.h"
+#include "draco/io/obj_decoder.h"
+#include "draco/mesh/triangle_soup_mesh_builder.h"
+#include "draco/point_cloud/point_cloud_builder.h"
+
+namespace {
+
+class EncodeTest : public ::testing::Test {
+ protected:
+ EncodeTest() {}
+ std::unique_ptr<draco::Mesh> CreateTestMesh() const {
+ draco::TriangleSoupMeshBuilder mesh_builder;
+
+ // Create a simple mesh with one face.
+ mesh_builder.Start(1);
+
+ // Add one position attribute and two texture coordinate attributes.
+ const int32_t pos_att_id = mesh_builder.AddAttribute(
+ draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32);
+ const int32_t tex_att_id_0 = mesh_builder.AddAttribute(
+ draco::GeometryAttribute::TEX_COORD, 2, draco::DT_FLOAT32);
+ const int32_t tex_att_id_1 = mesh_builder.AddAttribute(
+ draco::GeometryAttribute::TEX_COORD, 2, draco::DT_FLOAT32);
+
+ // Initialize the attribute values.
+ mesh_builder.SetAttributeValuesForFace(
+ pos_att_id, draco::FaceIndex(0), draco::Vector3f(0.f, 0.f, 0.f).data(),
+ draco::Vector3f(1.f, 0.f, 0.f).data(),
+ draco::Vector3f(1.f, 1.f, 0.f).data());
+ mesh_builder.SetAttributeValuesForFace(
+ tex_att_id_0, draco::FaceIndex(0), draco::Vector2f(0.f, 0.f).data(),
+ draco::Vector2f(1.f, 0.f).data(), draco::Vector2f(1.f, 1.f).data());
+ mesh_builder.SetAttributeValuesForFace(
+ tex_att_id_1, draco::FaceIndex(0), draco::Vector2f(0.f, 0.f).data(),
+ draco::Vector2f(1.f, 0.f).data(), draco::Vector2f(1.f, 1.f).data());
+
+ return mesh_builder.Finalize();
+ }
+
+ std::unique_ptr<draco::PointCloud> CreateTestPointCloud() const {
+ draco::PointCloudBuilder pc_builder;
+
+ constexpr int kNumPoints = 100;
+ constexpr int kNumGenAttCoords0 = 4;
+ constexpr int kNumGenAttCoords1 = 6;
+ pc_builder.Start(kNumPoints);
+
+ // Add one position attribute and two generic attributes.
+ const int32_t pos_att_id = pc_builder.AddAttribute(
+ draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32);
+ const int32_t gen_att_id_0 = pc_builder.AddAttribute(
+ draco::GeometryAttribute::GENERIC, kNumGenAttCoords0, draco::DT_UINT32);
+ const int32_t gen_att_id_1 = pc_builder.AddAttribute(
+ draco::GeometryAttribute::GENERIC, kNumGenAttCoords1, draco::DT_UINT8);
+
+ std::vector<uint32_t> gen_att_data_0(kNumGenAttCoords0);
+ std::vector<uint32_t> gen_att_data_1(kNumGenAttCoords1);
+
+ // Initialize the attribute values.
+ for (draco::PointIndex i(0); i < kNumPoints; ++i) {
+ const float pos_coord = static_cast<float>(i.value());
+ pc_builder.SetAttributeValueForPoint(
+ pos_att_id, i,
+ draco::Vector3f(pos_coord, -pos_coord, pos_coord).data());
+
+ for (int j = 0; j < kNumGenAttCoords0; ++j) {
+ gen_att_data_0[j] = i.value();
+ }
+ pc_builder.SetAttributeValueForPoint(gen_att_id_0, i,
+ gen_att_data_0.data());
+
+ for (int j = 0; j < kNumGenAttCoords1; ++j) {
+ gen_att_data_1[j] = -i.value();
+ }
+ pc_builder.SetAttributeValueForPoint(gen_att_id_1, i,
+ gen_att_data_1.data());
+ }
+ return pc_builder.Finalize(false);
+ }
+
+ std::unique_ptr<draco::PointCloud> CreateTestPointCloudPosNorm() const {
+ draco::PointCloudBuilder pc_builder;
+
+ constexpr int kNumPoints = 20;
+ pc_builder.Start(kNumPoints);
+
+ // Add one position attribute and a normal attribute.
+ const int32_t pos_att_id = pc_builder.AddAttribute(
+ draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32);
+ const int32_t norm_att_id = pc_builder.AddAttribute(
+ draco::GeometryAttribute::NORMAL, 3, draco::DT_FLOAT32);
+
+ // Initialize the attribute values.
+ for (draco::PointIndex i(0); i < kNumPoints; ++i) {
+ const float pos_coord = static_cast<float>(i.value());
+ pc_builder.SetAttributeValueForPoint(
+ pos_att_id, i,
+ draco::Vector3f(pos_coord, -pos_coord, pos_coord).data());
+
+ // Pseudo-random normal.
+ draco::Vector3f norm(pos_coord * 2.f, pos_coord - 2.f, pos_coord * 3.f);
+ norm.Normalize();
+ pc_builder.SetAttributeValueForPoint(norm_att_id, i, norm.data());
+ }
+
+ return pc_builder.Finalize(false);
+ }
+
+ int GetQuantizationBitsFromAttribute(const draco::PointAttribute *att) const {
+ if (att == nullptr) {
+ return -1;
+ }
+ draco::AttributeQuantizationTransform transform;
+ if (!transform.InitFromAttribute(*att)) {
+ return -1;
+ }
+ return transform.quantization_bits();
+ }
+
+ void VerifyNumQuantizationBits(const draco::EncoderBuffer &buffer,
+ int pos_quantization,
+ int tex_coord_0_quantization,
+ int tex_coord_1_quantization) const {
+ draco::Decoder decoder;
+
+ // Skip the dequantization for the attributes which will allow us to get
+ // the number of quantization bits used during encoding.
+ decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+ decoder.SetSkipAttributeTransform(draco::GeometryAttribute::TEX_COORD);
+
+ draco::DecoderBuffer in_buffer;
+ in_buffer.Init(buffer.data(), buffer.size());
+ auto mesh = decoder.DecodeMeshFromBuffer(&in_buffer).value();
+ ASSERT_NE(mesh, nullptr);
+ ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(0)),
+ pos_quantization);
+ ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(1)),
+ tex_coord_0_quantization);
+ ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(2)),
+ tex_coord_1_quantization);
+ }
+
+ // Tests that the encoder returns the correct number of encoded points and
+ // faces for a given mesh or point cloud.
+ void TestNumberOfEncodedEntries(const std::string &file_name,
+ int32_t encoding_method) {
+ std::unique_ptr<draco::PointCloud> geometry;
+ draco::Mesh *mesh = nullptr;
+
+ if (encoding_method == draco::MESH_EDGEBREAKER_ENCODING ||
+ encoding_method == draco::MESH_SEQUENTIAL_ENCODING) {
+ std::unique_ptr<draco::Mesh> mesh_tmp =
+ draco::ReadMeshFromTestFile(file_name);
+ mesh = mesh_tmp.get();
+ if (!mesh->DeduplicateAttributeValues()) {
+ return;
+ }
+ mesh->DeduplicatePointIds();
+ geometry = std::move(mesh_tmp);
+ } else {
+ geometry = draco::ReadPointCloudFromTestFile(file_name);
+ }
+ ASSERT_NE(mesh, nullptr);
+
+ draco::Encoder encoder;
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 14);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 12);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 10);
+
+ encoder.SetEncodingMethod(encoding_method);
+
+ encoder.SetTrackEncodedProperties(true);
+
+ draco::EncoderBuffer buffer;
+ if (mesh) {
+ encoder.EncodeMeshToBuffer(*mesh, &buffer);
+ } else {
+ encoder.EncodePointCloudToBuffer(*geometry, &buffer);
+ }
+
+ // Ensure the logged number of encoded points and faces matches the number
+ // we get from the decoder.
+
+ draco::DecoderBuffer decoder_buffer;
+ decoder_buffer.Init(buffer.data(), buffer.size());
+ draco::Decoder decoder;
+
+ if (mesh) {
+ auto maybe_mesh = decoder.DecodeMeshFromBuffer(&decoder_buffer);
+ ASSERT_TRUE(maybe_mesh.ok());
+ auto decoded_mesh = std::move(maybe_mesh).value();
+ ASSERT_NE(decoded_mesh, nullptr);
+ ASSERT_EQ(decoded_mesh->num_points(), encoder.num_encoded_points());
+ ASSERT_EQ(decoded_mesh->num_faces(), encoder.num_encoded_faces());
+ } else {
+ auto maybe_pc = decoder.DecodePointCloudFromBuffer(&decoder_buffer);
+ ASSERT_TRUE(maybe_pc.ok());
+ auto decoded_pc = std::move(maybe_pc).value();
+ ASSERT_EQ(decoded_pc->num_points(), encoder.num_encoded_points());
+ }
+ }
+};
+
+TEST_F(EncodeTest, TestExpertEncoderQuantization) {
+ // This test verifies that the expert encoder can quantize individual
+ // attributes even if they have the same type.
+ auto mesh = CreateTestMesh();
+ ASSERT_NE(mesh, nullptr);
+
+ draco::ExpertEncoder encoder(*mesh);
+ encoder.SetAttributeQuantization(0, 16); // Position quantization.
+ encoder.SetAttributeQuantization(1, 15); // Tex-coord 0 quantization.
+ encoder.SetAttributeQuantization(2, 14); // Tex-coord 1 quantization.
+
+ draco::EncoderBuffer buffer;
+ encoder.EncodeToBuffer(&buffer);
+ VerifyNumQuantizationBits(buffer, 16, 15, 14);
+}
+
+TEST_F(EncodeTest, TestEncoderQuantization) {
+ // This test verifies that Encoder applies the same quantization to all
+ // attributes of the same type.
+ auto mesh = CreateTestMesh();
+ ASSERT_NE(mesh, nullptr);
+
+ draco::Encoder encoder;
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 15);
+
+ draco::EncoderBuffer buffer;
+ encoder.EncodeMeshToBuffer(*mesh, &buffer);
+ VerifyNumQuantizationBits(buffer, 16, 15, 15);
+}
+
+TEST_F(EncodeTest, TestLinesObj) {
+ // This test verifies that Encoder can encode file that contains only line
+ // segments (that are ignored).
+ std::unique_ptr<draco::Mesh> mesh(
+ draco::ReadMeshFromTestFile("test_lines.obj"));
+ ASSERT_NE(mesh, nullptr);
+ ASSERT_EQ(mesh->num_faces(), 0);
+ std::unique_ptr<draco::PointCloud> pc(
+ draco::ReadPointCloudFromTestFile("test_lines.obj"));
+ ASSERT_NE(pc, nullptr);
+
+ draco::Encoder encoder;
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16);
+
+ draco::EncoderBuffer buffer;
+ ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+}
+
+TEST_F(EncodeTest, TestQuantizedInfinity) {
+ // This test verifies that Encoder fails to encode point cloud when requesting
+ // quantization of attribute that contains infinity values.
+ std::unique_ptr<draco::PointCloud> pc(
+ draco::ReadPointCloudFromTestFile("float_inf_point_cloud.ply"));
+ ASSERT_NE(pc, nullptr);
+
+ {
+ draco::Encoder encoder;
+ encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11);
+
+ draco::EncoderBuffer buffer;
+ ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+ }
+
+ {
+ draco::Encoder encoder;
+ encoder.SetEncodingMethod(draco::POINT_CLOUD_KD_TREE_ENCODING);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11);
+
+ draco::EncoderBuffer buffer;
+ ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+ }
+}
+
+TEST_F(EncodeTest, TestUnquantizedInfinity) {
+ // This test verifies that Encoder can successfully encode point cloud when
+ // not requesting quantization of attribute that contains infinity values.
+ std::unique_ptr<draco::PointCloud> pc(
+ draco::ReadPointCloudFromTestFile("float_inf_point_cloud.ply"));
+ ASSERT_NE(pc, nullptr);
+
+ // Note that the KD tree encoding method is not applicable to float values.
+ draco::Encoder encoder;
+ encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING);
+
+ draco::EncoderBuffer buffer;
+ ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+}
+
+TEST_F(EncodeTest, TestQuantizedAndUnquantizedAttributes) {
+ // This test verifies that Encoder can successfully encode point cloud with
+  // two float attributes - one quantized and another unquantized. The encoder
+ // defaults to sequential encoding in this case.
+ std::unique_ptr<draco::PointCloud> pc(
+ draco::ReadPointCloudFromTestFile("float_two_att_point_cloud.ply"));
+ ASSERT_NE(pc, nullptr);
+
+ draco::Encoder encoder;
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 0);
+ draco::EncoderBuffer buffer;
+ ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+}
+
+TEST_F(EncodeTest, TestKdTreeEncoding) {
+ // This test verifies that the API can successfully encode a point cloud
+ // defined by several attributes using the kd tree method.
+ std::unique_ptr<draco::PointCloud> pc = CreateTestPointCloud();
+ ASSERT_NE(pc, nullptr);
+
+ draco::EncoderBuffer buffer;
+ draco::Encoder encoder;
+ encoder.SetEncodingMethod(draco::POINT_CLOUD_KD_TREE_ENCODING);
+ // First try it without quantizing positions which should fail.
+ ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+
+ // Now set quantization for the position attribute which should make
+ // the encoder happy.
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16);
+ ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+}
+
+TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntries) {
+ TestNumberOfEncodedEntries("deg_faces.obj", draco::MESH_EDGEBREAKER_ENCODING);
+ TestNumberOfEncodedEntries("deg_faces.obj", draco::MESH_SEQUENTIAL_ENCODING);
+ TestNumberOfEncodedEntries("cube_att.obj", draco::MESH_EDGEBREAKER_ENCODING);
+ TestNumberOfEncodedEntries("test_nm.obj", draco::MESH_EDGEBREAKER_ENCODING);
+ TestNumberOfEncodedEntries("test_nm.obj", draco::MESH_SEQUENTIAL_ENCODING);
+ TestNumberOfEncodedEntries("cube_subd.obj",
+ draco::POINT_CLOUD_KD_TREE_ENCODING);
+ TestNumberOfEncodedEntries("cube_subd.obj",
+ draco::POINT_CLOUD_SEQUENTIAL_ENCODING);
+}
+
+TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntriesNotSet) {
+  // Tests that when tracking of encoded properties is disabled, the returned
+ // number of encoded faces and points is 0.
+ std::unique_ptr<draco::Mesh> mesh(
+ draco::ReadMeshFromTestFile("cube_att.obj"));
+ ASSERT_NE(mesh, nullptr);
+
+ draco::EncoderBuffer buffer;
+ draco::Encoder encoder;
+
+ ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok());
+ ASSERT_EQ(encoder.num_encoded_points(), 0);
+ ASSERT_EQ(encoder.num_encoded_faces(), 0);
+}
+
+TEST_F(EncodeTest, TestNoPosQuantizationNormalCoding) {
+ // Tests that we can encode and decode a file with quantized normals but
+ // non-quantized positions.
+ const auto mesh = draco::ReadMeshFromTestFile("test_nm.obj");
+ ASSERT_NE(mesh, nullptr);
+
+ // The mesh should have positions and normals.
+ ASSERT_NE(mesh->GetNamedAttribute(draco::GeometryAttribute::POSITION),
+ nullptr);
+ ASSERT_NE(mesh->GetNamedAttribute(draco::GeometryAttribute::NORMAL), nullptr);
+
+ draco::EncoderBuffer buffer;
+ draco::Encoder encoder;
+ // No quantization for positions.
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 8);
+
+ DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer));
+
+ draco::Decoder decoder;
+
+ draco::DecoderBuffer in_buffer;
+ in_buffer.Init(buffer.data(), buffer.size());
+ const auto decoded_mesh = decoder.DecodeMeshFromBuffer(&in_buffer).value();
+ ASSERT_NE(decoded_mesh, nullptr);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/ans.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/ans.h
new file mode 100644
index 0000000..c71d589
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/ans.h
@@ -0,0 +1,527 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENTROPY_ANS_H_
+#define DRACO_COMPRESSION_ENTROPY_ANS_H_
+// An implementation of Asymmetric Numeral Systems (rANS).
+// See http://arxiv.org/abs/1311.2540v2 for more information on rANS.
+// This file is based off libvpx's ans.h.
+
+#include <vector>
+
+#define DRACO_ANS_DIVIDE_BY_MULTIPLY 1
+#if DRACO_ANS_DIVIDE_BY_MULTIPLY
+#include "draco/core/divide.h"
+#endif
+#include "draco/core/macros.h"
+
+namespace draco {
+
+#if DRACO_ANS_DIVIDE_BY_MULTIPLY
+
+#define DRACO_ANS_DIVREM(quotient, remainder, dividend, divisor) \
+ do { \
+ quotient = fastdiv(dividend, divisor); \
+ remainder = dividend - quotient * divisor; \
+ } while (0)
+#define DRACO_ANS_DIV(dividend, divisor) fastdiv(dividend, divisor)
+#else
+#define DRACO_ANS_DIVREM(quotient, remainder, dividend, divisor) \
+ do { \
+ quotient = dividend / divisor; \
+ remainder = dividend % divisor; \
+ } while (0)
+#define DRACO_ANS_DIV(dividend, divisor) ((dividend) / (divisor))
+#endif
+
// Mutable state of an rANS encoder: the (non-owned) output byte buffer, the
// next write position within it, and the current coder state word.
struct AnsCoder {
  AnsCoder() {}
  uint8_t *buf = nullptr;  // Output buffer (not owned by the coder).
  int buf_offset = 0;      // Index of the next byte to write in |buf|.
  uint32_t state = 0;      // Current rANS state.
};
+
// Mutable state of an rANS decoder: the (non-owned) input byte buffer, the
// read position within it, and the current coder state word.
struct AnsDecoder {
  AnsDecoder() {}
  const uint8_t *buf = nullptr;  // Input buffer (not owned by the decoder).
  int buf_offset = 0;            // Bytes still unread in |buf|.
  uint32_t state = 0;            // Current rANS state.
};
+
+typedef uint8_t AnsP8;
+#define DRACO_ANS_P8_PRECISION 256u
+#define DRACO_ANS_L_BASE (4096u)
+#define DRACO_ANS_IO_BASE 256
+
// Reads a 16-bit unsigned value stored little-endian at |vmem|.
// Marked inline for consistency with the other mem_get_le*/mem_put_le*
// helpers in this header, avoiding unused-function warnings / duplicate
// code when the header is included from multiple translation units.
static inline uint32_t mem_get_le16(const void *vmem) {
  const uint8_t *mem = static_cast<const uint8_t *>(vmem);
  uint32_t val = static_cast<uint32_t>(mem[1]) << 8;
  val |= mem[0];
  return val;
}
+
// Reads a 24-bit unsigned value stored little-endian at |vmem|.
// Marked inline for consistency with the other mem_get_le*/mem_put_le*
// helpers in this header; shifts are done in uint32_t to avoid relying on
// the implicit int promotion of the byte operands.
static inline uint32_t mem_get_le24(const void *vmem) {
  const uint8_t *mem = static_cast<const uint8_t *>(vmem);
  uint32_t val = static_cast<uint32_t>(mem[2]) << 16;
  val |= static_cast<uint32_t>(mem[1]) << 8;
  val |= mem[0];
  return val;
}
+
// Reads a 32-bit unsigned value stored little-endian at |vmem|.
// The high byte is cast to uint32_t before shifting: `mem[3] << 24` would
// shift a promoted (signed) int into its sign bit for byte values >= 0x80,
// which is undefined behavior before C++20.
static inline uint32_t mem_get_le32(const void *vmem) {
  const uint8_t *mem = static_cast<const uint8_t *>(vmem);
  uint32_t val = static_cast<uint32_t>(mem[3]) << 24;
  val |= static_cast<uint32_t>(mem[2]) << 16;
  val |= static_cast<uint32_t>(mem[1]) << 8;
  val |= mem[0];
  return val;
}
+
// Stores the low 16 bits of |val| at |vmem| in little-endian byte order.
static inline void mem_put_le16(void *vmem, uint32_t val) {
  uint8_t *out = reinterpret_cast<uint8_t *>(vmem);
  for (int i = 0; i < 2; ++i) {
    out[i] = (val >> (8 * i)) & 0xff;
  }
}
+
// Stores the low 24 bits of |val| at |vmem| in little-endian byte order.
static inline void mem_put_le24(void *vmem, uint32_t val) {
  uint8_t *out = reinterpret_cast<uint8_t *>(vmem);
  for (int i = 0; i < 3; ++i) {
    out[i] = (val >> (8 * i)) & 0xff;
  }
}
+
// Stores all 32 bits of |val| at |vmem| in little-endian byte order.
static inline void mem_put_le32(void *vmem, uint32_t val) {
  uint8_t *out = reinterpret_cast<uint8_t *>(vmem);
  for (int i = 0; i < 4; ++i) {
    out[i] = (val >> (8 * i)) & 0xff;
  }
}
+
+static inline void ans_write_init(struct AnsCoder *const ans,
+ uint8_t *const buf) {
+ ans->buf = buf;
+ ans->buf_offset = 0;
+ ans->state = DRACO_ANS_L_BASE;
+}
+
// Flushes the final coder state to the output buffer and returns the total
// number of bytes written. The state (minus its DRACO_ANS_L_BASE bias) is
// serialized into 1-3 trailing bytes; the top two bits of the final byte
// carry a size marker (0, 1 or 2) so ans_read_init() can recover the width.
static inline int ans_write_end(struct AnsCoder *const ans) {
  uint32_t state;
  DRACO_DCHECK_GE(ans->state, DRACO_ANS_L_BASE);
  DRACO_DCHECK_LT(ans->state, DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE);
  state = ans->state - DRACO_ANS_L_BASE;
  if (state < (1 << 6)) {
    // State fits in 6 bits: one byte, marker 0 in the top two bits.
    ans->buf[ans->buf_offset] = (0x00 << 6) + state;
    return ans->buf_offset + 1;
  } else if (state < (1 << 14)) {
    // State fits in 14 bits: two bytes, marker 1.
    mem_put_le16(ans->buf + ans->buf_offset, (0x01 << 14) + state);
    return ans->buf_offset + 2;
  } else if (state < (1 << 22)) {
    // State fits in 22 bits: three bytes, marker 2.
    mem_put_le24(ans->buf + ans->buf_offset, (0x02 << 22) + state);
    return ans->buf_offset + 3;
  } else {
    // Should be unreachable: renormalization keeps the state below
    // DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE (22 bits of payload).
    DRACO_DCHECK(0 && "State is too large to be serialized");
    return ans->buf_offset;
  }
}
+
// rABS with descending spread.
// p or p0 takes the place of l_s from the paper.
// DRACO_ANS_P8_PRECISION is m.
// Encodes one binary symbol |val| whose probability of being zero is
// |p0| / DRACO_ANS_P8_PRECISION.
static inline void rabs_desc_write(struct AnsCoder *ans, int val, AnsP8 p0) {
  const AnsP8 p = DRACO_ANS_P8_PRECISION - p0;
  const unsigned l_s = val ? p : p0;
  unsigned quot, rem;
  // Renormalize: emit one byte when the state would leave the valid interval
  // for this symbol's probability.
  if (ans->state >=
      DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) {
    ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE;
    ans->state /= DRACO_ANS_IO_BASE;
  }
  DRACO_ANS_DIVREM(quot, rem, ans->state, l_s);
  // Descending spread: the "1" slots occupy the low part of each
  // precision-sized block; "0" slots are offset by p.
  ans->state = quot * DRACO_ANS_P8_PRECISION + rem + (val ? 0 : p);
}
+
+#define DRACO_ANS_IMPL1 0
+#define UNPREDICTABLE(x) x
// Decodes one binary symbol previously encoded with rabs_desc_write().
// Returns 0 or 1. |p0| must match the probability used by the encoder.
static inline int rabs_desc_read(struct AnsDecoder *ans, AnsP8 p0) {
  int val;
#if DRACO_ANS_IMPL1
  unsigned l_s;
#else
  unsigned quot, rem, x, xn;
#endif
  const AnsP8 p = DRACO_ANS_P8_PRECISION - p0;
  // Renormalize: refill the state from the buffer, which is consumed
  // back-to-front. The buf_offset guard avoids reading before the buffer.
  if (ans->state < DRACO_ANS_L_BASE && ans->buf_offset > 0) {
    ans->state = ans->state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset];
  }
#if DRACO_ANS_IMPL1
  val = ans->state % DRACO_ANS_P8_PRECISION < p;
  l_s = val ? p : p0;
  ans->state = (ans->state / DRACO_ANS_P8_PRECISION) * l_s +
               ans->state % DRACO_ANS_P8_PRECISION - (!val * p);
#else
  // Branch-light variant of the same update as the IMPL1 path above.
  x = ans->state;
  quot = x / DRACO_ANS_P8_PRECISION;
  rem = x % DRACO_ANS_P8_PRECISION;
  xn = quot * p;
  val = rem < p;
  if (UNPREDICTABLE(val)) {
    ans->state = xn + rem;
  } else {
    // ans->state = quot * p0 + rem - p;
    ans->state = x - xn - p;
  }
#endif
  return val;
}
+
// rABS with ascending spread.
// p or p0 takes the place of l_s from the paper.
// DRACO_ANS_P8_PRECISION is m.
// Same as rabs_desc_write() except that the "0" slots occupy the low part
// of each precision-sized block and the "1" slots are offset by p0.
static inline void rabs_asc_write(struct AnsCoder *ans, int val, AnsP8 p0) {
  const AnsP8 p = DRACO_ANS_P8_PRECISION - p0;
  const unsigned l_s = val ? p : p0;
  unsigned quot, rem;
  // Renormalize: emit one byte when the state would leave the valid interval
  // for this symbol's probability.
  if (ans->state >=
      DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) {
    ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE;
    ans->state /= DRACO_ANS_IO_BASE;
  }
  DRACO_ANS_DIVREM(quot, rem, ans->state, l_s);
  ans->state = quot * DRACO_ANS_P8_PRECISION + rem + (val ? p0 : 0);
}
+
+static inline int rabs_asc_read(struct AnsDecoder *ans, AnsP8 p0) {
+ int val;
+#if DRACO_ANS_IMPL1
+ unsigned l_s;
+#else
+ unsigned quot, rem, x, xn;
+#endif
+ const AnsP8 p = DRACO_ANS_P8_PRECISION - p0;
+ if (ans->state < DRACO_ANS_L_BASE) {
+ ans->state = ans->state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset];
+ }
+#if DRACO_ANS_IMPL1
+ val = ans->state % DRACO_ANS_P8_PRECISION < p;
+ l_s = val ? p : p0;
+ ans->state = (ans->state / DRACO_ANS_P8_PRECISION) * l_s +
+ ans->state % DRACO_ANS_P8_PRECISION - (!val * p);
+#else
+ x = ans->state;
+ quot = x / DRACO_ANS_P8_PRECISION;
+ rem = x % DRACO_ANS_P8_PRECISION;
+ xn = quot * p;
+ val = rem >= p0;
+ if (UNPREDICTABLE(val)) {
+ ans->state = xn + rem - p0;
+ } else {
+ // ans->state = quot * p0 + rem - p0;
+ ans->state = x - xn;
+ }
+#endif
+ return val;
+}
+
+#define rabs_read rabs_desc_read
+#define rabs_write rabs_desc_write
+
// uABS with normalization.
// Encodes binary symbol |val| whose probability of being zero is
// |p0| / DRACO_ANS_P8_PRECISION.
static inline void uabs_write(struct AnsCoder *ans, int val, AnsP8 p0) {
  AnsP8 p = DRACO_ANS_P8_PRECISION - p0;
  const unsigned l_s = val ? p : p0;
  // Renormalize: flush bytes while the state is too large for this symbol's
  // probability.
  while (ans->state >=
         DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) {
    ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE;
    ans->state /= DRACO_ANS_IO_BASE;
  }
  if (!val) {
    // Map the state into the "0" sub-range.
    ans->state = DRACO_ANS_DIV(ans->state * DRACO_ANS_P8_PRECISION, p0);
  } else {
    // Map the state into the "1" sub-range (ceiling division form).
    ans->state =
        DRACO_ANS_DIV((ans->state + 1) * DRACO_ANS_P8_PRECISION + p - 1, p) - 1;
  }
}
+
// Decodes one binary symbol previously encoded with uabs_write().
// Returns 0 or 1. |p0| must match the probability used by the encoder.
static inline int uabs_read(struct AnsDecoder *ans, AnsP8 p0) {
  AnsP8 p = DRACO_ANS_P8_PRECISION - p0;
  int s;
  // unsigned int xp1;
  unsigned xp, sp;
  unsigned state = ans->state;
  // Renormalize: refill the state from the buffer (consumed back-to-front).
  while (state < DRACO_ANS_L_BASE && ans->buf_offset > 0) {
    state = state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset];
  }
  sp = state * p;
  // xp1 = (sp + p) / DRACO_ANS_P8_PRECISION;
  xp = sp / DRACO_ANS_P8_PRECISION;
  // s = xp1 - xp;
  // The fractional part of state * p / PRECISION decides the symbol; the
  // low 8 bits of |sp| are that fraction since PRECISION == 256.
  s = (sp & 0xFF) >= p0;
  if (UNPREDICTABLE(s)) {
    ans->state = xp;
  } else {
    ans->state = state - xp;
  }
  return s;
}
+
+static inline int uabs_read_bit(struct AnsDecoder *ans) {
+ int s;
+ unsigned state = ans->state;
+ while (state < DRACO_ANS_L_BASE && ans->buf_offset > 0) {
+ state = state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset];
+ }
+ s = static_cast<int>(state & 1);
+ ans->state = state >> 1;
+ return s;
+}
+
+static inline int ans_read_init(struct AnsDecoder *const ans,
+ const uint8_t *const buf, int offset) {
+ unsigned x;
+ if (offset < 1) {
+ return 1;
+ }
+ ans->buf = buf;
+ x = buf[offset - 1] >> 6;
+ if (x == 0) {
+ ans->buf_offset = offset - 1;
+ ans->state = buf[offset - 1] & 0x3F;
+ } else if (x == 1) {
+ if (offset < 2) {
+ return 1;
+ }
+ ans->buf_offset = offset - 2;
+ ans->state = mem_get_le16(buf + offset - 2) & 0x3FFF;
+ } else if (x == 2) {
+ if (offset < 3) {
+ return 1;
+ }
+ ans->buf_offset = offset - 3;
+ ans->state = mem_get_le24(buf + offset - 3) & 0x3FFFFF;
+ } else {
+ return 1;
+ }
+ ans->state += DRACO_ANS_L_BASE;
+ if (ans->state >= DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE) {
+ return 1;
+ }
+ return 0;
+}
+
// Returns non-zero if the stream was consumed exactly, i.e. the decoder
// state has returned to its initial DRACO_ANS_L_BASE value.
static inline int ans_read_end(struct AnsDecoder *const ans) {
  return ans->state == DRACO_ANS_L_BASE;
}
+
// Returns non-zero if the decoder needs more input but the buffer is
// exhausted, i.e. the stream was truncated or corrupt.
static inline int ans_reader_has_error(const struct AnsDecoder *const ans) {
  return ans->state < DRACO_ANS_L_BASE && ans->buf_offset == 0;
}
+
// Probability entry for one symbol of the rANS alphabet.
struct rans_sym {
  uint32_t prob;      // Scaled probability of this symbol.
  uint32_t cum_prob;  // not-inclusive: sum of the probabilities of all
                      // preceding symbols.
};
+
// Class for performing rANS encoding using a desired number of precision bits.
// The max number of precision bits is currently 19. The actual number of
// symbols in the input alphabet should be (much) smaller than that, otherwise
// the compression rate may suffer.
template <int rans_precision_bits_t>
class RAnsEncoder {
 public:
  RAnsEncoder() {}

  // Provides the input buffer where the data is going to be stored.
  inline void write_init(uint8_t *const buf) {
    ans_.buf = buf;
    ans_.buf_offset = 0;
    ans_.state = l_rans_base;
  }

  // Needs to be called after all symbols are encoded.
  // Serializes the final state (minus the l_rans_base bias) into 1-4
  // trailing bytes and returns the total number of bytes written. The top
  // two bits of the last byte carry a size marker (0-3) so the decoder can
  // recover the width (see RAnsDecoder::read_init()).
  inline int write_end() {
    uint32_t state;
    DRACO_DCHECK_GE(ans_.state, l_rans_base);
    DRACO_DCHECK_LT(ans_.state, l_rans_base * DRACO_ANS_IO_BASE);
    state = ans_.state - l_rans_base;
    if (state < (1 << 6)) {
      ans_.buf[ans_.buf_offset] = (0x00 << 6) + state;
      return ans_.buf_offset + 1;
    } else if (state < (1 << 14)) {
      mem_put_le16(ans_.buf + ans_.buf_offset, (0x01 << 14) + state);
      return ans_.buf_offset + 2;
    } else if (state < (1 << 22)) {
      mem_put_le24(ans_.buf + ans_.buf_offset, (0x02 << 22) + state);
      return ans_.buf_offset + 3;
    } else if (state < (1 << 30)) {
      mem_put_le32(ans_.buf + ans_.buf_offset, (0x03u << 30u) + state);
      return ans_.buf_offset + 4;
    } else {
      DRACO_DCHECK(0 && "State is too large to be serialized");
      return ans_.buf_offset;
    }
  }

  // rANS with normalization.
  // sym->prob takes the place of l_s from the paper.
  // rans_precision is m.
  inline void rans_write(const struct rans_sym *const sym) {
    const uint32_t p = sym->prob;
    // Renormalize: flush bytes while the state is too large for this
    // symbol's probability.
    while (ans_.state >= l_rans_base / rans_precision * DRACO_ANS_IO_BASE * p) {
      ans_.buf[ans_.buf_offset++] = ans_.state % DRACO_ANS_IO_BASE;
      ans_.state /= DRACO_ANS_IO_BASE;
    }
    // TODO(ostava): The division and multiplication should be optimized.
    ans_.state =
        (ans_.state / p) * rans_precision + ans_.state % p + sym->cum_prob;
  }

 private:
  // m: total probability mass; all symbol probabilities sum to this.
  static constexpr int rans_precision = 1 << rans_precision_bits_t;
  // Lower bound of the renormalization interval.
  static constexpr int l_rans_base = rans_precision * 4;
  AnsCoder ans_;
};
+
// Decoded symbol together with its probability entry (see rans_sym).
struct rans_dec_sym {
  uint32_t val;       // The decoded symbol value.
  uint32_t prob;      // Scaled probability of this symbol.
  uint32_t cum_prob;  // not-inclusive: sum of the probabilities of all
                      // preceding symbols.
};
+
+// Class for performing rANS decoding using a desired number of precision bits.
+// The number of precision bits needs to be the same as with the RAnsEncoder
+// that was used to encode the input data.
+template <int rans_precision_bits_t>
+class RAnsDecoder {
+ public:
+ RAnsDecoder() {}
+
+ // Initializes the decoder from the input buffer. The |offset| specifies the
+ // number of bytes encoded by the encoder. A non zero return value is an
+ // error.
+ inline int read_init(const uint8_t *const buf, int offset) {
+ unsigned x;
+ if (offset < 1) {
+ return 1;
+ }
+ ans_.buf = buf;
+ x = buf[offset - 1] >> 6;
+ if (x == 0) {
+ ans_.buf_offset = offset - 1;
+ ans_.state = buf[offset - 1] & 0x3F;
+ } else if (x == 1) {
+ if (offset < 2) {
+ return 1;
+ }
+ ans_.buf_offset = offset - 2;
+ ans_.state = mem_get_le16(buf + offset - 2) & 0x3FFF;
+ } else if (x == 2) {
+ if (offset < 3) {
+ return 1;
+ }
+ ans_.buf_offset = offset - 3;
+ ans_.state = mem_get_le24(buf + offset - 3) & 0x3FFFFF;
+ } else if (x == 3) {
+ ans_.buf_offset = offset - 4;
+ ans_.state = mem_get_le32(buf + offset - 4) & 0x3FFFFFFF;
+ } else {
+ return 1;
+ }
+ ans_.state += l_rans_base;
+ if (ans_.state >= l_rans_base * DRACO_ANS_IO_BASE) {
+ return 1;
+ }
+ return 0;
+ }
+
+ inline int read_end() { return ans_.state == l_rans_base; }
+
+ inline int reader_has_error() {
+ return ans_.state < l_rans_base && ans_.buf_offset == 0;
+ }
+
+ inline int rans_read() {
+ unsigned rem;
+ unsigned quo;
+ struct rans_dec_sym sym;
+ while (ans_.state < l_rans_base && ans_.buf_offset > 0) {
+ ans_.state = ans_.state * DRACO_ANS_IO_BASE + ans_.buf[--ans_.buf_offset];
+ }
+ // |rans_precision| is a power of two compile time constant, and the below
+ // division and modulo are going to be optimized by the compiler.
+ quo = ans_.state / rans_precision;
+ rem = ans_.state % rans_precision;
+ fetch_sym(&sym, rem);
+ ans_.state = quo * sym.prob + rem - sym.cum_prob;
+ return sym.val;
+ }
+
+ // Construct a lookup table with |rans_precision| number of entries.
+ // Returns false if the table couldn't be built (because of wrong input data).
+ inline bool rans_build_look_up_table(const uint32_t token_probs[],
+ uint32_t num_symbols) {
+ lut_table_.resize(rans_precision);
+ probability_table_.resize(num_symbols);
+ uint32_t cum_prob = 0;
+ uint32_t act_prob = 0;
+ for (uint32_t i = 0; i < num_symbols; ++i) {
+ probability_table_[i].prob = token_probs[i];
+ probability_table_[i].cum_prob = cum_prob;
+ cum_prob += token_probs[i];
+ if (cum_prob > rans_precision) {
+ return false;
+ }
+ for (uint32_t j = act_prob; j < cum_prob; ++j) {
+ lut_table_[j] = i;
+ }
+ act_prob = cum_prob;
+ }
+ if (cum_prob != rans_precision) {
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ inline void fetch_sym(struct rans_dec_sym *out, uint32_t rem) {
+ uint32_t symbol = lut_table_[rem];
+ out->val = symbol;
+ out->prob = probability_table_[symbol].prob;
+ out->cum_prob = probability_table_[symbol].cum_prob;
+ }
+
+ static constexpr int rans_precision = 1 << rans_precision_bits_t;
+ static constexpr int l_rans_base = rans_precision * 4;
+ std::vector<uint32_t> lut_table_;
+ std::vector<rans_sym> probability_table_;
+ AnsDecoder ans_;
+};
+
+#undef DRACO_ANS_DIVREM
+#undef DRACO_ANS_P8_PRECISION
+#undef DRACO_ANS_L_BASE
+#undef DRACO_ANS_IO_BASE
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_ANS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h
new file mode 100644
index 0000000..cd42711
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File providing shared functionality for RAnsSymbolEncoder and
+// RAnsSymbolDecoder (see rans_symbol_encoder.h / rans_symbol_decoder.h).
+#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_
+#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_
+
+#include "draco/compression/entropy/ans.h"
+
+namespace draco {
+
// Computes the desired precision of the rANS method for the specified number
// of unique symbols in the input data (defined by their bit_length).
constexpr int ComputeRAnsUnclampedPrecision(int symbols_bit_length) {
  return symbols_bit_length * 3 / 2;
}
+
// Computes the desired precision clamped to guarantee a valid functionality
// of our rANS library (which is between 12 to 20 bits).
constexpr int ComputeRAnsPrecisionFromUniqueSymbolsBitLength(
    int symbols_bit_length) {
  // Clamp (3 * bit_length) / 2 into the supported [12, 20] range. Kept as a
  // single return expression for C++11 constexpr compatibility.
  return (3 * symbols_bit_length) / 2 > 20
             ? 20
             : ((3 * symbols_bit_length) / 2 < 12
                    ? 12
                    : (3 * symbols_bit_length) / 2);
}
+
// Compute approximate frequency table size needed for storing the provided
// symbols.
static inline int64_t ApproximateRAnsFrequencyTableBits(
    int32_t max_value, int num_unique_symbols) {
  // Zero-frequency entries are run-length encoded (runs of up to 64), so
  // they cost roughly one byte per started run; each non-zero entry costs
  // about one byte as well.
  const int64_t zero_run_bits =
      8 * (num_unique_symbols + (max_value - num_unique_symbols) / 64);
  return 8 * num_unique_symbols + zero_run_bits;
}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h
new file mode 100644
index 0000000..10cdc67
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h
@@ -0,0 +1,164 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_
+#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/entropy/rans_symbol_coding.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/varint_decoding.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
// A helper class for decoding symbols using the rANS algorithm (see ans.h).
// The class can be used to decode the probability table and the data encoded
// by the RAnsSymbolEncoder. |unique_symbols_bit_length_t| must be the same as
// the one used for the corresponding RAnsSymbolEncoder.
template <int unique_symbols_bit_length_t>
class RAnsSymbolDecoder {
 public:
  RAnsSymbolDecoder() : num_symbols_(0) {}

  // Initialize the decoder and decode the probability table.
  bool Create(DecoderBuffer *buffer);

  uint32_t num_symbols() const { return num_symbols_; }

  // Starts decoding from the buffer. The buffer will be advanced past the
  // encoded data after this call.
  bool StartDecoding(DecoderBuffer *buffer);
  // Decodes and returns the next symbol in the stream.
  uint32_t DecodeSymbol() { return ans_.rans_read(); }
  // Finalizes the rANS read; must be called after all symbols are decoded.
  void EndDecoding();

 private:
  // rANS precision derived from the alphabet's bit length, clamped to the
  // supported [12, 20] bit range.
  static constexpr int rans_precision_bits_ =
      ComputeRAnsPrecisionFromUniqueSymbolsBitLength(
          unique_symbols_bit_length_t);
  static constexpr int rans_precision_ = 1 << rans_precision_bits_;

  // Scaled probability for each symbol, decoded by Create().
  std::vector<uint32_t> probability_table_;
  uint32_t num_symbols_;
  RAnsDecoder<rans_precision_bits_> ans_;
};
+
// Decodes the symbol probability table written by RAnsSymbolEncoder's
// EncodeTable() and builds the rANS lookup table from it. Returns false on
// malformed input.
template <int unique_symbols_bit_length_t>
bool RAnsSymbolDecoder<unique_symbols_bit_length_t>::Create(
    DecoderBuffer *buffer) {
  // Check that the DecoderBuffer version is set.
  if (buffer->bitstream_version() == 0) {
    return false;
  }
  // Decode the number of alphabet symbols.
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Pre-2.0 bitstreams stored the count as a raw fixed-width integer.
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
    if (!buffer->Decode(&num_symbols_)) {
      return false;
    }

  } else
#endif
  {
    if (!DecodeVarint(&num_symbols_, buffer)) {
      return false;
    }
  }
  probability_table_.resize(num_symbols_);
  if (num_symbols_ == 0) {
    return true;
  }
  // Decode the table.
  for (uint32_t i = 0; i < num_symbols_; ++i) {
    uint8_t prob_data = 0;
    // Decode the first byte and extract the number of extra bytes we need to
    // get, or the offset to the next symbol with non-zero probability.
    if (!buffer->Decode(&prob_data)) {
      return false;
    }
    // Token is stored in the first two bits of the first byte. Values 0-2 are
    // used to indicate the number of extra bytes, and value 3 is a special
    // symbol used to denote run-length coding of zero probability entries.
    // See rans_symbol_encoder.h for more details.
    const int token = prob_data & 3;
    if (token == 3) {
      const uint32_t offset = prob_data >> 2;
      // Reject runs that would extend past the end of the alphabet.
      if (i + offset >= num_symbols_) {
        return false;
      }
      // Set zero probability for all symbols in the specified range.
      for (uint32_t j = 0; j < offset + 1; ++j) {
        probability_table_[i + j] = 0;
      }
      i += offset;
    } else {
      const int extra_bytes = token;
      uint32_t prob = prob_data >> 2;
      for (int b = 0; b < extra_bytes; ++b) {
        uint8_t eb;
        if (!buffer->Decode(&eb)) {
          return false;
        }
        // Shift 8 bits for each extra byte and subtract 2 for the two first
        // bits.
        prob |= static_cast<uint32_t>(eb) << (8 * (b + 1) - 2);
      }
      probability_table_[i] = prob;
    }
  }
  // Fails if the decoded probabilities do not sum to the rANS precision.
  if (!ans_.rans_build_look_up_table(&probability_table_[0], num_symbols_)) {
    return false;
  }
  return true;
}
+
// Reads the size of the rANS-coded payload, validates it against the
// remaining buffer, initializes the rANS reader on that payload, and
// advances |buffer| past it. Returns false on malformed input.
template <int unique_symbols_bit_length_t>
bool RAnsSymbolDecoder<unique_symbols_bit_length_t>::StartDecoding(
    DecoderBuffer *buffer) {
  uint64_t bytes_encoded;
  // Decode the number of bytes encoded by the encoder.
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Pre-2.0 bitstreams stored the size as a raw fixed-width integer.
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
    if (!buffer->Decode(&bytes_encoded)) {
      return false;
    }

  } else
#endif
  {
    if (!DecodeVarint<uint64_t>(&bytes_encoded, buffer)) {
      return false;
    }
  }
  // Reject sizes that would read past the end of the buffer.
  if (bytes_encoded > static_cast<uint64_t>(buffer->remaining_size())) {
    return false;
  }
  const uint8_t *const data_head =
      reinterpret_cast<const uint8_t *>(buffer->data_head());
  // Advance the buffer past the rANS data.
  buffer->Advance(bytes_encoded);
  if (ans_.read_init(data_head, static_cast<int>(bytes_encoded)) != 0) {
    return false;
  }
  return true;
}
+
// Finalizes the rANS read after all symbols have been decoded. The result of
// the underlying end-of-stream check is intentionally ignored here.
template <int unique_symbols_bit_length_t>
void RAnsSymbolDecoder<unique_symbols_bit_length_t>::EndDecoding() {
  ans_.read_end();
}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h
new file mode 100644
index 0000000..4e07ec8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h
@@ -0,0 +1,290 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_
+#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+#include "draco/compression/entropy/ans.h"
+#include "draco/compression/entropy/rans_symbol_coding.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
// A helper class for encoding symbols using the rANS algorithm (see ans.h).
// The class can be used to initialize and encode probability table needed by
// rANS, and to perform encoding of symbols into the provided EncoderBuffer.
template <int unique_symbols_bit_length_t>
class RAnsSymbolEncoder {
 public:
  RAnsSymbolEncoder()
      : num_symbols_(0), num_expected_bits_(0), buffer_offset_(0) {}

  // Creates a probability table needed by the rANS library and encode it into
  // the provided buffer.
  bool Create(const uint64_t *frequencies, int num_symbols,
              EncoderBuffer *buffer);

  // Reserves output space and initializes the rANS writer.
  void StartEncoding(EncoderBuffer *buffer);
  // Encodes one symbol; |symbol| must index into the probability table.
  void EncodeSymbol(uint32_t symbol) {
    ans_.rans_write(&probability_table_[symbol]);
  }
  // Flushes the rANS state and trims the buffer to the bytes actually used.
  void EndEncoding(EncoderBuffer *buffer);

  // rANS requires to encode the input symbols in the reverse order.
  static constexpr bool needs_reverse_encoding() { return true; }

 private:
  // Functor used for sorting symbol ids according to their probabilities.
  // The functor sorts symbol indices that index an underlying map between
  // symbol ids and their probabilities. We don't sort the probability table
  // directly, because that would require an additional indirection during the
  // EncodeSymbol() function.
  struct ProbabilityLess {
    explicit ProbabilityLess(const std::vector<rans_sym> *probs)
        : probabilities(probs) {}
    bool operator()(int i, int j) const {
      return probabilities->at(i).prob < probabilities->at(j).prob;
    }
    const std::vector<rans_sym> *probabilities;
  };

  // Encodes the probability table into the output buffer.
  bool EncodeTable(EncoderBuffer *buffer);

  // rANS precision derived from the alphabet's bit length, clamped to the
  // supported [12, 20] bit range.
  static constexpr int rans_precision_bits_ =
      ComputeRAnsPrecisionFromUniqueSymbolsBitLength(
          unique_symbols_bit_length_t);
  static constexpr int rans_precision_ = 1 << rans_precision_bits_;

  std::vector<rans_sym> probability_table_;
  // The number of symbols in the input alphabet.
  uint32_t num_symbols_;
  // Expected number of bits that is needed to encode the input.
  uint64_t num_expected_bits_;

  RAnsEncoder<rans_precision_bits_> ans_;
  // Initial offset of the encoder buffer before any ans data was encoded.
  uint64_t buffer_offset_;
};
+
// Builds the scaled probability table from raw symbol |frequencies|,
// normalizing them so they sum exactly to rans_precision_, estimates the
// expected output size, and encodes the table into |buffer|. Returns false
// if a valid table cannot be built.
template <int unique_symbols_bit_length_t>
bool RAnsSymbolEncoder<unique_symbols_bit_length_t>::Create(
    const uint64_t *frequencies, int num_symbols, EncoderBuffer *buffer) {
  // Compute the total of the input frequencies.
  uint64_t total_freq = 0;
  int max_valid_symbol = 0;
  for (int i = 0; i < num_symbols; ++i) {
    total_freq += frequencies[i];
    if (frequencies[i] > 0) {
      max_valid_symbol = i;
    }
  }
  // Trailing zero-frequency symbols are dropped from the alphabet.
  num_symbols = max_valid_symbol + 1;
  num_symbols_ = num_symbols;
  probability_table_.resize(num_symbols);
  const double total_freq_d = static_cast<double>(total_freq);
  const double rans_precision_d = static_cast<double>(rans_precision_);
  // Compute probabilities by rescaling the normalized frequencies into interval
  // [1, rans_precision - 1]. The total probability needs to be equal to
  // rans_precision.
  int total_rans_prob = 0;
  for (int i = 0; i < num_symbols; ++i) {
    const uint64_t freq = frequencies[i];

    // Normalized probability.
    const double prob = static_cast<double>(freq) / total_freq_d;

    // RAns probability in range of [1, rans_precision - 1].
    uint32_t rans_prob = static_cast<uint32_t>(prob * rans_precision_d + 0.5f);
    // Any symbol that actually occurs must keep a non-zero probability,
    // otherwise it could not be encoded at all.
    if (rans_prob == 0 && freq > 0) {
      rans_prob = 1;
    }
    probability_table_[i].prob = rans_prob;
    total_rans_prob += rans_prob;
  }
  // Because of rounding errors, the total precision may not be exactly accurate
  // and we may need to adjust the entries a little bit.
  if (total_rans_prob != rans_precision_) {
    std::vector<int> sorted_probabilities(num_symbols);
    for (int i = 0; i < num_symbols; ++i) {
      sorted_probabilities[i] = i;
    }
    std::sort(sorted_probabilities.begin(), sorted_probabilities.end(),
              ProbabilityLess(&probability_table_));
    if (total_rans_prob < rans_precision_) {
      // This happens rather infrequently, just add the extra needed precision
      // to the most frequent symbol.
      probability_table_[sorted_probabilities.back()].prob +=
          rans_precision_ - total_rans_prob;
    } else {
      // We have over-allocated the precision, which is quite common.
      // Rescale the probabilities of all symbols.
      int32_t error = total_rans_prob - rans_precision_;
      while (error > 0) {
        const double act_total_prob_d = static_cast<double>(total_rans_prob);
        const double act_rel_error_d = rans_precision_d / act_total_prob_d;
        // Walk symbols from least to most frequent (excluding the most
        // frequent one at index num_symbols - 1... the loop runs from the
        // high end of the sorted order downward) and shave off probability
        // proportionally until the error is consumed.
        for (int j = num_symbols - 1; j > 0; --j) {
          int symbol_id = sorted_probabilities[j];
          if (probability_table_[symbol_id].prob <= 1) {
            if (j == num_symbols - 1) {
              return false;  // Most frequent symbol would be empty.
            }
            break;
          }
          const int32_t new_prob = static_cast<int32_t>(
              floor(act_rel_error_d *
                    static_cast<double>(probability_table_[symbol_id].prob)));
          int32_t fix = probability_table_[symbol_id].prob - new_prob;
          if (fix == 0u) {
            fix = 1;
          }
          // Never reduce a symbol's probability below 1.
          if (fix >= static_cast<int32_t>(probability_table_[symbol_id].prob)) {
            fix = probability_table_[symbol_id].prob - 1;
          }
          if (fix > error) {
            fix = error;
          }
          probability_table_[symbol_id].prob -= fix;
          total_rans_prob -= fix;
          error -= fix;
          if (total_rans_prob == rans_precision_) {
            break;
          }
        }
      }
    }
  }

  // Compute the cumulative probability (cdf).
  uint32_t total_prob = 0;
  for (int i = 0; i < num_symbols; ++i) {
    probability_table_[i].cum_prob = total_prob;
    total_prob += probability_table_[i].prob;
  }
  if (total_prob != rans_precision_) {
    return false;
  }

  // Estimate the number of bits needed to encode the input.
  // From Shannon entropy the total number of bits N is:
  //   N = -sum{i : all_symbols}(F(i) * log2(P(i)))
  // where P(i) is the normalized probability of symbol i and F(i) is the
  // symbol's frequency in the input data.
  double num_bits = 0;
  for (int i = 0; i < num_symbols; ++i) {
    if (probability_table_[i].prob == 0) {
      continue;
    }
    const double norm_prob =
        static_cast<double>(probability_table_[i].prob) / rans_precision_d;
    num_bits += static_cast<double>(frequencies[i]) * log2(norm_prob);
  }
  num_expected_bits_ = static_cast<uint64_t>(ceil(-num_bits));
  if (!EncodeTable(buffer)) {
    return false;
  }
  return true;
}
+
// Serializes the probability table into |buffer|. Each entry is a varint
// whose two low bits are a token: 0-2 give the number of extra bytes of the
// probability value, 3 marks a run of zero-probability symbols whose length
// is stored in the remaining six bits. Mirrors RAnsSymbolDecoder::Create().
template <int unique_symbols_bit_length_t>
bool RAnsSymbolEncoder<unique_symbols_bit_length_t>::EncodeTable(
    EncoderBuffer *buffer) {
  EncodeVarint(num_symbols_, buffer);
  // Use varint encoding for the probabilities (first two bits represent the
  // number of bytes used - 1).
  for (uint32_t i = 0; i < num_symbols_; ++i) {
    const uint32_t prob = probability_table_[i].prob;
    int num_extra_bytes = 0;
    if (prob >= (1 << 6)) {
      num_extra_bytes++;
      if (prob >= (1 << 14)) {
        num_extra_bytes++;
        if (prob >= (1 << 22)) {
          // The maximum number of precision bits is 20 so we should not really
          // get to this point.
          return false;
        }
      }
    }
    if (prob == 0) {
      // When the probability of the symbol is 0, set the first two bits to 1
      // (unique identifier) and use the remaining 6 bits to store the offset
      // to the next symbol with non-zero probability.
      uint32_t offset = 0;
      for (; offset < (1 << 6) - 1; ++offset) {
        // Note: we don't have to check whether the next symbol id is larger
        // than num_symbols_ because we know that the last symbol always has
        // non-zero probability.
        const uint32_t next_prob = probability_table_[i + offset + 1].prob;
        if (next_prob > 0) {
          break;
        }
      }
      buffer->Encode(static_cast<uint8_t>((offset << 2) | 3));
      i += offset;
    } else {
      // Encode the first byte (including the number of extra bytes).
      buffer->Encode(static_cast<uint8_t>((prob << 2) | (num_extra_bytes & 3)));
      // Encode the extra bytes.
      for (int b = 0; b < num_extra_bytes; ++b) {
        buffer->Encode(static_cast<uint8_t>(prob >> (8 * (b + 1) - 2)));
      }
    }
  }
  return true;
}
+
// Grows |buffer| by a conservative upper bound on the rANS output size
// (twice the entropy estimate plus slack) and points the rANS writer at the
// reserved region. EndEncoding() later trims the buffer to the actual size.
template <int unique_symbols_bit_length_t>
void RAnsSymbolEncoder<unique_symbols_bit_length_t>::StartEncoding(
    EncoderBuffer *buffer) {
  // Allocate extra storage just in case.
  const uint64_t required_bits = 2 * num_expected_bits_ + 32;

  buffer_offset_ = buffer->size();
  const int64_t required_bytes = (required_bits + 7) / 8;
  buffer->Resize(buffer_offset_ + required_bytes + sizeof(buffer_offset_));
  uint8_t *const data =
      reinterpret_cast<uint8_t *>(const_cast<char *>(buffer->data()));
  ans_.write_init(data + buffer_offset_);
}
+
// Finalizes the rANS stream: flushes the coder state, prepends a varint with
// the payload size (shifting the payload up to make room — memmove handles
// the overlapping ranges), and shrinks the buffer to the bytes actually used.
template <int unique_symbols_bit_length_t>
void RAnsSymbolEncoder<unique_symbols_bit_length_t>::EndEncoding(
    EncoderBuffer *buffer) {
  char *const src = const_cast<char *>(buffer->data()) + buffer_offset_;

  // TODO(fgalligan): Look into changing this to uint32_t as write_end()
  // returns an int.
  const uint64_t bytes_written = static_cast<uint64_t>(ans_.write_end());
  EncoderBuffer var_size_buffer;
  EncodeVarint(bytes_written, &var_size_buffer);
  const uint32_t size_len = static_cast<uint32_t>(var_size_buffer.size());
  char *const dst = src + size_len;
  memmove(dst, src, bytes_written);

  // Store the size of the encoded data.
  memcpy(src, var_size_buffer.data(), size_len);

  // Resize the buffer to match the number of encoded bytes.
  buffer->Resize(buffer_offset_ + bytes_written + size_len);
}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc b/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc
new file mode 100644
index 0000000..137eafe
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc
@@ -0,0 +1,147 @@
+#include "draco/compression/entropy/shannon_entropy.h"
+
+#include <cmath>
+#include <vector>
+
+#include "draco/compression/entropy/rans_symbol_coding.h"
+
+namespace draco {
+
+int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols,
+ int max_value, int *out_num_unique_symbols) {
+ // First find frequency of all unique symbols in the input array.
+ int num_unique_symbols = 0;
+ std::vector<int> symbol_frequencies(max_value + 1, 0);
+ for (int i = 0; i < num_symbols; ++i) {
+ ++symbol_frequencies[symbols[i]];
+ }
+ double total_bits = 0;
+ double num_symbols_d = num_symbols;
+ for (int i = 0; i < max_value + 1; ++i) {
+ if (symbol_frequencies[i] > 0) {
+ ++num_unique_symbols;
+ // Compute Shannon entropy for the symbol.
+ // We don't want to use std::log2 here for Android build.
+ total_bits +=
+ symbol_frequencies[i] *
+ log2(static_cast<double>(symbol_frequencies[i]) / num_symbols_d);
+ }
+ }
+ if (out_num_unique_symbols) {
+ *out_num_unique_symbols = num_unique_symbols;
+ }
+ // Entropy is always negative.
+ return static_cast<int64_t>(-total_bits);
+}
+
+double ComputeBinaryShannonEntropy(uint32_t num_values,
+ uint32_t num_true_values) {
+ if (num_values == 0) {
+ return 0;
+ }
+
+ // We can exit early if the data set has 0 entropy.
+ if (num_true_values == 0 || num_values == num_true_values) {
+ return 0;
+ }
+ const double true_freq =
+ static_cast<double>(num_true_values) / static_cast<double>(num_values);
+ const double false_freq = 1.0 - true_freq;
+ return -(true_freq * std::log2(true_freq) +
+ false_freq * std::log2(false_freq));
+}
+
+ShannonEntropyTracker::ShannonEntropyTracker() {}
+
+ShannonEntropyTracker::EntropyData ShannonEntropyTracker::Peek(
+ const uint32_t *symbols, int num_symbols) {
+ return UpdateSymbols(symbols, num_symbols, false);
+}
+
+ShannonEntropyTracker::EntropyData ShannonEntropyTracker::Push(
+ const uint32_t *symbols, int num_symbols) {
+ return UpdateSymbols(symbols, num_symbols, true);
+}
+
+ShannonEntropyTracker::EntropyData ShannonEntropyTracker::UpdateSymbols(
+ const uint32_t *symbols, int num_symbols, bool push_changes) {
+ EntropyData ret_data = entropy_data_;
+ ret_data.num_values += num_symbols;
+ for (int i = 0; i < num_symbols; ++i) {
+ const uint32_t symbol = symbols[i];
+ if (frequencies_.size() <= symbol) {
+ frequencies_.resize(symbol + 1, 0);
+ }
+
+ // Update the entropy of the stream. Note that entropy of |N| values
+ // represented by |S| unique symbols is defined as:
+ //
+ // entropy = -sum_over_S(symbol_frequency / N * log2(symbol_frequency / N))
+ //
+ // To avoid the need to recompute the entire sum when new values are added,
+ // we can instead update a so called entropy norm that is defined as:
+ //
+ // entropy_norm = sum_over_S(symbol_frequency * log2(symbol_frequency))
+ //
+ // In this case, all we need to do is update entries on the symbols where
+ // the frequency actually changed.
+ //
+ // Note that entropy_norm and entropy can be easily transformed to the
+ // actual entropy as:
+ //
+ // entropy = log2(N) - entropy_norm / N
+ //
+ double old_symbol_entropy_norm = 0;
+ int &frequency = frequencies_[symbol];
+ if (frequency > 1) {
+ old_symbol_entropy_norm = frequency * std::log2(frequency);
+ } else if (frequency == 0) {
+ ret_data.num_unique_symbols++;
+ if (symbol > static_cast<uint32_t>(ret_data.max_symbol)) {
+ ret_data.max_symbol = symbol;
+ }
+ }
+ frequency++;
+ const double new_symbol_entropy_norm = frequency * std::log2(frequency);
+
+ // Update the final entropy.
+ ret_data.entropy_norm += new_symbol_entropy_norm - old_symbol_entropy_norm;
+ }
+ if (push_changes) {
+ // Update entropy data of the stream.
+ entropy_data_ = ret_data;
+ } else {
+ // We are only peeking so do not update the stream.
+ // Revert changes in the frequency table.
+ for (int i = 0; i < num_symbols; ++i) {
+ const uint32_t symbol = symbols[i];
+ frequencies_[symbol]--;
+ }
+ }
+ return ret_data;
+}
+
+int64_t ShannonEntropyTracker::GetNumberOfDataBits(
+ const EntropyData &entropy_data) {
+ if (entropy_data.num_values < 2) {
+ return 0;
+ }
+ // We need to compute the number of bits required to represent the stream
+ // using the entropy norm. Note that:
+ //
+ // entropy = log2(num_values) - entropy_norm / num_values
+ //
+ // and number of bits required for the entropy is: num_values * entropy
+ //
+ return static_cast<int64_t>(
+ ceil(entropy_data.num_values * std::log2(entropy_data.num_values) -
+ entropy_data.entropy_norm));
+}
+
+int64_t ShannonEntropyTracker::GetNumberOfRAnsTableBits(
+ const EntropyData &entropy_data) {
+ return ApproximateRAnsFrequencyTableBits(entropy_data.max_symbol + 1,
+ entropy_data.num_unique_symbols);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.h
new file mode 100644
index 0000000..85165f4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy.h
@@ -0,0 +1,110 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_
+#define DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+namespace draco {
+
+// Computes an approximate Shannon entropy of symbols stored in the provided
+// input array |symbols|. The entropy corresponds to the number of bits that is
+// required to represent/store all the symbols using an optimal entropy coding
+// algorithm. See for example "A mathematical theory of communication" by
+// Shannon'48 (http://ieeexplore.ieee.org/document/6773024/).
+//
+// |max_value| is a required input that defines the maximum value in the input
+// |symbols| array.
+//
+// |out_num_unique_symbols| is an optional output argument that stores the
+// number of unique symbols contained within the |symbols| array.
+// TODO(ostava): This should be renamed or the return value should be changed to
+// return the actual entropy and not the number of bits needed to represent the
+// input symbols.
+int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols,
+ int max_value, int *out_num_unique_symbols);
+
+// Computes the Shannon entropy of |num_values| Boolean entries, where
+// |num_true_values| are set to true.
+// Returns entropy between 0-1.
+double ComputeBinaryShannonEntropy(uint32_t num_values,
+ uint32_t num_true_values);
+
+// Class that can be used to keep track of the Shannon entropy on streamed data.
+// As new symbols are pushed to the tracker, the entropy is automatically
+// recomputed. The class also supports recomputing the entropy without actually
+// pushing the symbols to the tracker through the Peek() method.
+class ShannonEntropyTracker {
+ public:
+ ShannonEntropyTracker();
+
+ // Struct for holding entropy data about the symbols added to the tracker.
+ // It can be used to compute the number of bits needed to store the data using
+ // the method:
+ // ShannonEntropyTracker::GetNumberOfDataBits(entropy_data);
+ // or to compute the approximate size of the frequency table needed by the
+ // rans coding using method:
+ // ShannonEntropyTracker::GetNumberOfRAnsTableBits(entropy_data);
+ struct EntropyData {
+ double entropy_norm;
+ int num_values;
+ int max_symbol;
+ int num_unique_symbols;
+ EntropyData()
+ : entropy_norm(0.0),
+ num_values(0),
+ max_symbol(0),
+ num_unique_symbols(0) {}
+ };
+
+ // Adds new symbols to the tracker and recomputes the entropy accordingly.
+ EntropyData Push(const uint32_t *symbols, int num_symbols);
+
+ // Returns new entropy data for the tracker as if |symbols| were added to the
+ // tracker without actually changing the status of the tracker.
+ EntropyData Peek(const uint32_t *symbols, int num_symbols);
+
+ // Gets the number of bits needed for encoding symbols added to the tracker.
+ int64_t GetNumberOfDataBits() const {
+ return GetNumberOfDataBits(entropy_data_);
+ }
+
+ // Gets the number of bits needed for encoding frequency table using the rans
+ // encoder.
+ int64_t GetNumberOfRAnsTableBits() const {
+ return GetNumberOfRAnsTableBits(entropy_data_);
+ }
+
+ // Gets the number of bits needed for encoding given |entropy_data|.
+ static int64_t GetNumberOfDataBits(const EntropyData &entropy_data);
+
+ // Gets the number of bits needed for encoding frequency table using the rans
+ // encoder for the given |entropy_data|.
+ static int64_t GetNumberOfRAnsTableBits(const EntropyData &entropy_data);
+
+ private:
+ EntropyData UpdateSymbols(const uint32_t *symbols, int num_symbols,
+ bool push_changes);
+
+ std::vector<int32_t> frequencies_;
+
+ EntropyData entropy_data_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc b/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc
new file mode 100644
index 0000000..732c7d2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc
@@ -0,0 +1,58 @@
+#include "draco/compression/entropy/shannon_entropy.h"
+
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
+TEST(ShannonEntropyTest, TestBinaryEntropy) {
+ // Test verifies that computing binary entropy works as expected.
+ ASSERT_EQ(draco::ComputeBinaryShannonEntropy(0, 0), 0);
+ ASSERT_EQ(draco::ComputeBinaryShannonEntropy(10, 0), 0);
+ ASSERT_EQ(draco::ComputeBinaryShannonEntropy(10, 10), 0);
+ ASSERT_NEAR(draco::ComputeBinaryShannonEntropy(10, 5), 1.0, 1e-4);
+}
+
+TEST(ShannonEntropyTest, TestStreamEntropy) {
+ // Test verifies that the entropy of streamed data is computed correctly.
+ const std::vector<uint32_t> symbols = {1, 5, 1, 100, 2, 1};
+
+ draco::ShannonEntropyTracker entropy_tracker;
+
+ // Nothing added, 0 entropy.
+ ASSERT_EQ(entropy_tracker.GetNumberOfDataBits(), 0);
+
+ // Try to push symbols one by one.
+ uint32_t max_symbol = 0;
+ for (int i = 0; i < symbols.size(); ++i) {
+ if (symbols[i] > max_symbol) {
+ max_symbol = symbols[i];
+ }
+ const auto entropy_data = entropy_tracker.Push(&symbols[i], 1);
+
+ const int64_t stream_entropy_bits = entropy_tracker.GetNumberOfDataBits();
+ // Ensure the returned entropy_data is in sync with the stream.
+ ASSERT_EQ(draco::ShannonEntropyTracker::GetNumberOfDataBits(entropy_data),
+ stream_entropy_bits);
+
+ // Make sure the entropy is approximately the same as the one we compute
+ // directly from all symbols.
+ const int64_t expected_entropy_bits = draco::ComputeShannonEntropy(
+ symbols.data(), i + 1, max_symbol, nullptr);
+
+ // For now hardcoded tolerance of 2 bits.
+ ASSERT_NEAR(expected_entropy_bits, stream_entropy_bits, 2);
+ }
+
+ // Compare it also to the case when we add all symbols in one call.
+ draco::ShannonEntropyTracker entropy_tracker_2;
+ entropy_tracker_2.Push(symbols.data(), symbols.size());
+ const int64_t stream_2_entropy_bits = entropy_tracker_2.GetNumberOfDataBits();
+ ASSERT_EQ(entropy_tracker.GetNumberOfDataBits(), stream_2_entropy_bits);
+
+ // Ensure that peeking does not change the entropy.
+ entropy_tracker_2.Peek(symbols.data(), 1);
+
+ ASSERT_EQ(stream_2_entropy_bits, entropy_tracker_2.GetNumberOfDataBits());
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc
new file mode 100644
index 0000000..ba7166b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc
@@ -0,0 +1,170 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/entropy/symbol_decoding.h"
+#include "draco/compression/entropy/symbol_encoding.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+class SymbolCodingTest : public ::testing::Test {
+ protected:
+ SymbolCodingTest() : bitstream_version_(kDracoMeshBitstreamVersion) {}
+
+ template <class SignedIntTypeT>
+ void TestConvertToSymbolAndBack(SignedIntTypeT x) {
+ typedef typename std::make_unsigned<SignedIntTypeT>::type Symbol;
+ Symbol symbol = ConvertSignedIntToSymbol(x);
+ SignedIntTypeT y = ConvertSymbolToSignedInt(symbol);
+ ASSERT_EQ(x, y);
+ }
+
+ uint16_t bitstream_version_;
+};
+
+TEST_F(SymbolCodingTest, TestLargeNumbers) {
+ // This test verifies that SymbolCoding successfully encodes an array of large
+ // numbers.
+ const uint32_t in[] = {12345678, 1223333, 111, 5};
+ const int num_values = sizeof(in) / sizeof(uint32_t);
+ EncoderBuffer eb;
+ ASSERT_TRUE(EncodeSymbols(in, num_values, 1, nullptr, &eb));
+
+ std::vector<uint32_t> out;
+ out.resize(num_values);
+ DecoderBuffer db;
+ db.Init(eb.data(), eb.size());
+ db.set_bitstream_version(bitstream_version_);
+ ASSERT_TRUE(DecodeSymbols(num_values, 1, &db, &out[0]));
+ for (int i = 0; i < num_values; ++i) {
+ EXPECT_EQ(in[i], out[i]);
+ }
+}
+
+TEST_F(SymbolCodingTest, TestManyNumbers) {
+ // This test verifies that SymbolCoding successfully encodes an array of
+ // several numbers that repeat many times.
+
+ // Value/frequency pairs.
+ const std::pair<uint32_t, uint32_t> in[] = {
+ {12, 1500}, {1025, 31000}, {7, 1}, {9, 5}, {0, 6432}};
+
+ const int num_pairs = sizeof(in) / sizeof(std::pair<uint32_t, uint32_t>);
+
+ std::vector<uint32_t> in_values;
+ for (int i = 0; i < num_pairs; ++i) {
+ in_values.insert(in_values.end(), in[i].second, in[i].first);
+ }
+ for (int method = 0; method < NUM_SYMBOL_CODING_METHODS; ++method) {
+ // Test the encoding using all available symbol coding methods.
+ Options options;
+ SetSymbolEncodingMethod(&options, static_cast<SymbolCodingMethod>(method));
+
+ EncoderBuffer eb;
+ ASSERT_TRUE(
+ EncodeSymbols(in_values.data(), in_values.size(), 1, &options, &eb));
+ std::vector<uint32_t> out_values;
+ out_values.resize(in_values.size());
+ DecoderBuffer db;
+ db.Init(eb.data(), eb.size());
+ db.set_bitstream_version(bitstream_version_);
+ ASSERT_TRUE(DecodeSymbols(in_values.size(), 1, &db, &out_values[0]));
+ for (uint32_t i = 0; i < in_values.size(); ++i) {
+ ASSERT_EQ(in_values[i], out_values[i]);
+ }
+ }
+}
+
+TEST_F(SymbolCodingTest, TestEmpty) {
+ // This test verifies that SymbolCoding successfully encodes an empty array.
+ EncoderBuffer eb;
+ ASSERT_TRUE(EncodeSymbols(nullptr, 0, 1, nullptr, &eb));
+ DecoderBuffer db;
+ db.Init(eb.data(), eb.size());
+ db.set_bitstream_version(bitstream_version_);
+ ASSERT_TRUE(DecodeSymbols(0, 1, &db, nullptr));
+}
+
+TEST_F(SymbolCodingTest, TestOneSymbol) {
+ // This test verifies that SymbolCoding successfully encodes a single
+ // symbol.
+ EncoderBuffer eb;
+ const std::vector<uint32_t> in(1200, 0);
+ ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, nullptr, &eb));
+
+ std::vector<uint32_t> out(in.size());
+ DecoderBuffer db;
+ db.Init(eb.data(), eb.size());
+ db.set_bitstream_version(bitstream_version_);
+ ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0]));
+ for (uint32_t i = 0; i < in.size(); ++i) {
+ ASSERT_EQ(in[i], out[i]);
+ }
+}
+
+TEST_F(SymbolCodingTest, TestBitLengths) {
+ // This test verifies that SymbolCoding successfully encodes symbols of
+ // various bit lengths.
+ EncoderBuffer eb;
+ std::vector<uint32_t> in;
+ constexpr int bit_lengths = 18;
+ for (int i = 0; i < bit_lengths; ++i) {
+ in.push_back(1 << i);
+ }
+ std::vector<uint32_t> out(in.size());
+ for (int i = 0; i < bit_lengths; ++i) {
+ eb.Clear();
+ ASSERT_TRUE(EncodeSymbols(in.data(), i + 1, 1, nullptr, &eb));
+ DecoderBuffer db;
+ db.Init(eb.data(), eb.size());
+ db.set_bitstream_version(bitstream_version_);
+ ASSERT_TRUE(DecodeSymbols(i + 1, 1, &db, &out[0]));
+ for (int j = 0; j < i + 1; ++j) {
+ ASSERT_EQ(in[j], out[j]);
+ }
+ }
+}
+
+TEST_F(SymbolCodingTest, TestLargeNumberCondition) {
+ // This test verifies that SymbolCoding successfully encodes large symbols
+ // that are on the boundary between raw scheme and tagged scheme (18 bits).
+ EncoderBuffer eb;
+ constexpr int num_symbols = 1000000;
+ const std::vector<uint32_t> in(num_symbols, 1 << 18);
+ ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, nullptr, &eb));
+
+ std::vector<uint32_t> out(in.size());
+ DecoderBuffer db;
+ db.Init(eb.data(), eb.size());
+ db.set_bitstream_version(bitstream_version_);
+ ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0]));
+ for (uint32_t i = 0; i < in.size(); ++i) {
+ ASSERT_EQ(in[i], out[i]);
+ }
+}
+
+TEST_F(SymbolCodingTest, TestConversionFullRange) {
+ TestConvertToSymbolAndBack(static_cast<int8_t>(-128));
+ TestConvertToSymbolAndBack(static_cast<int8_t>(-127));
+ TestConvertToSymbolAndBack(static_cast<int8_t>(-1));
+ TestConvertToSymbolAndBack(static_cast<int8_t>(0));
+ TestConvertToSymbolAndBack(static_cast<int8_t>(1));
+ TestConvertToSymbolAndBack(static_cast<int8_t>(127));
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc
new file mode 100644
index 0000000..93d2997
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc
@@ -0,0 +1,181 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/entropy/symbol_decoding.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/compression/entropy/rans_symbol_decoder.h"
+
+namespace draco {
+
+template <template <int> class SymbolDecoderT>
+bool DecodeTaggedSymbols(uint32_t num_values, int num_components,
+ DecoderBuffer *src_buffer, uint32_t *out_values);
+
+template <template <int> class SymbolDecoderT>
+bool DecodeRawSymbols(uint32_t num_values, DecoderBuffer *src_buffer,
+ uint32_t *out_values);
+
+bool DecodeSymbols(uint32_t num_values, int num_components,
+ DecoderBuffer *src_buffer, uint32_t *out_values) {
+ if (num_values == 0) {
+ return true;
+ }
+ // Decode which scheme to use.
+ uint8_t scheme;
+ if (!src_buffer->Decode(&scheme)) {
+ return false;
+ }
+ if (scheme == SYMBOL_CODING_TAGGED) {
+ return DecodeTaggedSymbols<RAnsSymbolDecoder>(num_values, num_components,
+ src_buffer, out_values);
+ } else if (scheme == SYMBOL_CODING_RAW) {
+ return DecodeRawSymbols<RAnsSymbolDecoder>(num_values, src_buffer,
+ out_values);
+ }
+ return false;
+}
+
+template <template <int> class SymbolDecoderT>
+bool DecodeTaggedSymbols(uint32_t num_values, int num_components,
+ DecoderBuffer *src_buffer, uint32_t *out_values) {
+ // Decode the encoded data.
+ SymbolDecoderT<5> tag_decoder;
+ if (!tag_decoder.Create(src_buffer)) {
+ return false;
+ }
+
+ if (!tag_decoder.StartDecoding(src_buffer)) {
+ return false;
+ }
+
+ if (num_values > 0 && tag_decoder.num_symbols() == 0) {
+ return false; // Wrong number of symbols.
+ }
+
+ // src_buffer now points behind the encoded tag data (to the place where the
+ // values are encoded).
+ src_buffer->StartBitDecoding(false, nullptr);
+ int value_id = 0;
+ for (uint32_t i = 0; i < num_values; i += num_components) {
+ // Decode the tag.
+ const int bit_length = tag_decoder.DecodeSymbol();
+ // Decode the actual value.
+ for (int j = 0; j < num_components; ++j) {
+ uint32_t val;
+ if (!src_buffer->DecodeLeastSignificantBits32(bit_length, &val)) {
+ return false;
+ }
+ out_values[value_id++] = val;
+ }
+ }
+ tag_decoder.EndDecoding();
+ src_buffer->EndBitDecoding();
+ return true;
+}
+
+template <class SymbolDecoderT>
+bool DecodeRawSymbolsInternal(uint32_t num_values, DecoderBuffer *src_buffer,
+ uint32_t *out_values) {
+ SymbolDecoderT decoder;
+ if (!decoder.Create(src_buffer)) {
+ return false;
+ }
+
+ if (num_values > 0 && decoder.num_symbols() == 0) {
+ return false; // Wrong number of symbols.
+ }
+
+ if (!decoder.StartDecoding(src_buffer)) {
+ return false;
+ }
+ for (uint32_t i = 0; i < num_values; ++i) {
+ // Decode a symbol into the value.
+ const uint32_t value = decoder.DecodeSymbol();
+ out_values[i] = value;
+ }
+ decoder.EndDecoding();
+ return true;
+}
+
+template <template <int> class SymbolDecoderT>
+bool DecodeRawSymbols(uint32_t num_values, DecoderBuffer *src_buffer,
+ uint32_t *out_values) {
+ uint8_t max_bit_length;
+ if (!src_buffer->Decode(&max_bit_length)) {
+ return false;
+ }
+ switch (max_bit_length) {
+ case 1:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<1>>(num_values, src_buffer,
+ out_values);
+ case 2:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<2>>(num_values, src_buffer,
+ out_values);
+ case 3:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<3>>(num_values, src_buffer,
+ out_values);
+ case 4:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<4>>(num_values, src_buffer,
+ out_values);
+ case 5:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<5>>(num_values, src_buffer,
+ out_values);
+ case 6:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<6>>(num_values, src_buffer,
+ out_values);
+ case 7:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<7>>(num_values, src_buffer,
+ out_values);
+ case 8:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<8>>(num_values, src_buffer,
+ out_values);
+ case 9:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<9>>(num_values, src_buffer,
+ out_values);
+ case 10:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<10>>(
+ num_values, src_buffer, out_values);
+ case 11:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<11>>(
+ num_values, src_buffer, out_values);
+ case 12:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<12>>(
+ num_values, src_buffer, out_values);
+ case 13:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<13>>(
+ num_values, src_buffer, out_values);
+ case 14:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<14>>(
+ num_values, src_buffer, out_values);
+ case 15:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<15>>(
+ num_values, src_buffer, out_values);
+ case 16:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<16>>(
+ num_values, src_buffer, out_values);
+ case 17:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<17>>(
+ num_values, src_buffer, out_values);
+ case 18:
+ return DecodeRawSymbolsInternal<SymbolDecoderT<18>>(
+ num_values, src_buffer, out_values);
+ default:
+ return false;
+ }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.h
new file mode 100644
index 0000000..ea11165
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_decoding.h
@@ -0,0 +1,29 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENTROPY_SYMBOL_DECODING_H_
+#define DRACO_COMPRESSION_ENTROPY_SYMBOL_DECODING_H_
+
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// Decodes an array of symbols that was previously encoded with an entropy code.
+// Returns false on error.
+bool DecodeSymbols(uint32_t num_values, int num_components,
+ DecoderBuffer *src_buffer, uint32_t *out_values);
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_SYMBOL_DECODING_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.cc b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.cc
new file mode 100644
index 0000000..710c962
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.cc
@@ -0,0 +1,376 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/entropy/symbol_encoding.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/compression/entropy/rans_symbol_encoder.h"
+#include "draco/compression/entropy/shannon_entropy.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/macros.h"
+
+namespace draco {
+
+constexpr int32_t kMaxTagSymbolBitLength = 32;
+constexpr int kMaxRawEncodingBitLength = 18;
+constexpr int kDefaultSymbolCodingCompressionLevel = 7;
+
+typedef uint64_t TaggedBitLengthFrequencies[kMaxTagSymbolBitLength];
+
+void SetSymbolEncodingMethod(Options *options, SymbolCodingMethod method) {
+ options->SetInt("symbol_encoding_method", method);
+}
+
+bool SetSymbolEncodingCompressionLevel(Options *options,
+ int compression_level) {
+ if (compression_level < 0 || compression_level > 10) {
+ return false;
+ }
+ options->SetInt("symbol_encoding_compression_level", compression_level);
+ return true;
+}
+
+// Computes bit lengths of the input values. If num_components > 1, the values
+// are processed in "num_components" sized chunks and the bit length is always
+// computed for the largest value from the chunk.
+static void ComputeBitLengths(const uint32_t *symbols, int num_values,
+ int num_components,
+ std::vector<uint32_t> *out_bit_lengths,
+ uint32_t *out_max_value) {
+ out_bit_lengths->reserve(num_values);
+ *out_max_value = 0;
+ // Maximum integer value across all components.
+ for (int i = 0; i < num_values; i += num_components) {
+ // Get the maximum value for a given entry across all attribute components.
+ uint32_t max_component_value = symbols[i];
+ for (int j = 1; j < num_components; ++j) {
+ if (max_component_value < symbols[i + j]) {
+ max_component_value = symbols[i + j];
+ }
+ }
+ int value_msb_pos = 0;
+ if (max_component_value > 0) {
+ value_msb_pos = MostSignificantBit(max_component_value);
+ }
+ if (max_component_value > *out_max_value) {
+ *out_max_value = max_component_value;
+ }
+ out_bit_lengths->push_back(value_msb_pos + 1);
+ }
+}
+
+static int64_t ApproximateTaggedSchemeBits(
+ const std::vector<uint32_t> bit_lengths, int num_components) {
+ // Compute the total bit length used by all values (the length of data encode
+ // after tags).
+ uint64_t total_bit_length = 0;
+ for (size_t i = 0; i < bit_lengths.size(); ++i) {
+ total_bit_length += bit_lengths[i];
+ }
+ // Compute the number of entropy bits for tags.
+ int num_unique_symbols;
+ const int64_t tag_bits = ComputeShannonEntropy(
+ bit_lengths.data(), static_cast<int>(bit_lengths.size()), 32,
+ &num_unique_symbols);
+ const int64_t tag_table_bits =
+ ApproximateRAnsFrequencyTableBits(num_unique_symbols, num_unique_symbols);
+ return tag_bits + tag_table_bits + total_bit_length * num_components;
+}
+
+static int64_t ApproximateRawSchemeBits(const uint32_t *symbols,
+ int num_symbols, uint32_t max_value,
+ int *out_num_unique_symbols) {
+ int num_unique_symbols;
+ const int64_t data_bits = ComputeShannonEntropy(
+ symbols, num_symbols, max_value, &num_unique_symbols);
+ const int64_t table_bits =
+ ApproximateRAnsFrequencyTableBits(max_value, num_unique_symbols);
+ *out_num_unique_symbols = num_unique_symbols;
+ return table_bits + data_bits;
+}
+
+template <template <int> class SymbolEncoderT>
+bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
+ int num_components,
+ const std::vector<uint32_t> &bit_lengths,
+ EncoderBuffer *target_buffer);
+
+template <template <int> class SymbolEncoderT>
+bool EncodeRawSymbols(const uint32_t *symbols, int num_values,
+ uint32_t max_entry_value, int32_t num_unique_symbols,
+ const Options *options, EncoderBuffer *target_buffer);
+
+bool EncodeSymbols(const uint32_t *symbols, int num_values, int num_components,
+ const Options *options, EncoderBuffer *target_buffer) {
+ if (num_values < 0) {
+ return false;
+ }
+ if (num_values == 0) {
+ return true;
+ }
+ if (num_components <= 0) {
+ num_components = 1;
+ }
+ std::vector<uint32_t> bit_lengths;
+ uint32_t max_value;
+ ComputeBitLengths(symbols, num_values, num_components, &bit_lengths,
+ &max_value);
+
+ // Approximate number of bits needed for storing the symbols using the tagged
+ // scheme.
+ const int64_t tagged_scheme_total_bits =
+ ApproximateTaggedSchemeBits(bit_lengths, num_components);
+
+ // Approximate number of bits needed for storing the symbols using the raw
+ // scheme.
+ int num_unique_symbols = 0;
+ const int64_t raw_scheme_total_bits = ApproximateRawSchemeBits(
+ symbols, num_values, max_value, &num_unique_symbols);
+
+ // The maximum bit length of a single entry value that we can encode using
+ // the raw scheme.
+ const int max_value_bit_length =
+ MostSignificantBit(std::max(1u, max_value)) + 1;
+
+ int method = -1;
+ if (options != nullptr && options->IsOptionSet("symbol_encoding_method")) {
+ method = options->GetInt("symbol_encoding_method");
+ } else {
+ if (tagged_scheme_total_bits < raw_scheme_total_bits ||
+ max_value_bit_length > kMaxRawEncodingBitLength) {
+ method = SYMBOL_CODING_TAGGED;
+ } else {
+ method = SYMBOL_CODING_RAW;
+ }
+ }
+ // Use the tagged scheme.
+ target_buffer->Encode(static_cast<uint8_t>(method));
+ if (method == SYMBOL_CODING_TAGGED) {
+ return EncodeTaggedSymbols<RAnsSymbolEncoder>(
+ symbols, num_values, num_components, bit_lengths, target_buffer);
+ }
+ if (method == SYMBOL_CODING_RAW) {
+ return EncodeRawSymbols<RAnsSymbolEncoder>(symbols, num_values, max_value,
+ num_unique_symbols, options,
+ target_buffer);
+ }
+ // Unknown method selected.
+ return false;
+}
+
// Encodes |symbols| using the tagged scheme: for every group of
// |num_components| values a single entropy-coded "tag" stores the bit length
// needed by the group, and the raw values follow, bit-packed at that length.
// |bit_lengths| holds one precomputed length per group. Returns true on
// success; the encoded tags and packed values are appended to |target_buffer|.
template <template <int> class SymbolEncoderT>
bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
                         int num_components,
                         const std::vector<uint32_t> &bit_lengths,
                         EncoderBuffer *target_buffer) {
  // Create entries for entropy coding. Each entry corresponds to a different
  // number of bits that are necessary to encode a given value. Every value
  // has at most 32 bits. Therefore, we need 32 different entries (for
  // bit_length [1-32]). For each entry we compute the frequency of a given
  // bit-length in our data set.
  TaggedBitLengthFrequencies frequencies;
  // Set frequency for each entry to zero.
  memset(frequencies, 0, sizeof(frequencies));

  // Compute the frequencies from input data.
  // Maximum integer value for the values across all components.
  for (size_t i = 0; i < bit_lengths.size(); ++i) {
    // Update the frequency of the associated entry id.
    ++frequencies[bit_lengths[i]];
  }

  // Create one extra buffer to store raw value.
  EncoderBuffer value_buffer;
  // Number of expected bits we need to store the values (can be optimized if
  // needed). This is an upper bound; the buffer grows lazily if exceeded.
  const uint64_t value_bits =
      kMaxTagSymbolBitLength * static_cast<uint64_t>(num_values);

  // Create encoder for encoding the bit tags.
  SymbolEncoderT<5> tag_encoder;
  tag_encoder.Create(frequencies, kMaxTagSymbolBitLength, target_buffer);

  // Start encoding bit tags.
  tag_encoder.StartEncoding(target_buffer);

  // Also start encoding the values.
  value_buffer.StartBitEncoding(value_bits, false);

  if (tag_encoder.needs_reverse_encoding()) {
    // Encoder needs the values to be encoded in the reverse order.
    for (int i = num_values - num_components; i >= 0; i -= num_components) {
      const int bit_length = bit_lengths[i / num_components];
      tag_encoder.EncodeSymbol(bit_length);

      // Values are always encoded in the normal order
      // NOTE(review): |j| mirrors |i| across the value array so that values
      // are written front-to-back while tags are emitted back-to-front; this
      // layout must match what the tagged-symbol decoder expects — confirm
      // against the decoder before changing either loop.
      const int j = num_values - num_components - i;
      const int value_bit_length = bit_lengths[j / num_components];
      for (int c = 0; c < num_components; ++c) {
        value_buffer.EncodeLeastSignificantBits32(value_bit_length,
                                                  symbols[j + c]);
      }
    }
  } else {
    for (int i = 0; i < num_values; i += num_components) {
      const int bit_length = bit_lengths[i / num_components];
      // First encode the tag.
      tag_encoder.EncodeSymbol(bit_length);
      // Now encode all values using the stored bit_length.
      for (int j = 0; j < num_components; ++j) {
        value_buffer.EncodeLeastSignificantBits32(bit_length, symbols[i + j]);
      }
    }
  }
  tag_encoder.EndEncoding(target_buffer);
  value_buffer.EndBitEncoding();

  // Append the values to the end of the target buffer.
  target_buffer->Encode(value_buffer.data(), value_buffer.size());
  return true;
}
+
+template <class SymbolEncoderT>
+bool EncodeRawSymbolsInternal(const uint32_t *symbols, int num_values,
+ uint32_t max_entry_value,
+ EncoderBuffer *target_buffer) {
+ // Count the frequency of each entry value.
+ std::vector<uint64_t> frequencies(max_entry_value + 1, 0);
+ for (int i = 0; i < num_values; ++i) {
+ ++frequencies[symbols[i]];
+ }
+
+ SymbolEncoderT encoder;
+ encoder.Create(frequencies.data(), static_cast<int>(frequencies.size()),
+ target_buffer);
+ encoder.StartEncoding(target_buffer);
+ // Encode all values.
+ if (SymbolEncoderT::needs_reverse_encoding()) {
+ for (int i = num_values - 1; i >= 0; --i) {
+ encoder.EncodeSymbol(symbols[i]);
+ }
+ } else {
+ for (int i = 0; i < num_values; ++i) {
+ encoder.EncodeSymbol(symbols[i]);
+ }
+ }
+ encoder.EndEncoding(target_buffer);
+ return true;
+}
+
// Encodes |symbols| using the raw scheme: a single entropy coder over the
// actual symbol values (no bit-length tags). The coder precision is derived
// from |num_unique_symbols| and the optional compression level, written to
// the bitstream as one byte, and used to pick the matching SymbolEncoderT
// instantiation. Returns false when the symbols need more than
// kMaxRawEncodingBitLength bits.
template <template <int> class SymbolEncoderT>
bool EncodeRawSymbols(const uint32_t *symbols, int num_values,
                      uint32_t max_entry_value, int32_t num_unique_symbols,
                      const Options *options, EncoderBuffer *target_buffer) {
  int symbol_bits = 0;
  if (num_unique_symbols > 0) {
    symbol_bits = MostSignificantBit(num_unique_symbols);
  }
  // Bits needed to represent the number of unique symbols.
  int unique_symbols_bit_length = symbol_bits + 1;
  // Currently, we don't support encoding of more than 2^18 unique symbols.
  if (unique_symbols_bit_length > kMaxRawEncodingBitLength) {
    return false;
  }
  int compression_level = kDefaultSymbolCodingCompressionLevel;
  if (options != nullptr &&
      options->IsOptionSet("symbol_encoding_compression_level")) {
    compression_level = options->GetInt("symbol_encoding_compression_level");
  }

  // Adjust the bit_length based on compression level. Lower compression levels
  // will use fewer bits while higher compression levels use more bits. Note
  // that this is going to work for all valid bit_lengths because the actual
  // number of bits allocated for rANS encoding is hard coded as:
  // std::max(12, 3 * bit_length / 2) , therefore there will be always a
  // sufficient number of bits available for all symbols.
  // See ComputeRAnsPrecisionFromUniqueSymbolsBitLength() for the formula.
  // This hardcoded equation cannot be changed without changing the bitstream.
  if (compression_level < 4) {
    unique_symbols_bit_length -= 2;
  } else if (compression_level < 6) {
    unique_symbols_bit_length -= 1;
  } else if (compression_level > 9) {
    unique_symbols_bit_length += 2;
  } else if (compression_level > 7) {
    unique_symbols_bit_length += 1;
  }
  // Clamp the bit_length to a valid range.
  unique_symbols_bit_length = std::min(std::max(1, unique_symbols_bit_length),
                                       kMaxRawEncodingBitLength);
  // The decoder reads this byte back to instantiate the matching precision.
  target_buffer->Encode(static_cast<uint8_t>(unique_symbols_bit_length));
  // Use appropriate symbol encoder based on the maximum symbol bit length.
  // Note: the clamp above guarantees a value >= 1, so case 0 is unreachable;
  // it is kept (falling through to case 1) as a safety net.
  switch (unique_symbols_bit_length) {
    case 0:
      FALLTHROUGH_INTENDED;
    case 1:
      return EncodeRawSymbolsInternal<SymbolEncoderT<1>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 2:
      return EncodeRawSymbolsInternal<SymbolEncoderT<2>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 3:
      return EncodeRawSymbolsInternal<SymbolEncoderT<3>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 4:
      return EncodeRawSymbolsInternal<SymbolEncoderT<4>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 5:
      return EncodeRawSymbolsInternal<SymbolEncoderT<5>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 6:
      return EncodeRawSymbolsInternal<SymbolEncoderT<6>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 7:
      return EncodeRawSymbolsInternal<SymbolEncoderT<7>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 8:
      return EncodeRawSymbolsInternal<SymbolEncoderT<8>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 9:
      return EncodeRawSymbolsInternal<SymbolEncoderT<9>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 10:
      return EncodeRawSymbolsInternal<SymbolEncoderT<10>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 11:
      return EncodeRawSymbolsInternal<SymbolEncoderT<11>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 12:
      return EncodeRawSymbolsInternal<SymbolEncoderT<12>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 13:
      return EncodeRawSymbolsInternal<SymbolEncoderT<13>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 14:
      return EncodeRawSymbolsInternal<SymbolEncoderT<14>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 15:
      return EncodeRawSymbolsInternal<SymbolEncoderT<15>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 16:
      return EncodeRawSymbolsInternal<SymbolEncoderT<16>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 17:
      return EncodeRawSymbolsInternal<SymbolEncoderT<17>>(
          symbols, num_values, max_entry_value, target_buffer);
    case 18:
      return EncodeRawSymbolsInternal<SymbolEncoderT<18>>(
          symbols, num_values, max_entry_value, target_buffer);
    default:
      // Unreachable given the clamp, but keeps the switch exhaustive.
      return false;
  }
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.h b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.h
new file mode 100644
index 0000000..839b28b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/entropy/symbol_encoding.h
@@ -0,0 +1,47 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ENTROPY_SYMBOL_ENCODING_H_
+#define DRACO_COMPRESSION_ENTROPY_SYMBOL_ENCODING_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/options.h"
+
+namespace draco {
+
// Encodes an array of symbols using an entropy coding. This function
// automatically decides whether to encode the symbol values using bit
// length tags (see EncodeTaggedSymbols), or whether to encode them directly
// (see EncodeRawSymbols). The symbols can be grouped into separate components
// that can be used for better compression. |options| is an optional parameter
// (it may be null, in which case defaults are used) that allows more direct
// control over various stages of the symbol encoding (see below for functions
// that are used to set valid options).
// Returns false on error.
bool EncodeSymbols(const uint32_t *symbols, int num_values, int num_components,
                   const Options *options, EncoderBuffer *target_buffer);

// Sets an option that forces symbol encoder to use the specified encoding
// method (SYMBOL_CODING_TAGGED or SYMBOL_CODING_RAW) instead of selecting
// one automatically.
void SetSymbolEncodingMethod(Options *options, SymbolCodingMethod method);

// Sets the desired compression level for symbol encoding in range <0, 10> where
// 0 is the worst but fastest compression and 10 is the best but slowest
// compression. If the option is not set, default value of 7 is used.
// Returns false if an invalid level has been set.
bool SetSymbolEncodingCompressionLevel(Options *options, int compression_level);
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ENTROPY_SYMBOL_ENCODING_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/expert_encode.cc b/libs/assimp/contrib/draco/src/draco/compression/expert_encode.cc
new file mode 100644
index 0000000..f9aec15
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/expert_encode.cc
@@ -0,0 +1,182 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/expert_encode.h"
+
+#include "draco/compression/mesh/mesh_edgebreaker_encoder.h"
+#include "draco/compression/mesh/mesh_sequential_encoder.h"
+#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
+#include "draco/compression/point_cloud/point_cloud_kd_tree_encoder.h"
+#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
+#endif
+
+namespace draco {
+
// Wraps an existing point cloud; the encoder does not take ownership.
ExpertEncoder::ExpertEncoder(const PointCloud &point_cloud)
    : point_cloud_(&point_cloud), mesh_(nullptr) {}
+
// Wraps an existing mesh; the mesh also serves as the point-cloud view of
// the geometry (|point_cloud_| aliases |mesh_|). No ownership is taken.
ExpertEncoder::ExpertEncoder(const Mesh &mesh)
    : point_cloud_(&mesh), mesh_(&mesh) {}
+
+Status ExpertEncoder::EncodeToBuffer(EncoderBuffer *out_buffer) {
+ if (point_cloud_ == nullptr) {
+ return Status(Status::DRACO_ERROR, "Invalid input geometry.");
+ }
+ if (mesh_ == nullptr) {
+ return EncodePointCloudToBuffer(*point_cloud_, out_buffer);
+ }
+ return EncodeMeshToBuffer(*mesh_, out_buffer);
+}
+
// Encodes |pc| into |out_buffer|. Selects between the sequential and the
// kD-tree point-cloud encoders based on the "encoding_method" option, the
// requested speed, and the attribute data types. Returns an error status if
// point-cloud support was compiled out, an explicitly requested method cannot
// be used, or encoding fails.
Status ExpertEncoder::EncodePointCloudToBuffer(const PointCloud &pc,
                                               EncoderBuffer *out_buffer) {
#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
  std::unique_ptr<PointCloudEncoder> encoder;
  // -1 means the user did not request a specific method.
  const int encoding_method = options().GetGlobalInt("encoding_method", -1);

  if (encoding_method == POINT_CLOUD_SEQUENTIAL_ENCODING) {
    // Use sequential encoding if requested.
    encoder.reset(new PointCloudSequentialEncoder());
  } else if (encoding_method == -1 && options().GetSpeed() == 10) {
    // Use sequential encoding if speed is at max.
    encoder.reset(new PointCloudSequentialEncoder());
  } else {
    // Speed < 10, use POINT_CLOUD_KD_TREE_ENCODING if possible.
    bool kd_tree_possible = true;
    // Kd-Tree encoder can be currently used only when the following conditions
    // are satisfied for all attributes:
    //     -data type is float32 and quantization is enabled, OR
    //     -data type is uint32, uint16, uint8 or int32, int16, int8
    for (int i = 0; i < pc.num_attributes(); ++i) {
      const PointAttribute *const att = pc.attribute(i);
      if (kd_tree_possible && att->data_type() != DT_FLOAT32 &&
          att->data_type() != DT_UINT32 && att->data_type() != DT_UINT16 &&
          att->data_type() != DT_UINT8 && att->data_type() != DT_INT32 &&
          att->data_type() != DT_INT16 && att->data_type() != DT_INT8) {
        kd_tree_possible = false;
      }
      if (kd_tree_possible && att->data_type() == DT_FLOAT32 &&
          options().GetAttributeInt(i, "quantization_bits", -1) <= 0) {
        kd_tree_possible = false;  // Quantization not enabled.
      }
      if (!kd_tree_possible) {
        break;
      }
    }

    if (kd_tree_possible) {
      // Create kD-tree encoder (all checks passed).
      encoder.reset(new PointCloudKdTreeEncoder());
    } else if (encoding_method == POINT_CLOUD_KD_TREE_ENCODING) {
      // Encoding method was explicitly specified but we cannot use it for
      // the given input (some of the checks above failed).
      return Status(Status::DRACO_ERROR, "Invalid encoding method.");
    }
  }
  if (!encoder) {
    // Default choice.
    encoder.reset(new PointCloudSequentialEncoder());
  }
  encoder->SetPointCloud(pc);
  DRACO_RETURN_IF_ERROR(encoder->Encode(options(), out_buffer));

  // Publish encoding statistics for the caller; a point cloud has no faces.
  set_num_encoded_points(encoder->num_encoded_points());
  set_num_encoded_faces(0);
  return OkStatus();
#else
  return Status(Status::DRACO_ERROR, "Point cloud encoding is not enabled.");
#endif
}
+
+Status ExpertEncoder::EncodeMeshToBuffer(const Mesh &m,
+ EncoderBuffer *out_buffer) {
+ std::unique_ptr<MeshEncoder> encoder;
+ // Select the encoding method only based on the provided options.
+ int encoding_method = options().GetGlobalInt("encoding_method", -1);
+ if (encoding_method == -1) {
+ // For now select the edgebreaker for all options expect of speed 10
+ if (options().GetSpeed() == 10) {
+ encoding_method = MESH_SEQUENTIAL_ENCODING;
+ } else {
+ encoding_method = MESH_EDGEBREAKER_ENCODING;
+ }
+ }
+ if (encoding_method == MESH_EDGEBREAKER_ENCODING) {
+ encoder = std::unique_ptr<MeshEncoder>(new MeshEdgebreakerEncoder());
+ } else {
+ encoder = std::unique_ptr<MeshEncoder>(new MeshSequentialEncoder());
+ }
+ encoder->SetMesh(m);
+ DRACO_RETURN_IF_ERROR(encoder->Encode(options(), out_buffer));
+
+ set_num_encoded_points(encoder->num_encoded_points());
+ set_num_encoded_faces(encoder->num_encoded_faces());
+ return OkStatus();
+}
+
// Replaces all encoder options with |options| (see EncoderBase::Reset);
// any per-attribute settings made earlier are discarded.
void ExpertEncoder::Reset(const EncoderOptions &options) {
  Base::Reset(options);
}
+
// Restores the default encoder options (see EncoderBase::Reset).
void ExpertEncoder::Reset() { Base::Reset(); }
+
// Forwards the encoding/decoding speed trade-off settings to the base
// encoder options (see the class header for the 0-10 semantics).
void ExpertEncoder::SetSpeedOptions(int encoding_speed, int decoding_speed) {
  Base::SetSpeedOptions(encoding_speed, decoding_speed);
}
+
// Records the per-attribute "quantization_bits" option, consumed by the
// attribute encoders during EncodeToBuffer().
void ExpertEncoder::SetAttributeQuantization(int32_t attribute_id,
                                             int quantization_bits) {
  options().SetAttributeInt(attribute_id, "quantization_bits",
                            quantization_bits);
}
+
// Records explicit quantization parameters for one attribute: the bit
// precision, the quantization box origin (|num_dims| floats pointed to by
// |origin|) and the box extent |range|.
void ExpertEncoder::SetAttributeExplicitQuantization(int32_t attribute_id,
                                                     int quantization_bits,
                                                     int num_dims,
                                                     const float *origin,
                                                     float range) {
  options().SetAttributeInt(attribute_id, "quantization_bits",
                            quantization_bits);
  options().SetAttributeVector(attribute_id, "quantization_origin", num_dims,
                               origin);
  options().SetAttributeFloat(attribute_id, "quantization_range", range);
}
+
// Toggles the global "use_built_in_attribute_compression" option (see the
// class header for when disabling it is useful).
void ExpertEncoder::SetUseBuiltInAttributeCompression(bool enabled) {
  options().SetGlobalBool("use_built_in_attribute_compression", enabled);
}
+
// Forwards the explicit encoding-method selection to the base encoder.
void ExpertEncoder::SetEncodingMethod(int encoding_method) {
  Base::SetEncodingMethod(encoding_method);
}
+
// Forwards the edgebreaker submethod selection to the base encoder.
void ExpertEncoder::SetEncodingSubmethod(int encoding_submethod) {
  Base::SetEncodingSubmethod(encoding_submethod);
}
+
+Status ExpertEncoder::SetAttributePredictionScheme(
+ int32_t attribute_id, int prediction_scheme_method) {
+ auto att = point_cloud_->attribute(attribute_id);
+ auto att_type = att->attribute_type();
+ const Status status =
+ CheckPredictionScheme(att_type, prediction_scheme_method);
+ if (!status.ok()) {
+ return status;
+ }
+ options().SetAttributeInt(attribute_id, "prediction_scheme",
+ prediction_scheme_method);
+ return status;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/expert_encode.h b/libs/assimp/contrib/draco/src/draco/compression/expert_encode.h
new file mode 100644
index 0000000..ea59393
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/expert_encode.h
@@ -0,0 +1,147 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_EXPERT_ENCODE_H_
+#define DRACO_COMPRESSION_EXPERT_ENCODE_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/encoder_options.h"
+#include "draco/compression/encode_base.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/status.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
// Advanced helper class for encoding geometry using the Draco compression
// library. Unlike the basic Encoder (encode.h), this class allows users to
// specify options for each attribute individually using provided attribute ids.
// The drawback of this encoder is that it can be used to encode only one model
// at a time, and for each new model the options need to be set again.
class ExpertEncoder : public EncoderBase<EncoderOptions> {
 public:
  typedef EncoderBase<EncoderOptions> Base;
  typedef EncoderOptions OptionsType;

  // The encoder keeps only a pointer to the given geometry; the caller must
  // keep it alive for the lifetime of the encoder.
  explicit ExpertEncoder(const PointCloud &point_cloud);
  explicit ExpertEncoder(const Mesh &mesh);

  // Encodes the geometry provided in the constructor to the target buffer.
  // Returns an error status when no geometry is available or encoding fails.
  Status EncodeToBuffer(EncoderBuffer *out_buffer);

  // Set encoder options used during the geometry encoding. Note that this call
  // overwrites any modifications to the options done with the functions below.
  void Reset(const EncoderOptions &options);
  void Reset();

  // Sets the desired encoding and decoding speed for the given options.
  //
  //  0 = slowest speed, but the best compression.
  // 10 = fastest, but the worst compression.
  // -1 = undefined.
  //
  // Note that both speed options affect the encoder choice of used methods and
  // algorithms. For example, a requirement for fast decoding may prevent the
  // encoder from using the best compression methods even if the encoding speed
  // is set to 0. In general, the faster of the two options limits the choice of
  // features that can be used by the encoder. Additionally, setting
  // |decoding_speed| to be faster than the |encoding_speed| may allow the
  // encoder to choose the optimal method out of the available features for the
  // given |decoding_speed|.
  void SetSpeedOptions(int encoding_speed, int decoding_speed);

  // Sets the quantization compression options for a specific attribute. The
  // attribute values will be quantized in a box defined by the maximum extent
  // of the attribute values. I.e., the actual precision of this option depends
  // on the scale of the attribute values.
  void SetAttributeQuantization(int32_t attribute_id, int quantization_bits);

  // Sets the explicit quantization compression for a named attribute. The
  // attribute values will be quantized in a coordinate system defined by the
  // provided origin and range (the input values should be within interval:
  // <origin, origin + range>).
  void SetAttributeExplicitQuantization(int32_t attribute_id,
                                        int quantization_bits, int num_dims,
                                        const float *origin, float range);

  // Enables/disables built in entropy coding of attribute values. Disabling
  // this option may be useful to improve the performance when third party
  // compression is used on top of the Draco compression. Default: [true].
  void SetUseBuiltInAttributeCompression(bool enabled);

  // Sets the desired encoding method for a given geometry. By default, encoding
  // method is selected based on the properties of the input geometry and based
  // on the other options selected in the used EncoderOptions (such as desired
  // encoding and decoding speed). This function should be called only when a
  // specific method is required.
  //
  // |encoding_method| can be one of the values defined in
  // compression/config/compression_shared.h based on the type of the input
  // geometry that is going to be encoded. For point clouds, allowed entries are
  //   POINT_CLOUD_SEQUENTIAL_ENCODING
  //   POINT_CLOUD_KD_TREE_ENCODING
  //
  // For meshes the input can be
  //   MESH_SEQUENTIAL_ENCODING
  //   MESH_EDGEBREAKER_ENCODING
  //
  // If the selected method cannot be used for the given input, the subsequent
  // call of EncodePointCloudToBuffer or EncodeMeshToBuffer is going to fail.
  void SetEncodingMethod(int encoding_method);

  // Sets the desired encoding submethod, only for MESH_EDGEBREAKER_ENCODING.
  // Valid values for |encoding_submethod| are:
  //   MESH_EDGEBREAKER_STANDARD_ENCODING
  //   MESH_EDGEBREAKER_VALENCE_ENCODING
  // see also compression/config/compression_shared.h.
  void SetEncodingSubmethod(int encoding_submethod);

  // Sets the desired prediction method for a given attribute. By default,
  // prediction scheme is selected automatically by the encoder using other
  // provided options (such as speed) and input geometry type (mesh, point
  // cloud). This function should be called only when a specific prediction is
  // preferred (e.g., when it is known that the encoder would select a less
  // optimal prediction for the given input data).
  //
  // |prediction_scheme_method| should be one of the entries defined in
  // compression/config/compression_shared.h :
  //
  //   PREDICTION_NONE - use no prediction.
  //   PREDICTION_DIFFERENCE - delta coding
  //   MESH_PREDICTION_PARALLELOGRAM - parallelogram prediction for meshes.
  //   MESH_PREDICTION_CONSTRAINED_PARALLELOGRAM
  //      - better and more costly version of the parallelogram prediction.
  //   MESH_PREDICTION_TEX_COORDS_PORTABLE
  //      - specialized predictor for tex coordinates.
  //   MESH_PREDICTION_GEOMETRIC_NORMAL
  //      - specialized predictor for normal coordinates.
  //
  // Note that in case the desired prediction cannot be used, the default
  // prediction will be automatically used instead.
  Status SetAttributePredictionScheme(int32_t attribute_id,
                                      int prediction_scheme_method);

 private:
  Status EncodePointCloudToBuffer(const PointCloud &pc,
                                  EncoderBuffer *out_buffer);

  Status EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer);

  // Non-owning pointers to the input geometry. |point_cloud_| is always set
  // by the constructors; |mesh_| is non-null only when encoding a mesh.
  const PointCloud *point_cloud_;
  const Mesh *mesh_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_EXPERT_ENCODE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.cc
new file mode 100644
index 0000000..6e48e56
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.cc
@@ -0,0 +1,37 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_decoder.h"
+
+namespace draco {
+
// The target mesh is attached later, in Decode().
MeshDecoder::MeshDecoder() : mesh_(nullptr) {}
+
// Decodes a mesh from |in_buffer| into |out_mesh|. The target mesh is
// remembered first so that DecodeGeometryData() can rebuild connectivity
// before the shared point-cloud path decodes the attributes.
Status MeshDecoder::Decode(const DecoderOptions &options,
                           DecoderBuffer *in_buffer, Mesh *out_mesh) {
  mesh_ = out_mesh;
  return PointCloudDecoder::Decode(options, in_buffer, out_mesh);
}
+
+bool MeshDecoder::DecodeGeometryData() {
+ if (mesh_ == nullptr) {
+ return false;
+ }
+ if (!DecodeConnectivity()) {
+ return false;
+ }
+ return PointCloudDecoder::DecodeGeometryData();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.h
new file mode 100644
index 0000000..397a679
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_decoder.h
@@ -0,0 +1,68 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_DECODER_H_
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+#include "draco/mesh/mesh.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
// Class that reconstructs a 3D mesh from input data that was encoded by
// MeshEncoder. Concrete subclasses supply the connectivity decoding.
class MeshDecoder : public PointCloudDecoder {
 public:
  MeshDecoder();

  EncodedGeometryType GetGeometryType() const override {
    return TRIANGULAR_MESH;
  }

  // The main entry point for mesh decoding.
  Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer,
                Mesh *out_mesh);

  // Returns the base connectivity of the decoded mesh (or nullptr if it is not
  // initialized). The base implementation always returns nullptr; subclasses
  // with connectivity data override it.
  virtual const CornerTable *GetCornerTable() const { return nullptr; }

  // Returns the attribute connectivity data or nullptr if it does not exist.
  virtual const MeshAttributeCornerTable *GetAttributeCornerTable(
      int /* att_id */) const {
    return nullptr;
  }

  // Returns the decoding data for a given attribute or nullptr when the data
  // does not exist.
  virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
      int /* att_id */) const {
    return nullptr;
  }

  // Returns the mesh being decoded, or nullptr before Decode() was called.
  Mesh *mesh() const { return mesh_; }

 protected:
  // Decodes connectivity first, then the shared point-cloud geometry data.
  bool DecodeGeometryData() override;
  // Implemented by concrete decoders such as MeshEdgebreakerDecoder.
  virtual bool DecodeConnectivity() = 0;

 private:
  // Non-owning pointer to the output mesh, set in Decode().
  Mesh *mesh_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.cc
new file mode 100644
index 0000000..427dd59
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.cc
@@ -0,0 +1,70 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_edgebreaker_decoder.h"
+
+#include "draco/compression/mesh/mesh_edgebreaker_decoder_impl.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
+
+namespace draco {
+
// |impl_| stays null until InitializeDecoder() selects the traversal scheme.
MeshEdgebreakerDecoder::MeshEdgebreakerDecoder() {}
+
// Delegates creation of the attributes decoder to the traversal-specific
// implementation selected in InitializeDecoder().
bool MeshEdgebreakerDecoder::CreateAttributesDecoder(int32_t att_decoder_id) {
  return impl_->CreateAttributesDecoder(att_decoder_id);
}
+
// Reads the traversal-decoder type byte from the bitstream and instantiates
// the matching edgebreaker implementation. Returns false when the byte
// cannot be read, when it names a scheme whose support was compiled out, or
// when the implementation fails to initialize.
bool MeshEdgebreakerDecoder::InitializeDecoder() {
  uint8_t traversal_decoder_type;
  if (!buffer()->Decode(&traversal_decoder_type)) {
    return false;
  }
  // Reset any implementation left over from a previous decode.
  impl_ = nullptr;
  if (traversal_decoder_type == MESH_EDGEBREAKER_STANDARD_ENCODING) {
#ifdef DRACO_STANDARD_EDGEBREAKER_SUPPORTED
    impl_ = std::unique_ptr<MeshEdgebreakerDecoderImplInterface>(
        new MeshEdgebreakerDecoderImpl<MeshEdgebreakerTraversalDecoder>());
#endif
  } else if (traversal_decoder_type == MESH_EDGEBREAKER_PREDICTIVE_ENCODING) {
    // Predictive traversal is only available in backwards-compatible builds.
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
#ifdef DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED
    impl_ = std::unique_ptr<MeshEdgebreakerDecoderImplInterface>(
        new MeshEdgebreakerDecoderImpl<
            MeshEdgebreakerTraversalPredictiveDecoder>());
#endif
#endif
  } else if (traversal_decoder_type == MESH_EDGEBREAKER_VALENCE_ENCODING) {
    impl_ = std::unique_ptr<MeshEdgebreakerDecoderImplInterface>(
        new MeshEdgebreakerDecoderImpl<
            MeshEdgebreakerTraversalValenceDecoder>());
  }
  // |impl_| remains null for unknown or unsupported traversal types.
  if (!impl_) {
    return false;
  }
  if (!impl_->Init(this)) {
    return false;
  }
  return true;
}
+
// Delegates connectivity decoding to the traversal-specific implementation.
bool MeshEdgebreakerDecoder::DecodeConnectivity() {
  return impl_->DecodeConnectivity();
}
+
// Delegates the post-attribute-decoding hook to the implementation.
bool MeshEdgebreakerDecoder::OnAttributesDecoded() {
  return impl_->OnAttributesDecoded();
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.h
new file mode 100644
index 0000000..5c16179
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_H_
+
+#include "draco/compression/mesh/mesh_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Class for decoding data encoded by MeshEdgebreakerEncoder.
+class MeshEdgebreakerDecoder : public MeshDecoder {
+ public:
+  MeshEdgebreakerDecoder();
+
+  // Returns the corner table of the decoded mesh (forwarded to |impl_|).
+  const CornerTable *GetCornerTable() const override {
+    return impl_->GetCornerTable();
+  }
+
+  // Returns the attribute specific corner table for |att_id|, or nullptr
+  // when the attribute has no usable connectivity (forwarded to |impl_|).
+  const MeshAttributeCornerTable *GetAttributeCornerTable(
+      int att_id) const override {
+    return impl_->GetAttributeCornerTable(att_id);
+  }
+
+  // Returns the indices encoding data for |att_id| (forwarded to |impl_|).
+  const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
+      int att_id) const override {
+    return impl_->GetAttributeEncodingData(att_id);
+  }
+
+ protected:
+  bool InitializeDecoder() override;
+  bool CreateAttributesDecoder(int32_t att_decoder_id) override;
+  bool DecodeConnectivity() override;
+  bool OnAttributesDecoded() override;
+
+  // Concrete decoder implementation, selected in InitializeDecoder() based
+  // on the traversal decoder type read from the bitstream.
+  std::unique_ptr<MeshEdgebreakerDecoderImplInterface> impl_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc
new file mode 100644
index 0000000..0bbbea4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc
@@ -0,0 +1,1231 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_edgebreaker_decoder_impl.h"
+
+#include <algorithm>
+
+#include "draco/compression/attributes/sequential_attribute_decoders_controller.h"
+#include "draco/compression/mesh/mesh_edgebreaker_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
+#include "draco/compression/mesh/traverser/depth_first_traverser.h"
+#include "draco/compression/mesh/traverser/max_prediction_degree_traverser.h"
+#include "draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h"
+#include "draco/compression/mesh/traverser/mesh_traversal_sequencer.h"
+#include "draco/compression/mesh/traverser/traverser_base.h"
+#include "draco/mesh/corner_table_iterators.h"
+
+namespace draco {
+
+// Types of "free" edges that are used during topology decoding.
+// A free edge is an edge that is connected to one face only.
+// All edge types are stored in the opposite_corner_id_ array, where each
+// edge "e" is uniquely identified by the opposite corner "C" in its parent
+// triangle:
+// *
+// /C\
+// / \
+// / e \
+// *-------*
+// For more description about how the edges are used, see comment inside
+// ZipConnectivity() method.
+
+// Constructor. Marks the symbol / vertex / face bookkeeping ids as invalid
+// (-1) and clears the vertex counters; the real decoding state is built in
+// DecodeConnectivity().
+template <class TraversalDecoder>
+MeshEdgebreakerDecoderImpl<TraversalDecoder>::MeshEdgebreakerDecoderImpl()
+    : decoder_(nullptr),
+      last_symbol_id_(-1),
+      last_vert_id_(-1),
+      last_face_id_(-1),
+      num_new_vertices_(0),
+      num_encoded_vertices_(0),
+      pos_data_decoder_id_(-1) {}
+
+// Stores the pointer back to the owning decoder (used for buffer and mesh
+// access throughout decoding). Always succeeds.
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<TraversalDecoder>::Init(
+    MeshEdgebreakerDecoder *decoder) {
+  decoder_ = decoder;
+  return true;
+}
+
+// Returns the attribute corner table for |att_id|, or nullptr when the
+// attribute is not handled by any attribute data entry or its connectivity
+// was not used during decoding.
+template <class TraversalDecoder>
+const MeshAttributeCornerTable *
+MeshEdgebreakerDecoderImpl<TraversalDecoder>::GetAttributeCornerTable(
+    int att_id) const {
+  for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+    // Skip attribute data not assigned to a valid attributes decoder.
+    const int decoder_id = attribute_data_[i].decoder_id;
+    if (decoder_id < 0 || decoder_id >= decoder_->num_attributes_decoders()) {
+      continue;
+    }
+    const AttributesDecoderInterface *const dec =
+        decoder_->attributes_decoder(decoder_id);
+    // Check whether this decoder handles the requested attribute id.
+    for (int j = 0; j < dec->GetNumAttributes(); ++j) {
+      if (dec->GetAttributeId(j) == att_id) {
+        if (attribute_data_[i].is_connectivity_used) {
+          return &attribute_data_[i].connectivity_data;
+        }
+        return nullptr;
+      }
+    }
+  }
+  return nullptr;
+}
+
+// Returns the indices encoding data of the attribute data entry handling
+// |att_id|. Falls back to the shared position encoding data when no entry
+// matches.
+template <class TraversalDecoder>
+const MeshAttributeIndicesEncodingData *
+MeshEdgebreakerDecoderImpl<TraversalDecoder>::GetAttributeEncodingData(
+    int att_id) const {
+  for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+    // Skip attribute data not assigned to a valid attributes decoder.
+    const int decoder_id = attribute_data_[i].decoder_id;
+    if (decoder_id < 0 || decoder_id >= decoder_->num_attributes_decoders()) {
+      continue;
+    }
+    const AttributesDecoderInterface *const dec =
+        decoder_->attributes_decoder(decoder_id);
+    for (int j = 0; j < dec->GetNumAttributes(); ++j) {
+      if (dec->GetAttributeId(j) == att_id) {
+        return &attribute_data_[i].encoding_data;
+      }
+    }
+  }
+  // No entry handles |att_id| — use the shared position encoding data.
+  return &pos_encoding_data_;
+}
+
+// Builds a vertex traversal sequencer over the base corner table using the
+// traverser type |TraverserT| and the provided |encoding_data|. The returned
+// sequencer defines the order in which attribute values are decoded.
+template <class TraversalDecoder>
+template <class TraverserT>
+std::unique_ptr<PointsSequencer>
+MeshEdgebreakerDecoderImpl<TraversalDecoder>::CreateVertexTraversalSequencer(
+    MeshAttributeIndicesEncodingData *encoding_data) {
+  typedef typename TraverserT::TraversalObserver AttObserver;
+  typedef typename TraverserT::CornerTable CornerTable;
+
+  const Mesh *mesh = decoder_->mesh();
+  std::unique_ptr<MeshTraversalSequencer<TraverserT>> traversal_sequencer(
+      new MeshTraversalSequencer<TraverserT>(mesh, encoding_data));
+
+  // The observer records the traversal order into |encoding_data| as the
+  // traverser walks the corner table.
+  AttObserver att_observer(corner_table_.get(), mesh, traversal_sequencer.get(),
+                           encoding_data);
+
+  TraverserT att_traverser;
+  att_traverser.Init(corner_table_.get(), att_observer);
+
+  traversal_sequencer->SetTraverser(att_traverser);
+  return std::move(traversal_sequencer);
+}
+
+// Creates the attributes decoder with id |att_decoder_id|. The decoder is
+// bound either to one of the |attribute_data_| entries (att_data_id >= 0) or
+// to the shared position encoding data (att_data_id < 0). A points sequencer
+// is then built according to the encoded traversal method and decoder type
+// (per-vertex vs. per-corner), and handed to a sequential attributes decoder
+// controller registered with the owning decoder.
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<TraversalDecoder>::CreateAttributesDecoder(
+    int32_t att_decoder_id) {
+  // Id of the attribute data this decoder operates on (-1 => position data).
+  int8_t att_data_id;
+  if (!decoder_->buffer()->Decode(&att_data_id)) {
+    return false;
+  }
+  uint8_t decoder_type;
+  if (!decoder_->buffer()->Decode(&decoder_type)) {
+    return false;
+  }
+
+  if (att_data_id >= 0) {
+    if (att_data_id >= attribute_data_.size()) {
+      return false;  // Unexpected attribute data.
+    }
+
+    // Ensure that the attribute data is not mapped to a different attributes
+    // decoder already.
+    if (attribute_data_[att_data_id].decoder_id >= 0) {
+      return false;
+    }
+
+    attribute_data_[att_data_id].decoder_id = att_decoder_id;
+  } else {
+    // Assign the attributes decoder to |pos_encoding_data_|.
+    if (pos_data_decoder_id_ >= 0) {
+      return false;  // Some other decoder is already using the data. Error.
+    }
+    pos_data_decoder_id_ = att_decoder_id;
+  }
+
+  // The traversal method is only present in bitstream version >= 1.2; older
+  // streams implicitly use depth-first traversal.
+  MeshTraversalMethod traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
+  if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
+    uint8_t traversal_method_encoded;
+    if (!decoder_->buffer()->Decode(&traversal_method_encoded)) {
+      return false;
+    }
+    // Check that decoded traversal method is valid.
+    if (traversal_method_encoded >= NUM_TRAVERSAL_METHODS) {
+      return false;
+    }
+    traversal_method =
+        static_cast<MeshTraversalMethod>(traversal_method_encoded);
+  }
+
+  const Mesh *mesh = decoder_->mesh();
+  std::unique_ptr<PointsSequencer> sequencer;
+
+  if (decoder_type == MESH_VERTEX_ATTRIBUTE) {
+    // Per-vertex attribute decoder.
+
+    MeshAttributeIndicesEncodingData *encoding_data = nullptr;
+    if (att_data_id < 0) {
+      encoding_data = &pos_encoding_data_;
+    } else {
+      encoding_data = &attribute_data_[att_data_id].encoding_data;
+      // Mark the attribute connectivity data invalid to ensure it's not used
+      // later on.
+      attribute_data_[att_data_id].is_connectivity_used = false;
+    }
+    // Defining sequencer via a traversal scheme.
+    if (traversal_method == MESH_TRAVERSAL_PREDICTION_DEGREE) {
+      typedef MeshAttributeIndicesEncodingObserver<CornerTable> AttObserver;
+      typedef MaxPredictionDegreeTraverser<CornerTable, AttObserver>
+          AttTraverser;
+      sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
+    } else if (traversal_method == MESH_TRAVERSAL_DEPTH_FIRST) {
+      typedef MeshAttributeIndicesEncodingObserver<CornerTable> AttObserver;
+      typedef DepthFirstTraverser<CornerTable, AttObserver> AttTraverser;
+      sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
+    } else {
+      return false;  // Unsupported method
+    }
+  } else {
+    if (traversal_method != MESH_TRAVERSAL_DEPTH_FIRST) {
+      return false;  // Unsupported method.
+    }
+    if (att_data_id < 0) {
+      return false;  // Attribute data must be specified.
+    }
+
+    // Per-corner attribute decoder.
+
+    typedef MeshAttributeIndicesEncodingObserver<MeshAttributeCornerTable>
+        AttObserver;
+    typedef DepthFirstTraverser<MeshAttributeCornerTable, AttObserver>
+        AttTraverser;
+
+    MeshAttributeIndicesEncodingData *const encoding_data =
+        &attribute_data_[att_data_id].encoding_data;
+    // Per-corner decoders traverse the attribute's own corner table instead
+    // of the base mesh corner table.
+    const MeshAttributeCornerTable *const corner_table =
+        &attribute_data_[att_data_id].connectivity_data;
+
+    std::unique_ptr<MeshTraversalSequencer<AttTraverser>> traversal_sequencer(
+        new MeshTraversalSequencer<AttTraverser>(mesh, encoding_data));
+
+    AttObserver att_observer(corner_table, mesh, traversal_sequencer.get(),
+                             encoding_data);
+
+    AttTraverser att_traverser;
+    att_traverser.Init(corner_table, att_observer);
+
+    traversal_sequencer->SetTraverser(att_traverser);
+    sequencer = std::move(traversal_sequencer);
+  }
+
+  if (!sequencer) {
+    return false;
+  }
+
+  // The controller drives the actual per-attribute decoding in the point
+  // order produced by |sequencer|.
+  std::unique_ptr<SequentialAttributeDecodersController> att_controller(
+      new SequentialAttributeDecodersController(std::move(sequencer)));
+
+  return decoder_->SetAttributesDecoder(att_decoder_id,
+                                        std::move(att_controller));
+}
+
+// Decodes the connectivity portion of the edgebreaker encoded mesh:
+//   1. Reads and cross-validates the header counts (vertices, faces,
+//      attribute data, symbols, split symbols); each count is read either
+//      as a raw value or a varint depending on the bitstream version.
+//   2. Resets the corner table and all per-decode state.
+//   3. Decodes hole / topology split events and the edgebreaker symbols
+//      (the symbol loop lives in the int-returning DecodeConnectivity()).
+//   4. Rebuilds attribute seams / connectivity and initializes the
+//      per-attribute encoding data and point-to-corner mapping.
+// Returns false on any malformed or inconsistent input.
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<TraversalDecoder>::DecodeConnectivity() {
+  num_new_vertices_ = 0;
+  new_to_parent_vertex_map_.clear();
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+    uint32_t num_new_verts;
+    if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+      if (!decoder_->buffer()->Decode(&num_new_verts)) {
+        return false;
+      }
+    } else {
+      if (!DecodeVarint(&num_new_verts, decoder_->buffer())) {
+        return false;
+      }
+    }
+    num_new_vertices_ = num_new_verts;
+  }
+#endif
+
+  uint32_t num_encoded_vertices;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+    if (!decoder_->buffer()->Decode(&num_encoded_vertices)) {
+      return false;
+    }
+
+  } else
+#endif
+  {
+    if (!DecodeVarint(&num_encoded_vertices, decoder_->buffer())) {
+      return false;
+    }
+  }
+  num_encoded_vertices_ = num_encoded_vertices;
+
+  uint32_t num_faces;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+    if (!decoder_->buffer()->Decode(&num_faces)) {
+      return false;
+    }
+
+  } else
+#endif
+  {
+    if (!DecodeVarint(&num_faces, decoder_->buffer())) {
+      return false;
+    }
+  }
+  // Guard against overflow of the 3 * num_faces corner count below.
+  if (num_faces > std::numeric_limits<CornerIndex::ValueType>::max() / 3) {
+    return false;  // Draco cannot handle this many faces.
+  }
+
+  if (static_cast<uint32_t>(num_encoded_vertices_) > num_faces * 3) {
+    return false;  // There cannot be more vertices than 3 * num_faces.
+  }
+  uint8_t num_attribute_data;
+  if (!decoder_->buffer()->Decode(&num_attribute_data)) {
+    return false;
+  }
+
+  uint32_t num_encoded_symbols;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+    if (!decoder_->buffer()->Decode(&num_encoded_symbols)) {
+      return false;
+    }
+
+  } else
+#endif
+  {
+    if (!DecodeVarint(&num_encoded_symbols, decoder_->buffer())) {
+      return false;
+    }
+  }
+
+  if (num_faces < num_encoded_symbols) {
+    // Number of faces needs to be the same or greater than the number of
+    // symbols (it can be greater because the initial face may not be encoded as
+    // a symbol).
+    return false;
+  }
+  const uint32_t max_encoded_faces =
+      num_encoded_symbols + (num_encoded_symbols / 3);
+  if (num_faces > max_encoded_faces) {
+    // Faces can only be 1 1/3 times bigger than number of encoded symbols. This
+    // could only happen if all new encoded components started with interior
+    // triangles. E.g. A mesh with multiple tetrahedrons.
+    return false;
+  }
+
+  uint32_t num_encoded_split_symbols;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+    if (!decoder_->buffer()->Decode(&num_encoded_split_symbols)) {
+      return false;
+    }
+
+  } else
+#endif
+  {
+    if (!DecodeVarint(&num_encoded_split_symbols, decoder_->buffer())) {
+      return false;
+    }
+  }
+
+  if (num_encoded_split_symbols > num_encoded_symbols) {
+    return false;  // Split symbols are a sub-set of all symbols.
+  }
+
+  // Decode topology (connectivity).
+  vertex_traversal_length_.clear();
+  corner_table_ = std::unique_ptr<CornerTable>(new CornerTable());
+  if (corner_table_ == nullptr) {
+    return false;
+  }
+  processed_corner_ids_.clear();
+  processed_corner_ids_.reserve(num_faces);
+  processed_connectivity_corners_.clear();
+  processed_connectivity_corners_.reserve(num_faces);
+  topology_split_data_.clear();
+  hole_event_data_.clear();
+  init_face_configurations_.clear();
+  init_corners_.clear();
+
+  last_symbol_id_ = -1;
+  last_face_id_ = -1;
+  last_vert_id_ = -1;
+
+  attribute_data_.clear();
+  // Add one attribute data for each attribute decoder.
+  attribute_data_.resize(num_attribute_data);
+
+  if (!corner_table_->Reset(
+          num_faces, num_encoded_vertices_ + num_encoded_split_symbols)) {
+    return false;
+  }
+
+  // Start with all vertices marked as holes (boundaries).
+  // Only vertices decoded with TOPOLOGY_C symbol (and the initial face) will
+  // be marked as non hole vertices. We need to allocate the array larger
+  // because split symbols can create extra vertices during the decoding
+  // process (these extra vertices are then eliminated during deduplication).
+  is_vert_hole_.assign(num_encoded_vertices_ + num_encoded_split_symbols, true);
+
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  int32_t topology_split_decoded_bytes = -1;
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+    uint32_t encoded_connectivity_size;
+    if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+      if (!decoder_->buffer()->Decode(&encoded_connectivity_size)) {
+        return false;
+      }
+    } else {
+      if (!DecodeVarint(&encoded_connectivity_size, decoder_->buffer())) {
+        return false;
+      }
+    }
+    if (encoded_connectivity_size == 0 ||
+        encoded_connectivity_size > decoder_->buffer()->remaining_size()) {
+      return false;
+    }
+    // In pre-2.2 bitstreams the events are stored after the connectivity
+    // data, so decode them from a secondary buffer positioned past it.
+    DecoderBuffer event_buffer;
+    event_buffer.Init(
+        decoder_->buffer()->data_head() + encoded_connectivity_size,
+        decoder_->buffer()->remaining_size() - encoded_connectivity_size,
+        decoder_->buffer()->bitstream_version());
+    // Decode hole and topology split events.
+    topology_split_decoded_bytes =
+        DecodeHoleAndTopologySplitEvents(&event_buffer);
+    if (topology_split_decoded_bytes == -1) {
+      return false;
+    }
+
+  } else
+#endif
+  {
+    if (DecodeHoleAndTopologySplitEvents(decoder_->buffer()) == -1) {
+      return false;
+    }
+  }
+
+  traversal_decoder_.Init(this);
+  // Add one extra vertex for each split symbol.
+  traversal_decoder_.SetNumEncodedVertices(num_encoded_vertices_ +
+                                           num_encoded_split_symbols);
+  traversal_decoder_.SetNumAttributeData(num_attribute_data);
+
+  DecoderBuffer traversal_end_buffer;
+  if (!traversal_decoder_.Start(&traversal_end_buffer)) {
+    return false;
+  }
+
+  // Run the symbol-based connectivity decoding (int-returning overload).
+  const int num_connectivity_verts = DecodeConnectivity(num_encoded_symbols);
+  if (num_connectivity_verts == -1) {
+    return false;
+  }
+
+  // Set the main buffer to the end of the traversal.
+  decoder_->buffer()->Init(traversal_end_buffer.data_head(),
+                           traversal_end_buffer.remaining_size(),
+                           decoder_->buffer()->bitstream_version());
+
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+    // Skip topology split data that was already decoded earlier.
+    decoder_->buffer()->Advance(topology_split_decoded_bytes);
+  }
+#endif
+
+  // Decode connectivity of non-position attributes.
+  if (attribute_data_.size() > 0) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+    if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 1)) {
+      for (CornerIndex ci(0); ci < corner_table_->num_corners(); ci += 3) {
+        if (!DecodeAttributeConnectivitiesOnFaceLegacy(ci)) {
+          return false;
+        }
+      }
+
+    } else
+#endif
+    {
+      for (CornerIndex ci(0); ci < corner_table_->num_corners(); ci += 3) {
+        if (!DecodeAttributeConnectivitiesOnFace(ci)) {
+          return false;
+        }
+      }
+    }
+  }
+  traversal_decoder_.Done();
+
+  // Decode attribute connectivity.
+  // Prepare data structure for decoding non-position attribute connectivity.
+  for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+    attribute_data_[i].connectivity_data.InitEmpty(corner_table_.get());
+    // Add all seams.
+    for (int32_t c : attribute_data_[i].attribute_seam_corners) {
+      attribute_data_[i].connectivity_data.AddSeamEdge(CornerIndex(c));
+    }
+    // Recompute vertices from the newly added seam edges.
+    attribute_data_[i].connectivity_data.RecomputeVertices(nullptr, nullptr);
+  }
+
+  pos_encoding_data_.Init(corner_table_->num_vertices());
+  for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+    // For non-position attributes, preallocate the vertex to value mapping
+    // using the maximum number of vertices from the base corner table and the
+    // attribute corner table (since the attribute decoder may use either of
+    // it).
+    int32_t att_connectivity_verts =
+        attribute_data_[i].connectivity_data.num_vertices();
+    if (att_connectivity_verts < corner_table_->num_vertices()) {
+      att_connectivity_verts = corner_table_->num_vertices();
+    }
+    attribute_data_[i].encoding_data.Init(att_connectivity_verts);
+  }
+  if (!AssignPointsToCorners(num_connectivity_verts)) {
+    return false;
+  }
+  return true;
+}
+
+// No post-processing of decoded attributes is needed by this implementation.
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<TraversalDecoder>::OnAttributesDecoded() {
+  return true;
+}
+
+template <class TraversalDecoder>
+int MeshEdgebreakerDecoderImpl<TraversalDecoder>::DecodeConnectivity(
+ int num_symbols) {
+ // Algorithm does the reverse decoding of the symbols encoded with the
+ // edgebreaker method. The reverse decoding always keeps track of the active
+ // edge identified by its opposite corner (active corner). New faces are
+ // always added to this active edge. There may be multiple active corners at
+ // one time that either correspond to separate mesh components or to
+ // sub-components of one mesh that are going to be merged together using the
+ // TOPOLOGY_S symbol. We can store these active edges on a stack, because the
+ // decoder always processes only the latest active edge. TOPOLOGY_S then
+ // removes the top edge from the stack and TOPOLOGY_E adds a new edge to the
+ // stack.
+ std::vector<CornerIndex> active_corner_stack;
+
+ // Additional active edges may be added as a result of topology split events.
+ // They can be added in arbitrary order, but we always know the split symbol
+ // id they belong to, so we can address them using this symbol id.
+ std::unordered_map<int, CornerIndex> topology_split_active_corners;
+
+ // Vector used for storing vertices that were marked as isolated during the
+ // decoding process. Currently used only when the mesh doesn't contain any
+ // non-position connectivity data.
+ std::vector<VertexIndex> invalid_vertices;
+ const bool remove_invalid_vertices = attribute_data_.empty();
+
+ int max_num_vertices = static_cast<int>(is_vert_hole_.size());
+ int num_faces = 0;
+ for (int symbol_id = 0; symbol_id < num_symbols; ++symbol_id) {
+ const FaceIndex face(num_faces++);
+ // Used to flag cases where we need to look for topology split events.
+ bool check_topology_split = false;
+ const uint32_t symbol = traversal_decoder_.DecodeSymbol();
+ if (symbol == TOPOLOGY_C) {
+ // Create a new face between two edges on the open boundary.
+ // The first edge is opposite to the corner "a" from the image below.
+ // The other edge is opposite to the corner "b" that can be reached
+ // through a CCW traversal around the vertex "v".
+ // One new active boundary edge is created, opposite to the new corner
+ // "x".
+ //
+ // *-------*
+ // / \ / \
+ // / \ / \
+ // / \ / \
+ // *-------v-------*
+ // \b /x\ a/
+ // \ / \ /
+ // \ / C \ /
+ // *.......*
+
+ // Find the corner "b" from the corner "a" which is the corner on the
+ // top of the active stack.
+ if (active_corner_stack.empty()) {
+ return -1;
+ }
+
+ const CornerIndex corner_a = active_corner_stack.back();
+ const VertexIndex vertex_x =
+ corner_table_->Vertex(corner_table_->Next(corner_a));
+ const CornerIndex corner_b =
+ corner_table_->Next(corner_table_->LeftMostCorner(vertex_x));
+
+ // New tip corner.
+ const CornerIndex corner(3 * face.value());
+ // Update opposite corner mappings.
+ SetOppositeCorners(corner_a, corner + 1);
+ SetOppositeCorners(corner_b, corner + 2);
+
+ // Update vertex mapping.
+ const VertexIndex vert_a_prev =
+ corner_table_->Vertex(corner_table_->Previous(corner_a));
+ const VertexIndex vert_b_next =
+ corner_table_->Vertex(corner_table_->Next(corner_b));
+ if (vertex_x == vert_a_prev || vertex_x == vert_b_next) {
+ // Encoding is invalid, because face vertices are degenerate.
+ return -1;
+ }
+ corner_table_->MapCornerToVertex(corner, vertex_x);
+ corner_table_->MapCornerToVertex(corner + 1, vert_b_next);
+ corner_table_->MapCornerToVertex(corner + 2, vert_a_prev);
+ corner_table_->SetLeftMostCorner(vert_a_prev, corner + 2);
+ // Mark the vertex |x| as interior.
+ is_vert_hole_[vertex_x.value()] = false;
+ // Update the corner on the active stack.
+ active_corner_stack.back() = corner;
+ } else if (symbol == TOPOLOGY_R || symbol == TOPOLOGY_L) {
+ // Create a new face extending from the open boundary edge opposite to the
+ // corner "a" from the image below. Two new boundary edges are created
+ // opposite to corners "r" and "l". New active corner is set to either "r"
+ // or "l" depending on the decoded symbol. One new vertex is created
+ // at the opposite corner to corner "a".
+ // *-------*
+ // /a\ / \
+ // / \ / \
+ // / \ / \
+ // *-------v-------*
+ // .l r.
+ // . .
+ // . .
+ // *
+ if (active_corner_stack.empty()) {
+ return -1;
+ }
+ const CornerIndex corner_a = active_corner_stack.back();
+
+ // First corner on the new face is either corner "l" or "r".
+ const CornerIndex corner(3 * face.value());
+ CornerIndex opp_corner, corner_l, corner_r;
+ if (symbol == TOPOLOGY_R) {
+ // "r" is the new first corner.
+ opp_corner = corner + 2;
+ corner_l = corner + 1;
+ corner_r = corner;
+ } else {
+ // "l" is the new first corner.
+ opp_corner = corner + 1;
+ corner_l = corner;
+ corner_r = corner + 2;
+ }
+ SetOppositeCorners(opp_corner, corner_a);
+ // Update vertex mapping.
+ const VertexIndex new_vert_index = corner_table_->AddNewVertex();
+
+ if (corner_table_->num_vertices() > max_num_vertices) {
+ return -1; // Unexpected number of decoded vertices.
+ }
+
+ corner_table_->MapCornerToVertex(opp_corner, new_vert_index);
+ corner_table_->SetLeftMostCorner(new_vert_index, opp_corner);
+
+ const VertexIndex vertex_r =
+ corner_table_->Vertex(corner_table_->Previous(corner_a));
+ corner_table_->MapCornerToVertex(corner_r, vertex_r);
+ // Update left-most corner on the vertex on the |corner_r|.
+ corner_table_->SetLeftMostCorner(vertex_r, corner_r);
+
+ corner_table_->MapCornerToVertex(
+ corner_l, corner_table_->Vertex(corner_table_->Next(corner_a)));
+ active_corner_stack.back() = corner;
+ check_topology_split = true;
+ } else if (symbol == TOPOLOGY_S) {
+ // Create a new face that merges two last active edges from the active
+ // stack. No new vertex is created, but two vertices at corners "p" and
+ // "n" need to be merged into a single vertex.
+ //
+ // *-------v-------*
+ // \a p/x\n b/
+ // \ / \ /
+ // \ / S \ /
+ // *.......*
+ //
+ if (active_corner_stack.empty()) {
+ return -1;
+ }
+ const CornerIndex corner_b = active_corner_stack.back();
+ active_corner_stack.pop_back();
+
+ // Corner "a" can correspond either to a normal active edge, or to an edge
+ // created from the topology split event.
+ const auto it = topology_split_active_corners.find(symbol_id);
+ if (it != topology_split_active_corners.end()) {
+ // Topology split event. Move the retrieved edge to the stack.
+ active_corner_stack.push_back(it->second);
+ }
+ if (active_corner_stack.empty()) {
+ return -1;
+ }
+ const CornerIndex corner_a = active_corner_stack.back();
+
+ if (corner_table_->Opposite(corner_a) != kInvalidCornerIndex ||
+ corner_table_->Opposite(corner_b) != kInvalidCornerIndex) {
+ // One of the corners is already opposite to an existing face, which
+ // should not happen unless the input was tempered with.
+ return -1;
+ }
+
+ // First corner on the new face is corner "x" from the image above.
+ const CornerIndex corner(3 * face.value());
+ // Update the opposite corner mapping.
+ SetOppositeCorners(corner_a, corner + 2);
+ SetOppositeCorners(corner_b, corner + 1);
+ // Update vertices. For the vertex at corner "x", use the vertex id from
+ // the corner "p".
+ const VertexIndex vertex_p =
+ corner_table_->Vertex(corner_table_->Previous(corner_a));
+ corner_table_->MapCornerToVertex(corner, vertex_p);
+ corner_table_->MapCornerToVertex(
+ corner + 1, corner_table_->Vertex(corner_table_->Next(corner_a)));
+ const VertexIndex vert_b_prev =
+ corner_table_->Vertex(corner_table_->Previous(corner_b));
+ corner_table_->MapCornerToVertex(corner + 2, vert_b_prev);
+ corner_table_->SetLeftMostCorner(vert_b_prev, corner + 2);
+ CornerIndex corner_n = corner_table_->Next(corner_b);
+ const VertexIndex vertex_n = corner_table_->Vertex(corner_n);
+ traversal_decoder_.MergeVertices(vertex_p, vertex_n);
+ // Update the left most corner on the newly merged vertex.
+ corner_table_->SetLeftMostCorner(vertex_p,
+ corner_table_->LeftMostCorner(vertex_n));
+
+ // Also update the vertex id at corner "n" and all corners that are
+ // connected to it in the CCW direction.
+ while (corner_n != kInvalidCornerIndex) {
+ corner_table_->MapCornerToVertex(corner_n, vertex_p);
+ corner_n = corner_table_->SwingLeft(corner_n);
+ }
+ // Make sure the old vertex n is now mapped to an invalid corner (make it
+ // isolated).
+ corner_table_->MakeVertexIsolated(vertex_n);
+ if (remove_invalid_vertices) {
+ invalid_vertices.push_back(vertex_n);
+ }
+ active_corner_stack.back() = corner;
+ } else if (symbol == TOPOLOGY_E) {
+ const CornerIndex corner(3 * face.value());
+ const VertexIndex first_vert_index = corner_table_->AddNewVertex();
+ // Create three new vertices at the corners of the new face.
+ corner_table_->MapCornerToVertex(corner, first_vert_index);
+ corner_table_->MapCornerToVertex(corner + 1,
+ corner_table_->AddNewVertex());
+ corner_table_->MapCornerToVertex(corner + 2,
+ corner_table_->AddNewVertex());
+
+ if (corner_table_->num_vertices() > max_num_vertices) {
+ return -1; // Unexpected number of decoded vertices.
+ }
+
+ corner_table_->SetLeftMostCorner(first_vert_index, corner);
+ corner_table_->SetLeftMostCorner(first_vert_index + 1, corner + 1);
+ corner_table_->SetLeftMostCorner(first_vert_index + 2, corner + 2);
+ // Add the tip corner to the active stack.
+ active_corner_stack.push_back(corner);
+ check_topology_split = true;
+ } else {
+ // Error. Unknown symbol decoded.
+ return -1;
+ }
+ // Inform the traversal decoder that a new corner has been reached.
+ traversal_decoder_.NewActiveCornerReached(active_corner_stack.back());
+
+ if (check_topology_split) {
+ // Check for topology splits happens only for TOPOLOGY_L, TOPOLOGY_R and
+ // TOPOLOGY_E symbols because those are the symbols that correspond to
+ // faces that can be directly connected a TOPOLOGY_S face through the
+ // topology split event.
+ // If a topology split is detected, we need to add a new active edge
+ // onto the active_corner_stack because it will be used later when the
+ // corresponding TOPOLOGY_S event is decoded.
+
+ // Symbol id used by the encoder (reverse).
+ const int encoder_symbol_id = num_symbols - symbol_id - 1;
+ EdgeFaceName split_edge;
+ int encoder_split_symbol_id;
+ while (IsTopologySplit(encoder_symbol_id, &split_edge,
+ &encoder_split_symbol_id)) {
+ if (encoder_split_symbol_id < 0) {
+ return -1; // Wrong split symbol id.
+ }
+ // Symbol was part of a topology split. Now we need to determine which
+ // edge should be added to the active edges stack.
+ const CornerIndex act_top_corner = active_corner_stack.back();
+ // The current symbol has one active edge (stored in act_top_corner) and
+ // two remaining inactive edges that are attached to it.
+ // *
+ // / \
+ // left_edge / \ right_edge
+ // / \
+ // *.......*
+ // active_edge
+
+ CornerIndex new_active_corner;
+ if (split_edge == RIGHT_FACE_EDGE) {
+ new_active_corner = corner_table_->Next(act_top_corner);
+ } else {
+ new_active_corner = corner_table_->Previous(act_top_corner);
+ }
+ // Add the new active edge.
+ // Convert the encoder split symbol id to decoder symbol id.
+ const int decoder_split_symbol_id =
+ num_symbols - encoder_split_symbol_id - 1;
+ topology_split_active_corners[decoder_split_symbol_id] =
+ new_active_corner;
+ }
+ }
+ }
+ if (corner_table_->num_vertices() > max_num_vertices) {
+ return -1; // Unexpected number of decoded vertices.
+ }
+ // Decode start faces and connect them to the faces from the active stack.
+ while (active_corner_stack.size() > 0) {
+ const CornerIndex corner = active_corner_stack.back();
+ active_corner_stack.pop_back();
+ const bool interior_face =
+ traversal_decoder_.DecodeStartFaceConfiguration();
+ if (interior_face) {
+ // The start face is interior, we need to find three corners that are
+ // opposite to it. The first opposite corner "a" is the corner from the
+ // top of the active corner stack and the remaining two corners "b" and
+ // "c" are then the next corners from the left-most corners of vertices
+ // "n" and "x" respectively.
+ //
+ // *-------*
+ // / \ / \
+ // / \ / \
+ // / \ / \
+ // *-------p-------*
+ // / \a . . c/ \
+ // / \ . . / \
+ // / \ . I . / \
+ // *-------n.......x------*
+ // \ / \ / \ /
+ // \ / \ / \ /
+ // \ / \b/ \ /
+ // *-------*-------*
+ //
+
+ if (num_faces >= corner_table_->num_faces()) {
+ return -1; // More faces than expected added to the mesh.
+ }
+
+ const CornerIndex corner_a = corner;
+ const VertexIndex vert_n =
+ corner_table_->Vertex(corner_table_->Next(corner_a));
+ const CornerIndex corner_b =
+ corner_table_->Next(corner_table_->LeftMostCorner(vert_n));
+
+ const VertexIndex vert_x =
+ corner_table_->Vertex(corner_table_->Next(corner_b));
+ const CornerIndex corner_c =
+ corner_table_->Next(corner_table_->LeftMostCorner(vert_x));
+
+ const VertexIndex vert_p =
+ corner_table_->Vertex(corner_table_->Next(corner_c));
+
+ const FaceIndex face(num_faces++);
+ // The first corner of the initial face is the corner opposite to "a".
+ const CornerIndex new_corner(3 * face.value());
+ SetOppositeCorners(new_corner, corner);
+ SetOppositeCorners(new_corner + 1, corner_b);
+ SetOppositeCorners(new_corner + 2, corner_c);
+
+ // Map new corners to existing vertices.
+ corner_table_->MapCornerToVertex(new_corner, vert_x);
+ corner_table_->MapCornerToVertex(new_corner + 1, vert_p);
+ corner_table_->MapCornerToVertex(new_corner + 2, vert_n);
+
+ // Mark all three vertices as interior.
+ for (int ci = 0; ci < 3; ++ci) {
+ is_vert_hole_[corner_table_->Vertex(new_corner + ci).value()] = false;
+ }
+
+ init_face_configurations_.push_back(true);
+ init_corners_.push_back(new_corner);
+ } else {
+ // The initial face wasn't interior and the traversal had to start from
+ // an open boundary. In this case no new face is added, but we need to
+ // keep record about the first opposite corner to this boundary.
+ init_face_configurations_.push_back(false);
+ init_corners_.push_back(corner);
+ }
+ }
+ if (num_faces != corner_table_->num_faces()) {
+ return -1; // Unexpected number of decoded faces.
+ }
+
+ int num_vertices = corner_table_->num_vertices();
+ // If any vertex was marked as isolated, we want to remove it from the corner
+ // table to ensure that all vertices in range <0, num_vertices> are valid.
+ for (const VertexIndex invalid_vert : invalid_vertices) {
+ // Find the last valid vertex and swap it with the isolated vertex.
+ VertexIndex src_vert(num_vertices - 1);
+ while (corner_table_->LeftMostCorner(src_vert) == kInvalidCornerIndex) {
+ // The last vertex is invalid, proceed to the previous one.
+ src_vert = VertexIndex(--num_vertices - 1);
+ }
+ if (src_vert < invalid_vert) {
+ continue; // No need to swap anything.
+ }
+
+ // Remap all corners mapped to |src_vert| to |invalid_vert|.
+ VertexCornersIterator<CornerTable> vcit(corner_table_.get(), src_vert);
+ for (; !vcit.End(); ++vcit) {
+ const CornerIndex cid = vcit.Corner();
+ corner_table_->MapCornerToVertex(cid, invalid_vert);
+ }
+ corner_table_->SetLeftMostCorner(invalid_vert,
+ corner_table_->LeftMostCorner(src_vert));
+
+ // Make the |src_vert| invalid.
+ corner_table_->MakeVertexIsolated(src_vert);
+ is_vert_hole_[invalid_vert.value()] = is_vert_hole_[src_vert.value()];
+ is_vert_hole_[src_vert.value()] = false;
+
+ // The last vertex is now invalid.
+ num_vertices--;
+ }
+ return num_vertices;
+}
+
+// Parses hole and topology split events from |decoder_buffer| and caches them
+// in |topology_split_data_| and |hole_event_data_| for later use during
+// connectivity decoding. Returns the number of parsed bytes, or -1 on error.
+template <class TraversalDecoder>
+int32_t
+MeshEdgebreakerDecoderImpl<TraversalDecoder>::DecodeHoleAndTopologySplitEvents(
+    DecoderBuffer *decoder_buffer) {
+  // Prepare a new decoder from the provided buffer offset.
+  uint32_t num_topology_splits;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+    // Legacy bitstreams store the count as a raw uint32.
+    if (!decoder_buffer->Decode(&num_topology_splits)) {
+      return -1;
+    }
+
+  } else
+#endif
+  {
+    if (!DecodeVarint(&num_topology_splits, decoder_buffer)) {
+      return -1;
+    }
+  }
+  if (num_topology_splits > 0) {
+    // Sanity check: there cannot be more split events than faces.
+    if (num_topology_splits >
+        static_cast<uint32_t>(corner_table_->num_faces())) {
+      return -1;
+    }
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+    if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(1, 2)) {
+      // Legacy encoding: ids and the source edge byte are stored directly.
+      for (uint32_t i = 0; i < num_topology_splits; ++i) {
+        TopologySplitEventData event_data;
+        if (!decoder_buffer->Decode(&event_data.split_symbol_id)) {
+          return -1;
+        }
+        if (!decoder_buffer->Decode(&event_data.source_symbol_id)) {
+          return -1;
+        }
+        uint8_t edge_data;
+        if (!decoder_buffer->Decode(&edge_data)) {
+          return -1;
+        }
+        event_data.source_edge = edge_data & 1;
+        topology_split_data_.push_back(event_data);
+      }
+
+    } else
+#endif
+    {
+      // Decode source and split symbol ids using delta and varint coding. See
+      // description in mesh_edgebreaker_encoder_impl.cc for more details.
+      int last_source_symbol_id = 0;
+      for (uint32_t i = 0; i < num_topology_splits; ++i) {
+        TopologySplitEventData event_data;
+        uint32_t delta;
+        if (!DecodeVarint<uint32_t>(&delta, decoder_buffer)) {
+          return -1;
+        }
+        event_data.source_symbol_id = delta + last_source_symbol_id;
+        if (!DecodeVarint<uint32_t>(&delta, decoder_buffer)) {
+          return -1;
+        }
+        // A delta larger than the source id would produce a negative split
+        // symbol id, which can only come from corrupted input.
+        if (delta > event_data.source_symbol_id) {
+          return -1;
+        }
+        event_data.split_symbol_id =
+            event_data.source_symbol_id - static_cast<int32_t>(delta);
+        last_source_symbol_id = event_data.source_symbol_id;
+        topology_split_data_.push_back(event_data);
+      }
+      // Split edges are decoded from a direct bit decoder.
+      // BUGFIX: the return values of the bit decoding calls were previously
+      // ignored, allowing a truncated buffer to go unnoticed.
+      if (!decoder_buffer->StartBitDecoding(false, nullptr)) {
+        return -1;
+      }
+      for (uint32_t i = 0; i < num_topology_splits; ++i) {
+        uint32_t edge_data;
+        if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+          if (!decoder_buffer->DecodeLeastSignificantBits32(2, &edge_data)) {
+            return -1;
+          }
+        } else {
+          if (!decoder_buffer->DecodeLeastSignificantBits32(1, &edge_data)) {
+            return -1;
+          }
+        }
+        TopologySplitEventData &event_data = topology_split_data_[i];
+        event_data.source_edge = edge_data & 1;
+      }
+      decoder_buffer->EndBitDecoding();
+    }
+  }
+  uint32_t num_hole_events = 0;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
+    if (!decoder_buffer->Decode(&num_hole_events)) {
+      return -1;
+    }
+  } else if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 1)) {
+    if (!DecodeVarint(&num_hole_events, decoder_buffer)) {
+      return -1;
+    }
+  }
+#endif
+  if (num_hole_events > 0) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+    if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(1, 2)) {
+      for (uint32_t i = 0; i < num_hole_events; ++i) {
+        HoleEventData event_data;
+        if (!decoder_buffer->Decode(&event_data)) {
+          return -1;
+        }
+        hole_event_data_.push_back(event_data);
+      }
+
+    } else
+#endif
+    {
+      // Decode hole symbol ids using delta and varint coding.
+      int last_symbol_id = 0;
+      for (uint32_t i = 0; i < num_hole_events; ++i) {
+        HoleEventData event_data;
+        uint32_t delta;
+        if (!DecodeVarint<uint32_t>(&delta, decoder_buffer)) {
+          return -1;
+        }
+        event_data.symbol_id = delta + last_symbol_id;
+        last_symbol_id = event_data.symbol_id;
+        hole_event_data_.push_back(event_data);
+      }
+    }
+  }
+  return static_cast<int32_t>(decoder_buffer->decoded_size());
+}
+
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<TraversalDecoder>::
+    DecodeAttributeConnectivitiesOnFaceLegacy(CornerIndex corner) {
+  // All three corners of the processed face.
+  const CornerIndex face_corners[3] = {corner, corner_table_->Next(corner),
+                                       corner_table_->Previous(corner)};
+
+  for (const CornerIndex face_corner : face_corners) {
+    const CornerIndex opp_corner = corner_table_->Opposite(face_corner);
+    if (opp_corner == kInvalidCornerIndex) {
+      // Boundary edge: every boundary edge is implicitly an attribute seam,
+      // so record it for all attributes without consuming any encoded bits.
+      for (uint32_t att = 0; att < attribute_data_.size(); ++att) {
+        attribute_data_[att].attribute_seam_corners.push_back(
+            face_corner.value());
+      }
+      continue;
+    }
+
+    // Interior edge: one encoded seam bit is consumed per attribute.
+    for (uint32_t att = 0; att < attribute_data_.size(); ++att) {
+      if (traversal_decoder_.DecodeAttributeSeam(att)) {
+        attribute_data_[att].attribute_seam_corners.push_back(
+            face_corner.value());
+      }
+    }
+  }
+  return true;
+}
+#endif
+
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<
+    TraversalDecoder>::DecodeAttributeConnectivitiesOnFace(CornerIndex corner) {
+  // All three corners of the processed face.
+  const CornerIndex face_corners[3] = {corner, corner_table_->Next(corner),
+                                       corner_table_->Previous(corner)};
+
+  const FaceIndex src_face_id = corner_table_->Face(corner);
+  for (const CornerIndex face_corner : face_corners) {
+    const CornerIndex opp_corner = corner_table_->Opposite(face_corner);
+    if (opp_corner == kInvalidCornerIndex) {
+      // Boundary edge: implicitly an attribute seam for every attribute; no
+      // bits are consumed from the traversal decoder.
+      for (uint32_t att = 0; att < attribute_data_.size(); ++att) {
+        attribute_data_[att].attribute_seam_corners.push_back(
+            face_corner.value());
+      }
+      continue;
+    }
+    // Skip edges whose opposite face has already been processed; the seam
+    // information was decoded when that face was visited.
+    if (corner_table_->Face(opp_corner) < src_face_id) {
+      continue;
+    }
+
+    // Interior edge visited for the first time: one seam bit per attribute.
+    for (uint32_t att = 0; att < attribute_data_.size(); ++att) {
+      if (traversal_decoder_.DecodeAttributeSeam(att)) {
+        attribute_data_[att].attribute_seam_corners.push_back(
+            face_corner.value());
+      }
+    }
+  }
+  return true;
+}
+
+// Builds the mapping between mesh corners and final point ids, deduplicating
+// per-corner attribute values into shared points, and writes the resulting
+// faces into the decoder's mesh. |num_connectivity_verts| is the number of
+// vertices produced by connectivity decoding. Returns false on corrupted
+// connectivity data.
+template <class TraversalDecoder>
+bool MeshEdgebreakerDecoderImpl<TraversalDecoder>::AssignPointsToCorners(
+    int num_connectivity_verts) {
+  // Map between the existing and deduplicated point ids.
+  // Note that at this point we have one point id for each corner of the
+  // mesh so there is corner_table_->num_corners() point ids.
+  decoder_->mesh()->SetNumFaces(corner_table_->num_faces());
+
+  if (attribute_data_.empty()) {
+    // We have connectivity for position only. In this case all vertex indices
+    // are equal to point indices.
+    for (FaceIndex f(0); f < decoder_->mesh()->num_faces(); ++f) {
+      Mesh::Face face;
+      const CornerIndex start_corner(3 * f.value());
+      for (int c = 0; c < 3; ++c) {
+        // Get the vertex index on the corner and use it as a point index.
+        const int32_t vert_id = corner_table_->Vertex(start_corner + c).value();
+        face[c] = vert_id;
+      }
+      decoder_->mesh()->SetFace(f, face);
+    }
+    decoder_->point_cloud()->set_num_points(num_connectivity_verts);
+    return true;
+  }
+  // Else we need to deduplicate multiple attributes.
+
+  // Map between point id and an associated corner id. Only one corner for
+  // each point is stored. The corners are used to sample the attribute values
+  // in the last stage of the deduplication.
+  std::vector<int32_t> point_to_corner_map;
+  // Map between every corner and their new point ids.
+  std::vector<int32_t> corner_to_point_map(corner_table_->num_corners());
+  for (int v = 0; v < corner_table_->num_vertices(); ++v) {
+    CornerIndex c = corner_table_->LeftMostCorner(VertexIndex(v));
+    if (c == kInvalidCornerIndex) {
+      continue;  // Isolated vertex.
+    }
+    CornerIndex deduplication_first_corner = c;
+    if (is_vert_hole_[v]) {
+      // If the vertex is on a boundary, start deduplication from the left most
+      // corner that is guaranteed to lie on the boundary.
+      // (Redundant with the initializer above; kept for symmetry with the
+      // interior-vertex branch below.)
+      deduplication_first_corner = c;
+    } else {
+      // If we are not on the boundary we need to find the first seam (of any
+      // attribute).
+      for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+        if (!attribute_data_[i].connectivity_data.IsCornerOnSeam(c)) {
+          continue;  // No seam for this attribute, ignore it.
+        }
+        // Else there needs to be at least one seam edge.
+
+        // At this point, we use identity mapping between corners and point ids.
+        const VertexIndex vert_id =
+            attribute_data_[i].connectivity_data.Vertex(c);
+        CornerIndex act_c = corner_table_->SwingRight(c);
+        bool seam_found = false;
+        while (act_c != c) {
+          // Swinging around an interior vertex must come back to |c|; hitting
+          // an invalid corner indicates corrupted connectivity data.
+          if (act_c == kInvalidCornerIndex) {
+            return false;
+          }
+          if (attribute_data_[i].connectivity_data.Vertex(act_c) != vert_id) {
+            // Attribute seam found. Stop.
+            deduplication_first_corner = act_c;
+            seam_found = true;
+            break;
+          }
+          act_c = corner_table_->SwingRight(act_c);
+        }
+        if (seam_found) {
+          break;  // No reason to process other attributes if we found a seam.
+        }
+      }
+    }
+
+    // Do a deduplication pass over the corners on the processed vertex.
+    // At this point each corner corresponds to one point id and our goal is to
+    // merge similar points into a single point id.
+    // We do a single pass in a clockwise direction over the corners and we add
+    // a new point id whenever one of the attributes change.
+    c = deduplication_first_corner;
+    // Create a new point.
+    corner_to_point_map[c.value()] =
+        static_cast<uint32_t>(point_to_corner_map.size());
+    point_to_corner_map.push_back(c.value());
+    // Traverse in CW direction.
+    CornerIndex prev_c = c;
+    c = corner_table_->SwingRight(c);
+    while (c != kInvalidCornerIndex && c != deduplication_first_corner) {
+      bool attribute_seam = false;
+      for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+        if (attribute_data_[i].connectivity_data.Vertex(c) !=
+            attribute_data_[i].connectivity_data.Vertex(prev_c)) {
+          // Attribute index changed from the previous corner. We need to add a
+          // new point here.
+          attribute_seam = true;
+          break;
+        }
+      }
+      if (attribute_seam) {
+        corner_to_point_map[c.value()] =
+            static_cast<uint32_t>(point_to_corner_map.size());
+        point_to_corner_map.push_back(c.value());
+      } else {
+        corner_to_point_map[c.value()] = corner_to_point_map[prev_c.value()];
+      }
+      prev_c = c;
+      c = corner_table_->SwingRight(c);
+    }
+  }
+  // Add faces.
+  for (FaceIndex f(0); f < decoder_->mesh()->num_faces(); ++f) {
+    Mesh::Face face;
+    for (int c = 0; c < 3; ++c) {
+      // Remap old points to the new ones.
+      face[c] = corner_to_point_map[3 * f.value() + c];
+    }
+    decoder_->mesh()->SetFace(f, face);
+  }
+  decoder_->point_cloud()->set_num_points(
+      static_cast<uint32_t>(point_to_corner_map.size()));
+  return true;
+}
+
+// Explicit template instantiations for the traversal decoders supported by
+// the bitstream. The predictive decoder is only built with backwards
+// compatibility support.
+template class MeshEdgebreakerDecoderImpl<MeshEdgebreakerTraversalDecoder>;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+template class MeshEdgebreakerDecoderImpl<
+    MeshEdgebreakerTraversalPredictiveDecoder>;
+#endif
+template class MeshEdgebreakerDecoderImpl<
+    MeshEdgebreakerTraversalValenceDecoder>;
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.h
new file mode 100644
index 0000000..78053f9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.h
@@ -0,0 +1,228 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_IMPL_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_IMPL_H_
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
+#include "draco/compression/mesh/mesh_edgebreaker_shared.h"
+#include "draco/compression/mesh/traverser/mesh_traversal_sequencer.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/draco_features.h"
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
+// Implementation of the edgebreaker decoder that decodes data encoded with the
+// MeshEdgebreakerEncoderImpl class. The implementation of the decoder is based
+// on the algorithm presented in Isenburg et al'02 "Spirale Reversi: Reverse
+// decoding of the Edgebreaker encoding". Note that the encoding is still based
+// on the standard edgebreaker method as presented in "3D Compression
+// Made Simple: Edgebreaker on a Corner-Table" by Rossignac at al.'01.
+// http://www.cc.gatech.edu/~jarek/papers/CornerTableSMI.pdf. One difference is
+// caused by the properties of the spirale reversi algorithm that decodes the
+// symbols from the last one to the first one. To make the decoding more
+// efficient, we encode all symbols in the reverse order, therefore the decoder
+// can process them one by one.
+// The main advantage of the spirale reversi method is that the partially
+// decoded mesh has valid connectivity data at any time during the decoding
+// process (valid with respect to the decoded portion of the mesh). The standard
+// Edgebreaker decoder used two passes (forward decoding + zipping) which not
+// only prevented us from having a valid connectivity but it was also slower.
+// The main benefit of having the valid connectivity is that we can use the
+// known connectivity to predict encoded symbols that can improve the
+// compression rate.
+template <class TraversalDecoderT>
+class MeshEdgebreakerDecoderImpl : public MeshEdgebreakerDecoderImplInterface {
+ public:
+  MeshEdgebreakerDecoderImpl();
+  // See MeshEdgebreakerDecoderImplInterface for the contract of the
+  // overridden methods below.
+  bool Init(MeshEdgebreakerDecoder *decoder) override;
+
+  const MeshAttributeCornerTable *GetAttributeCornerTable(
+      int att_id) const override;
+  const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
+      int att_id) const override;
+
+  bool CreateAttributesDecoder(int32_t att_decoder_id) override;
+  bool DecodeConnectivity() override;
+  bool OnAttributesDecoded() override;
+  MeshEdgebreakerDecoder *GetDecoder() const override { return decoder_; }
+  const CornerTable *GetCornerTable() const override {
+    return corner_table_.get();
+  }
+
+ private:
+  // Creates a vertex traversal sequencer for the specified |TraverserT| type.
+  template <class TraverserT>
+  std::unique_ptr<PointsSequencer> CreateVertexTraversalSequencer(
+      MeshAttributeIndicesEncodingData *encoding_data);
+
+  // Decodes connectivity between vertices (vertex indices).
+  // Returns the number of vertices created by the decoder or -1 on error.
+  int DecodeConnectivity(int num_symbols);
+
+  // Returns true if the current symbol was part of a topology split event. This
+  // means that the current face was connected to the left edge of a face
+  // encoded with the TOPOLOGY_S symbol. |out_symbol_edge| can be used to
+  // identify which edge of the source symbol was connected to the TOPOLOGY_S
+  // symbol.
+  bool IsTopologySplit(int encoder_symbol_id, EdgeFaceName *out_face_edge,
+                       int *out_encoder_split_symbol_id) {
+    if (topology_split_data_.size() == 0) {
+      return false;
+    }
+    if (topology_split_data_.back().source_symbol_id >
+        static_cast<uint32_t>(encoder_symbol_id)) {
+      // Something is wrong; if the desired source symbol is greater than the
+      // current encoder_symbol_id, we missed it, or the input was tampered
+      // (|encoder_symbol_id| keeps decreasing).
+      // Return invalid symbol id to notify the decoder that there was an
+      // error.
+      *out_encoder_split_symbol_id = -1;
+      return true;
+    }
+    if (topology_split_data_.back().source_symbol_id != encoder_symbol_id) {
+      return false;
+    }
+    *out_face_edge =
+        static_cast<EdgeFaceName>(topology_split_data_.back().source_edge);
+    *out_encoder_split_symbol_id = topology_split_data_.back().split_symbol_id;
+    // Remove the latest split event.
+    topology_split_data_.pop_back();
+    return true;
+  }
+
+  // Decodes event data for hole and topology split events and stores them for
+  // future use.
+  // Returns the number of parsed bytes, or -1 on error.
+  int32_t DecodeHoleAndTopologySplitEvents(DecoderBuffer *decoder_buffer);
+
+  // Decodes all non-position attribute connectivity on the currently
+  // processed face.
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+  bool DecodeAttributeConnectivitiesOnFaceLegacy(CornerIndex corner);
+#endif
+  bool DecodeAttributeConnectivitiesOnFace(CornerIndex corner);
+
+  // Initializes mapping between corners and point ids.
+  bool AssignPointsToCorners(int num_connectivity_verts);
+
+  // Returns true if the face containing |corner_id| was already visited.
+  bool IsFaceVisited(CornerIndex corner_id) const {
+    if (corner_id < 0) {
+      return true;  // Invalid corner signalizes that the face does not exist.
+    }
+    return visited_faces_[corner_table_->Face(corner_id).value()];
+  }
+
+  // Links |corner_0| and |corner_1| as mutually opposite corners.
+  void SetOppositeCorners(CornerIndex corner_0, CornerIndex corner_1) {
+    corner_table_->SetOppositeCorner(corner_0, corner_1);
+    corner_table_->SetOppositeCorner(corner_1, corner_0);
+  }
+
+  MeshEdgebreakerDecoder *decoder_;
+
+  std::unique_ptr<CornerTable> corner_table_;
+
+  // Stack used for storing corners that need to be traversed when decoding
+  // mesh vertices. New corner is added for each initial face and a split
+  // symbol, and one corner is removed when the end symbol is reached.
+  // Stored as member variable to prevent frequent memory reallocations when
+  // handling meshes with lots of disjoint components. Originally, we used
+  // recursive functions to handle this behavior, but that can cause stack
+  // memory overflow when compressing huge meshes.
+  std::vector<CornerIndex> corner_traversal_stack_;
+
+  // Stores the number of vertices visited during each mesh traversal.
+  std::vector<int> vertex_traversal_length_;
+
+  // List of decoded topology split events.
+  std::vector<TopologySplitEventData> topology_split_data_;
+
+  // List of decoded hole events.
+  std::vector<HoleEventData> hole_event_data_;
+
+  // Configuration of the initial face for each mesh component.
+  std::vector<bool> init_face_configurations_;
+
+  // Initial corner for each traversal.
+  std::vector<CornerIndex> init_corners_;
+
+  // Id of the last processed input symbol.
+  int last_symbol_id_;
+
+  // Id of the last decoded vertex.
+  int last_vert_id_;
+
+  // Id of the last decoded face.
+  int last_face_id_;
+
+  // Array for marking visited faces.
+  std::vector<bool> visited_faces_;
+  // Array for marking visited vertices.
+  std::vector<bool> visited_verts_;
+  // Array for marking vertices on open boundaries.
+  std::vector<bool> is_vert_hole_;
+
+  // The number of new vertices added by the encoder (because of non-manifold
+  // vertices on the input mesh).
+  // If there are no non-manifold edges/vertices on the input mesh, this should
+  // be 0.
+  int num_new_vertices_;
+  // For every newly added vertex, this array stores its mapping to the
+  // parent vertex id of the encoded mesh.
+  std::unordered_map<int, int> new_to_parent_vertex_map_;
+  // The number of vertices that were encoded (can be different from the number
+  // of vertices of the input mesh).
+  int num_encoded_vertices_;
+
+  // Array for storing the encoded corner ids in the order their associated
+  // vertices were decoded.
+  std::vector<int32_t> processed_corner_ids_;
+
+  // Array storing corners in the order they were visited during the
+  // connectivity decoding (always storing the tip corner of each newly visited
+  // face).
+  std::vector<int> processed_connectivity_corners_;
+
+  MeshAttributeIndicesEncodingData pos_encoding_data_;
+
+  // Id of an attributes decoder that uses |pos_encoding_data_|.
+  int pos_data_decoder_id_;
+
+  // Data for non-position attributes used by the decoder.
+  struct AttributeData {
+    AttributeData() : decoder_id(-1), is_connectivity_used(true) {}
+    // Id of the attribute decoder that was used to decode this attribute data.
+    int decoder_id;
+    MeshAttributeCornerTable connectivity_data;
+    // Flag that can mark the connectivity_data invalid. In such case the base
+    // corner table of the mesh should be used instead.
+    bool is_connectivity_used;
+    MeshAttributeIndicesEncodingData encoding_data;
+    // Opposite corners to attribute seam edges.
+    std::vector<int32_t> attribute_seam_corners;
+  };
+  std::vector<AttributeData> attribute_data_;
+
+  TraversalDecoderT traversal_decoder_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_IMPL_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h
new file mode 100644
index 0000000..31fabf2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h
@@ -0,0 +1,47 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_IMPL_INTERFACE_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_IMPL_INTERFACE_H_
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
+// Forward declaration is necessary here to avoid circular dependencies.
+class MeshEdgebreakerDecoder;
+
+// Abstract interface used by MeshEdgebreakerDecoder to interact with the actual
+// implementation of the edgebreaker decoding method.
+class MeshEdgebreakerDecoderImplInterface {
+ public:
+  virtual ~MeshEdgebreakerDecoderImplInterface() = default;
+  // Binds the implementation to its owning |decoder|.
+  virtual bool Init(MeshEdgebreakerDecoder *decoder) = 0;
+
+  // Accessors for per-attribute connectivity and encoding data.
+  // NOTE(review): presumably these may return nullptr for attributes that use
+  // the base corner table — confirm against implementations.
+  virtual const MeshAttributeCornerTable *GetAttributeCornerTable(
+      int att_id) const = 0;
+  virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
+      int att_id) const = 0;
+  virtual bool CreateAttributesDecoder(int32_t att_decoder_id) = 0;
+  virtual bool DecodeConnectivity() = 0;
+  virtual bool OnAttributesDecoded() = 0;
+
+  virtual MeshEdgebreakerDecoder *GetDecoder() const = 0;
+  virtual const CornerTable *GetCornerTable() const = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_DECODER_IMPL_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc
new file mode 100644
index 0000000..5aff5d8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc
@@ -0,0 +1,195 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_edgebreaker_encoder.h"
+
+#include "draco/compression/mesh/mesh_edgebreaker_encoder_impl.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
+
+namespace draco {
+
+// Default construction; the concrete implementation is selected later in
+// InitializeEncoder().
+MeshEdgebreakerEncoder::MeshEdgebreakerEncoder() {}
+
+// Selects the edgebreaker variant (standard or valence-based) from encoder
+// options, speed and mesh size, records the selected method id in the output
+// buffer, and initializes the implementation. Returns false if no supported
+// method could be selected or initialization failed.
+bool MeshEdgebreakerEncoder::InitializeEncoder() {
+  const bool is_standard_edgebreaker_available =
+      options()->IsFeatureSupported(features::kEdgebreaker);
+  const bool is_predictive_edgebreaker_available =
+      options()->IsFeatureSupported(features::kPredictiveEdgebreaker);
+
+  impl_ = nullptr;
+  // For tiny meshes it's usually better to use the basic edgebreaker as the
+  // overhead of the predictive one may turn out to be too big.
+  // TODO(b/111065939): Check if this can be improved.
+  const bool is_tiny_mesh = mesh()->num_faces() < 1000;
+
+  int selected_edgebreaker_method =
+      options()->GetGlobalInt("edgebreaker_method", -1);
+  if (selected_edgebreaker_method == -1) {
+    // No method was forced via options; pick one automatically.
+    if (is_standard_edgebreaker_available &&
+        (options()->GetSpeed() >= 5 || !is_predictive_edgebreaker_available ||
+         is_tiny_mesh)) {
+      selected_edgebreaker_method = MESH_EDGEBREAKER_STANDARD_ENCODING;
+    } else {
+      selected_edgebreaker_method = MESH_EDGEBREAKER_VALENCE_ENCODING;
+    }
+  }
+
+  if (selected_edgebreaker_method == MESH_EDGEBREAKER_STANDARD_ENCODING) {
+    if (is_standard_edgebreaker_available) {
+      // The method id byte is written before any connectivity data.
+      buffer()->Encode(
+          static_cast<uint8_t>(MESH_EDGEBREAKER_STANDARD_ENCODING));
+      impl_ = std::unique_ptr<MeshEdgebreakerEncoderImplInterface>(
+          new MeshEdgebreakerEncoderImpl<MeshEdgebreakerTraversalEncoder>());
+    }
+  } else if (selected_edgebreaker_method == MESH_EDGEBREAKER_VALENCE_ENCODING) {
+    buffer()->Encode(static_cast<uint8_t>(MESH_EDGEBREAKER_VALENCE_ENCODING));
+    impl_ = std::unique_ptr<MeshEdgebreakerEncoderImplInterface>(
+        new MeshEdgebreakerEncoderImpl<
+            MeshEdgebreakerTraversalValenceEncoder>());
+  }
+  // |impl_| stays null when an unsupported method id was requested or the
+  // standard method was selected but not available.
+  if (!impl_) {
+    return false;
+  }
+  if (!impl_->Init(this)) {
+    return false;
+  }
+  return true;
+}
+
+bool MeshEdgebreakerEncoder::GenerateAttributesEncoder(int32_t att_id) {
+  // Forward the request to the active edgebreaker implementation and report
+  // its result directly.
+  return impl_->GenerateAttributesEncoder(att_id);
+}
+
+bool MeshEdgebreakerEncoder::EncodeAttributesEncoderIdentifier(
+    int32_t att_encoder_id) {
+  // Forward the request to the active edgebreaker implementation and report
+  // its result directly.
+  return impl_->EncodeAttributesEncoderIdentifier(att_encoder_id);
+}
+
+// Delegates connectivity encoding to the selected edgebreaker implementation.
+Status MeshEdgebreakerEncoder::EncodeConnectivity() {
+  return impl_->EncodeConnectivity();
+}
+
+// Computes the number of points that will be encoded: all non-isolated
+// vertices plus the extra points introduced by interior attribute seams
+// (replicating the decoder's deduplication), and stores the result via
+// set_num_encoded_points(). No-op when the implementation or its corner
+// table is not available.
+void MeshEdgebreakerEncoder::ComputeNumberOfEncodedPoints() {
+  if (!impl_) {
+    return;
+  }
+  const CornerTable *const corner_table = impl_->GetCornerTable();
+  if (!corner_table) {
+    return;
+  }
+  // Start from the number of valid (non-isolated) vertices.
+  size_t num_points =
+      corner_table->num_vertices() - corner_table->NumIsolatedVertices();
+
+  if (mesh()->num_attributes() > 1) {
+    // Gather all corner tables for all non-position attributes.
+    std::vector<const MeshAttributeCornerTable *> attribute_corner_tables;
+    for (int i = 0; i < mesh()->num_attributes(); ++i) {
+      if (mesh()->attribute(i)->attribute_type() ==
+          GeometryAttribute::POSITION) {
+        continue;
+      }
+      const MeshAttributeCornerTable *const att_corner_table =
+          GetAttributeCornerTable(i);
+      // Attribute corner table may not be used in some configurations. For
+      // these cases we can assume the attribute connectivity to be the same as
+      // the connectivity of the position data.
+      if (att_corner_table) {
+        attribute_corner_tables.push_back(att_corner_table);
+      }
+    }
+
+    // Add a new point based on the configuration of interior attribute seams
+    // (replicating what the decoder would do).
+    for (VertexIndex vi(0); vi < corner_table->num_vertices(); ++vi) {
+      if (corner_table->IsVertexIsolated(vi)) {
+        continue;
+      }
+      // Go around all corners of the vertex and keep track of the observed
+      // attribute seams.
+      const CornerIndex first_corner_index = corner_table->LeftMostCorner(vi);
+      const PointIndex first_point_index =
+          mesh()->CornerToPointId(first_corner_index);
+
+      PointIndex last_point_index = first_point_index;
+      CornerIndex last_corner_index = first_corner_index;
+      CornerIndex corner_index = corner_table->SwingRight(first_corner_index);
+      size_t num_attribute_seams = 0;
+      while (corner_index != kInvalidCornerIndex) {
+        const PointIndex point_index = mesh()->CornerToPointId(corner_index);
+        bool seam_found = false;
+        if (point_index != last_point_index) {
+          // Point index changed - new attribute seam detected.
+          seam_found = true;
+          last_point_index = point_index;
+        } else {
+          // Even though point indices matches, there still may be a seam caused
+          // by non-manifold connectivity of non-position attribute data.
+          // FIX: use size_t to avoid a signed/unsigned comparison against
+          // std::vector::size().
+          for (size_t i = 0; i < attribute_corner_tables.size(); ++i) {
+            if (attribute_corner_tables[i]->Vertex(corner_index) !=
+                attribute_corner_tables[i]->Vertex(last_corner_index)) {
+              seam_found = true;
+              break;  // No need to process other attributes.
+            }
+          }
+        }
+        if (seam_found) {
+          ++num_attribute_seams;
+        }
+
+        if (corner_index == first_corner_index) {
+          break;
+        }
+
+        // Proceed to the next corner
+        last_corner_index = corner_index;
+        corner_index = corner_table->SwingRight(corner_index);
+      }
+
+      if (!corner_table->IsOnBoundary(vi) && num_attribute_seams > 0) {
+        // If the last visited point index is the same as the first point index
+        // we traveled all the way around the vertex. In this case the number of
+        // new points should be num_attribute_seams - 1
+        num_points += num_attribute_seams - 1;
+      } else {
+        // Else the vertex was either on a boundary (i.e. we couldn't travel all
+        // around the vertex), or we ended up at a different point. In both of
+        // these cases, the number of new points is equal to the number of
+        // attribute seams.
+        num_points += num_attribute_seams;
+      }
+    }
+  }
+  set_num_encoded_points(num_points);
+}
+
+void MeshEdgebreakerEncoder::ComputeNumberOfEncodedFaces() {
+  // Without an initialized implementation (or its corner table) the face
+  // count cannot be determined; leave the encoder state untouched.
+  if (!impl_) {
+    return;
+  }
+  const CornerTable *const table = impl_->GetCornerTable();
+  if (table == nullptr) {
+    return;
+  }
+  // Degenerated faces are excluded from the encoded face count.
+  set_num_encoded_faces(table->num_faces() - table->NumDegeneratedFaces());
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.h
new file mode 100644
index 0000000..70d4d50
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.h
@@ -0,0 +1,73 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_H_
+
+#include <unordered_map>
+
+#include "draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
+#include "draco/compression/mesh/mesh_edgebreaker_shared.h"
+#include "draco/compression/mesh/mesh_encoder.h"
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// Class implements the edge breaker geometry compression method as described
+// in "3D Compression Made Simple: Edgebreaker on a Corner-Table" by Rossignac
+// et al.'01. http://www.cc.gatech.edu/~jarek/papers/CornerTableSMI.pdf
class MeshEdgebreakerEncoder : public MeshEncoder {
 public:
  MeshEdgebreakerEncoder();

  // Returns the corner table built by the underlying implementation for the
  // encoded connectivity.
  const CornerTable *GetCornerTable() const override {
    return impl_->GetCornerTable();
  }

  // Returns the attribute-specific corner table for |att_id|, or nullptr when
  // the attribute uses the shared (position) connectivity. Delegates to the
  // selected implementation.
  const MeshAttributeCornerTable *GetAttributeCornerTable(
      int att_id) const override {
    return impl_->GetAttributeCornerTable(att_id);
  }

  // Returns the indices-encoding data associated with |att_id| as provided by
  // the implementation.
  const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
      int att_id) const override {
    return impl_->GetAttributeEncodingData(att_id);
  }

  // Identifies this encoder in the encoded bit-stream header.
  uint8_t GetEncodingMethod() const override {
    return MESH_EDGEBREAKER_ENCODING;
  }

 protected:
  // Overrides of the MeshEncoder encoding pipeline hooks.
  bool InitializeEncoder() override;
  Status EncodeConnectivity() override;
  bool GenerateAttributesEncoder(int32_t att_id) override;
  bool EncodeAttributesEncoderIdentifier(int32_t att_encoder_id) override;
  void ComputeNumberOfEncodedPoints() override;
  void ComputeNumberOfEncodedFaces() override;

 private:
  // The actual implementation of the edge breaker method. The implementations
  // are in general specializations of a template class
  // MeshEdgebreakerEncoderImpl where the template arguments control encoding
  // of the connectivity data. The actual implementation is selected in this
  // class based on the provided encoding options. Because this choice is done
  // in run-time, the actual implementation has to be hidden behind the
  // abstract interface MeshEdgebreakerEncoderImplInterface.
  std::unique_ptr<MeshEdgebreakerEncoderImplInterface> impl_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc
new file mode 100644
index 0000000..0791dc6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc
@@ -0,0 +1,854 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_edgebreaker_encoder_impl.h"
+
+#include <algorithm>
+
+#include "draco/compression/attributes/sequential_attribute_encoders_controller.h"
+#include "draco/compression/mesh/mesh_edgebreaker_encoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
+#include "draco/compression/mesh/traverser/depth_first_traverser.h"
+#include "draco/compression/mesh/traverser/max_prediction_degree_traverser.h"
+#include "draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h"
+#include "draco/compression/mesh/traverser/mesh_traversal_sequencer.h"
+#include "draco/compression/mesh/traverser/traverser_base.h"
+#include "draco/mesh/corner_table_iterators.h"
+#include "draco/mesh/mesh_misc_functions.h"
+
+namespace draco {
// TODO(draco-eng) consider converting 'typedef' to 'using' and deduplicate.
// NOTE(review): these typedefs alias the draco index types to themselves and
// are effectively no-ops kept from an earlier refactoring; they can likely be
// removed once the TODO above is addressed.
typedef CornerIndex CornerIndex;
typedef FaceIndex FaceIndex;
typedef VertexIndex VertexIndex;
+
// Default-constructs the implementation in an "uninitialized" state; the
// encoder/mesh pointers and option-derived settings are filled in by Init().
template <class TraversalEncoder>
MeshEdgebreakerEncoderImpl<TraversalEncoder>::MeshEdgebreakerEncoderImpl()
    : encoder_(nullptr),
      mesh_(nullptr),
      last_encoded_symbol_id_(-1),
      num_split_symbols_(0),
      use_single_connectivity_(false) {}
+
+template <class TraversalEncoder>
+bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::Init(
+ MeshEdgebreakerEncoder *encoder) {
+ encoder_ = encoder;
+ mesh_ = encoder->mesh();
+ attribute_encoder_to_data_id_map_.clear();
+
+ if (encoder_->options()->IsGlobalOptionSet("split_mesh_on_seams")) {
+ use_single_connectivity_ =
+ encoder_->options()->GetGlobalBool("split_mesh_on_seams", false);
+ } else if (encoder_->options()->GetSpeed() >= 6) {
+ // Else use default setting based on speed.
+ use_single_connectivity_ = true;
+ } else {
+ use_single_connectivity_ = false;
+ }
+ return true;
+}
+
+template <class TraversalEncoder>
+const MeshAttributeCornerTable *
+MeshEdgebreakerEncoderImpl<TraversalEncoder>::GetAttributeCornerTable(
+ int att_id) const {
+ for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+ if (attribute_data_[i].attribute_index == att_id) {
+ if (attribute_data_[i].is_connectivity_used) {
+ return &attribute_data_[i].connectivity_data;
+ }
+ return nullptr;
+ }
+ }
+ return nullptr;
+}
+
+template <class TraversalEncoder>
+const MeshAttributeIndicesEncodingData *
+MeshEdgebreakerEncoderImpl<TraversalEncoder>::GetAttributeEncodingData(
+ int att_id) const {
+ for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+ if (attribute_data_[i].attribute_index == att_id) {
+ return &attribute_data_[i].encoding_data;
+ }
+ }
+ return &pos_encoding_data_;
+}
+
+template <class TraversalEncoder>
+template <class TraverserT>
+std::unique_ptr<PointsSequencer>
+MeshEdgebreakerEncoderImpl<TraversalEncoder>::CreateVertexTraversalSequencer(
+ MeshAttributeIndicesEncodingData *encoding_data) {
+ typedef typename TraverserT::TraversalObserver AttObserver;
+ typedef typename TraverserT::CornerTable CornerTable;
+
+ std::unique_ptr<MeshTraversalSequencer<TraverserT>> traversal_sequencer(
+ new MeshTraversalSequencer<TraverserT>(mesh_, encoding_data));
+
+ AttObserver att_observer(corner_table_.get(), mesh_,
+ traversal_sequencer.get(), encoding_data);
+
+ TraverserT att_traverser;
+ att_traverser.Init(corner_table_.get(), att_observer);
+
+ // Set order of corners to simulate the corner order of the decoder.
+ traversal_sequencer->SetCornerOrder(processed_connectivity_corners_);
+ traversal_sequencer->SetTraverser(att_traverser);
+ return std::move(traversal_sequencer);
+}
+
// Creates and registers an attributes encoder for |att_id|, choosing between
// a per-vertex sequencer (over the shared/position connectivity) and a
// per-corner sequencer (over the attribute's own connectivity), and records
// which traversal method was used so it can be signaled to the decoder.
template <class TraversalEncoder>
bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
    int32_t att_id) {
  // For now, either create one encoder for each attribute or use a single
  // encoder for all attributes. Ideally we can share the same encoder for
  // a sub-set of attributes with the same connectivity (this is especially true
  // for per-vertex attributes).
  if (use_single_connectivity_ && GetEncoder()->num_attributes_encoders() > 0) {
    // We are using single connectivity and we already have an attribute
    // encoder. Add the attribute to the encoder and return.
    GetEncoder()->attributes_encoder(0)->AddAttributeId(att_id);
    return true;
  }
  const int32_t element_type =
      GetEncoder()->mesh()->GetAttributeElementType(att_id);
  const PointAttribute *const att =
      GetEncoder()->point_cloud()->attribute(att_id);
  // Find the attribute's entry in |attribute_data_|; stays -1 for attributes
  // without one (e.g. the position attribute).
  int32_t att_data_id = -1;
  for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
    if (attribute_data_[i].attribute_index == att_id) {
      att_data_id = i;
      break;
    }
  }
  MeshTraversalMethod traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
  std::unique_ptr<PointsSequencer> sequencer;
  // NOTE(review): the MESH_CORNER_ATTRIBUTE sub-condition below indexes
  // attribute_data_[att_data_id]; it relies on every non-position attribute
  // having an entry (presumably set up in InitAttributeData) so that
  // att_data_id != -1 on that path — confirm against InitAttributeData.
  if (use_single_connectivity_ ||
      att->attribute_type() == GeometryAttribute::POSITION ||
      element_type == MESH_VERTEX_ATTRIBUTE ||
      (element_type == MESH_CORNER_ATTRIBUTE &&
       attribute_data_[att_data_id].connectivity_data.no_interior_seams())) {
    // Per-vertex attribute reached, use the basic corner table to traverse the
    // mesh.
    MeshAttributeIndicesEncodingData *encoding_data;
    if (use_single_connectivity_ ||
        att->attribute_type() == GeometryAttribute::POSITION) {
      encoding_data = &pos_encoding_data_;
    } else {
      encoding_data = &attribute_data_[att_data_id].encoding_data;

      // Ensure we use the correct number of vertices in the encoding data.
      encoding_data->vertex_to_encoded_attribute_value_index_map.assign(
          corner_table_->num_vertices(), -1);

      // Mark the attribute specific connectivity data as not used as we use the
      // position attribute connectivity data.
      attribute_data_[att_data_id].is_connectivity_used = false;
    }

    // Lowest speed + position attribute: prefer the prediction-degree
    // traversal, unless multiple attributes share one connectivity.
    if (GetEncoder()->options()->GetSpeed() == 0 &&
        att->attribute_type() == GeometryAttribute::POSITION) {
      traversal_method = MESH_TRAVERSAL_PREDICTION_DEGREE;
      if (use_single_connectivity_ && mesh_->num_attributes() > 1) {
        // Make sure we don't use the prediction degree traversal when we encode
        // multiple attributes using the same connectivity.
        // TODO(ostava): We should investigate this and see if the prediction
        // degree can be actually used efficiently for non-position attributes.
        traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
      }
    }
    // Defining sequencer via a traversal scheme.
    if (traversal_method == MESH_TRAVERSAL_PREDICTION_DEGREE) {
      typedef MeshAttributeIndicesEncodingObserver<CornerTable> AttObserver;
      typedef MaxPredictionDegreeTraverser<CornerTable, AttObserver>
          AttTraverser;
      sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
    } else if (traversal_method == MESH_TRAVERSAL_DEPTH_FIRST) {
      typedef MeshAttributeIndicesEncodingObserver<CornerTable> AttObserver;
      typedef DepthFirstTraverser<CornerTable, AttObserver> AttTraverser;
      sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
    }
  } else {
    // Per-corner attribute encoder.
    typedef MeshAttributeIndicesEncodingObserver<MeshAttributeCornerTable>
        AttObserver;
    typedef DepthFirstTraverser<MeshAttributeCornerTable, AttObserver>
        AttTraverser;

    MeshAttributeIndicesEncodingData *const encoding_data =
        &attribute_data_[att_data_id].encoding_data;
    const MeshAttributeCornerTable *const corner_table =
        &attribute_data_[att_data_id].connectivity_data;

    // Ensure we use the correct number of vertices in the encoding data.
    attribute_data_[att_data_id]
        .encoding_data.vertex_to_encoded_attribute_value_index_map.assign(
            attribute_data_[att_data_id].connectivity_data.num_vertices(), -1);

    std::unique_ptr<MeshTraversalSequencer<AttTraverser>> traversal_sequencer(
        new MeshTraversalSequencer<AttTraverser>(mesh_, encoding_data));

    AttObserver att_observer(corner_table, mesh_, traversal_sequencer.get(),
                             encoding_data);

    AttTraverser att_traverser;
    att_traverser.Init(corner_table, att_observer);

    // Set order of corners to simulate the corner order of the decoder.
    traversal_sequencer->SetCornerOrder(processed_connectivity_corners_);
    traversal_sequencer->SetTraverser(att_traverser);
    sequencer = std::move(traversal_sequencer);
  }

  if (!sequencer) {
    return false;
  }

  // Remember which traversal method was chosen; it is encoded into the
  // bit-stream by EncodeAttributesEncoderIdentifier().
  if (att_data_id == -1) {
    pos_traversal_method_ = traversal_method;
  } else {
    attribute_data_[att_data_id].traversal_method = traversal_method;
  }

  std::unique_ptr<SequentialAttributeEncodersController> att_controller(
      new SequentialAttributeEncodersController(std::move(sequencer), att_id));

  // Update the mapping between the encoder id and the attribute data id.
  // This will be used by the decoder to select the appropriate attribute
  // decoder and the correct connectivity.
  attribute_encoder_to_data_id_map_.push_back(att_data_id);
  GetEncoder()->AddAttributesEncoder(std::move(att_controller));
  return true;
}
+
// Writes the identifier of the attributes encoder |att_encoder_id| into the
// bit-stream: the attribute data id, the element type (per-vertex vs
// per-corner), and the traversal method used for the attribute.
template <class TraversalEncoder>
bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::
    EncodeAttributesEncoderIdentifier(int32_t att_encoder_id) {
  const int8_t att_data_id = attribute_encoder_to_data_id_map_[att_encoder_id];
  encoder_->buffer()->Encode(att_data_id);

  // Also encode the type of the encoder that we used.
  // For att_data_id == -1 (position), |element_type| stays
  // MESH_VERTEX_ATTRIBUTE so the condition below short-circuits before
  // indexing attribute_data_ with -1.
  int32_t element_type = MESH_VERTEX_ATTRIBUTE;
  MeshTraversalMethod traversal_method;
  if (att_data_id >= 0) {
    const int32_t att_id = attribute_data_[att_data_id].attribute_index;
    element_type = GetEncoder()->mesh()->GetAttributeElementType(att_id);
    traversal_method = attribute_data_[att_data_id].traversal_method;
  } else {
    traversal_method = pos_traversal_method_;
  }
  if (element_type == MESH_VERTEX_ATTRIBUTE ||
      (element_type == MESH_CORNER_ATTRIBUTE &&
       attribute_data_[att_data_id].connectivity_data.no_interior_seams())) {
    // Per-vertex encoder.
    encoder_->buffer()->Encode(static_cast<uint8_t>(MESH_VERTEX_ATTRIBUTE));
  } else {
    // Per-corner encoder.
    encoder_->buffer()->Encode(static_cast<uint8_t>(MESH_CORNER_ATTRIBUTE));
  }
  // Encode the mesh traversal method.
  encoder_->buffer()->Encode(static_cast<uint8_t>(traversal_method));
  return true;
}
+
// Builds the corner table, writes the connectivity header (vertex/face
// counts), traverses every mesh component emitting edgebreaker symbols, and
// finally appends the split data and the traversal encoder's buffer.
template <class TraversalEncoder>
Status MeshEdgebreakerEncoderImpl<TraversalEncoder>::EncodeConnectivity() {
  // To encode the mesh, we need face connectivity data stored in a corner
  // table. To compute the connectivity we must use indices associated with
  // POSITION attribute, because they define which edges can be connected
  // together, unless the option |use_single_connectivity_| is set in which case
  // we break the mesh along attribute seams and use the same connectivity for
  // all attributes.
  if (use_single_connectivity_) {
    corner_table_ = CreateCornerTableFromAllAttributes(mesh_);
  } else {
    corner_table_ = CreateCornerTableFromPositionAttribute(mesh_);
  }
  if (corner_table_ == nullptr ||
      corner_table_->num_faces() == corner_table_->NumDegeneratedFaces()) {
    // Failed to construct the corner table.
    // TODO(ostava): Add better error reporting.
    return Status(Status::DRACO_ERROR, "All triangles are degenerate.");
  }

  traversal_encoder_.Init(this);

  // Also encode the total number of vertices that is going to be encoded.
  // This can be different from the mesh_->num_points() + num_new_vertices,
  // because some of the vertices of the input mesh can be ignored (e.g.
  // vertices on degenerated faces or isolated vertices not attached to any
  // face).
  const uint32_t num_vertices_to_be_encoded =
      corner_table_->num_vertices() - corner_table_->NumIsolatedVertices();
  EncodeVarint(num_vertices_to_be_encoded, encoder_->buffer());

  const uint32_t num_faces =
      corner_table_->num_faces() - corner_table_->NumDegeneratedFaces();
  EncodeVarint(num_faces, encoder_->buffer());

  // Reset encoder data that may have been initialized in previous runs.
  visited_faces_.assign(mesh_->num_faces(), false);
  pos_encoding_data_.vertex_to_encoded_attribute_value_index_map.assign(
      corner_table_->num_vertices(), -1);
  pos_encoding_data_.encoded_attribute_value_index_to_corner_map.clear();
  pos_encoding_data_.encoded_attribute_value_index_to_corner_map.reserve(
      corner_table_->num_faces() * 3);
  visited_vertex_ids_.assign(corner_table_->num_vertices(), false);
  vertex_traversal_length_.clear();
  last_encoded_symbol_id_ = -1;
  num_split_symbols_ = 0;
  topology_split_event_data_.clear();
  face_to_split_symbol_map_.clear();
  visited_holes_.clear();
  vertex_hole_id_.assign(corner_table_->num_vertices(), -1);
  processed_connectivity_corners_.clear();
  processed_connectivity_corners_.reserve(corner_table_->num_faces());
  pos_encoding_data_.num_values = 0;

  if (!FindHoles()) {
    return Status(Status::DRACO_ERROR, "Failed to process mesh holes.");
  }

  if (!InitAttributeData()) {
    return Status(Status::DRACO_ERROR, "Failed to initialize attribute data.");
  }

  const uint8_t num_attribute_data =
      static_cast<uint8_t>(attribute_data_.size());
  encoder_->buffer()->Encode(num_attribute_data);
  traversal_encoder_.SetNumAttributeData(num_attribute_data);

  const int num_corners = corner_table_->num_corners();

  traversal_encoder_.Start();

  std::vector<CornerIndex> init_face_connectivity_corners;
  // Traverse the surface starting from each unvisited corner.
  for (int c_id = 0; c_id < num_corners; ++c_id) {
    CornerIndex corner_index(c_id);
    const FaceIndex face_id = corner_table_->Face(corner_index);
    if (visited_faces_[face_id.value()]) {
      continue;  // Face has been already processed.
    }
    if (corner_table_->IsDegenerated(face_id)) {
      continue;  // Ignore degenerated faces.
    }

    CornerIndex start_corner;
    const bool interior_config =
        FindInitFaceConfiguration(face_id, &start_corner);
    traversal_encoder_.EncodeStartFaceConfiguration(interior_config);

    if (interior_config) {
      // Select the correct vertex on the face as the root.
      corner_index = start_corner;
      const VertexIndex vert_id = corner_table_->Vertex(corner_index);
      // Mark all vertices of a given face as visited.
      const VertexIndex next_vert_id =
          corner_table_->Vertex(corner_table_->Next(corner_index));
      const VertexIndex prev_vert_id =
          corner_table_->Vertex(corner_table_->Previous(corner_index));

      visited_vertex_ids_[vert_id.value()] = true;
      visited_vertex_ids_[next_vert_id.value()] = true;
      visited_vertex_ids_[prev_vert_id.value()] = true;
      // New traversal started. Initiate its length with the first vertex.
      vertex_traversal_length_.push_back(1);

      // Mark the face as visited.
      visited_faces_[face_id.value()] = true;
      // Start compressing from the opposite face of the "next" corner. This way
      // the first encoded corner corresponds to the tip corner of the regular
      // edgebreaker traversal (essentially the initial face can be then viewed
      // as a TOPOLOGY_C face).
      init_face_connectivity_corners.push_back(
          corner_table_->Next(corner_index));
      const CornerIndex opp_id =
          corner_table_->Opposite(corner_table_->Next(corner_index));
      const FaceIndex opp_face_id = corner_table_->Face(opp_id);
      if (opp_face_id != kInvalidFaceIndex &&
          !visited_faces_[opp_face_id.value()]) {
        if (!EncodeConnectivityFromCorner(opp_id)) {
          return Status(Status::DRACO_ERROR,
                        "Failed to encode mesh component.");
        }
      }
    } else {
      // Boundary configuration. We start on a boundary rather than on a face.
      // First encode the hole that's opposite to the start_corner.
      EncodeHole(corner_table_->Next(start_corner), true);
      // Start processing the face opposite to the boundary edge (the face
      // containing the start_corner).
      if (!EncodeConnectivityFromCorner(start_corner)) {
        return Status(Status::DRACO_ERROR, "Failed to encode mesh component.");
      }
    }
  }
  // Reverse the order of connectivity corners to match the order in which
  // they are going to be decoded.
  std::reverse(processed_connectivity_corners_.begin(),
               processed_connectivity_corners_.end());
  // Append the init face connectivity corners (which are processed in order by
  // the decoder after the regular corners).
  processed_connectivity_corners_.insert(processed_connectivity_corners_.end(),
                                         init_face_connectivity_corners.begin(),
                                         init_face_connectivity_corners.end());
  // Encode connectivity for all non-position attributes.
  if (attribute_data_.size() > 0) {
    // Use the same order of corner that will be used by the decoder.
    visited_faces_.assign(mesh_->num_faces(), false);
    for (CornerIndex ci : processed_connectivity_corners_) {
      EncodeAttributeConnectivitiesOnFace(ci);
    }
  }
  traversal_encoder_.Done();

  // Encode the number of symbols.
  const uint32_t num_encoded_symbols =
      static_cast<uint32_t>(traversal_encoder_.NumEncodedSymbols());
  EncodeVarint(num_encoded_symbols, encoder_->buffer());

  // Encode the number of split symbols.
  EncodeVarint(num_split_symbols_, encoder_->buffer());

  // Append the traversal buffer.
  if (!EncodeSplitData()) {
    return Status(Status::DRACO_ERROR, "Failed to encode split data.");
  }
  encoder_->buffer()->Encode(traversal_encoder_.buffer().data(),
                             traversal_encoder_.buffer().size());

  return OkStatus();
}
+
+template <class TraversalEncoder>
+bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::EncodeSplitData() {
+ uint32_t num_events =
+ static_cast<uint32_t>(topology_split_event_data_.size());
+ EncodeVarint(num_events, encoder_->buffer());
+ if (num_events > 0) {
+ // Encode split symbols using delta and varint coding. Split edges are
+ // encoded using direct bit coding.
+ int last_source_symbol_id = 0; // Used for delta coding.
+ for (uint32_t i = 0; i < num_events; ++i) {
+ const TopologySplitEventData &event_data = topology_split_event_data_[i];
+ // Encode source symbol id as delta from the previous source symbol id.
+ // Source symbol ids are always stored in increasing order so the delta is
+ // going to be positive.
+ EncodeVarint<uint32_t>(
+ event_data.source_symbol_id - last_source_symbol_id,
+ encoder_->buffer());
+ // Encode split symbol id as delta from the current source symbol id.
+ // Split symbol id is always smaller than source symbol id so the below
+ // delta is going to be positive.
+ EncodeVarint<uint32_t>(
+ event_data.source_symbol_id - event_data.split_symbol_id,
+ encoder_->buffer());
+ last_source_symbol_id = event_data.source_symbol_id;
+ }
+ encoder_->buffer()->StartBitEncoding(num_events, false);
+ for (uint32_t i = 0; i < num_events; ++i) {
+ const TopologySplitEventData &event_data = topology_split_event_data_[i];
+ encoder_->buffer()->EncodeLeastSignificantBits32(1,
+ event_data.source_edge);
+ }
+ encoder_->buffer()->EndBitEncoding();
+ }
+ return true;
+}
+
// Determines the start configuration for the traversal of face |face_id|.
// Returns true for an interior configuration (no boundary touches the face)
// with |*out_corner| set to the face's first corner, and false for an
// exterior configuration with |*out_corner| set to the corner opposite the
// boundary edge where decoding should start.
template <class TraversalEncoder>
bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::FindInitFaceConfiguration(
    FaceIndex face_id, CornerIndex *out_corner) const {
  // Corners of a face are stored consecutively: face f owns corners 3f..3f+2.
  CornerIndex corner_index = CornerIndex(3 * face_id.value());
  for (int i = 0; i < 3; ++i) {
    if (corner_table_->Opposite(corner_index) == kInvalidCornerIndex) {
      // If there is a boundary edge, the configuration is exterior and return
      // the CornerIndex opposite to the first boundary edge.
      *out_corner = corner_index;
      return false;
    }
    if (vertex_hole_id_[corner_table_->Vertex(corner_index).value()] != -1) {
      // Boundary vertex found. Find the first boundary edge attached to the
      // point and return the corner opposite to it.
      CornerIndex right_corner = corner_index;
      while (right_corner != kInvalidCornerIndex) {
        corner_index = right_corner;
        right_corner = corner_table_->SwingRight(right_corner);
      }
      // |corner_index| now lies on a boundary edge and its previous corner is
      // guaranteed to be the opposite corner of the boundary edge.
      *out_corner = corner_table_->Previous(corner_index);
      return false;
    }
    corner_index = corner_table_->Next(corner_index);
  }
  // Else we have an interior configuration. Return the first corner id.
  *out_corner = corner_index;
  return true;
}
+
// Core edgebreaker traversal: starting at |corner_id|, walks the mesh
// emitting one CLERS symbol (TOPOLOGY_C/L/E/R/S) per face. An explicit corner
// stack replaces recursion; TOPOLOGY_S pushes the right branch so it is
// traversed before the left one. Topology split events are recorded whenever
// a neighboring face was already visited through another path.
template <class TraversalEncoder>
bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::EncodeConnectivityFromCorner(
    CornerIndex corner_id) {
  corner_traversal_stack_.clear();
  corner_traversal_stack_.push_back(corner_id);
  const int num_faces = mesh_->num_faces();
  while (!corner_traversal_stack_.empty()) {
    // Currently processed corner.
    corner_id = corner_traversal_stack_.back();
    // Make sure the face hasn't been visited yet.
    if (corner_id == kInvalidCornerIndex ||
        visited_faces_[corner_table_->Face(corner_id).value()]) {
      // This face has been already traversed.
      corner_traversal_stack_.pop_back();
      continue;
    }
    // |num_visited_faces| bounds the inner loop so malformed connectivity
    // cannot spin forever.
    int num_visited_faces = 0;
    while (num_visited_faces < num_faces) {
      // Mark the current face as visited.
      ++num_visited_faces;
      ++last_encoded_symbol_id_;

      const FaceIndex face_id = corner_table_->Face(corner_id);
      visited_faces_[face_id.value()] = true;
      processed_connectivity_corners_.push_back(corner_id);
      traversal_encoder_.NewCornerReached(corner_id);
      const VertexIndex vert_id = corner_table_->Vertex(corner_id);
      const bool on_boundary = (vertex_hole_id_[vert_id.value()] != -1);
      if (!IsVertexVisited(vert_id)) {
        // A new unvisited vertex has been reached. We need to store its
        // position difference using next, prev, and opposite vertices.
        visited_vertex_ids_[vert_id.value()] = true;
        if (!on_boundary) {
          // If the vertex is on boundary it must correspond to an unvisited
          // hole and it will be encoded with TOPOLOGY_S symbol later).
          traversal_encoder_.EncodeSymbol(TOPOLOGY_C);
          // Move to the right triangle.
          corner_id = GetRightCorner(corner_id);
          continue;
        }
      }
      // The current vertex has been already visited or it was on a boundary.
      // We need to determine whether we can visit any of it's neighboring
      // faces.
      const CornerIndex right_corner_id = GetRightCorner(corner_id);
      const CornerIndex left_corner_id = GetLeftCorner(corner_id);
      const FaceIndex right_face_id = corner_table_->Face(right_corner_id);
      const FaceIndex left_face_id = corner_table_->Face(left_corner_id);
      if (IsRightFaceVisited(corner_id)) {
        // Right face has been already visited.
        // Check whether there is a topology split event.
        if (right_face_id != kInvalidFaceIndex) {
          CheckAndStoreTopologySplitEvent(last_encoded_symbol_id_,
                                          face_id.value(), RIGHT_FACE_EDGE,
                                          right_face_id.value());
        }
        if (IsLeftFaceVisited(corner_id)) {
          // Both neighboring faces are visited. End reached.
          // Check whether there is a topology split event on the left face.
          if (left_face_id != kInvalidFaceIndex) {
            CheckAndStoreTopologySplitEvent(last_encoded_symbol_id_,
                                            face_id.value(), LEFT_FACE_EDGE,
                                            left_face_id.value());
          }
          traversal_encoder_.EncodeSymbol(TOPOLOGY_E);
          corner_traversal_stack_.pop_back();
          break;  // Break from the while (num_visited_faces < num_faces) loop.
        } else {
          traversal_encoder_.EncodeSymbol(TOPOLOGY_R);
          // Go to the left face.
          corner_id = left_corner_id;
        }
      } else {
        // Right face was not visited.
        if (IsLeftFaceVisited(corner_id)) {
          // Check whether there is a topology split event on the left face.
          if (left_face_id != kInvalidFaceIndex) {
            CheckAndStoreTopologySplitEvent(last_encoded_symbol_id_,
                                            face_id.value(), LEFT_FACE_EDGE,
                                            left_face_id.value());
          }
          traversal_encoder_.EncodeSymbol(TOPOLOGY_L);
          // Left face visited, go to the right one.
          corner_id = right_corner_id;
        } else {
          traversal_encoder_.EncodeSymbol(TOPOLOGY_S);
          ++num_split_symbols_;
          // Both neighboring faces are unvisited, we need to visit both of
          // them.
          if (on_boundary) {
            // The tip vertex is on a hole boundary. If the hole hasn't been
            // visited yet we need to encode it.
            const int hole_id = vertex_hole_id_[vert_id.value()];
            if (!visited_holes_[hole_id]) {
              EncodeHole(corner_id, false);
            }
          }
          face_to_split_symbol_map_[face_id.value()] = last_encoded_symbol_id_;
          // Split the traversal.
          // First make the top of the current corner stack point to the left
          // face (this one will be processed second).
          corner_traversal_stack_.back() = left_corner_id;
          // Add a new corner to the top of the stack (right face needs to
          // be traversed first).
          corner_traversal_stack_.push_back(right_corner_id);
          // Break from the while (num_visited_faces < num_faces) loop.
          break;
        }
      }
    }
  }
  return true;  // All corners have been processed.
}
+
+template <class TraversalEncoder>
+int MeshEdgebreakerEncoderImpl<TraversalEncoder>::EncodeHole(
+ CornerIndex start_corner_id, bool encode_first_vertex) {
+ // We know that the start corner lies on a hole but we first need to find the
+ // boundary edge going from that vertex. It is the first edge in CW
+ // direction.
+ CornerIndex corner_id = start_corner_id;
+ corner_id = corner_table_->Previous(corner_id);
+ while (corner_table_->Opposite(corner_id) != kInvalidCornerIndex) {
+ corner_id = corner_table_->Opposite(corner_id);
+ corner_id = corner_table_->Next(corner_id);
+ }
+ const VertexIndex start_vertex_id = corner_table_->Vertex(start_corner_id);
+
+ int num_encoded_hole_verts = 0;
+ if (encode_first_vertex) {
+ visited_vertex_ids_[start_vertex_id.value()] = true;
+ ++num_encoded_hole_verts;
+ }
+
+ // corner_id is now opposite to the boundary edge.
+ // Mark the hole as visited.
+ visited_holes_[vertex_hole_id_[start_vertex_id.value()]] = true;
+ // Get the start vertex of the edge and use it as a reference.
+ VertexIndex start_vert_id =
+ corner_table_->Vertex(corner_table_->Next(corner_id));
+ // Get the end vertex of the edge.
+ VertexIndex act_vertex_id =
+ corner_table_->Vertex(corner_table_->Previous(corner_id));
+ while (act_vertex_id != start_vertex_id) {
+ // Encode the end vertex of the boundary edge.
+
+ start_vert_id = act_vertex_id;
+
+ // Mark the vertex as visited.
+ visited_vertex_ids_[act_vertex_id.value()] = true;
+ ++num_encoded_hole_verts;
+ corner_id = corner_table_->Next(corner_id);
+ // Look for the next attached open boundary edge.
+ while (corner_table_->Opposite(corner_id) != kInvalidCornerIndex) {
+ corner_id = corner_table_->Opposite(corner_id);
+ corner_id = corner_table_->Next(corner_id);
+ }
+ act_vertex_id = corner_table_->Vertex(corner_table_->Previous(corner_id));
+ }
+ return num_encoded_hole_verts;
+}
+
+template <class TraversalEncoder>
+CornerIndex MeshEdgebreakerEncoderImpl<TraversalEncoder>::GetRightCorner(
+ CornerIndex corner_id) const {
+ const CornerIndex next_corner_id = corner_table_->Next(corner_id);
+ return corner_table_->Opposite(next_corner_id);
+}
+
+template <class TraversalEncoder>
+CornerIndex MeshEdgebreakerEncoderImpl<TraversalEncoder>::GetLeftCorner(
+ CornerIndex corner_id) const {
+ const CornerIndex prev_corner_id = corner_table_->Previous(corner_id);
+ return corner_table_->Opposite(prev_corner_id);
+}
+
+template <class TraversalEncoder>
+bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::IsRightFaceVisited(
+ CornerIndex corner_id) const {
+ const CornerIndex next_corner_id = corner_table_->Next(corner_id);
+ const CornerIndex opp_corner_id = corner_table_->Opposite(next_corner_id);
+ if (opp_corner_id != kInvalidCornerIndex) {
+ return visited_faces_[corner_table_->Face(opp_corner_id).value()];
+ }
+ // Else we are on a boundary.
+ return true;
+}
+
+template <class TraversalEncoder>
+bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::IsLeftFaceVisited(
+ CornerIndex corner_id) const {
+ const CornerIndex prev_corner_id = corner_table_->Previous(corner_id);
+ const CornerIndex opp_corner_id = corner_table_->Opposite(prev_corner_id);
+ if (opp_corner_id != kInvalidCornerIndex) {
+ return visited_faces_[corner_table_->Face(opp_corner_id).value()];
+ }
+ // Else we are on a boundary.
+ return true;
+}
+
// Detects all open boundaries (holes) of the mesh: assigns every vertex that
// lies on a hole its hole id in |vertex_hole_id_| and registers each hole in
// |visited_holes_| (initially unvisited).
template <class TraversalEncoder>
bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::FindHoles() {
  // TODO(ostava): Add more error checking for invalid geometry data.
  const int num_corners = corner_table_->num_corners();
  // Go over all corners and detect non-visited open boundaries
  for (CornerIndex i(0); i < num_corners; ++i) {
    if (corner_table_->IsDegenerated(corner_table_->Face(i))) {
      continue;  // Don't process corners assigned to degenerated faces.
    }
    if (corner_table_->Opposite(i) == kInvalidCornerIndex) {
      // No opposite corner means no opposite face, so the opposite edge
      // of the corner is an open boundary.
      // Check whether we have already traversed the boundary.
      VertexIndex boundary_vert_id =
          corner_table_->Vertex(corner_table_->Next(i));
      if (vertex_hole_id_[boundary_vert_id.value()] != -1) {
        // The start vertex of the boundary edge is already assigned to an
        // open boundary. No need to traverse it again.
        continue;
      }
      // Else we found a new open boundary and we are going to traverse along it
      // and mark all visited vertices.
      const int boundary_id = static_cast<int>(visited_holes_.size());
      visited_holes_.push_back(false);

      // Walk the hole loop, labeling each vertex with |boundary_id| until we
      // come back to an already-labeled vertex (the loop start).
      CornerIndex corner_id = i;
      while (vertex_hole_id_[boundary_vert_id.value()] == -1) {
        // Mark the first vertex on the open boundary.
        vertex_hole_id_[boundary_vert_id.value()] = boundary_id;
        corner_id = corner_table_->Next(corner_id);
        // Look for the next attached open boundary edge.
        while (corner_table_->Opposite(corner_id) != kInvalidCornerIndex) {
          corner_id = corner_table_->Opposite(corner_id);
          corner_id = corner_table_->Next(corner_id);
        }
        // Id of the next vertex in the vertex on the hole.
        boundary_vert_id =
            corner_table_->Vertex(corner_table_->Next(corner_id));
      }
    }
  }
  return true;
}
+
+// Returns the symbol id under which |face_id| was encoded as a split
+// (TOPOLOGY_S) face, or -1 if the face has no entry in
+// |face_to_split_symbol_map_|.
+template <class TraversalEncoder>
+int MeshEdgebreakerEncoderImpl<TraversalEncoder>::GetSplitSymbolIdOnFace(
+    int face_id) const {
+  auto it = face_to_split_symbol_map_.find(face_id);
+  if (it == face_to_split_symbol_map_.end()) {
+    return -1;
+  }
+  return it->second;
+}
+
+// If |neighbor_face_id| was previously encoded with a split (TOPOLOGY_S)
+// symbol, records a topology split event connecting that split symbol to the
+// source symbol |src_symbol_id| via edge |src_edge|. Otherwise does nothing.
+// |src_face_id| is unused by this implementation.
+template <class TraversalEncoder>
+void MeshEdgebreakerEncoderImpl<
+    TraversalEncoder>::CheckAndStoreTopologySplitEvent(int src_symbol_id,
+                                                       int /* src_face_id */,
+                                                       EdgeFaceName src_edge,
+                                                       int neighbor_face_id) {
+  const int symbol_id = GetSplitSymbolIdOnFace(neighbor_face_id);
+  if (symbol_id == -1) {
+    return;  // Not a split symbol, no topology split event could happen.
+  }
+  TopologySplitEventData event_data;
+
+  event_data.split_symbol_id = symbol_id;
+  event_data.source_symbol_id = src_symbol_id;
+  event_data.source_edge = src_edge;
+  topology_split_event_data_.push_back(event_data);
+}
+
+// Prepares one |attribute_data_| entry per non-position attribute (position
+// is handled separately). No per-attribute data is needed when single
+// connectivity encoding is enabled. Always returns true.
+template <class TraversalEncoder>
+bool MeshEdgebreakerEncoderImpl<TraversalEncoder>::InitAttributeData() {
+  if (use_single_connectivity_) {
+    return true;  // All attributes use the same connectivity.
+  }
+
+  const int num_attributes = mesh_->num_attributes();
+  // Ignore the position attribute. It's decoded separately.
+  attribute_data_.resize(num_attributes - 1);
+  if (num_attributes == 1) {
+    return true;
+  }
+  int data_index = 0;
+  for (int i = 0; i < num_attributes; ++i) {
+    const int32_t att_index = i;
+    if (mesh_->attribute(att_index)->attribute_type() ==
+        GeometryAttribute::POSITION) {
+      continue;
+    }
+    const PointAttribute *const att = mesh_->attribute(att_index);
+    attribute_data_[data_index].attribute_index = att_index;
+    attribute_data_[data_index]
+        .encoding_data.encoded_attribute_value_index_to_corner_map.clear();
+    // Reserve up-front to avoid reallocations while the map is filled during
+    // traversal.
+    attribute_data_[data_index]
+        .encoding_data.encoded_attribute_value_index_to_corner_map.reserve(
+            corner_table_->num_corners());
+    attribute_data_[data_index].encoding_data.num_values = 0;
+    attribute_data_[data_index].connectivity_data.InitFromAttribute(
+        mesh_, corner_table_.get(), att);
+    ++data_index;
+  }
+  return true;
+}
+
+// TODO(ostava): Note that if the input mesh used the same attribute index on
+// multiple different vertices, such attribute will be duplicated using the
+// encoding below. Eventually, we may consider either using a different encoding
+// scheme for such cases, or at least deduplicating the attributes in the
+// decoder.
+//
+// Marks the face of |corner| as visited and, for each of its non-boundary
+// edges whose opposite face has not been processed yet, encodes one seam bit
+// per non-position attribute telling the decoder whether the edge is an
+// attribute seam. Always returns true.
+template <class TraversalEncoder>
+bool MeshEdgebreakerEncoderImpl<
+    TraversalEncoder>::EncodeAttributeConnectivitiesOnFace(CornerIndex corner) {
+  // Three corners of the face.
+  const CornerIndex corners[3] = {corner, corner_table_->Next(corner),
+                                  corner_table_->Previous(corner)};
+
+  const FaceIndex src_face_id = corner_table_->Face(corner);
+  visited_faces_[src_face_id.value()] = true;
+  for (int c = 0; c < 3; ++c) {
+    const CornerIndex opp_corner = corner_table_->Opposite(corners[c]);
+    if (opp_corner == kInvalidCornerIndex) {
+      continue;  // Don't encode attribute seams on boundary edges.
+    }
+    const FaceIndex opp_face_id = corner_table_->Face(opp_corner);
+    // Don't encode edges when the opposite face has been already processed.
+    if (visited_faces_[opp_face_id.value()]) {
+      continue;
+    }
+
+    for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+      if (attribute_data_[i].connectivity_data.IsCornerOppositeToSeamEdge(
+              corners[c])) {
+        traversal_encoder_.EncodeAttributeSeam(i, true);
+      } else {
+        traversal_encoder_.EncodeAttributeSeam(i, false);
+      }
+    }
+  }
+  return true;
+}
+
+// Explicit instantiations for all traversal encoders supported by this
+// translation unit.
+template class MeshEdgebreakerEncoderImpl<MeshEdgebreakerTraversalEncoder>;
+template class MeshEdgebreakerEncoderImpl<
+    MeshEdgebreakerTraversalPredictiveEncoder>;
+template class MeshEdgebreakerEncoderImpl<
+    MeshEdgebreakerTraversalValenceEncoder>;
+
+}  // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h
new file mode 100644
index 0000000..fb33771
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h
@@ -0,0 +1,210 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_IMPL_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_IMPL_H_
+
+#include <unordered_map>
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
+#include "draco/compression/mesh/mesh_edgebreaker_shared.h"
+#include "draco/compression/mesh/traverser/mesh_traversal_sequencer.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
+// Class implementing the edgebreaker encoding as described in "3D Compression
+// Made Simple: Edgebreaker on a Corner-Table" by Rossignac at al.'01.
+// http://www.cc.gatech.edu/~jarek/papers/CornerTableSMI.pdf
+template <class TraversalEncoderT>
+class MeshEdgebreakerEncoderImpl : public MeshEdgebreakerEncoderImplInterface {
+ public:
+  MeshEdgebreakerEncoderImpl();
+  explicit MeshEdgebreakerEncoderImpl(
+      const TraversalEncoderT &traversal_encoder);
+  bool Init(MeshEdgebreakerEncoder *encoder) override;
+
+  const MeshAttributeCornerTable *GetAttributeCornerTable(
+      int att_id) const override;
+  const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
+      int att_id) const override;
+
+  bool GenerateAttributesEncoder(int32_t att_id) override;
+  bool EncodeAttributesEncoderIdentifier(int32_t att_encoder_id) override;
+  Status EncodeConnectivity() override;
+
+  const CornerTable *GetCornerTable() const override {
+    return corner_table_.get();
+  }
+  bool IsFaceEncoded(FaceIndex fi) const override {
+    return visited_faces_[fi.value()];
+  }
+  MeshEdgebreakerEncoder *GetEncoder() const override { return encoder_; }
+
+ private:
+  // Initializes data needed for encoding non-position attributes.
+  // Returns false on error.
+  bool InitAttributeData();
+
+  // Creates a vertex traversal sequencer for the specified |TraverserT| type.
+  template <class TraverserT>
+  std::unique_ptr<PointsSequencer> CreateVertexTraversalSequencer(
+      MeshAttributeIndicesEncodingData *encoding_data);
+
+  // Finds the configuration of the initial face that starts the traversal.
+  // Configurations are determined by location of holes around the init face
+  // and they are described in mesh_edgebreaker_shared.h.
+  // Returns true if the face configuration is interior and false if it is
+  // exterior.
+  bool FindInitFaceConfiguration(FaceIndex face_id,
+                                 CornerIndex *out_corner_id) const;
+
+  // Encodes the connectivity between vertices.
+  bool EncodeConnectivityFromCorner(CornerIndex corner_id);
+
+  // Encodes all vertices of a hole starting at start_corner_id.
+  // The vertex associated with the first corner is encoded only if
+  // |encode_first_vertex| is true.
+  // Returns the number of encoded hole vertices.
+  int EncodeHole(CornerIndex start_corner_id, bool encode_first_vertex);
+
+  // Encodes topology split data.
+  // Returns false on error.
+  bool EncodeSplitData();
+
+  CornerIndex GetRightCorner(CornerIndex corner_id) const;
+  CornerIndex GetLeftCorner(CornerIndex corner_id) const;
+
+  bool IsRightFaceVisited(CornerIndex corner_id) const;
+  bool IsLeftFaceVisited(CornerIndex corner_id) const;
+  bool IsVertexVisited(VertexIndex vert_id) const {
+    return visited_vertex_ids_[vert_id.value()];
+  }
+
+  // Finds and stores data about all holes in the input mesh.
+  bool FindHoles();
+
+  // For faces encoded with symbol TOPOLOGY_S (split), this method returns
+  // the encoded symbol id or -1 if the face wasn't encoded by a split symbol.
+  int GetSplitSymbolIdOnFace(int face_id) const;
+
+  // Checks whether there is a topology split event on a neighboring face and
+  // stores the event data if necessary. For more info about topology split
+  // events, see description of TopologySplitEventData in
+  // mesh_edgebreaker_shared.h.
+  void CheckAndStoreTopologySplitEvent(int src_symbol_id, int src_face_id,
+                                       EdgeFaceName src_edge,
+                                       int neighbor_face_id);
+
+  // Encodes connectivity of all attributes on a newly traversed face.
+  bool EncodeAttributeConnectivitiesOnFace(CornerIndex corner);
+
+  // This function is used to assign correct encoding order of attributes
+  // to unprocessed corners. The encoding order is equal to the order in which
+  // the attributes are going to be processed by the decoder and it is necessary
+  // for proper prediction of attribute values.
+  bool AssignPositionEncodingOrderToAllCorners();
+
+  // This function is used to generate encoding order for all non-position
+  // attributes.
+  // Returns false when one or more attributes failed to be processed.
+  bool GenerateEncodingOrderForAttributes();
+
+  // The main encoder that owns this class.
+  MeshEdgebreakerEncoder *encoder_;
+  // Mesh that's being encoded.
+  const Mesh *mesh_;
+  // Corner table stores the mesh face connectivity data.
+  std::unique_ptr<CornerTable> corner_table_;
+  // Stack used for storing corners that need to be traversed when encoding
+  // the connectivity. New corner is added for each initial face and a split
+  // symbol, and one corner is removed when the end symbol is reached.
+  // Stored as member variable to prevent frequent memory reallocations when
+  // handling meshes with lots of disjoint components. Originally, we used
+  // recursive functions to handle this behavior, but that can cause stack
+  // memory overflow when compressing huge meshes.
+  std::vector<CornerIndex> corner_traversal_stack_;
+  // Array for marking visited faces.
+  std::vector<bool> visited_faces_;
+
+  // Attribute data for position encoding.
+  MeshAttributeIndicesEncodingData pos_encoding_data_;
+
+  // Traversal method used for the position attribute.
+  MeshTraversalMethod pos_traversal_method_;
+
+  // Array storing corners in the order they were visited during the
+  // connectivity encoding (always storing the tip corner of each newly visited
+  // face).
+  std::vector<CornerIndex> processed_connectivity_corners_;
+
+  // Array for storing visited vertex ids of all input vertices.
+  std::vector<bool> visited_vertex_ids_;
+
+  // For each traversal, this array stores the number of visited vertices.
+  std::vector<int> vertex_traversal_length_;
+  // Array for storing all topology split events encountered during the mesh
+  // traversal.
+  std::vector<TopologySplitEventData> topology_split_event_data_;
+  // Map between face_id and symbol_id. Contains entries only for faces that
+  // were encoded with TOPOLOGY_S symbol.
+  std::unordered_map<int, int> face_to_split_symbol_map_;
+
+  // Array for marking holes that have been reached during the traversal.
+  std::vector<bool> visited_holes_;
+  // Array for mapping vertices to hole ids. If a vertex is not on a hole, the
+  // stored value is -1.
+  std::vector<int> vertex_hole_id_;
+
+  // Id of the last encoded symbol.
+  int last_encoded_symbol_id_;
+
+  // The number of encoded split symbols.
+  uint32_t num_split_symbols_;
+
+  // Struct holding data used for encoding each non-position attribute.
+  // TODO(ostava): This should be probably renamed to something better.
+  struct AttributeData {
+    AttributeData() : attribute_index(-1), is_connectivity_used(true) {}
+    int attribute_index;
+    MeshAttributeCornerTable connectivity_data;
+    // Flag that can mark the connectivity_data invalid. In such case the base
+    // corner table of the mesh should be used instead.
+    bool is_connectivity_used;
+    // Data about attribute encoding order.
+    MeshAttributeIndicesEncodingData encoding_data;
+    // Traversal method used to generate the encoding data for this attribute.
+    MeshTraversalMethod traversal_method;
+  };
+  std::vector<AttributeData> attribute_data_;
+
+  // Array storing mapping between attribute encoder id and attribute data id.
+  std::vector<int32_t> attribute_encoder_to_data_id_map_;
+
+  TraversalEncoderT traversal_encoder_;
+
+  // If set, the encoder is going to use the same connectivity for all
+  // attributes. This effectively breaks the mesh along all attribute seams.
+  // In general, this approach should be much faster compared to encoding each
+  // connectivity separately, but the decoded model may contain higher number of
+  // duplicate attribute values which may decrease the compression ratio.
+  bool use_single_connectivity_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_IMPL_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h
new file mode 100644
index 0000000..627d512
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_IMPL_INTERFACE_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_IMPL_INTERFACE_H_
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
+// Forward declaration is necessary here to avoid circular dependencies.
+class MeshEdgebreakerEncoder;
+
+// Abstract interface used by MeshEdgebreakerEncoder to interact with the actual
+// implementation of the edgebreaker method. The implementations are in general
+// specializations of a template class MeshEdgebreakerEncoderImpl where the
+// template arguments control encoding of the connectivity data. Because the
+// choice of the implementation is done in run-time, we need to hide it behind
+// the abstract interface MeshEdgebreakerEncoderImplInterface.
+class MeshEdgebreakerEncoderImplInterface {
+ public:
+ virtual ~MeshEdgebreakerEncoderImplInterface() = default;
+ virtual bool Init(MeshEdgebreakerEncoder *encoder) = 0;
+
+ virtual const MeshAttributeCornerTable *GetAttributeCornerTable(
+ int att_id) const = 0;
+ virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
+ int att_id) const = 0;
+ virtual bool GenerateAttributesEncoder(int32_t att_id) = 0;
+ virtual bool EncodeAttributesEncoderIdentifier(int32_t att_encoder_id) = 0;
+ virtual Status EncodeConnectivity() = 0;
+
+ // Returns corner table of the encoded mesh.
+ virtual const CornerTable *GetCornerTable() const = 0;
+
+ // Returns true if a given face has been already encoded.
+ virtual bool IsFaceEncoded(FaceIndex fi) const = 0;
+
+ virtual MeshEdgebreakerEncoder *GetEncoder() const = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_ENCODER_IMPL_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc
new file mode 100644
index 0000000..8313882
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc
@@ -0,0 +1,247 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <sstream>
+
+#include "draco/compression/encode.h"
+#include "draco/compression/mesh/mesh_edgebreaker_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_encoder.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/mesh_io.h"
+#include "draco/io/obj_decoder.h"
+#include "draco/mesh/mesh_are_equivalent.h"
+#include "draco/mesh/mesh_cleanup.h"
+#include "draco/mesh/triangle_soup_mesh_builder.h"
+
+namespace draco {
+
+// Test fixture that round-trips a mesh through the edgebreaker encoder and
+// decoder and verifies the decoded mesh is equivalent to the (cleaned-up)
+// input.
+class MeshEdgebreakerEncodingTest : public ::testing::Test {
+ protected:
+  // Round-trips |file_name| using the default compression level (-1).
+  void TestFile(const std::string &file_name) { TestFile(file_name, -1); }
+
+  void TestFile(const std::string &file_name, int compression_level) {
+    const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
+    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+
+    TestMesh(mesh.get(), compression_level);
+  }
+
+  // Encodes |mesh|, decodes the result, and asserts equivalence. Note that
+  // |mesh| is mutated by the cleanup step below.
+  void TestMesh(Mesh *mesh, int compression_level) {
+    EncoderBuffer buffer;
+    MeshEdgebreakerEncoder encoder;
+    EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
+    // Speed is the inverse of compression level (level 10 -> speed 0).
+    encoder_options.SetSpeed(10 - compression_level, 10 - compression_level);
+    encoder.SetMesh(*mesh);
+    ASSERT_TRUE(encoder.Encode(encoder_options, &buffer).ok());
+
+    DecoderBuffer dec_buffer;
+    dec_buffer.Init(buffer.data(), buffer.size());
+    MeshEdgebreakerDecoder decoder;
+
+    std::unique_ptr<Mesh> decoded_mesh(new Mesh());
+    DecoderOptions dec_options;
+    ASSERT_TRUE(
+        decoder.Decode(dec_options, &dec_buffer, decoded_mesh.get()).ok());
+
+    // Cleanup the input mesh to make sure that input and output can be
+    // compared (edgebreaker method discards degenerated triangles and isolated
+    // vertices).
+    const MeshCleanupOptions options;
+    MeshCleanup cleanup;
+    ASSERT_TRUE(cleanup(mesh, options)) << "Failed to clean the input mesh.";
+
+    MeshAreEquivalent eq;
+    ASSERT_TRUE(eq(*mesh, *decoded_mesh.get()))
+        << "Decoded mesh is not the same as the input";
+  }
+};
+
+TEST_F(MeshEdgebreakerEncodingTest, TestNmOBJ) {
+  // Round-trips the test_nm.obj model through the edgebreaker codec.
+  const std::string file_name = "test_nm.obj";
+  TestFile(file_name);
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, ThreeFacesOBJ) {
+  // Round-trips the extra_vertex.obj model through the edgebreaker codec.
+  const std::string file_name = "extra_vertex.obj";
+  TestFile(file_name);
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestPly) {
+  // Tests whether the edgebreaker successfully encodes and decodes the test
+  // file (ply with color).
+  const std::string file_name = "test_pos_color.ply";
+  TestFile(file_name);
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestMultiAttributes) {
+  // Tests encoding of model with many attributes.
+  // Uses the maximum compression level (10, i.e. encoder speed 0).
+  const std::string file_name = "cube_att.obj";
+  TestFile(file_name, 10);
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestEncoderReuse) {
+  // Tests whether the edgebreaker encoder can be reused multiple times to
+  // encode a given mesh.
+  const std::string file_name = "test_pos_color.ply";
+  const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+
+  MeshEdgebreakerEncoder encoder;
+  EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
+  encoder.SetMesh(*mesh);
+  EncoderBuffer buffer_0, buffer_1;
+  ASSERT_TRUE(encoder.Encode(encoder_options, &buffer_0).ok());
+  ASSERT_TRUE(encoder.Encode(encoder_options, &buffer_1).ok());
+
+  // Make sure both buffers are byte-identical.
+  ASSERT_EQ(buffer_0.size(), buffer_1.size());
+  for (int i = 0; i < buffer_0.size(); ++i) {
+    ASSERT_EQ(buffer_0.data()[i], buffer_1.data()[i]);
+  }
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestDecoderReuse) {
+  // Tests whether the edgebreaker decoder can be reused multiple times to
+  // decode a given mesh.
+  const std::string file_name = "test_pos_color.ply";
+  const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+
+  MeshEdgebreakerEncoder encoder;
+  EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
+  encoder.SetMesh(*mesh);
+  EncoderBuffer buffer;
+  ASSERT_TRUE(encoder.Encode(encoder_options, &buffer).ok());
+
+  DecoderBuffer dec_buffer;
+  dec_buffer.Init(buffer.data(), buffer.size());
+
+  MeshEdgebreakerDecoder decoder;
+
+  // Decode the mesh two times.
+  std::unique_ptr<Mesh> decoded_mesh_0(new Mesh());
+  DecoderOptions dec_options;
+  ASSERT_TRUE(
+      decoder.Decode(dec_options, &dec_buffer, decoded_mesh_0.get()).ok());
+
+  // Re-initialize the buffer before the second decode pass.
+  dec_buffer.Init(buffer.data(), buffer.size());
+  std::unique_ptr<Mesh> decoded_mesh_1(new Mesh());
+  ASSERT_TRUE(
+      decoder.Decode(dec_options, &dec_buffer, decoded_mesh_1.get()).ok());
+
+  // Make sure both of the meshes are identical.
+  MeshAreEquivalent eq;
+  ASSERT_TRUE(eq(*decoded_mesh_0.get(), *decoded_mesh_1.get()))
+      << "Decoded meshes are not the same";
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestSingleConnectivityEncoding) {
+  // Tests whether the edgebreaker method successfully encodes a mesh with
+  // multiple attributes using single connectivity by breaking the mesh along
+  // attribute seams.
+  const std::string file_name = "cube_att.obj";
+  const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+
+  for (int i = 0; i < 2; ++i) {
+    // Set the option to enable/disable single connectivity encoding.
+    EncoderOptionsBase<GeometryAttribute::Type> options =
+        EncoderOptionsBase<GeometryAttribute::Type>::CreateDefaultOptions();
+    options.SetGlobalBool("split_mesh_on_seams", i == 0 ? true : false);
+
+    EncoderBuffer buffer;
+    draco::Encoder encoder;
+    encoder.Reset(options);
+    encoder.SetSpeedOptions(0, 0);
+    encoder.SetAttributeQuantization(GeometryAttribute::POSITION, 8);
+    encoder.SetAttributeQuantization(GeometryAttribute::TEX_COORD, 8);
+    encoder.SetAttributeQuantization(GeometryAttribute::NORMAL, 8);
+    encoder.SetEncodingMethod(MESH_EDGEBREAKER_ENCODING);
+    ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok());
+
+    DecoderBuffer dec_buffer;
+    dec_buffer.Init(buffer.data(), buffer.size());
+
+    Decoder decoder;
+    auto dec_mesh = decoder.DecodeMeshFromBuffer(&dec_buffer).value();
+    ASSERT_NE(dec_mesh, nullptr);
+    ASSERT_EQ(dec_mesh->num_points(), 24);
+    ASSERT_EQ(dec_mesh->num_attributes(), 3);
+    // When the mesh is split on seams (i == 0), position values are
+    // duplicated along the seams (24 values); otherwise only the 8 unique
+    // cube corners remain.
+    ASSERT_EQ(dec_mesh->attribute(0)->size(), i == 0 ? 24 : 8);
+    ASSERT_EQ(dec_mesh->attribute(1)->size(), 24);
+    ASSERT_EQ(dec_mesh->attribute(2)->size(), 24);
+  }
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestWrongAttributeOrder) {
+  // Tests whether the edgebreaker method successfully encodes a mesh where the
+  // input attributes are in wrong order (because of their internal
+  // dependencies). In such case the attributes should be rearranged to the
+  // correct order.
+  TriangleSoupMeshBuilder mb;
+  mb.Start(1);
+  const int32_t norm_att_id =
+      mb.AddAttribute(GeometryAttribute::NORMAL, 3, DT_FLOAT32);
+  const int32_t pos_att_id =
+      mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+
+  mb.SetAttributeValuesForFace(
+      pos_att_id, FaceIndex(0), Vector3f(0.f, 0.f, 0.f).data(),
+      Vector3f(1.f, 0.f, 0.f).data(), Vector3f(0.f, 1.f, 0.f).data());
+
+  mb.SetAttributeValuesForFace(
+      norm_att_id, FaceIndex(0), Vector3f(0.f, 0.f, 1.f).data(),
+      Vector3f(0.f, 0.f, 0.f).data(), Vector3f(0.f, 0.f, 1.f).data());
+  std::unique_ptr<Mesh> mesh = mb.Finalize();
+  ASSERT_NE(mesh, nullptr);
+  // Input order: NORMAL before POSITION.
+  ASSERT_EQ(mesh->num_attributes(), 2);
+  ASSERT_EQ(mesh->attribute(0)->attribute_type(), GeometryAttribute::NORMAL);
+  ASSERT_EQ(mesh->attribute(1)->attribute_type(), GeometryAttribute::POSITION);
+
+  EncoderBuffer buffer;
+  draco::Encoder encoder;
+  encoder.SetSpeedOptions(3, 3);
+  encoder.SetAttributeQuantization(GeometryAttribute::POSITION, 8);
+  encoder.SetAttributeQuantization(GeometryAttribute::NORMAL, 8);
+  encoder.SetEncodingMethod(MESH_EDGEBREAKER_ENCODING);
+  ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok());
+
+  DecoderBuffer dec_buffer;
+  dec_buffer.Init(buffer.data(), buffer.size());
+
+  Decoder decoder;
+  auto dec_mesh = decoder.DecodeMeshFromBuffer(&dec_buffer).value();
+  ASSERT_NE(dec_mesh, nullptr);
+  // The decoded mesh must have the attributes rearranged: POSITION first.
+  ASSERT_EQ(dec_mesh->num_attributes(), 2);
+  ASSERT_EQ(dec_mesh->attribute(0)->attribute_type(),
+            GeometryAttribute::POSITION);
+  ASSERT_EQ(dec_mesh->attribute(1)->attribute_type(),
+            GeometryAttribute::NORMAL);
+}
+
+TEST_F(MeshEdgebreakerEncodingTest, TestDegenerateMesh) {
+  // Tests whether we can process a mesh that contains degenerate faces only.
+  const std::string file_name = "degenerate_mesh.obj";
+  const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+  EncoderBuffer buffer;
+  MeshEdgebreakerEncoder encoder;
+  EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
+  encoder.SetMesh(*mesh);
+  // We expect the encoding to fail as edgebreaker can only process valid faces.
+  // With no valid faces left, Encode() must return a non-ok status.
+  ASSERT_FALSE(encoder.Encode(encoder_options, &buffer).ok());
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h
new file mode 100644
index 0000000..cb3c29d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h
@@ -0,0 +1,131 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_SHARED_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_SHARED_H_
+
+#include <stdint.h>
+
+namespace draco {
+
+// Shared declarations used by both edgebreaker encoder and decoder.
+
+// A variable length encoding for storing all possible topology configurations
+// during traversal of mesh's surface. The configurations are based on visited
+// state of neighboring triangles around a currently processed face corner.
+// Note that about half of the encountered configurations is expected to be of
+// type TOPOLOGY_C. It's guaranteed that the encoding will use at most 2 bits
+// per triangle for meshes with no holes and up to 6 bits per triangle for
+// general meshes. In addition, the encoding will take up to 4 bits per triangle
+// for each non-position attribute attached to the mesh.
+//
+// *-------* *-------* *-------*
+// / \ / \ / \ / \ / \ / \
+// / \ / \ / \ / \ / \ / \
+// / \ / \ / \ / \ / \ / \
+// *-------v-------* *-------v-------* *-------v-------*
+// \ /x\ / /x\ / \ /x\
+// \ / \ / / \ / \ / \
+// \ / C \ / / L \ / \ / R \
+// *-------* *-------* *-------*
+//
+// * *
+// / \ / \
+// / \ / \
+// / \ / \
+// *-------v-------* v
+// \ /x\ / /x\
+// \ / \ / / \
+// \ / S \ / / E \
+// *-------* *-------*
+//
+// TODO(ostava): Get rid of the topology bit pattern. It's important only for
+// encoding but the algorithms should use EdgebreakerSymbol instead.
+// Bit patterns are chosen so that TOPOLOGY_C encodes as a single 0 bit while
+// the remaining symbols use three bits (see
+// edge_breaker_topology_bit_pattern_length below).
+enum EdgebreakerTopologyBitPattern {
+  TOPOLOGY_C = 0x0,  // 0
+  TOPOLOGY_S = 0x1,  // 1 0 0
+  TOPOLOGY_L = 0x3,  // 1 1 0
+  TOPOLOGY_R = 0x5,  // 1 0 1
+  TOPOLOGY_E = 0x7,  // 1 1 1
+  // A special symbol that's not actually encoded, but it can be used to mark
+  // the initial face that triggers the mesh encoding of a single connected
+  // component.
+  TOPOLOGY_INIT_FACE,
+  // A special value used to indicate an invalid symbol.
+  TOPOLOGY_INVALID
+};
+
+// Compact zero-based symbol ids; mapped to/from the topology bit patterns by
+// the lookup tables defined below.
+enum EdgebreakerSymbol {
+  EDGEBREAKER_SYMBOL_C = 0,
+  EDGEBREAKER_SYMBOL_S,
+  EDGEBREAKER_SYMBOL_L,
+  EDGEBREAKER_SYMBOL_R,
+  EDGEBREAKER_SYMBOL_E,
+  EDGEBREAKER_SYMBOL_INVALID
+};
+
+// Bit-length of symbols in the EdgebreakerTopologyBitPattern stored as a
+// lookup table for faster indexing. Indexed by the pattern value; entries for
+// values that are not valid patterns (2, 4, 6) are 0.
+constexpr int32_t edge_breaker_topology_bit_pattern_length[] = {1, 3, 0, 3,
+                                                                0, 3, 0, 3};
+
+// Zero-indexed symbol id for each of topology pattern. Invalid pattern values
+// map to EDGEBREAKER_SYMBOL_INVALID.
+constexpr EdgebreakerSymbol edge_breaker_topology_to_symbol_id[] = {
+    EDGEBREAKER_SYMBOL_C,       EDGEBREAKER_SYMBOL_S,
+    EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_L,
+    EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_R,
+    EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_E};
+
+// Reverse mapping between symbol id and topology pattern symbol.
+constexpr EdgebreakerTopologyBitPattern edge_breaker_symbol_to_topology_id[] = {
+    TOPOLOGY_C, TOPOLOGY_S, TOPOLOGY_L, TOPOLOGY_R, TOPOLOGY_E};
+
+// Types of edges used during mesh traversal relative to the tip vertex of a
+// visited triangle.
+enum EdgeFaceName : uint8_t { LEFT_FACE_EDGE = 0, RIGHT_FACE_EDGE = 1 };
+
+// Struct used for storing data about a source face that connects to an
+// already traversed face that was either the initial face or a face encoded
+// with either topology S (split) symbol. Such connection can be only caused by
+// topology changes on the traversed surface (if its genus != 0, i.e. when the
+// surface has topological handles or holes).
+// For each occurrence of such event we always encode the split symbol id,
+// source symbol id and source edge id (left, or right). There will be always
+// exactly two occurrences of this event for every topological handle on the
+// traversed mesh and one occurrence for a hole.
+struct TopologySplitEventData {
+  uint32_t split_symbol_id;
+  uint32_t source_symbol_id;
+  // We need to use uint32_t instead of EdgeFaceName because the most recent
+  // version of gcc does not allow that when optimizations are turned on.
+  // Holds an EdgeFaceName value packed into a single bit.
+  uint32_t source_edge : 1;
+};
+
+// Hole event is used to store info about the first symbol that reached a
+// vertex of so far unvisited hole. This can happen only on either the initial
+// face or during a regular traversal when TOPOLOGY_S is encountered.
+struct HoleEventData {
+  // Id of the symbol that first touched the hole; 0 by default.
+  int32_t symbol_id;
+  HoleEventData() : symbol_id(0) {}
+  explicit HoleEventData(int32_t sym_id) : symbol_id(sym_id) {}
+};
+
+// List of supported modes for valence based edgebreaker coding.
+enum EdgebreakerValenceCodingMode {
+  // Currently the only defined mode.
+  EDGEBREAKER_VALENCE_MODE_2_7 = 0,  // Use contexts for valences in range 2-7.
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_SHARED_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h
new file mode 100644
index 0000000..ce91adc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h
@@ -0,0 +1,201 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_DECODER_H_
+
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_decoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
+#include "draco/compression/mesh/mesh_edgebreaker_shared.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+typedef RAnsBitDecoder BinaryDecoder;
+
+// Default implementation of the edgebreaker traversal decoder that reads the
+// traversal data directly from a buffer.
+class MeshEdgebreakerTraversalDecoder {
+ public:
+ MeshEdgebreakerTraversalDecoder()
+ : attribute_connectivity_decoders_(nullptr),
+ num_attribute_data_(0),
+ decoder_impl_(nullptr) {}
+ // Initializes the traversal decoder from the main decoder's buffer. The
+ // internal |buffer_| starts at the current head of the encoded stream.
+ void Init(MeshEdgebreakerDecoderImplInterface *decoder) {
+ decoder_impl_ = decoder;
+ buffer_.Init(decoder->GetDecoder()->buffer()->data_head(),
+ decoder->GetDecoder()->buffer()->remaining_size(),
+ decoder->GetDecoder()->buffer()->bitstream_version());
+ }
+
+ // Returns the Draco bitstream version.
+ uint16_t BitstreamVersion() const {
+ return decoder_impl_->GetDecoder()->bitstream_version();
+ }
+
+ // Used to tell the decoder what is the number of expected decoded vertices.
+ // Ignored by default.
+ void SetNumEncodedVertices(int /* num_vertices */) {}
+
+ // Set the number of non-position attribute data for which we need to decode
+ // the connectivity.
+ void SetNumAttributeData(int num_data) { num_attribute_data_ = num_data; }
+
+ // Called before the traversal decoding is started.
+ // Returns a buffer decoder that points to data that was encoded after the
+ // traversal.
+ bool Start(DecoderBuffer *out_buffer) {
+ // Decode symbols from the main buffer decoder and face configurations from
+ // the start_face_buffer decoder.
+ if (!DecodeTraversalSymbols()) {
+ return false;
+ }
+
+ if (!DecodeStartFaces()) {
+ return false;
+ }
+
+ if (!DecodeAttributeSeams()) {
+ return false;
+ }
+ // |buffer_| now points past all traversal data; hand it to the caller.
+ *out_buffer = buffer_;
+ return true;
+ }
+
+ // Returns the configuration of a new initial face.
+ inline bool DecodeStartFaceConfiguration() {
+ uint32_t face_configuration;
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (buffer_.bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ // NOTE(review): the return value of DecodeLeastSignificantBits32 is
+ // ignored here; on failure |face_configuration| would be read
+ // uninitialized — confirm the legacy path can tolerate this.
+ start_face_buffer_.DecodeLeastSignificantBits32(1, &face_configuration);
+
+ } else
+#endif
+ {
+ face_configuration = start_face_decoder_.DecodeNextBit();
+ }
+ return face_configuration;
+ }
+
+ // Returns the next edgebreaker symbol that was reached during the traversal.
+ inline uint32_t DecodeSymbol() {
+ uint32_t symbol;
+ // One bit distinguishes TOPOLOGY_C from all other symbols.
+ symbol_buffer_.DecodeLeastSignificantBits32(1, &symbol);
+ if (symbol == TOPOLOGY_C) {
+ return symbol;
+ }
+ // Else decode two additional bits.
+ uint32_t symbol_suffix;
+ symbol_buffer_.DecodeLeastSignificantBits32(2, &symbol_suffix);
+ symbol |= (symbol_suffix << 1);
+ return symbol;
+ }
+
+ // Called whenever a new active corner is set in the decoder.
+ inline void NewActiveCornerReached(CornerIndex /* corner */) {}
+
+ // Called whenever |source| vertex is about to be merged into the |dest|
+ // vertex.
+ inline void MergeVertices(VertexIndex /* dest */, VertexIndex /* source */) {}
+
+ // Returns true if there is an attribute seam for the next processed pair
+ // of visited faces.
+ // |attribute| is used to mark the id of the non-position attribute (in range
+ // of <0, num_attributes - 1>).
+ inline bool DecodeAttributeSeam(int attribute) {
+ return attribute_connectivity_decoders_[attribute].DecodeNextBit();
+ }
+
+ // Called when the traversal is finished. Closes any bit decoders that are
+ // still active.
+ void Done() {
+ if (symbol_buffer_.bit_decoder_active()) {
+ symbol_buffer_.EndBitDecoding();
+ }
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (buffer_.bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ start_face_buffer_.EndBitDecoding();
+
+ } else
+#endif
+ {
+ start_face_decoder_.EndDecoding();
+ }
+ }
+
+ protected:
+ DecoderBuffer *buffer() { return &buffer_; }
+
+ // Positions |symbol_buffer_| at the bit-encoded traversal symbols and
+ // advances the main |buffer_| past them. Returns false on a malformed
+ // stream (declared size larger than the remaining data).
+ bool DecodeTraversalSymbols() {
+ uint64_t traversal_size;
+ symbol_buffer_ = buffer_;
+ if (!symbol_buffer_.StartBitDecoding(true, &traversal_size)) {
+ return false;
+ }
+ buffer_ = symbol_buffer_;
+ if (traversal_size > static_cast<uint64_t>(buffer_.remaining_size())) {
+ return false;
+ }
+ buffer_.Advance(traversal_size);
+ return true;
+ }
+
+ bool DecodeStartFaces() {
+ // Create a decoder that is set to the end of the encoded traversal data.
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (buffer_.bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ // Legacy path: start face configurations are bit-encoded in a separate
+ // sub-buffer whose size prefix must fit in the remaining data.
+ start_face_buffer_ = buffer_;
+ uint64_t traversal_size;
+ if (!start_face_buffer_.StartBitDecoding(true, &traversal_size)) {
+ return false;
+ }
+ buffer_ = start_face_buffer_;
+ if (traversal_size > static_cast<uint64_t>(buffer_.remaining_size())) {
+ return false;
+ }
+ buffer_.Advance(traversal_size);
+ return true;
+ }
+#endif
+ return start_face_decoder_.StartDecoding(&buffer_);
+ }
+
+ // Starts one binary decoder per non-position attribute; these provide the
+ // per-face-pair seam bits returned by DecodeAttributeSeam().
+ bool DecodeAttributeSeams() {
+ // Prepare attribute decoding.
+ if (num_attribute_data_ > 0) {
+ attribute_connectivity_decoders_ = std::unique_ptr<BinaryDecoder[]>(
+ new BinaryDecoder[num_attribute_data_]);
+ for (int i = 0; i < num_attribute_data_; ++i) {
+ if (!attribute_connectivity_decoders_[i].StartDecoding(&buffer_)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ private:
+ // Buffer that contains the encoded data.
+ DecoderBuffer buffer_;
+ // View into |buffer_| positioned at the traversal symbols.
+ DecoderBuffer symbol_buffer_;
+ BinaryDecoder start_face_decoder_;
+ // Legacy (< 2.2) buffer holding start face configurations.
+ DecoderBuffer start_face_buffer_;
+ // One decoder per non-position attribute; null when there are none.
+ std::unique_ptr<BinaryDecoder[]> attribute_connectivity_decoders_;
+ int num_attribute_data_;
+ const MeshEdgebreakerDecoderImplInterface *decoder_impl_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h
new file mode 100644
index 0000000..08cb66e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h
@@ -0,0 +1,139 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_ENCODER_H_
+
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_encoder.h"
+#include "draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
+#include "draco/core/macros.h"
+
+namespace draco {
+
+typedef RAnsBitEncoder BinaryEncoder;
+
+// Default implementation of the edgebreaker traversal encoder. Face
+// configurations are stored directly into the output buffer and the symbols
+// are first collected and then encoded in the reverse order to make the
+// decoding faster.
+class MeshEdgebreakerTraversalEncoder {
+ public:
+ MeshEdgebreakerTraversalEncoder()
+ : encoder_impl_(nullptr),
+ attribute_connectivity_encoders_(nullptr),
+ num_attribute_data_(0) {}
+ // Stores the encoder implementation; always succeeds for the base encoder.
+ bool Init(MeshEdgebreakerEncoderImplInterface *encoder) {
+ encoder_impl_ = encoder;
+ return true;
+ }
+
+ // Set the number of non-position attribute data for which we need to encode
+ // the connectivity.
+ void SetNumAttributeData(int num_data) { num_attribute_data_ = num_data; }
+
+ // Called before the traversal encoding is started.
+ void Start() {
+ start_face_encoder_.StartEncoding();
+ if (num_attribute_data_ > 0) {
+ // Init and start arithmetic encoders for storing configuration types
+ // of non-position attributes.
+ attribute_connectivity_encoders_ = std::unique_ptr<BinaryEncoder[]>(
+ new BinaryEncoder[num_attribute_data_]);
+ for (int i = 0; i < num_attribute_data_; ++i) {
+ attribute_connectivity_encoders_[i].StartEncoding();
+ }
+ }
+ }
+
+ // Called when a traversal starts from a new initial face.
+ inline void EncodeStartFaceConfiguration(bool interior) {
+ start_face_encoder_.EncodeBit(interior);
+ }
+
+ // Called when a new corner is reached during the traversal. No-op for the
+ // default encoder.
+ inline void NewCornerReached(CornerIndex /* corner */) {}
+
+ // Called whenever a new symbol is reached during the edgebreaker traversal.
+ inline void EncodeSymbol(EdgebreakerTopologyBitPattern symbol) {
+ // Store the symbol. It will be encoded after all symbols are processed.
+ symbols_.push_back(symbol);
+ }
+
+ // Called for every pair of connected and visited faces. |is_seam| specifies
+ // whether there is an attribute seam between the two faces.
+
+ inline void EncodeAttributeSeam(int attribute, bool is_seam) {
+ attribute_connectivity_encoders_[attribute].EncodeBit(is_seam ? 1 : 0);
+ }
+
+ // Called when the traversal is finished. Flushes symbols, start face
+ // configurations and attribute seams into |traversal_buffer_|.
+ void Done() {
+ EncodeTraversalSymbols();
+ EncodeStartFaces();
+ EncodeAttributeSeams();
+ }
+
+ // Returns the number of encoded symbols.
+ int NumEncodedSymbols() const { return static_cast<int>(symbols_.size()); }
+
+ const EncoderBuffer &buffer() const { return traversal_buffer_; }
+
+ protected:
+ void EncodeTraversalSymbols() {
+ // Bit encode the collected symbols.
+ // Allocate enough storage for the bit encoder.
+ // It's guaranteed that each face will need only up to 3 bits.
+ traversal_buffer_.StartBitEncoding(
+ encoder_impl_->GetEncoder()->mesh()->num_faces() * 3, true);
+ // Symbols are written in reverse order to make decoding faster.
+ for (int i = static_cast<int>(symbols_.size() - 1); i >= 0; --i) {
+ traversal_buffer_.EncodeLeastSignificantBits32(
+ edge_breaker_topology_bit_pattern_length[symbols_[i]], symbols_[i]);
+ }
+ traversal_buffer_.EndBitEncoding();
+ }
+
+ void EncodeStartFaces() {
+ start_face_encoder_.EndEncoding(&traversal_buffer_);
+ }
+
+ void EncodeAttributeSeams() {
+ if (attribute_connectivity_encoders_ != nullptr) {
+ for (int i = 0; i < num_attribute_data_; ++i) {
+ attribute_connectivity_encoders_[i].EndEncoding(&traversal_buffer_);
+ }
+ }
+ }
+
+ EncoderBuffer *GetOutputBuffer() { return &traversal_buffer_; }
+ const MeshEdgebreakerEncoderImplInterface *encoder_impl() const {
+ return encoder_impl_;
+ }
+
+ private:
+ BinaryEncoder start_face_encoder_;
+ EncoderBuffer traversal_buffer_;
+ const MeshEdgebreakerEncoderImplInterface *encoder_impl_;
+ // Symbols collected during the traversal.
+ std::vector<EdgebreakerTopologyBitPattern> symbols_;
+ // Arithmetic encoder for encoding attribute seams.
+ // One context for each non-position attribute.
+ std::unique_ptr<BinaryEncoder[]> attribute_connectivity_encoders_;
+ int num_attribute_data_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h
new file mode 100644
index 0000000..3f90045
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h
@@ -0,0 +1,134 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_PREDICTIVE_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_PREDICTIVE_DECODER_H_
+
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for traversal encoded with the
+// MeshEdgebreakerTraversalPredictiveEncoder. The decoder maintains valences
+// of the decoded portion of the traversed mesh and it uses them to predict
+// symbols that are about to be decoded.
+class MeshEdgebreakerTraversalPredictiveDecoder
+ : public MeshEdgebreakerTraversalDecoder {
+ public:
+ MeshEdgebreakerTraversalPredictiveDecoder()
+ : corner_table_(nullptr),
+ num_vertices_(0),
+ last_symbol_(-1),
+ predicted_symbol_(-1) {}
+ void Init(MeshEdgebreakerDecoderImplInterface *decoder) {
+ MeshEdgebreakerTraversalDecoder::Init(decoder);
+ corner_table_ = decoder->GetCornerTable();
+ }
+ // The vertex count is required here (unlike the base class) to size the
+ // valence array and to validate the decoded split symbol count.
+ void SetNumEncodedVertices(int num_vertices) { num_vertices_ = num_vertices; }
+
+ bool Start(DecoderBuffer *out_buffer) {
+ if (!MeshEdgebreakerTraversalDecoder::Start(out_buffer)) {
+ return false;
+ }
+ int32_t num_split_symbols;
+ if (!out_buffer->Decode(&num_split_symbols) || num_split_symbols < 0)
+ return false;
+ // Sanity check: there cannot be more split symbols than vertices.
+ if (num_split_symbols >= num_vertices_) {
+ return false;
+ }
+ // Set the valences of all initial vertices to 0.
+ vertex_valences_.resize(num_vertices_, 0);
+ if (!prediction_decoder_.StartDecoding(out_buffer)) {
+ return false;
+ }
+ return true;
+ }
+
+ inline uint32_t DecodeSymbol() {
+ // First check if we have a predicted symbol.
+ if (predicted_symbol_ != -1) {
+ // Double check that the predicted symbol was predicted correctly.
+ if (prediction_decoder_.DecodeNextBit()) {
+ last_symbol_ = predicted_symbol_;
+ return predicted_symbol_;
+ }
+ }
+ // We don't have a predicted symbol or the symbol was mis-predicted.
+ // Decode it directly.
+ last_symbol_ = MeshEdgebreakerTraversalDecoder::DecodeSymbol();
+ return last_symbol_;
+ }
+
+ // Updates vertex valences for the face of |corner| based on the last decoded
+ // symbol, then predicts the next symbol from the pivot vertex valence.
+ inline void NewActiveCornerReached(CornerIndex corner) {
+ const CornerIndex next = corner_table_->Next(corner);
+ const CornerIndex prev = corner_table_->Previous(corner);
+ // Update valences.
+ switch (last_symbol_) {
+ case TOPOLOGY_C:
+ case TOPOLOGY_S:
+ vertex_valences_[corner_table_->Vertex(next).value()] += 1;
+ vertex_valences_[corner_table_->Vertex(prev).value()] += 1;
+ break;
+ case TOPOLOGY_R:
+ vertex_valences_[corner_table_->Vertex(corner).value()] += 1;
+ vertex_valences_[corner_table_->Vertex(next).value()] += 1;
+ vertex_valences_[corner_table_->Vertex(prev).value()] += 2;
+ break;
+ case TOPOLOGY_L:
+ vertex_valences_[corner_table_->Vertex(corner).value()] += 1;
+ vertex_valences_[corner_table_->Vertex(next).value()] += 2;
+ vertex_valences_[corner_table_->Vertex(prev).value()] += 1;
+ break;
+ case TOPOLOGY_E:
+ vertex_valences_[corner_table_->Vertex(corner).value()] += 2;
+ vertex_valences_[corner_table_->Vertex(next).value()] += 2;
+ vertex_valences_[corner_table_->Vertex(prev).value()] += 2;
+ break;
+ default:
+ break;
+ }
+ // Compute the new predicted symbol.
+ if (last_symbol_ == TOPOLOGY_C || last_symbol_ == TOPOLOGY_R) {
+ const VertexIndex pivot =
+ corner_table_->Vertex(corner_table_->Next(corner));
+ // Mirrors the encoder: valence below 6 predicts R, otherwise C.
+ if (vertex_valences_[pivot.value()] < 6) {
+ predicted_symbol_ = TOPOLOGY_R;
+ } else {
+ predicted_symbol_ = TOPOLOGY_C;
+ }
+ } else {
+ predicted_symbol_ = -1;
+ }
+ }
+
+ inline void MergeVertices(VertexIndex dest, VertexIndex source) {
+ // Update valences on the merged vertices.
+ vertex_valences_[dest.value()] += vertex_valences_[source.value()];
+ }
+
+ private:
+ const CornerTable *corner_table_;
+ int num_vertices_;
+ // Valence of each vertex in the so-far-decoded portion of the mesh.
+ std::vector<int> vertex_valences_;
+ // Yields one bit per prediction: 1 = prediction was correct.
+ BinaryDecoder prediction_decoder_;
+ int last_symbol_;
+ // -1 when no prediction is available for the next symbol.
+ int predicted_symbol_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_PREDICTIVE_DECODER_H_
+#endif
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h
new file mode 100644
index 0000000..eb937fe
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h
@@ -0,0 +1,172 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_PREDICTIVE_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_PREDICTIVE_ENCODER_H_
+
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h"
+
+namespace draco {
+
+// Encoder that tries to predict the edgebreaker traversal symbols based on the
+// vertex valences of the unencoded portion of the mesh. The current prediction
+// scheme assumes that each vertex has valence 6 which can be used to predict
+// the symbol preceding the one that is currently encoded. Predictions are
+// encoded using an arithmetic coding which can lead to less than 1 bit per
+// triangle encoding for highly regular meshes.
+class MeshEdgebreakerTraversalPredictiveEncoder
+ : public MeshEdgebreakerTraversalEncoder {
+ public:
+ MeshEdgebreakerTraversalPredictiveEncoder()
+ : corner_table_(nullptr),
+ prev_symbol_(-1),
+ num_split_symbols_(0),
+ last_corner_(kInvalidCornerIndex),
+ num_symbols_(0) {}
+
+ bool Init(MeshEdgebreakerEncoderImplInterface *encoder) {
+ if (!MeshEdgebreakerTraversalEncoder::Init(encoder)) {
+ return false;
+ }
+ corner_table_ = encoder->GetCornerTable();
+ // Initialize valences of all vertices.
+ vertex_valences_.resize(corner_table_->num_vertices());
+ for (uint32_t i = 0; i < vertex_valences_.size(); ++i) {
+ vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
+ }
+ return true;
+ }
+
+ inline void NewCornerReached(CornerIndex corner) { last_corner_ = corner; }
+
+ // Predicts the preceding symbol from the valence of |pivot| in the
+ // not-yet-encoded portion of the mesh.
+ inline int32_t ComputePredictedSymbol(VertexIndex pivot) {
+ const int valence = vertex_valences_[pivot.value()];
+ if (valence < 0) {
+ // This situation can happen only for split vertices. Returning
+ // TOPOLOGY_INVALID always causes a misprediction.
+ return TOPOLOGY_INVALID;
+ }
+ if (valence < 6) {
+ return TOPOLOGY_R;
+ }
+ return TOPOLOGY_C;
+ }
+
+ inline void EncodeSymbol(EdgebreakerTopologyBitPattern symbol) {
+ ++num_symbols_;
+ // Update valences on the mesh. And compute the predicted preceding symbol.
+ // Note that the valences are computed for the so far unencoded part of the
+ // mesh. Adding a new symbol either reduces valences on the vertices or
+ // leaves the valence unchanged.
+ int32_t predicted_symbol = -1;
+ const CornerIndex next = corner_table_->Next(last_corner_);
+ const CornerIndex prev = corner_table_->Previous(last_corner_);
+ switch (symbol) {
+ case TOPOLOGY_C:
+ // Compute prediction.
+ predicted_symbol = ComputePredictedSymbol(corner_table_->Vertex(next));
+ FALLTHROUGH_INTENDED;
+ case TOPOLOGY_S:
+ // Update valences.
+ vertex_valences_[corner_table_->Vertex(next).value()] -= 1;
+ vertex_valences_[corner_table_->Vertex(prev).value()] -= 1;
+ if (symbol == TOPOLOGY_S) {
+ // Whenever we reach a split symbol, mark its tip vertex as invalid by
+ // setting the valence to a negative value. Any prediction that will
+ // use this vertex will then cause a misprediction. This is currently
+ // necessary because the decoding works in the reverse direction and
+ // the decoder doesn't know about these vertices until the split
+ // symbol is decoded at which point two vertices are merged into one.
+ // This can be most likely solved on the encoder side by splitting the
+ // tip vertex into two, but since split symbols are relatively rare,
+ // it's probably not worth doing it.
+ vertex_valences_[corner_table_->Vertex(last_corner_).value()] = -1;
+ ++num_split_symbols_;
+ }
+ break;
+ case TOPOLOGY_R:
+ // Compute prediction.
+ predicted_symbol = ComputePredictedSymbol(corner_table_->Vertex(next));
+ // Update valences.
+ vertex_valences_[corner_table_->Vertex(last_corner_).value()] -= 1;
+ vertex_valences_[corner_table_->Vertex(next).value()] -= 1;
+ vertex_valences_[corner_table_->Vertex(prev).value()] -= 2;
+ break;
+ case TOPOLOGY_L:
+ vertex_valences_[corner_table_->Vertex(last_corner_).value()] -= 1;
+ vertex_valences_[corner_table_->Vertex(next).value()] -= 2;
+ vertex_valences_[corner_table_->Vertex(prev).value()] -= 1;
+ break;
+ case TOPOLOGY_E:
+ vertex_valences_[corner_table_->Vertex(last_corner_).value()] -= 2;
+ vertex_valences_[corner_table_->Vertex(next).value()] -= 2;
+ vertex_valences_[corner_table_->Vertex(prev).value()] -= 2;
+ break;
+ default:
+ break;
+ }
+ // Flag used when it's necessary to explicitly store the previous symbol.
+ bool store_prev_symbol = true;
+ if (predicted_symbol != -1) {
+ if (predicted_symbol == prev_symbol_) {
+ // Correct prediction: only a single bit is stored instead of the
+ // previous symbol.
+ predictions_.push_back(true);
+ store_prev_symbol = false;
+ } else if (prev_symbol_ != -1) {
+ predictions_.push_back(false);
+ }
+ }
+ if (store_prev_symbol && prev_symbol_ != -1) {
+ MeshEdgebreakerTraversalEncoder::EncodeSymbol(
+ static_cast<EdgebreakerTopologyBitPattern>(prev_symbol_));
+ }
+ prev_symbol_ = symbol;
+ }
+
+ void Done() {
+ // We still need to store the last encoded symbol.
+ if (prev_symbol_ != -1) {
+ MeshEdgebreakerTraversalEncoder::EncodeSymbol(
+ static_cast<EdgebreakerTopologyBitPattern>(prev_symbol_));
+ }
+ // Store the init face configurations and the explicitly encoded symbols.
+ MeshEdgebreakerTraversalEncoder::Done();
+ // Encode the number of split symbols.
+ GetOutputBuffer()->Encode(num_split_symbols_);
+ // Store the predictions.
+ BinaryEncoder prediction_encoder;
+ prediction_encoder.StartEncoding();
+ // Written in reverse order to match the decoder's traversal direction.
+ for (int i = static_cast<int>(predictions_.size()) - 1; i >= 0; --i) {
+ prediction_encoder.EncodeBit(predictions_[i]);
+ }
+ prediction_encoder.EndEncoding(GetOutputBuffer());
+ }
+
+ int NumEncodedSymbols() const { return num_symbols_; }
+
+ private:
+ const CornerTable *corner_table_;
+ // Valences of the so far unencoded part of the mesh (-1 marks invalidated
+ // split-tip vertices).
+ std::vector<int> vertex_valences_;
+ std::vector<bool> predictions_;
+ // Previously encoded symbol.
+ int32_t prev_symbol_;
+ // The total number of encoded split symbols.
+ int32_t num_split_symbols_;
+ CornerIndex last_corner_;
+ // Explicitly count the number of encoded symbols.
+ int num_symbols_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_PREDICTIVE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
new file mode 100644
index 0000000..c003737
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
@@ -0,0 +1,215 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
+
+#include "draco/compression/entropy/symbol_decoding.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_decoder.h"
+#include "draco/core/varint_decoding.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for traversal encoded with MeshEdgebreakerTraversalValenceEncoder.
+// The decoder maintains valences of the decoded portion of the traversed mesh
+// and it uses them to select entropy context used for decoding of the actual
+// symbols.
+class MeshEdgebreakerTraversalValenceDecoder
+ : public MeshEdgebreakerTraversalDecoder {
+ public:
+ MeshEdgebreakerTraversalValenceDecoder()
+ : corner_table_(nullptr),
+ num_vertices_(0),
+ last_symbol_(-1),
+ active_context_(-1),
+ min_valence_(2),
+ max_valence_(7) {}
+ void Init(MeshEdgebreakerDecoderImplInterface *decoder) {
+ MeshEdgebreakerTraversalDecoder::Init(decoder);
+ corner_table_ = decoder->GetCornerTable();
+ }
+ void SetNumEncodedVertices(int num_vertices) { num_vertices_ = num_vertices; }
+
+ bool Start(DecoderBuffer *out_buffer) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ // Legacy (< 2.2) streams store the traversal symbols in the base-class
+ // symbol buffer; newer streams store all symbols per valence context.
+ if (BitstreamVersion() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ if (!MeshEdgebreakerTraversalDecoder::DecodeTraversalSymbols()) {
+ return false;
+ }
+ }
+#endif
+ if (!MeshEdgebreakerTraversalDecoder::DecodeStartFaces()) {
+ return false;
+ }
+ if (!MeshEdgebreakerTraversalDecoder::DecodeAttributeSeams()) {
+ return false;
+ }
+ *out_buffer = *buffer();
+
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (BitstreamVersion() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ uint32_t num_split_symbols;
+ if (BitstreamVersion() < DRACO_BITSTREAM_VERSION(2, 0)) {
+ if (!out_buffer->Decode(&num_split_symbols)) {
+ return false;
+ }
+ } else {
+ if (!DecodeVarint(&num_split_symbols, out_buffer)) {
+ return false;
+ }
+ }
+ // Sanity check: there cannot be more split symbols than vertices.
+ if (num_split_symbols >= static_cast<uint32_t>(num_vertices_)) {
+ return false;
+ }
+
+ int8_t mode;
+ if (!out_buffer->Decode(&mode)) {
+ return false;
+ }
+ if (mode == EDGEBREAKER_VALENCE_MODE_2_7) {
+ min_valence_ = 2;
+ max_valence_ = 7;
+ } else {
+ // Unsupported mode.
+ return false;
+ }
+
+ } else
+#endif
+ {
+ min_valence_ = 2;
+ max_valence_ = 7;
+ }
+
+ if (num_vertices_ < 0) {
+ return false;
+ }
+ // Set the valences of all initial vertices to 0.
+ vertex_valences_.resize(num_vertices_, 0);
+
+ const int num_unique_valences = max_valence_ - min_valence_ + 1;
+
+ // Decode all symbols for all contexts.
+ context_symbols_.resize(num_unique_valences);
+ context_counters_.resize(context_symbols_.size());
+ for (int i = 0; i < context_symbols_.size(); ++i) {
+ uint32_t num_symbols;
+ if (!DecodeVarint<uint32_t>(&num_symbols, out_buffer)) {
+ return false;
+ }
+ // A context can never hold more symbols than there are faces.
+ if (num_symbols > static_cast<uint32_t>(corner_table_->num_faces())) {
+ return false;
+ }
+ if (num_symbols > 0) {
+ context_symbols_[i].resize(num_symbols);
+ // NOTE(review): the return value of DecodeSymbols is ignored here, so
+ // a corrupt context silently yields unread symbol data — consider
+ // checking it and returning false, as later upstream Draco does.
+ DecodeSymbols(num_symbols, 1, out_buffer, context_symbols_[i].data());
+ // All symbols are going to be processed from the back.
+ context_counters_[i] = num_symbols;
+ }
+ }
+ return true;
+ }
+
+ inline uint32_t DecodeSymbol() {
+ // First check if we have a valid context.
+ if (active_context_ != -1) {
+ const int context_counter = --context_counters_[active_context_];
+ if (context_counter < 0) {
+ return TOPOLOGY_INVALID;
+ }
+ const int symbol_id = context_symbols_[active_context_][context_counter];
+ last_symbol_ = edge_breaker_symbol_to_topology_id[symbol_id];
+ } else {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (BitstreamVersion() < DRACO_BITSTREAM_VERSION(2, 2)) {
+ // We don't have a predicted symbol or the symbol was mis-predicted.
+ // Decode it directly.
+ last_symbol_ = MeshEdgebreakerTraversalDecoder::DecodeSymbol();
+
+ } else
+#endif
+ {
+ // The first symbol must be E.
+ last_symbol_ = TOPOLOGY_E;
+ }
+ }
+ return last_symbol_;
+ }
+
+ // Updates valences for the face of |corner| based on the last decoded
+ // symbol and selects the entropy context for the next symbol from the
+ // clamped valence of the pivot vertex.
+ inline void NewActiveCornerReached(CornerIndex corner) {
+ const CornerIndex next = corner_table_->Next(corner);
+ const CornerIndex prev = corner_table_->Previous(corner);
+ // Update valences.
+ switch (last_symbol_) {
+ case TOPOLOGY_C:
+ case TOPOLOGY_S:
+ vertex_valences_[corner_table_->Vertex(next)] += 1;
+ vertex_valences_[corner_table_->Vertex(prev)] += 1;
+ break;
+ case TOPOLOGY_R:
+ vertex_valences_[corner_table_->Vertex(corner)] += 1;
+ vertex_valences_[corner_table_->Vertex(next)] += 1;
+ vertex_valences_[corner_table_->Vertex(prev)] += 2;
+ break;
+ case TOPOLOGY_L:
+ vertex_valences_[corner_table_->Vertex(corner)] += 1;
+ vertex_valences_[corner_table_->Vertex(next)] += 2;
+ vertex_valences_[corner_table_->Vertex(prev)] += 1;
+ break;
+ case TOPOLOGY_E:
+ vertex_valences_[corner_table_->Vertex(corner)] += 2;
+ vertex_valences_[corner_table_->Vertex(next)] += 2;
+ vertex_valences_[corner_table_->Vertex(prev)] += 2;
+ break;
+ default:
+ break;
+ }
+ // Compute the new context that is going to be used to decode the next
+ // symbol.
+ const int active_valence = vertex_valences_[corner_table_->Vertex(next)];
+ int clamped_valence;
+ if (active_valence < min_valence_) {
+ clamped_valence = min_valence_;
+ } else if (active_valence > max_valence_) {
+ clamped_valence = max_valence_;
+ } else {
+ clamped_valence = active_valence;
+ }
+
+ active_context_ = (clamped_valence - min_valence_);
+ }
+
+ inline void MergeVertices(VertexIndex dest, VertexIndex source) {
+ // Update valences on the merged vertices.
+ vertex_valences_[dest] += vertex_valences_[source];
+ }
+
+ private:
+ const CornerTable *corner_table_;
+ int num_vertices_;
+ // Valence of each vertex in the so-far-decoded portion of the mesh.
+ IndexTypeVector<VertexIndex, int> vertex_valences_;
+ int last_symbol_;
+ // Index into |context_symbols_|; -1 before the first symbol is decoded.
+ int active_context_;
+
+ int min_valence_;
+ int max_valence_;
+ std::vector<std::vector<uint32_t>> context_symbols_;
+ // Points to the active symbol in each context.
+ std::vector<int> context_counters_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
new file mode 100644
index 0000000..c492c84
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
@@ -0,0 +1,226 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
+
+#include "draco/compression/entropy/symbol_encoding.h"
+#include "draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h"
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+// Predictive encoder for the Edgebreaker symbols based on valences of the
+// previously encoded vertices, following the method described in: Szymczak'02,
+// "Optimized Edgebreaker Encoding for Large and Regular Triangle Meshes". Each
+// valence is used to specify a different entropy context for encoding of the
+// symbols.
+// Encoder can operate in various predefined modes that can be used to select
+// the way in which the entropy contexts are computed (e.g. using different
+// clamping for valences, or even using different inputs to compute the
+// contexts), see EdgebreakerValenceCodingMode in mesh_edgebreaker_shared.h for
+// a list of supported modes.
+class MeshEdgebreakerTraversalValenceEncoder
+ : public MeshEdgebreakerTraversalEncoder {
+ public:
+ MeshEdgebreakerTraversalValenceEncoder()
+ : corner_table_(nullptr),
+ prev_symbol_(-1),
+ last_corner_(kInvalidCornerIndex),
+ num_symbols_(0),
+ min_valence_(2),
+ max_valence_(7) {}
+
+ bool Init(MeshEdgebreakerEncoderImplInterface *encoder) {
+ if (!MeshEdgebreakerTraversalEncoder::Init(encoder)) {
+ return false;
+ }
+ min_valence_ = 2;
+ max_valence_ = 7;
+ corner_table_ = encoder->GetCornerTable();
+
+ // Initialize valences of all vertices.
+ vertex_valences_.resize(corner_table_->num_vertices());
+ for (VertexIndex i(0); i < static_cast<uint32_t>(vertex_valences_.size());
+ ++i) {
+ vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
+ }
+
+ // Replicate the corner to vertex map from the corner table. We need to do
+ // this because the map may get updated during encoding because we add new
+ // vertices when we encounter split symbols.
+ corner_to_vertex_map_.resize(corner_table_->num_corners());
+ for (CornerIndex i(0); i < corner_table_->num_corners(); ++i) {
+ corner_to_vertex_map_[i] = corner_table_->Vertex(i);
+ }
+ const int32_t num_unique_valences = max_valence_ - min_valence_ + 1;
+
+ context_symbols_.resize(num_unique_valences);
+ return true;
+ }
+
+ inline void NewCornerReached(CornerIndex corner) { last_corner_ = corner; }
+
+ inline void EncodeSymbol(EdgebreakerTopologyBitPattern symbol) {
+ ++num_symbols_;
+ // Update valences on the mesh and compute the context that is going to be
+ // used to encode the processed symbol.
+ // Note that the valences are computed for the so far unencoded part of the
+ // mesh (i.e. the decoding is reverse). Adding a new symbol either reduces
+ // valences on the vertices or leaves the valence unchanged.
+
+ const CornerIndex next = corner_table_->Next(last_corner_);
+ const CornerIndex prev = corner_table_->Previous(last_corner_);
+
+ // Get valence on the tip corner of the active edge (outgoing edge that is
+ // going to be used in reverse decoding of the connectivity to predict the
+ // next symbol).
+ const int active_valence = vertex_valences_[corner_to_vertex_map_[next]];
+ switch (symbol) {
+ case TOPOLOGY_C:
+ // Compute prediction.
+ FALLTHROUGH_INTENDED;
+ case TOPOLOGY_S:
+ // Update valences.
+ vertex_valences_[corner_to_vertex_map_[next]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 1;
+ if (symbol == TOPOLOGY_S) {
+ // Whenever we reach a split symbol, we need to split the vertex into
+ // two and attach all corners on the left and right sides of the split
+ // vertex to the respective vertices (see image below). This is
+ // necessary since the decoder works in the reverse order and it
+ // merges the two vertices only after the split symbol is processed.
+ //
+ // * -----
+ // / \--------
+ // / \--------
+ // / \-------
+ // *-------v-------*
+ // \ /c\ /
+ // \ / \ /
+ // \ /n S p\ /
+ // *.......*
+ //
+
+ // Count the number of faces on the left side of the split vertex and
+ // update the valence on the "left vertex".
+ int num_left_faces = 0;
+ CornerIndex act_c = corner_table_->Opposite(prev);
+ while (act_c != kInvalidCornerIndex) {
+ if (encoder_impl()->IsFaceEncoded(corner_table_->Face(act_c))) {
+ break; // Stop when we reach the first visited face.
+ }
+ ++num_left_faces;
+ act_c = corner_table_->Opposite(corner_table_->Next(act_c));
+ }
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] =
+ num_left_faces + 1;
+
+ // Create a new vertex for the right side and count the number of
+ // faces that should be attached to this vertex.
+ const int new_vert_id = static_cast<int>(vertex_valences_.size());
+ int num_right_faces = 0;
+
+ act_c = corner_table_->Opposite(next);
+ while (act_c != kInvalidCornerIndex) {
+ if (encoder_impl()->IsFaceEncoded(corner_table_->Face(act_c))) {
+ break; // Stop when we reach the first visited face.
+ }
+ ++num_right_faces;
+ // Map corners on the right side to the newly created vertex.
+ corner_to_vertex_map_[corner_table_->Next(act_c)] = new_vert_id;
+ act_c = corner_table_->Opposite(corner_table_->Previous(act_c));
+ }
+ vertex_valences_.push_back(num_right_faces + 1);
+ }
+ break;
+ case TOPOLOGY_R:
+ // Update valences.
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[next]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 2;
+ break;
+ case TOPOLOGY_L:
+
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[next]] -= 2;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 1;
+ break;
+ case TOPOLOGY_E:
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 2;
+ vertex_valences_[corner_to_vertex_map_[next]] -= 2;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 2;
+ break;
+ default:
+ break;
+ }
+
+ if (prev_symbol_ != -1) {
+ int clamped_valence;
+ if (active_valence < min_valence_) {
+ clamped_valence = min_valence_;
+ } else if (active_valence > max_valence_) {
+ clamped_valence = max_valence_;
+ } else {
+ clamped_valence = active_valence;
+ }
+
+ const int context = clamped_valence - min_valence_;
+ context_symbols_[context].push_back(
+ edge_breaker_topology_to_symbol_id[prev_symbol_]);
+ }
+
+ prev_symbol_ = symbol;
+ }
+
+ void Done() {
+ // Store the init face configurations and attribute seam data
+ MeshEdgebreakerTraversalEncoder::EncodeStartFaces();
+ MeshEdgebreakerTraversalEncoder::EncodeAttributeSeams();
+
+ // Store the contexts.
+ for (int i = 0; i < context_symbols_.size(); ++i) {
+ EncodeVarint<uint32_t>(static_cast<uint32_t>(context_symbols_[i].size()),
+ GetOutputBuffer());
+ if (context_symbols_[i].size() > 0) {
+ EncodeSymbols(context_symbols_[i].data(),
+ static_cast<int>(context_symbols_[i].size()), 1, nullptr,
+ GetOutputBuffer());
+ }
+ }
+ }
+
+ int NumEncodedSymbols() const { return num_symbols_; }
+
+ private:
+ const CornerTable *corner_table_;
+ // Explicit map between corners and vertices. We cannot use the one stored
+ // in the |corner_table_| because we may need to add additional vertices to
+ // handle split symbols.
+ IndexTypeVector<CornerIndex, VertexIndex> corner_to_vertex_map_;
+ IndexTypeVector<VertexIndex, int> vertex_valences_;
+ // Previously encoded symbol.
+ int32_t prev_symbol_;
+ CornerIndex last_corner_;
+ // Explicitly count the number of encoded symbols.
+ int num_symbols_;
+
+ int min_valence_;
+ int max_valence_;
+ std::vector<std::vector<uint32_t>> context_symbols_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.cc
new file mode 100644
index 0000000..483ea02
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.cc
@@ -0,0 +1,34 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_encoder.h"
+
+namespace draco {
+
+MeshEncoder::MeshEncoder() : mesh_(nullptr), num_encoded_faces_(0) {}
+
+void MeshEncoder::SetMesh(const Mesh &m) {
+ mesh_ = &m;
+ SetPointCloud(m);
+}
+
+Status MeshEncoder::EncodeGeometryData() {
+ DRACO_RETURN_IF_ERROR(EncodeConnectivity());
+ if (options()->GetGlobalBool("store_number_of_encoded_faces", false)) {
+ ComputeNumberOfEncodedFaces();
+ }
+ return OkStatus();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.h
new file mode 100644
index 0000000..30ec4fa
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder.h
@@ -0,0 +1,84 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_ENCODER_H_
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+#include "draco/mesh/mesh.h"
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+namespace draco {
+
// Abstract base class for all mesh encoders. It provides some basic
// functionality that's shared between different encoders.
class MeshEncoder : public PointCloudEncoder {
 public:
  MeshEncoder();

  // Sets the mesh that is going to be encoded. Must be called before the
  // Encode() method.
  void SetMesh(const Mesh &m);

  // Meshes are always reported as triangular geometry.
  EncodedGeometryType GetGeometryType() const override {
    return TRIANGULAR_MESH;
  }

  // Returns the number of faces that were encoded during the last Encode()
  // function call. Valid only if "store_number_of_encoded_faces" flag was set
  // in the provided EncoderOptions.
  size_t num_encoded_faces() const { return num_encoded_faces_; }

  // Returns the base connectivity of the encoded mesh (or nullptr if it is not
  // initialized).
  virtual const CornerTable *GetCornerTable() const { return nullptr; }

  // Returns the attribute connectivity data or nullptr if it does not exist.
  virtual const MeshAttributeCornerTable *GetAttributeCornerTable(
      int /* att_id */) const {
    return nullptr;
  }

  // Returns the encoding data for a given attribute or nullptr when the data
  // does not exist.
  virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
      int /* att_id */) const {
    return nullptr;
  }

  // Returns the mesh set via SetMesh() (nullptr before that call).
  const Mesh *mesh() const { return mesh_; }

 protected:
  // Encodes connectivity first, then optionally computes the encoded face
  // count when the "store_number_of_encoded_faces" option is enabled.
  Status EncodeGeometryData() override;

  // Needs to be implemented by the derived classes.
  virtual Status EncodeConnectivity() = 0;

  // Computes and sets the num_encoded_faces_ for the encoder.
  virtual void ComputeNumberOfEncodedFaces() = 0;

  // Setters for use by derived encoders.
  void set_mesh(const Mesh *mesh) { mesh_ = mesh; }
  void set_num_encoded_faces(size_t num_faces) {
    num_encoded_faces_ = num_faces;
  }

 private:
  // Non-owning pointer to the mesh being encoded.
  const Mesh *mesh_;
  // Face count recorded by ComputeNumberOfEncodedFaces().
  size_t num_encoded_faces_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc
new file mode 100644
index 0000000..55f6836
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc
@@ -0,0 +1,116 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_encoder.h"
+
+#include "draco/compression/expert_encode.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/obj_decoder.h"
+
+namespace draco {
+
// Parameters for one golden-file test instantiation: the connectivity
// encoding method name ("sequential" or "edgebreaker") and the compression
// level passed to the encoder.
struct MeshEncoderTestParams {
  MeshEncoderTestParams(const std::string &encoding_method, int cl)
      : encoding_method(encoding_method), cl(cl) {}
  std::string encoding_method;
  int cl;
};

// Parameterized golden-file test fixture: encodes known models with the
// parameterized method/level and compares the bitstream to a checked-in
// golden file.
class MeshEncoderTest : public ::testing::TestWithParam<MeshEncoderTestParams> {
 protected:
  MeshEncoderTest() {}

  // Fills out_method with id of the encoding method used for the test.
  // Returns false if the encoding method is not set properly.
  bool GetMethod(MeshEncoderMethod *out_method) const {
    if (GetParam().encoding_method == "sequential") {
      *out_method = MESH_SEQUENTIAL_ENCODING;
      return true;
    }
    if (GetParam().encoding_method == "edgebreaker") {
      *out_method = MESH_EDGEBREAKER_ENCODING;
      return true;
    }
    return false;
  }

  // Encodes |file_name| with the parameterized settings and compares the
  // result to (or, with --update_golden_files, regenerates) the golden file.
  void TestGolden(const std::string &file_name) {
    // This test verifies that a given set of meshes are encoded to an expected
    // output. This is useful for catching bugs in code changes that are not
    // supposed to change the encoding.
    // The test is expected to fail when the encoding is modified. In such case,
    // the golden files need to be updated to reflect the changes.
    MeshEncoderMethod method;
    ASSERT_TRUE(GetMethod(&method))
        << "Test is run for an unknown encoding method";

    // Golden file name encodes the method, compression level and the bitstream
    // version, e.g. "cube_att.obj.edgebreaker.cl4.2.2.drc".
    std::string golden_file_name = file_name;
    golden_file_name += '.';
    golden_file_name += GetParam().encoding_method;
    golden_file_name += ".cl";
    golden_file_name += std::to_string(GetParam().cl);
    golden_file_name += ".";
    golden_file_name += std::to_string(kDracoMeshBitstreamVersionMajor);
    golden_file_name += ".";
    golden_file_name += std::to_string(kDracoMeshBitstreamVersionMinor);
    golden_file_name += ".drc";
    const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;

    ExpertEncoder encoder(*mesh);
    encoder.SetEncodingMethod(method);
    // Speed is the inverse of the compression level (cl 10 = slowest/best).
    encoder.SetSpeedOptions(10 - GetParam().cl, 10 - GetParam().cl);
    encoder.SetAttributeQuantization(0, 20);
    for (int i = 1; i < mesh->num_attributes(); ++i) {
      encoder.SetAttributeQuantization(i, 12);
    }
    EncoderBuffer buffer;
    ASSERT_TRUE(encoder.EncodeToBuffer(&buffer).ok())
        << "Failed encoding test mesh " << file_name << " with method "
        << GetParam().encoding_method;
    // Check that the encoded mesh was really encoded with the selected method.
    DecoderBuffer decoder_buffer;
    decoder_buffer.Init(buffer.data(), buffer.size());
    decoder_buffer.Advance(8);  // Skip the header to the encoding method id.
    uint8_t encoded_method;
    ASSERT_TRUE(decoder_buffer.Decode(&encoded_method));
    ASSERT_EQ(encoded_method, method);
    if (!FLAGS_update_golden_files) {
      EXPECT_TRUE(
          CompareGoldenFile(golden_file_name, buffer.data(), buffer.size()))
          << "Encoded data is different from the golden file. Please verify "
             "that the encoding works as expected and update the golden file "
             "if necessary (run the test with --update_golden_files flag).";
    } else {
      // Save the files into the local folder.
      EXPECT_TRUE(
          GenerateGoldenFile(golden_file_name, buffer.data(), buffer.size()))
          << "Failed to generate new golden file for " << file_name;
    }
  }
};

TEST_P(MeshEncoderTest, EncodeGoldenMeshTestNm) { TestGolden("test_nm.obj"); }

TEST_P(MeshEncoderTest, EncodeGoldenMeshCubeAtt) { TestGolden("cube_att.obj"); }

// Run both golden tests for each supported method/compression-level combo.
INSTANTIATE_TEST_SUITE_P(
    MeshEncoderTests, MeshEncoderTest,
    ::testing::Values(MeshEncoderTestParams("sequential", 3),
                      MeshEncoderTestParams("edgebreaker", 4),
                      MeshEncoderTestParams("edgebreaker", 10)));
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc
new file mode 100644
index 0000000..be349f5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc
@@ -0,0 +1,169 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_sequential_decoder.h"
+
+#include "draco/compression/attributes/linear_sequencer.h"
+#include "draco/compression/attributes/sequential_attribute_decoders_controller.h"
+#include "draco/compression/entropy/symbol_decoding.h"
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
// Trivial constructor; all state lives in the MeshDecoder base class.
MeshSequentialDecoder::MeshSequentialDecoder() {}

// Decodes the face/point counts and the connectivity (face indices) from the
// decoder buffer. Indices are stored either entropy-compressed or raw, using
// the smallest integer width that fits the point count. Returns false when
// the buffer is truncated or the counts are invalid.
bool MeshSequentialDecoder::DecodeConnectivity() {
  uint32_t num_faces;
  uint32_t num_points;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Bitstreams older than 2.2 store the counts as fixed-width 32-bit values;
  // newer streams use varints.
  if (bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    if (!buffer()->Decode(&num_faces)) {
      return false;
    }
    if (!buffer()->Decode(&num_points)) {
      return false;
    }

  } else
#endif
  {
    if (!DecodeVarint(&num_faces, buffer())) {
      return false;
    }
    if (!DecodeVarint(&num_points, buffer())) {
      return false;
    }
  }

  // Check that num_faces and num_points are valid values.
  const uint64_t faces_64 = static_cast<uint64_t>(num_faces);
  const uint64_t points_64 = static_cast<uint64_t>(num_points);
  // Compressed sequential encoding can only handle (2^32 - 1) / 3 indices.
  if (faces_64 > 0xffffffff / 3) {
    return false;
  }
  if (faces_64 > buffer()->remaining_size() / 3) {
    // The number of faces is unreasonably high, because face indices do not
    // fit in the remaining size of the buffer.
    return false;
  }
  // Each face references three points, so there cannot be more points than
  // 3 * num_faces.
  if (points_64 > faces_64 * 3) {
    return false;
  }
  // Method 0 = entropy-compressed delta-coded indices (see
  // MeshSequentialEncoder::CompressAndEncodeIndices()); any other value means
  // the indices are stored directly with a width chosen from the point count.
  uint8_t connectivity_method;
  if (!buffer()->Decode(&connectivity_method)) {
    return false;
  }
  if (connectivity_method == 0) {
    if (!DecodeAndDecompressIndices(num_faces)) {
      return false;
    }
  } else {
    if (num_points < 256) {
      // Decode indices as uint8_t.
      for (uint32_t i = 0; i < num_faces; ++i) {
        Mesh::Face face;
        for (int j = 0; j < 3; ++j) {
          uint8_t val;
          if (!buffer()->Decode(&val)) {
            return false;
          }
          face[j] = val;
        }
        mesh()->AddFace(face);
      }
    } else if (num_points < (1 << 16)) {
      // Decode indices as uint16_t.
      for (uint32_t i = 0; i < num_faces; ++i) {
        Mesh::Face face;
        for (int j = 0; j < 3; ++j) {
          uint16_t val;
          if (!buffer()->Decode(&val)) {
            return false;
          }
          face[j] = val;
        }
        mesh()->AddFace(face);
      }
    } else if (mesh()->num_points() < (1 << 21) &&
               bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 2)) {
      // Decode indices as varint-coded uint32_t.
      // NOTE(review): this condition reads mesh()->num_points(), not the
      // locally decoded |num_points| used by the other branches — verify it
      // mirrors the encoder's (1 << 21) varint branch for all inputs.
      for (uint32_t i = 0; i < num_faces; ++i) {
        Mesh::Face face;
        for (int j = 0; j < 3; ++j) {
          uint32_t val;
          if (!DecodeVarint(&val, buffer())) {
            return false;
          }
          face[j] = val;
        }
        mesh()->AddFace(face);
      }
    } else {
      // Decode faces as uint32_t (default).
      for (uint32_t i = 0; i < num_faces; ++i) {
        Mesh::Face face;
        for (int j = 0; j < 3; ++j) {
          uint32_t val;
          if (!buffer()->Decode(&val)) {
            return false;
          }
          face[j] = val;
        }
        mesh()->AddFace(face);
      }
    }
  }
  // Publish the decoded point count on the underlying point cloud.
  point_cloud()->set_num_points(num_points);
  return true;
}
+
+bool MeshSequentialDecoder::CreateAttributesDecoder(int32_t att_decoder_id) {
+ // Always create the basic attribute decoder.
+ return SetAttributesDecoder(
+ att_decoder_id,
+ std::unique_ptr<AttributesDecoder>(
+ new SequentialAttributeDecodersController(
+ std::unique_ptr<PointsSequencer>(
+ new LinearSequencer(point_cloud()->num_points())))));
+}
+
+bool MeshSequentialDecoder::DecodeAndDecompressIndices(uint32_t num_faces) {
+ // Get decoded indices differences that were encoded with an entropy code.
+ std::vector<uint32_t> indices_buffer(num_faces * 3);
+ if (!DecodeSymbols(num_faces * 3, 1, buffer(), indices_buffer.data())) {
+ return false;
+ }
+ // Reconstruct the indices from the differences.
+ // See MeshSequentialEncoder::CompressAndEncodeIndices() for more details.
+ int32_t last_index_value = 0;
+ int vertex_index = 0;
+ for (uint32_t i = 0; i < num_faces; ++i) {
+ Mesh::Face face;
+ for (int j = 0; j < 3; ++j) {
+ const uint32_t encoded_val = indices_buffer[vertex_index++];
+ int32_t index_diff = (encoded_val >> 1);
+ if (encoded_val & 1) {
+ index_diff = -index_diff;
+ }
+ const int32_t index_value = index_diff + last_index_value;
+ face[j] = index_value;
+ last_index_value = index_value;
+ }
+ mesh()->AddFace(face);
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.h
new file mode 100644
index 0000000..3a86c75
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.h
@@ -0,0 +1,39 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_SEQUENTIAL_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_SEQUENTIAL_DECODER_H_
+
+#include "draco/compression/mesh/mesh_decoder.h"
+
+namespace draco {
+
// Class for decoding data encoded by MeshSequentialEncoder.
class MeshSequentialDecoder : public MeshDecoder {
 public:
  MeshSequentialDecoder();

 protected:
  // Decodes face/point counts and all face indices from the buffer. Returns
  // false on a truncated or invalid stream.
  bool DecodeConnectivity() override;
  // Creates the single linear-sequence attributes decoder used by this codec.
  bool CreateAttributesDecoder(int32_t att_decoder_id) override;

 private:
  // Decodes face indices that were compressed with an entropy code.
  // Returns false on error.
  bool DecodeAndDecompressIndices(uint32_t num_faces);
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_SEQUENTIAL_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc
new file mode 100644
index 0000000..02ac777
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc
@@ -0,0 +1,132 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/mesh/mesh_sequential_encoder.h"
+
+#include <cstdlib>
+
+#include "draco/compression/attributes/linear_sequencer.h"
+#include "draco/compression/attributes/sequential_attribute_encoders_controller.h"
+#include "draco/compression/entropy/symbol_encoding.h"
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
// Trivial constructor; all state lives in the MeshEncoder base class.
MeshSequentialEncoder::MeshSequentialEncoder() {}

// Serializes the face/point counts followed by the connectivity (face
// indices). Depending on the "compress_connectivity" option the indices are
// either delta + entropy coded, or stored raw using the smallest integer
// width that can hold the point count.
Status MeshSequentialEncoder::EncodeConnectivity() {
  // Serialize indices.
  const uint32_t num_faces = mesh()->num_faces();
  EncodeVarint(num_faces, buffer());
  EncodeVarint(static_cast<uint32_t>(mesh()->num_points()), buffer());

  // We encode all attributes in the original (possibly duplicated) format.
  // TODO(ostava): This may not be optimal if we have only one attribute or if
  // all attributes share the same index mapping.
  if (options()->GetGlobalBool("compress_connectivity", false)) {
    // 0 = Encode compressed indices.
    buffer()->Encode(static_cast<uint8_t>(0));
    if (!CompressAndEncodeIndices()) {
      return Status(Status::DRACO_ERROR, "Failed to compress connectivity.");
    }
  } else {
    // 1 = Encode indices directly.
    buffer()->Encode(static_cast<uint8_t>(1));
    // Store vertex indices using a smallest data type that fits their range.
    // TODO(ostava): This can be potentially improved by using a tighter
    // fit that is not bound by a bit-length of any particular data type.
    if (mesh()->num_points() < 256) {
      // Serialize indices as uint8_t.
      for (FaceIndex i(0); i < num_faces; ++i) {
        const auto &face = mesh()->face(i);
        buffer()->Encode(static_cast<uint8_t>(face[0].value()));
        buffer()->Encode(static_cast<uint8_t>(face[1].value()));
        buffer()->Encode(static_cast<uint8_t>(face[2].value()));
      }
    } else if (mesh()->num_points() < (1 << 16)) {
      // Serialize indices as uint16_t.
      for (FaceIndex i(0); i < num_faces; ++i) {
        const auto &face = mesh()->face(i);
        buffer()->Encode(static_cast<uint16_t>(face[0].value()));
        buffer()->Encode(static_cast<uint16_t>(face[1].value()));
        buffer()->Encode(static_cast<uint16_t>(face[2].value()));
      }
    } else if (mesh()->num_points() < (1 << 21)) {
      // Serialize indices as varint.
      for (FaceIndex i(0); i < num_faces; ++i) {
        const auto &face = mesh()->face(i);
        EncodeVarint(static_cast<uint32_t>(face[0].value()), buffer());
        EncodeVarint(static_cast<uint32_t>(face[1].value()), buffer());
        EncodeVarint(static_cast<uint32_t>(face[2].value()), buffer());
      }
    } else {
      // Serialize faces as uint32_t (default).
      for (FaceIndex i(0); i < num_faces; ++i) {
        const auto &face = mesh()->face(i);
        buffer()->Encode(face);
      }
    }
  }
  return OkStatus();
}
+
+bool MeshSequentialEncoder::GenerateAttributesEncoder(int32_t att_id) {
+ // Create only one attribute encoder that is going to encode all points in a
+ // linear sequence.
+ if (att_id == 0) {
+ // Create a new attribute encoder only for the first attribute.
+ AddAttributesEncoder(std::unique_ptr<AttributesEncoder>(
+ new SequentialAttributeEncodersController(
+ std::unique_ptr<PointsSequencer>(
+ new LinearSequencer(point_cloud()->num_points())),
+ att_id)));
+ } else {
+ // Reuse the existing attribute encoder for other attributes.
+ attributes_encoder(0)->AddAttributeId(att_id);
+ }
+ return true;
+}
+
+bool MeshSequentialEncoder::CompressAndEncodeIndices() {
+ // Collect all indices to a buffer and encode them.
+ // Each new index is a difference from the previous value.
+ std::vector<uint32_t> indices_buffer;
+ int32_t last_index_value = 0;
+ const int num_faces = mesh()->num_faces();
+ for (FaceIndex i(0); i < num_faces; ++i) {
+ const auto &face = mesh()->face(i);
+ for (int j = 0; j < 3; ++j) {
+ const int32_t index_value = face[j].value();
+ const int32_t index_diff = index_value - last_index_value;
+ // Encode signed value to an unsigned one (put the sign to lsb pos).
+ const uint32_t encoded_val =
+ (abs(index_diff) << 1) | (index_diff < 0 ? 1 : 0);
+ indices_buffer.push_back(encoded_val);
+ last_index_value = index_value;
+ }
+ }
+ EncodeSymbols(indices_buffer.data(), static_cast<int>(indices_buffer.size()),
+ 1, nullptr, buffer());
+ return true;
+}
+
// Records the full mesh point count as the number of encoded points (the
// sequential encoder serializes every point of the input mesh).
void MeshSequentialEncoder::ComputeNumberOfEncodedPoints() {
  set_num_encoded_points(mesh()->num_points());
}

// Records the full mesh face count as the number of encoded faces (the
// sequential encoder serializes every face of the input mesh).
void MeshSequentialEncoder::ComputeNumberOfEncodedFaces() {
  set_num_encoded_faces(mesh()->num_faces());
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h
new file mode 100644
index 0000000..6726096
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// The encoder compresses all attribute values using an order preserving
+// attribute encoder (that can still support quantization, prediction schemes,
+// and other features).
+// The mesh connectivity data can be encoded using two modes that are controlled
+// using a global encoder options flag called "compress_connectivity"
+// 1. When "compress_connectivity" == true:
+// All point ids are first delta coded and then compressed using an entropy
+// coding.
+// 2. When "compress_connectivity" == false:
+// All point ids are encoded directly using either 8, 16, or 32 bits per
+// value based on the maximum point id value.
+
+#ifndef DRACO_COMPRESSION_MESH_MESH_SEQUENTIAL_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_SEQUENTIAL_ENCODER_H_
+
+#include "draco/compression/mesh/mesh_encoder.h"
+
+namespace draco {
+
// Class that encodes mesh data using a simple binary representation of mesh's
// connectivity and geometry.
// TODO(ostava): Use a better name.
class MeshSequentialEncoder : public MeshEncoder {
 public:
  MeshSequentialEncoder();
  // Identifies this encoder's method id in the encoded header.
  uint8_t GetEncodingMethod() const override {
    return MESH_SEQUENTIAL_ENCODING;
  }

 protected:
  // Writes face/point counts and the face indices (compressed or raw,
  // depending on the "compress_connectivity" option).
  Status EncodeConnectivity() override;
  // Routes all attributes through a single linear-sequence encoder.
  bool GenerateAttributesEncoder(int32_t att_id) override;
  // Record the full mesh point/face counts as the encoded counts.
  void ComputeNumberOfEncodedPoints() override;
  void ComputeNumberOfEncodedFaces() override;

 private:
  // Returns false on error.
  bool CompressAndEncodeIndices();
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_SEQUENTIAL_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/depth_first_traverser.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/depth_first_traverser.h
new file mode 100644
index 0000000..0b387ec
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/depth_first_traverser.h
@@ -0,0 +1,172 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_TRAVERSER_DEPTH_FIRST_TRAVERSER_H_
+#define DRACO_COMPRESSION_MESH_TRAVERSER_DEPTH_FIRST_TRAVERSER_H_
+
+#include <vector>
+
+#include "draco/compression/mesh/traverser/traverser_base.h"
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// Basic traverser that traverses a mesh in a DFS like fashion using the
+// CornerTable data structure. The necessary bookkeeping is available via the
+// TraverserBase. Callbacks are handled through template argument
+// TraversalObserverT.
+//
+// TraversalObserverT can perform an action on a traversal event such as newly
+// visited face, or corner, but it does not affect the traversal itself.
+//
+// Concept TraversalObserverT requires:
+//
+// public:
+// void OnNewFaceVisited(FaceIndex face);
+// - Called whenever a previously unvisited face is reached.
+//
+// void OnNewVertexVisited(VertexIndex vert, CornerIndex corner)
+//   - Called when a new vertex is visited. |corner| is used to indicate
+//     which of the vertex's corners has been reached.
+
+template <class CornerTableT, class TraversalObserverT>
+class DepthFirstTraverser
+    : public TraverserBase<CornerTableT, TraversalObserverT> {
+ public:
+  typedef CornerTableT CornerTable;
+  typedef TraversalObserverT TraversalObserver;
+  typedef TraverserBase<CornerTable, TraversalObserver> Base;
+
+  DepthFirstTraverser() {}
+
+  // Called before any traversing starts.
+  void OnTraversalStart() {}
+
+  // Called when all the traversing is done.
+  void OnTraversalEnd() {}
+
+  // Traverses the connected component containing |corner_id|, notifying the
+  // traversal observer about every newly visited face and vertex. Returns
+  // true when the component was traversed (or when its face had already been
+  // visited before); returns false when the connectivity data is invalid
+  // (a corner maps to an invalid vertex).
+  bool TraverseFromCorner(CornerIndex corner_id) {
+    if (this->IsFaceVisited(corner_id)) {
+      return true;  // Already traversed.
+    }
+
+    corner_traversal_stack_.clear();
+    corner_traversal_stack_.push_back(corner_id);
+    // For the first face, check the remaining corners as they may not be
+    // processed yet.
+    const VertexIndex next_vert =
+        this->corner_table()->Vertex(this->corner_table()->Next(corner_id));
+    const VertexIndex prev_vert =
+        this->corner_table()->Vertex(this->corner_table()->Previous(corner_id));
+    if (next_vert == kInvalidVertexIndex || prev_vert == kInvalidVertexIndex) {
+      return false;
+    }
+    if (!this->IsVertexVisited(next_vert)) {
+      this->MarkVertexVisited(next_vert);
+      this->traversal_observer().OnNewVertexVisited(
+          next_vert, this->corner_table()->Next(corner_id));
+    }
+    if (!this->IsVertexVisited(prev_vert)) {
+      this->MarkVertexVisited(prev_vert);
+      this->traversal_observer().OnNewVertexVisited(
+          prev_vert, this->corner_table()->Previous(corner_id));
+    }
+
+    // Start the actual traversal.
+    while (!corner_traversal_stack_.empty()) {
+      // Currently processed corner.
+      corner_id = corner_traversal_stack_.back();
+      FaceIndex face_id(corner_id.value() / 3);
+      // Make sure the face hasn't been visited yet.
+      if (corner_id == kInvalidCornerIndex || this->IsFaceVisited(face_id)) {
+        // This face has been already traversed.
+        corner_traversal_stack_.pop_back();
+        continue;
+      }
+      while (true) {
+        this->MarkFaceVisited(face_id);
+        this->traversal_observer().OnNewFaceVisited(face_id);
+        const VertexIndex vert_id = this->corner_table()->Vertex(corner_id);
+        if (vert_id == kInvalidVertexIndex) {
+          return false;
+        }
+        if (!this->IsVertexVisited(vert_id)) {
+          const bool on_boundary = this->corner_table()->IsOnBoundary(vert_id);
+          this->MarkVertexVisited(vert_id);
+          this->traversal_observer().OnNewVertexVisited(vert_id, corner_id);
+          if (!on_boundary) {
+            // Interior vertex: continue directly to the right neighboring
+            // face.
+            corner_id = this->corner_table()->GetRightCorner(corner_id);
+            face_id = FaceIndex(corner_id.value() / 3);
+            continue;
+          }
+        }
+        // The current vertex has been already visited or it was on a boundary.
+        // We need to determine whether we can visit any of its neighboring
+        // faces.
+        const CornerIndex right_corner_id =
+            this->corner_table()->GetRightCorner(corner_id);
+        const CornerIndex left_corner_id =
+            this->corner_table()->GetLeftCorner(corner_id);
+        const FaceIndex right_face_id(
+            (right_corner_id == kInvalidCornerIndex
+                 ? kInvalidFaceIndex
+                 : FaceIndex(right_corner_id.value() / 3)));
+        const FaceIndex left_face_id(
+            (left_corner_id == kInvalidCornerIndex
+                 ? kInvalidFaceIndex
+                 : FaceIndex(left_corner_id.value() / 3)));
+        if (this->IsFaceVisited(right_face_id)) {
+          // Right face has been already visited.
+          if (this->IsFaceVisited(left_face_id)) {
+            // Both neighboring faces are visited. End reached.
+            corner_traversal_stack_.pop_back();
+            break;  // Break from the while (true) loop.
+          } else {
+            // Go to the left face.
+            corner_id = left_corner_id;
+            face_id = left_face_id;
+          }
+        } else {
+          // Right face was not visited.
+          if (this->IsFaceVisited(left_face_id)) {
+            // Left face visited, go to the right one.
+            corner_id = right_corner_id;
+            face_id = right_face_id;
+          } else {
+            // Both neighboring faces are unvisited, we need to visit both of
+            // them.
+
+            // Split the traversal.
+            // First make the top of the current corner stack point to the left
+            // face (this one will be processed second).
+            corner_traversal_stack_.back() = left_corner_id;
+            // Add a new corner to the top of the stack (right face needs to
+            // be traversed first).
+            corner_traversal_stack_.push_back(right_corner_id);
+            // Break from the while (true) loop.
+            break;
+          }
+        }
+      }
+    }
+    return true;
+  }
+
+ private:
+  // Explicit stack used instead of recursion; each entry is the entry corner
+  // of a face that still needs to be traversed.
+  std::vector<CornerIndex> corner_traversal_stack_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_TRAVERSER_DEPTH_FIRST_TRAVERSER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/max_prediction_degree_traverser.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/max_prediction_degree_traverser.h
new file mode 100644
index 0000000..514193e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/max_prediction_degree_traverser.h
@@ -0,0 +1,226 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_TRAVERSER_MAX_PREDICTION_DEGREE_TRAVERSER_H_
+#define DRACO_COMPRESSION_MESH_TRAVERSER_MAX_PREDICTION_DEGREE_TRAVERSER_H_
+
+#include <vector>
+
+#include "draco/compression/mesh/traverser/traverser_base.h"
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// PredictionDegreeTraverser provides framework for traversal over a corner
+// table data structure following paper "Multi-way Geometry Encoding" by
+// Cohen-Or et al. '02. The traversal is implicitly guided by prediction degree
+// of the destination vertices. A prediction degree is computed as the number of
+// possible faces that can be used as source points for traversal to the given
+// destination vertex (see image below, where faces F1 and F2 are already
+// traversed and face F0 is not traversed yet. The prediction degree of vertex
+// V is then equal to two).
+//
+// X-----V-----o
+// / \ / \ / \
+// / F0\ / \ / F2\
+// X-----o-----o-----B
+// \ F1/
+// \ /
+// A
+//
+// The class implements the same interface as the DepthFirstTraverser
+// (depth_first_traverser.h) and it can be controlled via the same template
+// trait classes |CornerTableT| and |TraversalObserverT|, that are used
+// for controlling and monitoring of the traversal respectively. For details,
+// please see depth_first_traverser.h.
+// NOTE(review): the base class here is instantiated with the concrete
+// draco::CornerTable rather than with CornerTableT (the member typedef below
+// is not yet visible in the base-specifier), so this traverser effectively
+// supports only the standard CornerTable -- confirm this is intended.
+template <class CornerTableT, class TraversalObserverT>
+class MaxPredictionDegreeTraverser
+    : public TraverserBase<CornerTable, TraversalObserverT> {
+ public:
+  typedef CornerTableT CornerTable;
+  typedef TraversalObserverT TraversalObserver;
+  typedef TraverserBase<CornerTable, TraversalObserver> Base;
+
+  MaxPredictionDegreeTraverser() {}
+
+  // Called before any traversing starts.
+  void OnTraversalStart() {
+    prediction_degree_.resize(this->corner_table()->num_vertices(), 0);
+  }
+
+  // Called when all the traversing is done.
+  void OnTraversalEnd() {}
+
+  // Traverses the connected component containing |corner_id|, preferring
+  // edges that lead to vertices with a higher prediction degree (see the
+  // class comment). Always returns true.
+  bool TraverseFromCorner(CornerIndex corner_id) {
+    // Nothing to traverse (|prediction_degree_| is sized in
+    // OnTraversalStart()).
+    if (prediction_degree_.size() == 0) {
+      return true;
+    }
+
+    // Traversal starts from the |corner_id|. It's going to follow either the
+    // right or the left neighboring faces to |corner_id| based on their
+    // prediction degree.
+    traversal_stacks_[0].push_back(corner_id);
+    best_priority_ = 0;
+    // For the first face, check the remaining corners as they may not be
+    // processed yet.
+    const VertexIndex next_vert =
+        this->corner_table()->Vertex(this->corner_table()->Next(corner_id));
+    const VertexIndex prev_vert =
+        this->corner_table()->Vertex(this->corner_table()->Previous(corner_id));
+    if (!this->IsVertexVisited(next_vert)) {
+      this->MarkVertexVisited(next_vert);
+      this->traversal_observer().OnNewVertexVisited(
+          next_vert, this->corner_table()->Next(corner_id));
+    }
+    if (!this->IsVertexVisited(prev_vert)) {
+      this->MarkVertexVisited(prev_vert);
+      this->traversal_observer().OnNewVertexVisited(
+          prev_vert, this->corner_table()->Previous(corner_id));
+    }
+    const VertexIndex tip_vertex = this->corner_table()->Vertex(corner_id);
+    if (!this->IsVertexVisited(tip_vertex)) {
+      this->MarkVertexVisited(tip_vertex);
+      this->traversal_observer().OnNewVertexVisited(tip_vertex, corner_id);
+    }
+    // Start the actual traversal.
+    while ((corner_id = PopNextCornerToTraverse()) != kInvalidCornerIndex) {
+      FaceIndex face_id(corner_id.value() / 3);
+      // Make sure the face hasn't been visited yet.
+      if (this->IsFaceVisited(face_id)) {
+        // This face has been already traversed.
+        continue;
+      }
+
+      while (true) {
+        face_id = FaceIndex(corner_id.value() / 3);
+        this->MarkFaceVisited(face_id);
+        this->traversal_observer().OnNewFaceVisited(face_id);
+
+        // If the newly reached vertex hasn't been visited, mark it and notify
+        // the observer.
+        const VertexIndex vert_id = this->corner_table()->Vertex(corner_id);
+        if (!this->IsVertexVisited(vert_id)) {
+          this->MarkVertexVisited(vert_id);
+          this->traversal_observer().OnNewVertexVisited(vert_id, corner_id);
+        }
+
+        // Check whether we can traverse to the right and left neighboring
+        // faces.
+        const CornerIndex right_corner_id =
+            this->corner_table()->GetRightCorner(corner_id);
+        const CornerIndex left_corner_id =
+            this->corner_table()->GetLeftCorner(corner_id);
+        const FaceIndex right_face_id(
+            (right_corner_id == kInvalidCornerIndex
+                 ? kInvalidFaceIndex
+                 : FaceIndex(right_corner_id.value() / 3)));
+        const FaceIndex left_face_id(
+            (left_corner_id == kInvalidCornerIndex
+                 ? kInvalidFaceIndex
+                 : FaceIndex(left_corner_id.value() / 3)));
+        const bool is_right_face_visited = this->IsFaceVisited(right_face_id);
+        const bool is_left_face_visited = this->IsFaceVisited(left_face_id);
+
+        if (!is_left_face_visited) {
+          // We can go to the left face.
+          const int priority = ComputePriority(left_corner_id);
+          if (is_right_face_visited && priority <= best_priority_) {
+            // Right face has been already visited and the priority is equal or
+            // better than the best priority. We are sure that the left face
+            // would be traversed next so there is no need to put it onto the
+            // stack.
+            corner_id = left_corner_id;
+            continue;
+          } else {
+            AddCornerToTraversalStack(left_corner_id, priority);
+          }
+        }
+        if (!is_right_face_visited) {
+          // Go to the right face.
+          const int priority = ComputePriority(right_corner_id);
+          if (priority <= best_priority_) {
+            // We are sure that the right face would be traversed next so there
+            // is no need to put it onto the stack.
+            corner_id = right_corner_id;
+            continue;
+          } else {
+            AddCornerToTraversalStack(right_corner_id, priority);
+          }
+        }
+
+        // Couldn't proceed directly to the next corner
+        break;
+      }
+    }
+    return true;
+  }
+
+ private:
+  // Retrieves the next available corner (edge) to traverse. Edges are processed
+  // based on their priorities.
+  // Returns kInvalidCornerIndex when there is no edge available.
+  CornerIndex PopNextCornerToTraverse() {
+    for (int i = best_priority_; i < kMaxPriority; ++i) {
+      if (!traversal_stacks_[i].empty()) {
+        const CornerIndex ret = traversal_stacks_[i].back();
+        traversal_stacks_[i].pop_back();
+        best_priority_ = i;
+        return ret;
+      }
+    }
+    return kInvalidCornerIndex;
+  }
+
+  inline void AddCornerToTraversalStack(CornerIndex ci, int priority) {
+    traversal_stacks_[priority].push_back(ci);
+    // Make sure that the best available priority is up to date.
+    if (priority < best_priority_) {
+      best_priority_ = priority;
+    }
+  }
+
+  // Returns the priority of traversing edge leading to |corner_id|.
+  // Note: increments the prediction degree of the tip vertex as a side
+  // effect when the vertex has not been visited yet.
+  inline int ComputePriority(CornerIndex corner_id) {
+    const VertexIndex v_tip = this->corner_table()->Vertex(corner_id);
+    // Priority 0 when traversing to already visited vertices.
+    int priority = 0;
+    if (!this->IsVertexVisited(v_tip)) {
+      const int degree = ++prediction_degree_[v_tip];
+      // Priority 1 when prediction degree > 1, otherwise 2.
+      priority = (degree > 1 ? 1 : 2);
+    }
+    // Clamp the priority to the maximum number of buckets.
+    if (priority >= kMaxPriority) {
+      priority = kMaxPriority - 1;
+    }
+    return priority;
+  }
+
+  // For efficiency reasons, the priority traversal is implemented using buckets
+  // where each bucket represents a stack of available corners for a given
+  // priority. Corners with the highest priority (lowest numeric value) are
+  // always processed first.
+  static constexpr int kMaxPriority = 3;
+  std::vector<CornerIndex> traversal_stacks_[kMaxPriority];
+
+  // Keep track of the best available priority to improve the performance
+  // of the PopNextCornerToTraverse() method.
+  int best_priority_;
+
+  // Prediction degree available for each vertex.
+  IndexTypeVector<VertexIndex, int> prediction_degree_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_TRAVERSER_MAX_PREDICTION_DEGREE_TRAVERSER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h
new file mode 100644
index 0000000..e66dd14
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h
@@ -0,0 +1,76 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_TRAVERSER_MESH_ATTRIBUTE_INDICES_ENCODING_OBSERVER_H_
+#define DRACO_COMPRESSION_MESH_TRAVERSER_MESH_ATTRIBUTE_INDICES_ENCODING_OBSERVER_H_
+
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/attributes/points_sequencer.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Class that can be used to generate encoding (and decoding) order of attribute
+// values based on the traversal of the encoded mesh. The class should be used
+// as the TraversalObserverT member of a Traverser class such as the
+// DepthFirstTraverser (depth_first_traverser.h).
+// TODO(hemmer): rename to AttributeIndicesCodingTraverserObserver
+template <class CornerTableT>
+class MeshAttributeIndicesEncodingObserver {
+ public:
+  MeshAttributeIndicesEncodingObserver()
+      : att_connectivity_(nullptr),
+        encoding_data_(nullptr),
+        mesh_(nullptr),
+        sequencer_(nullptr) {}
+  // All pointers are borrowed (not owned) and must outlive this observer.
+  MeshAttributeIndicesEncodingObserver(
+      const CornerTableT *connectivity, const Mesh *mesh,
+      PointsSequencer *sequencer,
+      MeshAttributeIndicesEncodingData *encoding_data)
+      : att_connectivity_(connectivity),
+        encoding_data_(encoding_data),
+        mesh_(mesh),
+        sequencer_(sequencer) {}
+
+  // Interface for TraversalObserverT
+
+  void OnNewFaceVisited(FaceIndex /* face */) {}
+
+  inline void OnNewVertexVisited(VertexIndex vertex, CornerIndex corner) {
+    // Map the corner back to its point id (corner / 3 is the face index,
+    // corner % 3 is the local corner index within that face).
+    const PointIndex point_id =
+        mesh_->face(FaceIndex(corner.value() / 3))[corner.value() % 3];
+    // Append the visited attribute to the encoding order.
+    sequencer_->AddPointId(point_id);
+
+    // Keep track of visited corners.
+    encoding_data_->encoded_attribute_value_index_to_corner_map.push_back(
+        corner);
+
+    // Assign the next sequential attribute value index to the vertex.
+    encoding_data_
+        ->vertex_to_encoded_attribute_value_index_map[vertex.value()] =
+        encoding_data_->num_values;
+
+    encoding_data_->num_values++;
+  }
+
+ private:
+  // Stored but not referenced by the callbacks above.
+  const CornerTableT *att_connectivity_;
+  MeshAttributeIndicesEncodingData *encoding_data_;
+  const Mesh *mesh_;
+  PointsSequencer *sequencer_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_TRAVERSER_MESH_ATTRIBUTE_INDICES_ENCODING_OBSERVER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h
new file mode 100644
index 0000000..ebe1d5f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h
@@ -0,0 +1,113 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_TRAVERSER_MESH_TRAVERSAL_SEQUENCER_H_
+#define DRACO_COMPRESSION_MESH_TRAVERSER_MESH_TRAVERSAL_SEQUENCER_H_
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "draco/compression/attributes/points_sequencer.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Sequencer that generates point sequence in an order given by a deterministic
+// traversal on the mesh surface. Note that all attributes encoded with this
+// sequence must share the same connectivity.
+// TODO(hemmer): Consider refactoring such that this is an observer.
+template <class TraverserT>
+class MeshTraversalSequencer : public PointsSequencer {
+ public:
+  MeshTraversalSequencer(const Mesh *mesh,
+                         const MeshAttributeIndicesEncodingData *encoding_data)
+      : mesh_(mesh), encoding_data_(encoding_data), corner_order_(nullptr) {}
+  void SetTraverser(const TraverserT &t) { traverser_ = t; }
+
+  // Function that can be used to set an order in which the mesh corners should
+  // be processed. This is an optional flag used usually only by the encoder
+  // to match the same corner order that is going to be used by the decoder.
+  // Note that |corner_order| should contain only one corner per face (it can
+  // have all corners but only the first encountered corner for each face is
+  // going to be used to start a traversal). If the corner order is not set, the
+  // corners are processed sequentially based on their ids.
+  // Only a pointer to |corner_order| is stored, so the caller must keep it
+  // alive for as long as this sequencer is used.
+  void SetCornerOrder(const std::vector<CornerIndex> &corner_order) {
+    corner_order_ = &corner_order;
+  }
+
+  // Rebuilds the point-to-attribute-value-index mapping of |attribute| from
+  // the traversed connectivity. Returns false when the connectivity or the
+  // encoding data is inconsistent with the mesh.
+  bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override {
+    const auto *corner_table = traverser_.corner_table();
+    attribute->SetExplicitMapping(mesh_->num_points());
+    const size_t num_faces = mesh_->num_faces();
+    const size_t num_points = mesh_->num_points();
+    for (FaceIndex f(0); f < static_cast<uint32_t>(num_faces); ++f) {
+      const auto &face = mesh_->face(f);
+      for (int p = 0; p < 3; ++p) {
+        const PointIndex point_id = face[p];
+        const VertexIndex vert_id =
+            corner_table->Vertex(CornerIndex(3 * f.value() + p));
+        if (vert_id == kInvalidVertexIndex) {
+          return false;
+        }
+        const AttributeValueIndex att_entry_id(
+            encoding_data_
+                ->vertex_to_encoded_attribute_value_index_map[vert_id.value()]);
+        if (point_id >= num_points || att_entry_id.value() >= num_points) {
+          // There cannot be more attribute values than the number of points.
+          return false;
+        }
+        attribute->SetPointMapEntry(point_id, att_entry_id);
+      }
+    }
+    return true;
+  }
+
+ protected:
+  // Generates the point sequence by running the traverser, starting either
+  // from the user-provided corner order (see SetCornerOrder()) or from the
+  // first corner of every face.
+  bool GenerateSequenceInternal() override {
+    // Preallocate memory for storing point indices. We expect the number of
+    // points to be the same as the number of corner table vertices.
+    out_point_ids()->reserve(traverser_.corner_table()->num_vertices());
+
+    traverser_.OnTraversalStart();
+    if (corner_order_) {
+      for (uint32_t i = 0; i < corner_order_->size(); ++i) {
+        if (!ProcessCorner(corner_order_->at(i))) {
+          return false;
+        }
+      }
+    } else {
+      const int32_t num_faces = traverser_.corner_table()->num_faces();
+      for (int i = 0; i < num_faces; ++i) {
+        // Start the traversal from the first corner (3 * i) of face |i|.
+        if (!ProcessCorner(CornerIndex(3 * i))) {
+          return false;
+        }
+      }
+    }
+    traverser_.OnTraversalEnd();
+    return true;
+  }
+
+ private:
+  bool ProcessCorner(CornerIndex corner_id) {
+    return traverser_.TraverseFromCorner(corner_id);
+  }
+
+  TraverserT traverser_;
+  const Mesh *mesh_;
+  const MeshAttributeIndicesEncodingData *encoding_data_;
+  // Optional user-provided corner processing order; not owned.
+  const std::vector<CornerIndex> *corner_order_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_TRAVERSER_MESH_TRAVERSAL_SEQUENCER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/traverser_base.h b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/traverser_base.h
new file mode 100644
index 0000000..f2f8da7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/mesh/traverser/traverser_base.h
@@ -0,0 +1,87 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_TRAVERSER_TRAVERSER_BASE_H_
+#define DRACO_COMPRESSION_MESH_TRAVERSER_TRAVERSER_BASE_H_
+
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// Class providing the basic traversal functionality needed by traversers (such
+// as the DepthFirstTraverser, see depth_first_traverser.h). It keeps a pointer
+// to the corner table that is used for the traversal, plus it provides a basic
+// bookkeeping of visited faces and vertices during the traversal.
+template <class CornerTableT, class TraversalObserverT>
+class TraverserBase {
+ public:
+  typedef CornerTableT CornerTable;
+  typedef TraversalObserverT TraversalObserver;
+
+  TraverserBase() : corner_table_(nullptr) {}
+  virtual ~TraverserBase() = default;
+
+  // Initializes the traverser with the |corner_table| used for the traversal
+  // and the observer that receives the traversal callbacks. Resets the
+  // visited flags of all faces and vertices.
+  virtual void Init(const CornerTable *corner_table,
+                    TraversalObserver traversal_observer) {
+    corner_table_ = corner_table;
+    is_face_visited_.assign(corner_table->num_faces(), false);
+    is_vertex_visited_.assign(corner_table_->num_vertices(), false);
+    traversal_observer_ = traversal_observer;
+  }
+
+  const CornerTable &GetCornerTable() const { return *corner_table_; }
+
+  inline bool IsFaceVisited(FaceIndex face_id) const {
+    if (face_id == kInvalidFaceIndex) {
+      return true;  // Invalid faces are always considered as visited.
+    }
+    return is_face_visited_[face_id.value()];
+  }
+
+  // Returns true if the face containing the given corner was visited.
+  inline bool IsFaceVisited(CornerIndex corner_id) const {
+    if (corner_id == kInvalidCornerIndex) {
+      return true;  // Invalid faces are always considered as visited.
+    }
+    return is_face_visited_[corner_id.value() / 3];
+  }
+
+  inline void MarkFaceVisited(FaceIndex face_id) {
+    is_face_visited_[face_id.value()] = true;
+  }
+  inline bool IsVertexVisited(VertexIndex vert_id) const {
+    return is_vertex_visited_[vert_id.value()];
+  }
+  inline void MarkVertexVisited(VertexIndex vert_id) {
+    is_vertex_visited_[vert_id.value()] = true;
+  }
+
+  inline const CornerTable *corner_table() const { return corner_table_; }
+  inline const TraversalObserverT &traversal_observer() const {
+    return traversal_observer_;
+  }
+  inline TraversalObserverT &traversal_observer() {
+    return traversal_observer_;
+  }
+
+ private:
+  // Not owned.
+  const CornerTable *corner_table_;
+  TraversalObserverT traversal_observer_;
+  // Per-face / per-vertex visited flags, indexed by face / vertex id.
+  std::vector<bool> is_face_visited_;
+  std::vector<bool> is_vertex_visited_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_TRAVERSER_TRAVERSER_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc
new file mode 100644
index 0000000..de46f05
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+
+namespace draco {
+
+// Explicit instantiations for the supported compression levels (matching the
+// compression policy specializations declared in the header).
+template class DynamicIntegerPointsKdTreeDecoder<0>;
+template class DynamicIntegerPointsKdTreeDecoder<2>;
+template class DynamicIntegerPointsKdTreeDecoder<4>;
+template class DynamicIntegerPointsKdTreeDecoder<6>;
+
+}  // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h
new file mode 100644
index 0000000..87bc2b7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h
@@ -0,0 +1,330 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// See dynamic_integer_points_kd_tree_encoder.h for documentation.
+
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_DECODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_DECODER_H_
+
+#include <array>
+#include <memory>
+#include <stack>
+
+#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h"
+#include "draco/compression/bit_coders/direct_bit_decoder.h"
+#include "draco/compression/bit_coders/folded_integer_bit_decoder.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/math_utils.h"
+
+namespace draco {
+
+template <int compression_level_t>
+struct DynamicIntegerPointsKdTreeDecoderCompressionPolicy
+    : public DynamicIntegerPointsKdTreeDecoderCompressionPolicy<
+          compression_level_t - 1> {};  // Each level inherits from the level below it.
+
+template <>
+struct DynamicIntegerPointsKdTreeDecoderCompressionPolicy<0> {  // Base: plain direct bit coding, fixed axis order.
+  typedef DirectBitDecoder NumbersDecoder;
+  typedef DirectBitDecoder AxisDecoder;
+  typedef DirectBitDecoder HalfDecoder;
+  typedef DirectBitDecoder RemainingBitsDecoder;
+  static constexpr bool select_axis = false;
+};
+
+template <>
+struct DynamicIntegerPointsKdTreeDecoderCompressionPolicy<2>
+    : public DynamicIntegerPointsKdTreeDecoderCompressionPolicy<1> {
+  typedef RAnsBitDecoder NumbersDecoder;  // Levels >= 2: split counts are rANS coded.
+};
+
+template <>
+struct DynamicIntegerPointsKdTreeDecoderCompressionPolicy<4>
+    : public DynamicIntegerPointsKdTreeDecoderCompressionPolicy<3> {
+  typedef FoldedBit32Decoder<RAnsBitDecoder> NumbersDecoder;  // Levels >= 4: bit folding over rANS.
+};
+
+template <>
+struct DynamicIntegerPointsKdTreeDecoderCompressionPolicy<6>
+    : public DynamicIntegerPointsKdTreeDecoderCompressionPolicy<5> {
+  static constexpr bool select_axis = true;  // Level 6: the split axis is read from the stream.
+};
+
+// Decodes a point cloud encoded by DynamicIntegerPointsKdTreeEncoder.
+template <int compression_level_t>
+class DynamicIntegerPointsKdTreeDecoder {
+  static_assert(compression_level_t >= 0, "Compression level must in [0..6].");
+  static_assert(compression_level_t <= 6, "Compression level must in [0..6].");
+  typedef DynamicIntegerPointsKdTreeDecoderCompressionPolicy<
+      compression_level_t>
+      Policy;
+
+  typedef typename Policy::NumbersDecoder NumbersDecoder;
+  typedef typename Policy::AxisDecoder AxisDecoder;
+  typedef typename Policy::HalfDecoder HalfDecoder;
+  typedef typename Policy::RemainingBitsDecoder RemainingBitsDecoder;
+  typedef std::vector<uint32_t> VectorUint32;
+
+ public:
+  explicit DynamicIntegerPointsKdTreeDecoder(uint32_t dimension)
+      : bit_length_(0),
+        num_points_(0),
+        num_decoded_points_(0),
+        dimension_(dimension),
+        p_(dimension, 0),
+        axes_(dimension, 0),
+        // Init the stack with the maximum depth of the tree.
+        // +1 for a second leaf.
+        base_stack_(32 * dimension + 1, VectorUint32(dimension, 0)),
+        levels_stack_(32 * dimension + 1, VectorUint32(dimension, 0)) {}
+
+  // Decodes a integer point cloud from |buffer|.
+  template <class OutputIteratorT>
+  bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT &oit);
+
+#ifndef DRACO_OLD_GCC
+  template <class OutputIteratorT>
+  bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT &&oit);
+#endif  // DRACO_OLD_GCC
+
+  const uint32_t dimension() const { return dimension_; }  // NOTE(review): top-level const on the return type has no effect.
+
+ private:
+  uint32_t GetAxis(uint32_t num_remaining_points, const VectorUint32 &levels,
+                   uint32_t last_axis);
+
+  template <class OutputIteratorT>
+  bool DecodeInternal(uint32_t num_points, OutputIteratorT &oit);
+
+  void DecodeNumber(int nbits, uint32_t *value) {  // Reads the next |nbits|-bit split count; decoder status is ignored here.
+    numbers_decoder_.DecodeLeastSignificantBits32(nbits, value);
+  }
+
+  struct DecodingStatus {  // One pending subtree on the traversal stack.
+    DecodingStatus(uint32_t num_remaining_points_, uint32_t last_axis_,
+                   uint32_t stack_pos_)
+        : num_remaining_points(num_remaining_points_),
+          last_axis(last_axis_),
+          stack_pos(stack_pos_) {}
+
+    uint32_t num_remaining_points;
+    uint32_t last_axis;
+    uint32_t stack_pos;  // used to get base and levels
+  };
+
+  uint32_t bit_length_;  // Bits per coordinate, read from the stream (validated <= 32).
+  uint32_t num_points_;  // Total point count, read from the stream.
+  uint32_t num_decoded_points_;
+  uint32_t dimension_;
+  NumbersDecoder numbers_decoder_;  // Split-count decoder (policy-selected).
+  RemainingBitsDecoder remaining_bits_decoder_;
+  AxisDecoder axis_decoder_;
+  HalfDecoder half_decoder_;
+  VectorUint32 p_;     // Scratch point reused for each decoded point.
+  VectorUint32 axes_;  // Scratch axis permutation for the leaf fast path.
+  std::vector<VectorUint32> base_stack_;    // Per-depth cell base coordinates.
+  std::vector<VectorUint32> levels_stack_;  // Per-depth refinement level per axis.
+};
+
+// Decodes a point cloud from |buffer|.
+#ifndef DRACO_OLD_GCC
+template <int compression_level_t>
+template <class OutputIteratorT>
+bool DynamicIntegerPointsKdTreeDecoder<compression_level_t>::DecodePoints(
+    DecoderBuffer *buffer, OutputIteratorT &&oit) {
+  OutputIteratorT local = std::forward<OutputIteratorT>(oit);  // Rvalue adapter: materialize, then delegate to the lvalue overload.
+  return DecodePoints(buffer, local);
+}
+#endif  // DRACO_OLD_GCC
+
+template <int compression_level_t>
+template <class OutputIteratorT>
+bool DynamicIntegerPointsKdTreeDecoder<compression_level_t>::DecodePoints(
+    DecoderBuffer *buffer, OutputIteratorT &oit) {
+  if (!buffer->Decode(&bit_length_)) {  // Stream header: bits per coordinate.
+    return false;
+  }
+  if (bit_length_ > 32) {  // Corrupt stream guard: coordinates fit in 32 bits.
+    return false;
+  }
+  if (!buffer->Decode(&num_points_)) {  // Stream header: point count.
+    return false;
+  }
+  if (num_points_ == 0) {  // Empty cloud is valid and decodes to nothing.
+    return true;
+  }
+  num_decoded_points_ = 0;
+
+  // Initialize all four sub-decoders from the buffer before traversal.
+  if (!numbers_decoder_.StartDecoding(buffer)) {
+    return false;
+  }
+  if (!remaining_bits_decoder_.StartDecoding(buffer)) {
+    return false;
+  }
+  if (!axis_decoder_.StartDecoding(buffer)) {
+    return false;
+  }
+  if (!half_decoder_.StartDecoding(buffer)) {
+    return false;
+  }
+
+  if (!DecodeInternal(num_points_, oit)) {
+    return false;
+  }
+
+  numbers_decoder_.EndDecoding();
+  remaining_bits_decoder_.EndDecoding();
+  axis_decoder_.EndDecoding();
+  half_decoder_.EndDecoding();
+
+  return true;
+}
+
+template <int compression_level_t>
+uint32_t DynamicIntegerPointsKdTreeDecoder<compression_level_t>::GetAxis(
+    uint32_t num_remaining_points, const VectorUint32 &levels,
+    uint32_t last_axis) {
+  if (!Policy::select_axis) {  // Levels < 6: axes simply cycle round-robin.
+    return DRACO_INCREMENT_MOD(last_axis, dimension_);
+  }
+
+  uint32_t best_axis = 0;
+  if (num_remaining_points < 64) {  // Few points: pick the least-refined axis (mirrors the encoder).
+    for (uint32_t axis = 1; axis < dimension_; ++axis) {
+      if (levels[best_axis] > levels[axis]) {
+        best_axis = axis;
+      }
+    }
+  } else {  // Otherwise the encoder wrote the chosen axis explicitly in 4 bits.
+    axis_decoder_.DecodeLeastSignificantBits32(4, &best_axis);
+  }
+
+  return best_axis;  // May exceed dimension_ on corrupt input; the caller validates.
+}
+
+template <int compression_level_t>
+template <class OutputIteratorT>
+bool DynamicIntegerPointsKdTreeDecoder<compression_level_t>::DecodeInternal(
+    uint32_t num_points, OutputIteratorT &oit) {
+  typedef DecodingStatus Status;
+  base_stack_[0] = VectorUint32(dimension_, 0);
+  levels_stack_[0] = VectorUint32(dimension_, 0);
+  DecodingStatus init_status(num_points, 0, 0);
+  std::stack<Status> status_stack;
+  status_stack.push(init_status);
+
+  // TODO(hemmer): use preallocated vector instead of stack.
+  while (!status_stack.empty()) {
+    const DecodingStatus status = status_stack.top();
+    status_stack.pop();
+
+    const uint32_t num_remaining_points = status.num_remaining_points;
+    const uint32_t last_axis = status.last_axis;
+    const uint32_t stack_pos = status.stack_pos;
+    const VectorUint32 &old_base = base_stack_[stack_pos];
+    const VectorUint32 &levels = levels_stack_[stack_pos];
+
+    if (num_remaining_points > num_points) {  // Corrupt stream guard.
+      return false;
+    }
+
+    const uint32_t axis = GetAxis(num_remaining_points, levels, last_axis);
+    if (axis >= dimension_) {  // GetAxis can return a bogus axis on bad input.
+      return false;
+    }
+
+    const uint32_t level = levels[axis];
+
+    // All axes have been fully subdivided, just output points.
+    if ((bit_length_ - level) == 0) {
+      for (uint32_t i = 0; i < num_remaining_points; i++) {
+        *oit = old_base;
+        ++oit;
+        ++num_decoded_points_;
+      }
+      continue;
+    }
+
+    DRACO_DCHECK_EQ(true, num_remaining_points != 0);
+
+    // Fast decoding of remaining bits if number of points is 1 or 2.
+    if (num_remaining_points <= 2) {
+      // TODO(hemmer): axes_ not necessary, remove would change bitstream!
+      axes_[0] = axis;
+      for (uint32_t i = 1; i < dimension_; i++) {
+        axes_[i] = DRACO_INCREMENT_MOD(axes_[i - 1], dimension_);
+      }
+      for (uint32_t i = 0; i < num_remaining_points; ++i) {
+        for (uint32_t j = 0; j < dimension_; j++) {
+          p_[axes_[j]] = 0;
+          const uint32_t num_remaining_bits = bit_length_ - levels[axes_[j]];
+          if (num_remaining_bits) {
+            remaining_bits_decoder_.DecodeLeastSignificantBits32(
+                num_remaining_bits, &p_[axes_[j]]);
+          }
+          p_[axes_[j]] = old_base[axes_[j]] | p_[axes_[j]];  // Merge high bits from the cell base.
+        }
+        *oit = p_;
+        ++oit;
+        ++num_decoded_points_;
+      }
+      continue;
+    }
+
+    if (num_decoded_points_ > num_points_) {  // Corrupt stream guard.
+      return false;
+    }
+
+    const int num_remaining_bits = bit_length_ - level;
+    const uint32_t modifier = 1 << (num_remaining_bits - 1);  // Split plane offset along |axis|.
+    base_stack_[stack_pos + 1] = old_base;  // copy
+    base_stack_[stack_pos + 1][axis] += modifier;  // new base
+
+    const int incoming_bits = MostSignificantBit(num_remaining_points);
+
+    uint32_t number = 0;
+    DecodeNumber(incoming_bits, &number);  // |number| = count/2 - smaller_half (see encoder).
+
+    uint32_t first_half = num_remaining_points / 2 - number;
+    uint32_t second_half = num_remaining_points - first_half;
+
+    if (first_half != second_half) {
+      if (!half_decoder_.DecodeNextBit()) {  // Bit tells which side holds the smaller half.
+        std::swap(first_half, second_half);
+      }
+    }
+
+    levels_stack_[stack_pos][axis] += 1;
+    levels_stack_[stack_pos + 1] = levels_stack_[stack_pos];  // copy
+    if (first_half) {  // Lower child reuses this stack slot; upper child uses the next.
+      status_stack.push(DecodingStatus(first_half, axis, stack_pos));
+    }
+    if (second_half) {
+      status_stack.push(DecodingStatus(second_half, axis, stack_pos + 1));
+    }
+  }
+  return true;
+}
+
+extern template class DynamicIntegerPointsKdTreeDecoder<0>;
+extern template class DynamicIntegerPointsKdTreeDecoder<2>;
+extern template class DynamicIntegerPointsKdTreeDecoder<4>;
+extern template class DynamicIntegerPointsKdTreeDecoder<6>;
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc
new file mode 100644
index 0000000..e7abf52
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+
+namespace draco {
+
+template class DynamicIntegerPointsKdTreeEncoder<0>;  // Explicit instantiations matching the
+template class DynamicIntegerPointsKdTreeEncoder<2>;  // "extern template" declarations in the
+template class DynamicIntegerPointsKdTreeEncoder<4>;  // header; other levels instantiate
+template class DynamicIntegerPointsKdTreeEncoder<6>;  // implicitly at their point of use.
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h
new file mode 100644
index 0000000..14fa32d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h
@@ -0,0 +1,371 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_ENCODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_ENCODER_H_
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <stack>
+#include <vector>
+
+#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h"
+#include "draco/compression/bit_coders/direct_bit_encoder.h"
+#include "draco/compression/bit_coders/folded_integer_bit_encoder.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/math_utils.h"
+
+namespace draco {
+
+// This policy class provides several configurations for the encoder that allow
+// to trade speed vs compression rate. Level 0 is fastest while 6 is the best
+// compression rate. The decoder must select the same level.
+template <int compression_level_t>
+struct DynamicIntegerPointsKdTreeEncoderCompressionPolicy
+    : public DynamicIntegerPointsKdTreeEncoderCompressionPolicy<
+          compression_level_t - 1> {};  // Each level inherits from the level below it.
+
+template <>
+struct DynamicIntegerPointsKdTreeEncoderCompressionPolicy<0> {  // Base: plain direct bit coding, fixed axis order.
+  typedef DirectBitEncoder NumbersEncoder;
+  typedef DirectBitEncoder AxisEncoder;
+  typedef DirectBitEncoder HalfEncoder;
+  typedef DirectBitEncoder RemainingBitsEncoder;
+  static constexpr bool select_axis = false;
+};
+
+template <>
+struct DynamicIntegerPointsKdTreeEncoderCompressionPolicy<2>
+    : public DynamicIntegerPointsKdTreeEncoderCompressionPolicy<1> {
+  typedef RAnsBitEncoder NumbersEncoder;  // Levels >= 2: split counts are rANS coded.
+};
+
+template <>
+struct DynamicIntegerPointsKdTreeEncoderCompressionPolicy<4>
+    : public DynamicIntegerPointsKdTreeEncoderCompressionPolicy<3> {
+  typedef FoldedBit32Encoder<RAnsBitEncoder> NumbersEncoder;  // Levels >= 4: bit folding over rANS.
+};
+
+template <>
+struct DynamicIntegerPointsKdTreeEncoderCompressionPolicy<6>
+    : public DynamicIntegerPointsKdTreeEncoderCompressionPolicy<5> {
+  static constexpr bool select_axis = true;  // Level 6: the split axis is written to the stream.
+};
+
+// This class encodes a given integer point cloud based on the point cloud
+// compression algorithm in:
+// Olivier Devillers and Pierre-Marie Gandoin
+// "Geometric compression for interactive transmission"
+//
+// In principle the algorithm keeps on splitting the point cloud in the middle
+// while alternating the axes. In 3D this results in an Octree like structure.
+// In each step we encode the number of points in the first half.
+// The algorithm does not preserve the order of points.
+//
+// However, the algorithm here differs from the original as follows:
+// The algorithm keeps on splitting the point cloud in the middle of the axis
+// that keeps the point cloud as clustered as possible, which gives a better
+// compression rate.
+// The number of points is encode by the deviation from the half of the points
+// in the smaller half of the two. This results in a better compression rate as
+// there are more leading zeros, which is then compressed better by the
+// arithmetic encoding.
+template <int compression_level_t>
+class DynamicIntegerPointsKdTreeEncoder {
+  static_assert(compression_level_t >= 0, "Compression level must in [0..6].");
+  static_assert(compression_level_t <= 6, "Compression level must in [0..6].");
+  typedef DynamicIntegerPointsKdTreeEncoderCompressionPolicy<
+      compression_level_t>
+      Policy;
+  typedef typename Policy::NumbersEncoder NumbersEncoder;
+  typedef typename Policy::AxisEncoder AxisEncoder;
+  typedef typename Policy::HalfEncoder HalfEncoder;
+  typedef typename Policy::RemainingBitsEncoder RemainingBitsEncoder;
+  typedef std::vector<uint32_t> VectorUint32;
+
+ public:
+  explicit DynamicIntegerPointsKdTreeEncoder(uint32_t dimension)
+      : bit_length_(0),
+        dimension_(dimension),
+        deviations_(dimension, 0),
+        num_remaining_bits_(dimension, 0),
+        axes_(dimension, 0),
+        // Stacks sized for the maximum tree depth, +1 for a second leaf.
+        base_stack_(32 * dimension + 1, VectorUint32(dimension, 0)),
+        levels_stack_(32 * dimension + 1, VectorUint32(dimension, 0)) {}
+
+  // Encodes an integer point cloud given by [begin,end) into buffer.
+  // |bit_length| gives the highest bit used for all coordinates.
+  template <class RandomAccessIteratorT>
+  bool EncodePoints(RandomAccessIteratorT begin, RandomAccessIteratorT end,
+                    const uint32_t &bit_length, EncoderBuffer *buffer);
+
+  // Encodes an integer point cloud given by [begin,end) into buffer.
+  template <class RandomAccessIteratorT>
+  bool EncodePoints(RandomAccessIteratorT begin, RandomAccessIteratorT end,
+                    EncoderBuffer *buffer) {
+    return EncodePoints(begin, end, 32, buffer);  // Default: full 32-bit coordinates.
+  }
+
+  const uint32_t dimension() const { return dimension_; }  // NOTE(review): top-level const on the return type has no effect.
+
+ private:
+  template <class RandomAccessIteratorT>
+  uint32_t GetAndEncodeAxis(RandomAccessIteratorT begin,
+                            RandomAccessIteratorT end,
+                            const VectorUint32 &old_base,
+                            const VectorUint32 &levels, uint32_t last_axis);
+  template <class RandomAccessIteratorT>
+  void EncodeInternal(RandomAccessIteratorT begin, RandomAccessIteratorT end);
+
+  class Splitter {  // Predicate for std::partition: true if the point lies below the split plane on |axis_|.
+   public:
+    Splitter(uint32_t axis, uint32_t value) : axis_(axis), value_(value) {}
+    template <class PointT>
+    bool operator()(const PointT &a) const {
+      return a[axis_] < value_;
+    }
+
+   private:
+    const uint32_t axis_;
+    const uint32_t value_;
+  };
+
+  void EncodeNumber(int nbits, uint32_t value) {  // Writes an |nbits|-bit split count.
+    numbers_encoder_.EncodeLeastSignificantBits32(nbits, value);
+  }
+
+  template <class RandomAccessIteratorT>
+  struct EncodingStatus {  // One pending subrange on the traversal stack.
+    EncodingStatus(RandomAccessIteratorT begin_, RandomAccessIteratorT end_,
+                   uint32_t last_axis_, uint32_t stack_pos_)
+        : begin(begin_),
+          end(end_),
+          last_axis(last_axis_),
+          stack_pos(stack_pos_) {
+      num_remaining_points = static_cast<uint32_t>(end - begin);
+    }
+
+    RandomAccessIteratorT begin;
+    RandomAccessIteratorT end;
+    uint32_t last_axis;
+    uint32_t num_remaining_points;
+    uint32_t stack_pos;  // used to get base and levels
+  };
+
+  uint32_t bit_length_;  // Highest bit used per coordinate.
+  uint32_t num_points_;  // Set by EncodePoints().
+  uint32_t dimension_;
+  NumbersEncoder numbers_encoder_;  // Split-count encoder (policy-selected).
+  RemainingBitsEncoder remaining_bits_encoder_;
+  AxisEncoder axis_encoder_;
+  HalfEncoder half_encoder_;
+  VectorUint32 deviations_;          // Scratch: per-axis split imbalance.
+  VectorUint32 num_remaining_bits_;  // Scratch: per-axis unrefined bits.
+  VectorUint32 axes_;                // Scratch axis permutation for the leaf fast path.
+  std::vector<VectorUint32> base_stack_;    // Per-depth cell base coordinates.
+  std::vector<VectorUint32> levels_stack_;  // Per-depth refinement level per axis.
+};
+
+template <int compression_level_t>
+template <class RandomAccessIteratorT>
+bool DynamicIntegerPointsKdTreeEncoder<compression_level_t>::EncodePoints(
+    RandomAccessIteratorT begin, RandomAccessIteratorT end,
+    const uint32_t &bit_length, EncoderBuffer *buffer) {
+  bit_length_ = bit_length;
+  num_points_ = static_cast<uint32_t>(end - begin);
+
+  buffer->Encode(bit_length_);  // NOTE(review): Encode() results are ignored here — confirm intended.
+  buffer->Encode(num_points_);
+  if (num_points_ == 0) {  // Empty cloud: header only, nothing else to write.
+    return true;
+  }
+
+  numbers_encoder_.StartEncoding();
+  remaining_bits_encoder_.StartEncoding();
+  axis_encoder_.StartEncoding();
+  half_encoder_.StartEncoding();
+
+  EncodeInternal(begin, end);  // Note: reorders [begin, end) via std::partition.
+
+  numbers_encoder_.EndEncoding(buffer);
+  remaining_bits_encoder_.EndEncoding(buffer);
+  axis_encoder_.EndEncoding(buffer);
+  half_encoder_.EndEncoding(buffer);
+
+  return true;
+}
+template <int compression_level_t>
+template <class RandomAccessIteratorT>
+uint32_t
+DynamicIntegerPointsKdTreeEncoder<compression_level_t>::GetAndEncodeAxis(
+    RandomAccessIteratorT begin, RandomAccessIteratorT end,
+    const VectorUint32 &old_base, const VectorUint32 &levels,
+    uint32_t last_axis) {
+  if (!Policy::select_axis) {  // Levels < 6: axes simply cycle round-robin.
+    return DRACO_INCREMENT_MOD(last_axis, dimension_);
+  }
+
+  // For many points this function selects the axis that should be used
+  // for the split by keeping as many points as possible bundled.
+  // In the best case we do not split the point cloud at all.
+  // For lower number of points, we simply choose the axis that is refined the
+  // least so far.
+
+  DRACO_DCHECK_EQ(true, end - begin != 0);
+
+  uint32_t best_axis = 0;
+  if (end - begin < 64) {  // Few points: least-refined axis (decoder derives the same choice).
+    for (uint32_t axis = 1; axis < dimension_; ++axis) {
+      if (levels[best_axis] > levels[axis]) {
+        best_axis = axis;
+      }
+    }
+  } else {
+    const uint32_t size = static_cast<uint32_t>(end - begin);
+    for (uint32_t i = 0; i < dimension_; i++) {
+      deviations_[i] = 0;
+      num_remaining_bits_[i] = bit_length_ - levels[i];
+      if (num_remaining_bits_[i] > 0) {
+        const uint32_t split =
+            old_base[i] + (1 << (num_remaining_bits_[i] - 1));
+        for (auto it = begin; it != end; ++it) {
+          deviations_[i] += ((*it)[i] < split);  // Count points below the candidate split.
+        }
+        deviations_[i] = std::max(size - deviations_[i], deviations_[i]);  // Larger-half size = clustering measure.
+      }
+    }
+
+    uint32_t max_value = 0;
+    best_axis = 0;
+    for (uint32_t i = 0; i < dimension_; i++) {
+      // If axis can be subdivided.
+      if (num_remaining_bits_[i]) {
+        // Check if this is the better axis.
+        if (max_value < deviations_[i]) {
+          max_value = deviations_[i];
+          best_axis = i;
+        }
+      }
+    }
+    axis_encoder_.EncodeLeastSignificantBits32(4, best_axis);  // Tell the decoder which axis was chosen.
+  }
+
+  return best_axis;
+}
+
+template <int compression_level_t>
+template <class RandomAccessIteratorT>
+void DynamicIntegerPointsKdTreeEncoder<compression_level_t>::EncodeInternal(
+    RandomAccessIteratorT begin, RandomAccessIteratorT end) {
+  typedef EncodingStatus<RandomAccessIteratorT> Status;
+
+  base_stack_[0] = VectorUint32(dimension_, 0);
+  levels_stack_[0] = VectorUint32(dimension_, 0);
+  Status init_status(begin, end, 0, 0);
+  std::stack<Status> status_stack;
+  status_stack.push(init_status);
+
+  // TODO(hemmer): use preallocated vector instead of stack.
+  while (!status_stack.empty()) {
+    Status status = status_stack.top();
+    status_stack.pop();
+
+    begin = status.begin;
+    end = status.end;
+    const uint32_t last_axis = status.last_axis;
+    const uint32_t stack_pos = status.stack_pos;
+    const VectorUint32 &old_base = base_stack_[stack_pos];
+    const VectorUint32 &levels = levels_stack_[stack_pos];
+
+    const uint32_t axis =
+        GetAndEncodeAxis(begin, end, old_base, levels, last_axis);
+    const uint32_t level = levels[axis];
+    const uint32_t num_remaining_points = static_cast<uint32_t>(end - begin);
+
+    // If this happens all axis are subdivided to the end.
+    if ((bit_length_ - level) == 0) {
+      continue;
+    }
+
+    // Fast encoding of remaining bits if number of points is 1 or 2.
+    // Doing this also for 2 gives a slight additional speed up.
+    if (num_remaining_points <= 2) {
+      // TODO(hemmer): axes_ not necessary, remove would change bitstream!
+      axes_[0] = axis;
+      for (uint32_t i = 1; i < dimension_; i++) {
+        axes_[i] = DRACO_INCREMENT_MOD(axes_[i - 1], dimension_);
+      }
+      for (uint32_t i = 0; i < num_remaining_points; ++i) {
+        const auto &p = *(begin + i);
+        for (uint32_t j = 0; j < dimension_; j++) {
+          const uint32_t num_remaining_bits = bit_length_ - levels[axes_[j]];
+          if (num_remaining_bits) {
+            remaining_bits_encoder_.EncodeLeastSignificantBits32(
+                num_remaining_bits, p[axes_[j]]);  // Low bits only; high bits are implied by the cell base.
+          }
+        }
+      }
+      continue;
+    }
+
+    const uint32_t num_remaining_bits = bit_length_ - level;
+    const uint32_t modifier = 1 << (num_remaining_bits - 1);  // Split plane offset along |axis|.
+    base_stack_[stack_pos + 1] = old_base;  // copy
+    base_stack_[stack_pos + 1][axis] += modifier;
+    const VectorUint32 &new_base = base_stack_[stack_pos + 1];
+
+    const RandomAccessIteratorT split =
+        std::partition(begin, end, Splitter(axis, new_base[axis]));  // Reorders the caller's range.
+
+    DRACO_DCHECK_EQ(true, (end - begin) > 0);
+
+    // Encode number of points in first and second half.
+    const int required_bits = MostSignificantBit(num_remaining_points);
+
+    const uint32_t first_half = static_cast<uint32_t>(split - begin);
+    const uint32_t second_half = static_cast<uint32_t>(end - split);
+    const bool left = first_half < second_half;
+
+    if (first_half != second_half) {
+      half_encoder_.EncodeBit(left);  // Which side is smaller; implicit when halves are equal.
+    }
+
+    if (left) {
+      EncodeNumber(required_bits, num_remaining_points / 2 - first_half);  // Deviation of smaller half from count/2.
+    } else {
+      EncodeNumber(required_bits, num_remaining_points / 2 - second_half);
+    }
+
+    levels_stack_[stack_pos][axis] += 1;
+    levels_stack_[stack_pos + 1] = levels_stack_[stack_pos];  // copy
+    if (split != begin) {  // Lower child reuses this stack slot; upper child uses the next.
+      status_stack.push(Status(begin, split, axis, stack_pos));
+    }
+    if (split != end) {
+      status_stack.push(Status(split, end, axis, stack_pos + 1));
+    }
+  }
+}
+extern template class DynamicIntegerPointsKdTreeEncoder<0>;
+extern template class DynamicIntegerPointsKdTreeEncoder<2>;
+extern template class DynamicIntegerPointsKdTreeEncoder<4>;
+extern template class DynamicIntegerPointsKdTreeEncoder<6>;
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.cc
new file mode 100644
index 0000000..dffaa4c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.cc
@@ -0,0 +1,152 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h"
+
+#include <algorithm>
+
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
+#include "draco/compression/point_cloud/algorithms/quantize_points_3.h"
+#include "draco/core/math_utils.h"
+#include "draco/core/quantization_utils.h"
+
+namespace draco {
+
+struct Converter {  // Adapts a decoded vector<uint32_t> into a Point3ui.
+  typedef std::vector<uint32_t> SourceType;
+  typedef Point3ui TargetType;
+  Point3ui operator()(const std::vector<uint32_t> &v) {
+    return Point3ui(v[0], v[1], v[2]);  // Assumes v has >= 3 components (decoders below use dimension 3).
+  }
+};
+
+// Proxy output iterator: each assignment converts the source value via
+// |Converter| and writes the result through the wrapped output iterator.
+template <class OutputIterator, class Converter>
+class ConversionOutputIterator {
+  typedef ConversionOutputIterator<OutputIterator, Converter> Self;
+  typedef typename Converter::SourceType SourceType;
+  typedef typename Converter::TargetType TargetType;
+
+ public:
+  explicit ConversionOutputIterator(OutputIterator oit) : oit_(oit) {}
+
+  const Self &operator++() {
+    ++oit_;
+    return *this;
+  }
+  Self operator++(int) {
+    Self copy = *this;
+    ++oit_;
+    return copy;
+  }
+  Self &operator*() { return *this; }  // Dereference yields the proxy itself.
+  const Self &operator=(const SourceType &source) {
+    *oit_ = Converter()(source);  // Conversion happens here, on write-through.
+    return *this;
+  }
+
+ private:
+  OutputIterator oit_;
+};
+
+FloatPointsTreeDecoder::FloatPointsTreeDecoder()
+    : num_points_(0), compression_level_(0), num_points_from_header_(0) {
+  qinfo_.quantization_bits = 0;  // Quantization info is populated during decode.
+  qinfo_.range = 0;
+}
+
+bool FloatPointsTreeDecoder::DecodePointCloudKdTreeInternal(
+    DecoderBuffer *buffer, std::vector<Point3ui> *qpoints) {
+  if (!buffer->Decode(&qinfo_.quantization_bits)) {  // Stream header fields follow.
+    return false;
+  }
+  if (qinfo_.quantization_bits > 31) {  // Corrupt header guard.
+    return false;
+  }
+  if (!buffer->Decode(&qinfo_.range)) {
+    return false;
+  }
+  if (!buffer->Decode(&num_points_)) {
+    return false;
+  }
+  if (num_points_from_header_ > 0 && num_points_ != num_points_from_header_) {  // Cross-check the containing header, if present.
+    return false;
+  }
+  if (!buffer->Decode(&compression_level_)) {
+    return false;
+  }
+
+  // Only allow compression level in [0..6].
+  if (6 < compression_level_) {
+    DRACO_LOGE("FloatPointsTreeDecoder: compression level %i not supported.\n",
+               compression_level_);
+    return false;
+  }
+
+  std::back_insert_iterator<std::vector<Point3ui>> oit_qpoints =
+      std::back_inserter(*qpoints);
+  ConversionOutputIterator<std::back_insert_iterator<std::vector<Point3ui>>,
+                           Converter>
+      oit(oit_qpoints);
+  if (num_points_ > 0) {
+    qpoints->reserve(num_points_);
+    switch (compression_level_) {  // Dispatch to the template instantiation matching the stream's level.
+      case 0: {
+        DynamicIntegerPointsKdTreeDecoder<0> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);  // NOTE(review): return value ignored in every case; failures surface only via the final size check.
+        break;
+      }
+      case 1: {
+        DynamicIntegerPointsKdTreeDecoder<1> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);
+        break;
+      }
+      case 2: {
+        DynamicIntegerPointsKdTreeDecoder<2> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);
+        break;
+      }
+      case 3: {
+        DynamicIntegerPointsKdTreeDecoder<3> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);
+        break;
+      }
+      case 4: {
+        DynamicIntegerPointsKdTreeDecoder<4> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);
+        break;
+      }
+      case 5: {
+        DynamicIntegerPointsKdTreeDecoder<5> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);
+        break;
+      }
+      case 6: {
+        DynamicIntegerPointsKdTreeDecoder<6> qpoints_decoder(3);
+        qpoints_decoder.DecodePoints(buffer, oit);
+        break;
+      }
+      default:
+        return false;
+    }
+  }
+
+  if (qpoints->size() != num_points_) {  // Catches truncated or malformed streams.
+    return false;
+  }
+  return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.h
new file mode 100644
index 0000000..4f09ed2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_decoder.h
@@ -0,0 +1,141 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_FLOAT_POINTS_TREE_DECODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_FLOAT_POINTS_TREE_DECODER_H_
+
+#include <memory>
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_compression_method.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/compression/point_cloud/algorithms/quantize_points_3.h"
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// Decodes a point cloud encoded by PointCloudTreeEncoder.
+class FloatPointsTreeDecoder {
+ public:
+ FloatPointsTreeDecoder();
+
+ // Decodes a point cloud from |buffer|.
+ template <class OutputIteratorT>
+ bool DecodePointCloud(DecoderBuffer *buffer, OutputIteratorT &out);
+
+#ifndef DRACO_OLD_GCC
+ template <class OutputIteratorT>
+ bool DecodePointCloud(DecoderBuffer *buffer, OutputIteratorT &&out);
+#endif // DRACO_OLD_GCC
+
+ // Initializes a DecoderBuffer from |data|, and calls function above.
+ template <class OutputIteratorT>
+ bool DecodePointCloud(const char *data, size_t data_size,
+ OutputIteratorT out) {
+ if (data == 0 || data_size <= 0) {
+ return false;
+ }
+
+ DecoderBuffer buffer;
+ buffer.Init(data, data_size);
+ buffer.set_bitstream_version(kDracoPointCloudBitstreamVersion);
+ return DecodePointCloud(&buffer, out);
+ }
+
+ uint32_t quantization_bits() const { return qinfo_.quantization_bits; }
+ uint32_t compression_level() const { return compression_level_; }
+ float range() const { return qinfo_.range; }
+ uint32_t num_points() const { return num_points_; }
+ uint32_t version() const { return version_; }
+ std::string identification_string() const {
+ if (method_ == KDTREE) {
+ return "FloatPointsTreeDecoder: IntegerPointsKDTreeDecoder";
+ } else {
+ return "FloatPointsTreeDecoder: Unsupported Method";
+ }
+ }
+
+ void set_num_points_from_header(uint32_t num_points) {
+ num_points_from_header_ = num_points;
+ }
+
+ private:
+ bool DecodePointCloudKdTreeInternal(DecoderBuffer *buffer,
+ std::vector<Point3ui> *qpoints);
+
+ static const uint32_t version_ = 3;
+ QuantizationInfo qinfo_;
+ PointCloudCompressionMethod method_;
+ uint32_t num_points_;
+ uint32_t compression_level_;
+
+ // Member variable to check if the number of points from the file header
+ // matches the number of points in the compression header. If
+ // |num_points_from_header_| is 0, do not perform the check. Defaults to 0.
+ uint32_t num_points_from_header_;
+};
+
+#ifndef DRACO_OLD_GCC
+// TODO(vytyaz): Reenable once USD migrates from GCC 4.8 to a higher version
+// that can disambiguate calls to overloaded methods taking rvalue reference.
+template <class OutputIteratorT>
+bool FloatPointsTreeDecoder::DecodePointCloud(DecoderBuffer *buffer,
+ OutputIteratorT &&out) {
+ OutputIteratorT local = std::forward<OutputIteratorT>(out);
+ return DecodePointCloud(buffer, local);
+}
+#endif // DRACO_OLD_GCC
+
+template <class OutputIteratorT>
+bool FloatPointsTreeDecoder::DecodePointCloud(DecoderBuffer *buffer,
+ OutputIteratorT &out) {
+ std::vector<Point3ui> qpoints;
+
+ uint32_t decoded_version;
+ if (!buffer->Decode(&decoded_version)) {
+ return false;
+ }
+
+ if (decoded_version == 3) {
+ int8_t method_number;
+ if (!buffer->Decode(&method_number)) {
+ return false;
+ }
+
+ method_ = static_cast<PointCloudCompressionMethod>(method_number);
+
+ if (method_ == KDTREE) {
+ if (!DecodePointCloudKdTreeInternal(buffer, &qpoints)) {
+ return false;
+ }
+ } else { // Unsupported method.
+ fprintf(stderr, "Method not supported. \n");
+ return false;
+ }
+ } else if (decoded_version == 2) { // Version 2 only uses KDTREE method.
+ if (!DecodePointCloudKdTreeInternal(buffer, &qpoints)) {
+ return false;
+ }
+ } else { // Unsupported version.
+ fprintf(stderr, "Version not supported. \n");
+ return false;
+ }
+
+ DequantizePoints3(qpoints.begin(), qpoints.end(), qinfo_, out);
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_FLOAT_POINTS_TREE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.cc
new file mode 100644
index 0000000..317430f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.cc
@@ -0,0 +1,94 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
+#include "draco/core/math_utils.h"
+
+namespace draco {
+
+const uint32_t FloatPointsTreeEncoder::version_ = 3;
+
+FloatPointsTreeEncoder::FloatPointsTreeEncoder(
+ PointCloudCompressionMethod method)
+ : method_(method), num_points_(0), compression_level_(6) {
+ qinfo_.quantization_bits = 16;
+ qinfo_.range = 0;
+}
+
+FloatPointsTreeEncoder::FloatPointsTreeEncoder(
+ PointCloudCompressionMethod method, uint32_t quantization_bits,
+ uint32_t compression_level)
+ : method_(method), num_points_(0), compression_level_(compression_level) {
+ DRACO_DCHECK_LE(compression_level_, 6);
+ qinfo_.quantization_bits = quantization_bits;
+ qinfo_.range = 0;
+}
+
+bool FloatPointsTreeEncoder::EncodePointCloudKdTreeInternal(
+ std::vector<Point3ui> *qpoints) {
+ DRACO_DCHECK_LE(compression_level_, 6);
+ switch (compression_level_) {
+ case 0: {
+ DynamicIntegerPointsKdTreeEncoder<0> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ case 1: {
+ DynamicIntegerPointsKdTreeEncoder<1> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ case 2: {
+ DynamicIntegerPointsKdTreeEncoder<2> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ case 3: {
+ DynamicIntegerPointsKdTreeEncoder<3> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ case 4: {
+ DynamicIntegerPointsKdTreeEncoder<4> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ case 5: {
+ DynamicIntegerPointsKdTreeEncoder<5> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ default: {
+ DynamicIntegerPointsKdTreeEncoder<6> qpoints_encoder(3);
+ qpoints_encoder.EncodePoints(qpoints->begin(), qpoints->end(),
+ qinfo_.quantization_bits + 1, &buffer_);
+ break;
+ }
+ }
+
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.h
new file mode 100644
index 0000000..26ba94f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.h
@@ -0,0 +1,126 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_FLOAT_POINTS_TREE_ENCODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_FLOAT_POINTS_TREE_ENCODER_H_
+
+#include <memory>
+#include <vector>
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_compression_method.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/compression/point_cloud/algorithms/quantize_points_3.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// This class encodes a given point cloud based on the point cloud compression
+// algorithm in:
+// Olivier Devillers and Pierre-Marie Gandoin
+// "Geometric compression for interactive transmission"
+//
+// In principle the algorithm keeps on splitting the point cloud in the middle
+// while alternating the axes. For 3D this results in an Octree like structure.
+// In each step we encode the number of points in the first half.
+// The algorithm uses quantization and does not preserve the order of points.
+//
+// However, the algorithm here differs from the original as follows:
+// The algorithm keeps on splitting the point cloud in the middle of the axis
+// that keeps the point cloud as clustered as possible, which gives a better
+// compression rate.
+// The number of points is encoded by the deviation from the half of the points
+// in the smaller half of the two. This results in a better compression rate as
+// there are more leading zeros, which is then compressed better by the
+// arithmetic encoding.
+
+// TODO(hemmer): Remove class because it duplicates quantization code.
+class FloatPointsTreeEncoder {
+ public:
+ explicit FloatPointsTreeEncoder(PointCloudCompressionMethod method);
+ explicit FloatPointsTreeEncoder(PointCloudCompressionMethod method,
+ uint32_t quantization_bits,
+ uint32_t compression_level);
+
+ template <class InputIteratorT>
+ bool EncodePointCloud(InputIteratorT points_begin, InputIteratorT points_end);
+ EncoderBuffer *buffer() { return &buffer_; }
+
+ uint32_t version() const { return version_; }
+ uint32_t quantization_bits() const { return qinfo_.quantization_bits; }
+ uint32_t &quantization_bits() { return qinfo_.quantization_bits; }
+ uint32_t compression_level() const { return compression_level_; }
+ uint32_t &compression_level() { return compression_level_; }
+ float range() const { return qinfo_.range; }
+ uint32_t num_points() const { return num_points_; }
+ std::string identification_string() const {
+ if (method_ == KDTREE) {
+ return "FloatPointsTreeEncoder: IntegerPointsKDTreeEncoder";
+ } else {
+ return "FloatPointsTreeEncoder: Unsupported Method";
+ }
+ }
+
+ private:
+ void Clear() { buffer_.Clear(); }
+ bool EncodePointCloudKdTreeInternal(std::vector<Point3ui> *qpoints);
+
+ static const uint32_t version_;
+ QuantizationInfo qinfo_;
+ PointCloudCompressionMethod method_;
+ uint32_t num_points_;
+ EncoderBuffer buffer_;
+ uint32_t compression_level_;
+};
+
+template <class InputIteratorT>
+bool FloatPointsTreeEncoder::EncodePointCloud(InputIteratorT points_begin,
+ InputIteratorT points_end) {
+ Clear();
+
+ // Collect necessary data for encoding.
+ num_points_ = std::distance(points_begin, points_end);
+
+ // TODO(hemmer): Extend quantization tools to make this more automatic.
+ // Compute range of points for quantization
+ std::vector<Point3ui> qpoints;
+ qpoints.reserve(num_points_);
+ QuantizePoints3(points_begin, points_end, &qinfo_,
+ std::back_inserter(qpoints));
+
+ // Encode header.
+ buffer()->Encode(version_);
+ buffer()->Encode(static_cast<int8_t>(method_));
+ buffer()->Encode(qinfo_.quantization_bits);
+ buffer()->Encode(qinfo_.range);
+ buffer()->Encode(num_points_);
+
+ if (method_ == KDTREE) {
+ buffer()->Encode(compression_level_);
+ }
+
+ if (num_points_ == 0) {
+ return true;
+ }
+
+ if (method_ == KDTREE) {
+ return EncodePointCloudKdTreeInternal(&qpoints);
+ } else { // Unsupported method.
+ fprintf(stderr, "Method not supported. \n");
+ return false;
+ }
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_FLOAT_POINTS_TREE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.cc
new file mode 100644
index 0000000..d0428a2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.cc
@@ -0,0 +1,45 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h"
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+
+namespace draco {
+
+template class IntegerPointsKdTreeDecoder<Point3ui, 0>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 1>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 2>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 3>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 4>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 5>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 6>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 7>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 8>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 9>;
+template class IntegerPointsKdTreeDecoder<Point3ui, 10>;
+
+template class IntegerPointsKdTreeDecoder<Point4ui, 0>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 1>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 2>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 3>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 4>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 5>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 6>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 7>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 8>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 9>;
+template class IntegerPointsKdTreeDecoder<Point4ui, 10>;
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h
new file mode 100644
index 0000000..94e523c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h
@@ -0,0 +1,314 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// TODO(hemmer): Make this a wrapper using DynamicIntegerPointsKdTreeDecoder.
+//
+// See integer_points_kd_tree_encoder.h for documentation.
+
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_INTEGER_POINTS_KD_TREE_DECODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_INTEGER_POINTS_KD_TREE_DECODER_H_
+
+#include <array>
+#include <memory>
+
+#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h"
+#include "draco/compression/bit_coders/direct_bit_decoder.h"
+#include "draco/compression/bit_coders/folded_integer_bit_decoder.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/compression/point_cloud/algorithms/queuing_policy.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/math_utils.h"
+
+namespace draco {
+
+template <int compression_level_t>
+struct IntegerPointsKdTreeDecoderCompressionPolicy
+ : public IntegerPointsKdTreeDecoderCompressionPolicy<compression_level_t -
+ 1> {};
+
+template <>
+struct IntegerPointsKdTreeDecoderCompressionPolicy<0> {
+ typedef DirectBitDecoder NumbersDecoder;
+ typedef DirectBitDecoder AxisDecoder;
+ typedef DirectBitDecoder HalfDecoder;
+ typedef DirectBitDecoder RemainingBitsDecoder;
+ static constexpr bool select_axis = false;
+
+ template <class T>
+ using QueuingStrategy = Stack<T>;
+};
+
+template <>
+struct IntegerPointsKdTreeDecoderCompressionPolicy<2>
+ : public IntegerPointsKdTreeDecoderCompressionPolicy<1> {
+ typedef RAnsBitDecoder NumbersDecoder;
+};
+
+template <>
+struct IntegerPointsKdTreeDecoderCompressionPolicy<4>
+ : public IntegerPointsKdTreeDecoderCompressionPolicy<3> {
+ typedef FoldedBit32Decoder<RAnsBitDecoder> NumbersDecoder;
+};
+
+template <>
+struct IntegerPointsKdTreeDecoderCompressionPolicy<6>
+ : public IntegerPointsKdTreeDecoderCompressionPolicy<5> {
+ static constexpr bool select_axis = true;
+};
+
+template <>
+struct IntegerPointsKdTreeDecoderCompressionPolicy<8>
+ : public IntegerPointsKdTreeDecoderCompressionPolicy<7> {
+ typedef FoldedBit32Decoder<AdaptiveRAnsBitDecoder> NumbersDecoder;
+ template <class T>
+ using QueuingStrategy = Queue<T>;
+};
+
+template <>
+struct IntegerPointsKdTreeDecoderCompressionPolicy<10>
+ : public IntegerPointsKdTreeDecoderCompressionPolicy<9> {
+ template <class T>
+ using QueuingStrategy = PriorityQueue<T>;
+};
+
+// Decodes a point cloud encoded by IntegerPointsKdTreeEncoder.
+// |PointDiT| is a type representing a point with uint32_t coordinates.
+// It must provide construction from three uint32_t and operator[].
+template <class PointDiT, int compression_level_t>
+class IntegerPointsKdTreeDecoder {
+ typedef IntegerPointsKdTreeDecoderCompressionPolicy<compression_level_t>
+ Policy;
+
+ typedef typename Policy::NumbersDecoder NumbersDecoder;
+ typedef typename Policy::AxisDecoder AxisDecoder;
+ typedef typename Policy::HalfDecoder HalfDecoder;
+ typedef typename Policy::RemainingBitsDecoder RemainingBitsDecoder;
+
+ public:
+ IntegerPointsKdTreeDecoder() : bit_length_(0) {}
+
+ // Decodes an integer point cloud from |buffer|.
+ template <class OutputIteratorT>
+ bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT oit);
+
+ private:
+ // For the sake of readability of code, we decided to make this exception
+ // from the naming scheme.
+ static constexpr int D = PointTraits<PointDiT>::Dimension();
+
+ uint32_t GetAxis(uint32_t num_remaining_points, const PointDiT &base,
+ std::array<uint32_t, D> levels, uint32_t last_axis);
+
+ template <class OutputIteratorT>
+ void DecodeInternal(uint32_t num_remaining_points, PointDiT base,
+ std::array<uint32_t, D> levels, uint32_t last_axis,
+ OutputIteratorT oit);
+
+ void DecodeNumber(int nbits, uint32_t *value) {
+ numbers_decoder_.DecodeLeastSignificantBits32(nbits, value);
+ }
+
+ struct DecodingStatus {
+ DecodingStatus(
+ uint32_t num_remaining_points_, const PointDiT &old_base_,
+ std::array<uint32_t, PointTraits<PointDiT>::Dimension()> levels_,
+ uint32_t last_axis_)
+ : num_remaining_points(num_remaining_points_),
+ old_base(old_base_),
+ levels(levels_),
+ last_axis(last_axis_) {}
+
+ uint32_t num_remaining_points;
+ PointDiT old_base;
+ std::array<uint32_t, D> levels;
+ uint32_t last_axis;
+ friend bool operator<(const DecodingStatus &l, const DecodingStatus &r) {
+ return l.num_remaining_points < r.num_remaining_points;
+ }
+ };
+
+ uint32_t bit_length_;
+ uint32_t num_points_;
+ NumbersDecoder numbers_decoder_;
+ RemainingBitsDecoder remaining_bits_decoder_;
+ AxisDecoder axis_decoder_;
+ HalfDecoder half_decoder_;
+};
+
+// Decodes a point cloud from |buffer|.
+template <class PointDiT, int compression_level_t>
+template <class OutputIteratorT>
+bool IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::DecodePoints(
+ DecoderBuffer *buffer, OutputIteratorT oit) {
+ if (!buffer->Decode(&bit_length_)) {
+ return false;
+ }
+ if (!buffer->Decode(&num_points_)) {
+ return false;
+ }
+ if (num_points_ == 0) {
+ return true;
+ }
+
+ if (!numbers_decoder_.StartDecoding(buffer)) {
+ return false;
+ }
+ if (!remaining_bits_decoder_.StartDecoding(buffer)) {
+ return false;
+ }
+ if (!axis_decoder_.StartDecoding(buffer)) {
+ return false;
+ }
+ if (!half_decoder_.StartDecoding(buffer)) {
+ return false;
+ }
+
+ DecodeInternal(num_points_, PointTraits<PointDiT>::Origin(),
+ PointTraits<PointDiT>::ZeroArray(), 0, oit);
+
+ numbers_decoder_.EndDecoding();
+ remaining_bits_decoder_.EndDecoding();
+ axis_decoder_.EndDecoding();
+ half_decoder_.EndDecoding();
+
+ return true;
+}
+
+template <class PointDiT, int compression_level_t>
+uint32_t IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::GetAxis(
+ uint32_t num_remaining_points, const PointDiT & /* base */,
+ std::array<uint32_t, D> levels, uint32_t last_axis) {
+ if (!Policy::select_axis) {
+ return DRACO_INCREMENT_MOD(last_axis, D);
+ }
+
+ uint32_t best_axis = 0;
+ if (num_remaining_points < 64) {
+ for (uint32_t axis = 1; axis < D; ++axis) {
+ if (levels[best_axis] > levels[axis]) {
+ best_axis = axis;
+ }
+ }
+ } else {
+ axis_decoder_.DecodeLeastSignificantBits32(4, &best_axis);
+ }
+
+ return best_axis;
+}
+
+template <class PointDiT, int compression_level_t>
+template <class OutputIteratorT>
+void IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::DecodeInternal(
+ uint32_t num_remaining_points, PointDiT old_base,
+ std::array<uint32_t, D> levels, uint32_t last_axis, OutputIteratorT oit) {
+ DecodingStatus init_status(num_remaining_points, old_base, levels, last_axis);
+ typename Policy::template QueuingStrategy<DecodingStatus> status_q;
+ status_q.push(init_status);
+
+ while (!status_q.empty()) {
+ const DecodingStatus status = status_q.front();
+ status_q.pop();
+
+ num_remaining_points = status.num_remaining_points;
+ old_base = status.old_base;
+ levels = status.levels;
+ last_axis = status.last_axis;
+
+ const uint32_t axis =
+ GetAxis(num_remaining_points, old_base, levels, last_axis);
+
+ const uint32_t level = levels[axis];
+
+ // All axes have been fully subdivided, just output points.
+ if ((bit_length_ - level) == 0) {
+ for (int i = 0; i < static_cast<int>(num_remaining_points); i++) {
+ *oit++ = old_base;
+ }
+ continue;
+ }
+
+ DRACO_DCHECK_EQ(true, num_remaining_points != 0);
+ if (num_remaining_points <= 2) {
+ std::array<uint32_t, D> axes;
+ axes[0] = axis;
+ for (int i = 1; i < D; i++) {
+ axes[i] = DRACO_INCREMENT_MOD(axes[i - 1], D);
+ }
+
+ std::array<uint32_t, D> num_remaining_bits;
+ for (int i = 0; i < D; i++) {
+ num_remaining_bits[i] = bit_length_ - levels[axes[i]];
+ }
+
+ for (uint32_t i = 0; i < num_remaining_points; ++i) {
+ // Get remaining bits, mind the carry if not starting at x.
+ PointDiT p = PointTraits<PointDiT>::Origin();
+ for (int j = 0; j < static_cast<int>(D); j++) {
+ if (num_remaining_bits[j]) {
+ remaining_bits_decoder_.DecodeLeastSignificantBits32(
+ num_remaining_bits[j], &p[axes[j]]);
+ }
+ p[axes[j]] = old_base[axes[j]] | p[axes[j]];
+ }
+ *oit++ = p;
+ }
+ continue;
+ }
+
+ const int num_remaining_bits = bit_length_ - level;
+ const uint32_t modifier = 1 << (num_remaining_bits - 1);
+ PointDiT new_base(old_base);
+ new_base[axis] += modifier;
+
+ const int incoming_bits = MostSignificantBit(num_remaining_points);
+
+ uint32_t number = 0;
+ DecodeNumber(incoming_bits, &number);
+
+ uint32_t first_half = num_remaining_points / 2 - number;
+ uint32_t second_half = num_remaining_points - first_half;
+
+ if (first_half != second_half) {
+ if (!half_decoder_.DecodeNextBit()) {
+ std::swap(first_half, second_half);
+ }
+ }
+
+ levels[axis] += 1;
+ if (first_half) {
+ status_q.push(DecodingStatus(first_half, old_base, levels, axis));
+ }
+ if (second_half) {
+ status_q.push(DecodingStatus(second_half, new_base, levels, axis));
+ }
+ }
+}
+
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 0>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 1>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 2>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 3>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 4>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 5>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 6>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 7>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 8>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 9>;
+extern template class IntegerPointsKdTreeDecoder<Point3ui, 10>;
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_INTEGER_POINTS_KD_TREE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc
new file mode 100644
index 0000000..ee10595
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc
@@ -0,0 +1,45 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h"
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+
+namespace draco {
+
+template class IntegerPointsKdTreeEncoder<Point3ui, 0>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 1>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 2>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 3>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 4>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 5>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 6>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 7>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 8>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 9>;
+template class IntegerPointsKdTreeEncoder<Point3ui, 10>;
+
+template class IntegerPointsKdTreeEncoder<Point4ui, 0>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 1>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 2>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 3>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 4>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 5>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 6>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 7>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 8>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 9>;
+template class IntegerPointsKdTreeEncoder<Point4ui, 10>;
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h
new file mode 100644
index 0000000..b881109
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h
@@ -0,0 +1,404 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// TODO(hemmer): Make this a wrapper using DynamicIntegerPointsKdTreeEncoder.
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_INTEGER_POINTS_KD_TREE_ENCODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_INTEGER_POINTS_KD_TREE_ENCODER_H_
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h"
+#include "draco/compression/bit_coders/direct_bit_encoder.h"
+#include "draco/compression/bit_coders/folded_integer_bit_encoder.h"
+#include "draco/compression/bit_coders/rans_bit_encoder.h"
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/compression/point_cloud/algorithms/queuing_policy.h"
+#include "draco/core/bit_utils.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/math_utils.h"
+
+namespace draco {
+
+// This policy class provides several configurations for the encoder that allow
+// to trade speed vs compression rate. Level 0 is fastest while 10 is the best
+// compression rate. The decoder must select the same level.
+template <int compression_level_t>
+struct IntegerPointsKdTreeEncoderCompressionPolicy
+ : public IntegerPointsKdTreeEncoderCompressionPolicy<compression_level_t -
+ 1> {};
+
+template <>
+struct IntegerPointsKdTreeEncoderCompressionPolicy<0> {
+ typedef DirectBitEncoder NumbersEncoder;
+ typedef DirectBitEncoder AxisEncoder;
+ typedef DirectBitEncoder HalfEncoder;
+ typedef DirectBitEncoder RemainingBitsEncoder;
+ static constexpr bool select_axis = false;
+
+ template <class T>
+ using QueuingStrategy = Stack<T>;
+};
+
+template <>
+struct IntegerPointsKdTreeEncoderCompressionPolicy<2>
+ : public IntegerPointsKdTreeEncoderCompressionPolicy<1> {
+ typedef RAnsBitEncoder NumbersEncoder;
+};
+
+template <>
+struct IntegerPointsKdTreeEncoderCompressionPolicy<4>
+ : public IntegerPointsKdTreeEncoderCompressionPolicy<3> {
+ typedef FoldedBit32Encoder<RAnsBitEncoder> NumbersEncoder;
+};
+
+template <>
+struct IntegerPointsKdTreeEncoderCompressionPolicy<6>
+ : public IntegerPointsKdTreeEncoderCompressionPolicy<5> {
+ static constexpr bool select_axis = true;
+};
+
+template <>
+struct IntegerPointsKdTreeEncoderCompressionPolicy<8>
+ : public IntegerPointsKdTreeEncoderCompressionPolicy<7> {
+ typedef FoldedBit32Encoder<AdaptiveRAnsBitEncoder> NumbersEncoder;
+ template <class T>
+ using QueuingStrategy = Queue<T>;
+};
+
+template <>
+struct IntegerPointsKdTreeEncoderCompressionPolicy<10>
+ : public IntegerPointsKdTreeEncoderCompressionPolicy<9> {
+ template <class T>
+ using QueuingStrategy = PriorityQueue<T>;
+};
+
+// This class encodes a given integer point cloud based on the point cloud
+// compression algorithm in:
+// Olivier Devillers and Pierre-Marie Gandoin
+// "Geometric compression for interactive transmission"
+//
+// In principle the algorithm keeps on splitting the point cloud in the middle
+// while alternating the axes. In 3D this results in an Octree like structure.
+// In each step we encode the number of points in the first half.
+// The algorithm does not preserve the order of points.
+//
+// However, the algorithm here differs from the original as follows:
+// The algorithm keeps on splitting the point cloud in the middle of the axis
+// that keeps the point cloud as clustered as possible, which gives a better
+// compression rate.
+// The number of points is encoded by the deviation from half of the points
+// in the smaller half of the two. This results in a better compression rate as
+// there are more leading zeros, which is then compressed better by the
+// arithmetic encoding.
+//
+// |PointDiT| is a type representing a point with uint32_t coordinates. It
+// must provide construction from uint32_t coordinates and operator[].
+template <class PointDiT, int compression_level_t>
+class IntegerPointsKdTreeEncoder {
+ typedef IntegerPointsKdTreeEncoderCompressionPolicy<compression_level_t>
+ Policy;
+ typedef typename Policy::NumbersEncoder NumbersEncoder;
+ typedef typename Policy::AxisEncoder AxisEncoder;
+ typedef typename Policy::HalfEncoder HalfEncoder;
+ typedef typename Policy::RemainingBitsEncoder RemainingBitsEncoder;
+
+ public:
+ IntegerPointsKdTreeEncoder() : bit_length_(0) {}
+
+ // Encodes an integer point cloud given by [begin,end) into buffer.
+ // |bit_length| gives the highest bit used for all coordinates.
+ template <class RandomAccessIteratorT>
+ bool EncodePoints(RandomAccessIteratorT begin, RandomAccessIteratorT end,
+ const uint32_t &bit_length, EncoderBuffer *buffer);
+
+ // Encodes an integer point cloud given by [begin,end) into buffer.
+ template <class RandomAccessIteratorT>
+ bool EncodePoints(RandomAccessIteratorT begin, RandomAccessIteratorT end,
+ EncoderBuffer *buffer) {
+ return EncodePoints(begin, end, 32, buffer);
+ }
+
+ private:
+  // For the sake of readability, we decided to make this exception
+  // to the naming scheme.
+ static constexpr int D = PointTraits<PointDiT>::Dimension();
+ template <class RandomAccessIteratorT>
+ uint32_t GetAxis(RandomAccessIteratorT begin, RandomAccessIteratorT end,
+ const PointDiT &old_base, std::array<uint32_t, D> levels,
+ uint32_t last_axis);
+
+ template <class RandomAccessIteratorT>
+ void EncodeInternal(RandomAccessIteratorT begin, RandomAccessIteratorT end,
+ PointDiT old_base, std::array<uint32_t, D> levels,
+ uint32_t last_axis);
+
+ class Splitter {
+ public:
+ Splitter(int axis, uint32_t value) : axis_(axis), value_(value) {}
+ bool operator()(const PointDiT &a) { return a[axis_] < value_; }
+
+ private:
+ int axis_;
+ uint32_t value_;
+ };
+
+ void EncodeNumber(int nbits, uint32_t value) {
+ numbers_encoder_.EncodeLeastSignificantBits32(nbits, value);
+ }
+
+ template <class RandomAccessIteratorT>
+ struct EncodingStatus {
+ EncodingStatus(
+ RandomAccessIteratorT begin_, RandomAccessIteratorT end_,
+ const PointDiT &old_base_,
+ std::array<uint32_t, PointTraits<PointDiT>::Dimension()> levels_,
+ uint32_t last_axis_)
+ : begin(begin_),
+ end(end_),
+ old_base(old_base_),
+ levels(levels_),
+ last_axis(last_axis_) {
+ num_remaining_points = end - begin;
+ }
+
+ RandomAccessIteratorT begin;
+ RandomAccessIteratorT end;
+ PointDiT old_base;
+ std::array<uint32_t, D> levels;
+ uint32_t last_axis;
+ uint32_t num_remaining_points;
+ friend bool operator<(const EncodingStatus &l, const EncodingStatus &r) {
+ return l.num_remaining_points < r.num_remaining_points;
+ }
+ };
+
+ uint32_t bit_length_;
+ uint32_t num_points_;
+ NumbersEncoder numbers_encoder_;
+ RemainingBitsEncoder remaining_bits_encoder_;
+ AxisEncoder axis_encoder_;
+ HalfEncoder half_encoder_;
+};
+
+template <class PointDiT, int compression_level_t>
+template <class RandomAccessIteratorT>
+bool IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::EncodePoints(
+ RandomAccessIteratorT begin, RandomAccessIteratorT end,
+ const uint32_t &bit_length, EncoderBuffer *buffer) {
+ bit_length_ = bit_length;
+ num_points_ = end - begin;
+
+ buffer->Encode(bit_length_);
+ buffer->Encode(num_points_);
+ if (num_points_ == 0) {
+ return true;
+ }
+
+ numbers_encoder_.StartEncoding();
+ remaining_bits_encoder_.StartEncoding();
+ axis_encoder_.StartEncoding();
+ half_encoder_.StartEncoding();
+
+ EncodeInternal(begin, end, PointTraits<PointDiT>::Origin(),
+ PointTraits<PointDiT>::ZeroArray(), 0);
+
+ numbers_encoder_.EndEncoding(buffer);
+ remaining_bits_encoder_.EndEncoding(buffer);
+ axis_encoder_.EndEncoding(buffer);
+ half_encoder_.EndEncoding(buffer);
+
+ return true;
+}
+template <class PointDiT, int compression_level_t>
+template <class RandomAccessIteratorT>
+uint32_t IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::GetAxis(
+ RandomAccessIteratorT begin, RandomAccessIteratorT end,
+ const PointDiT &old_base, std::array<uint32_t, D> levels,
+ uint32_t last_axis) {
+ if (!Policy::select_axis) {
+ return DRACO_INCREMENT_MOD(last_axis, D);
+ }
+
+ // For many points this function selects the axis that should be used
+ // for the split by keeping as many points as possible bundled.
+ // In the best case we do not split the point cloud at all.
+ // For lower number of points, we simply choose the axis that is refined the
+ // least so far.
+
+ DRACO_DCHECK_EQ(true, end - begin != 0);
+
+ uint32_t best_axis = 0;
+ if (end - begin < 64) {
+ for (uint32_t axis = 1; axis < D; ++axis) {
+ if (levels[best_axis] > levels[axis]) {
+ best_axis = axis;
+ }
+ }
+ } else {
+ const uint32_t size = (end - begin);
+ std::array<uint32_t, D> num_remaining_bits =
+ PointTraits<PointDiT>::ZeroArray();
+ for (int i = 0; i < D; i++) {
+ num_remaining_bits[i] = bit_length_ - levels[i];
+ }
+ PointDiT split(old_base);
+
+ for (int i = 0; i < D; i++) {
+ if (num_remaining_bits[i]) {
+ split[i] += 1 << (num_remaining_bits[i] - 1);
+ }
+ }
+
+ std::array<uint32_t, D> deviations = PointTraits<PointDiT>::ZeroArray();
+ for (auto it = begin; it != end; ++it) {
+ for (int i = 0; i < D; i++) {
+ deviations[i] += ((*it)[i] < split[i]);
+ }
+ }
+ for (int i = 0; i < D; i++) {
+ deviations[i] = std::max(size - deviations[i], deviations[i]);
+ }
+
+ uint32_t max_value = 0;
+ best_axis = 0;
+ for (int i = 0; i < D; i++) {
+ // If axis can be subdivided.
+ if (num_remaining_bits[i]) {
+ // Check if this is the better axis.
+ if (max_value < deviations[i]) {
+ max_value = deviations[i];
+ best_axis = i;
+ }
+ }
+ }
+ axis_encoder_.EncodeLeastSignificantBits32(4, best_axis);
+ }
+
+ return best_axis;
+}
+
+template <class PointDiT, int compression_level_t>
+template <class RandomAccessIteratorT>
+void IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::EncodeInternal(
+ RandomAccessIteratorT begin, RandomAccessIteratorT end, PointDiT old_base,
+ std::array<uint32_t, D> levels, uint32_t last_axis) {
+ EncodingStatus<RandomAccessIteratorT> init_status(begin, end, old_base,
+ levels, last_axis);
+ typename Policy::template QueuingStrategy<
+ EncodingStatus<RandomAccessIteratorT>>
+ status_q;
+
+ status_q.push(init_status);
+
+ while (!status_q.empty()) {
+ EncodingStatus<RandomAccessIteratorT> status = status_q.front();
+ status_q.pop();
+
+ begin = status.begin;
+ end = status.end;
+ old_base = status.old_base;
+ levels = status.levels;
+ last_axis = status.last_axis;
+
+ const uint32_t axis = GetAxis(begin, end, old_base, levels, last_axis);
+ const uint32_t level = levels[axis];
+ const uint32_t num_remaining_points = end - begin;
+
+ // If this happens all axis are subdivided to the end.
+ if ((bit_length_ - level) == 0) {
+ continue;
+ }
+
+ // Fast encoding of remaining bits if number of points is 1.
+ // Doing this also for 2 gives a slight additional speed up.
+ if (num_remaining_points <= 2) {
+ std::array<uint32_t, D> axes;
+ axes[0] = axis;
+ for (int i = 1; i < D; i++) {
+ axes[i] = DRACO_INCREMENT_MOD(axes[i - 1], D);
+ }
+
+ std::array<uint32_t, D> num_remaining_bits;
+ for (int i = 0; i < D; i++) {
+ num_remaining_bits[i] = bit_length_ - levels[axes[i]];
+ }
+
+ for (uint32_t i = 0; i < num_remaining_points; ++i) {
+ const PointDiT &p = *(begin + i);
+ for (int j = 0; j < D; j++) {
+ if (num_remaining_bits[j]) {
+ remaining_bits_encoder_.EncodeLeastSignificantBits32(
+ num_remaining_bits[j], p[axes[j]]);
+ }
+ }
+ }
+ continue;
+ }
+
+ const uint32_t num_remaining_bits = bit_length_ - level;
+ const uint32_t modifier = 1 << (num_remaining_bits - 1);
+ PointDiT new_base(old_base);
+ new_base[axis] += modifier;
+ const RandomAccessIteratorT split =
+ std::partition(begin, end, Splitter(axis, new_base[axis]));
+
+ DRACO_DCHECK_EQ(true, (end - begin) > 0);
+
+ // Encode number of points in first and second half.
+ const int required_bits = MostSignificantBit(num_remaining_points);
+
+ const uint32_t first_half = split - begin;
+ const uint32_t second_half = end - split;
+ const bool left = first_half < second_half;
+
+ if (first_half != second_half) {
+ half_encoder_.EncodeBit(left);
+ }
+
+ if (left) {
+ EncodeNumber(required_bits, num_remaining_points / 2 - first_half);
+ } else {
+ EncodeNumber(required_bits, num_remaining_points / 2 - second_half);
+ }
+
+ levels[axis] += 1;
+ if (split != begin) {
+ status_q.push(EncodingStatus<RandomAccessIteratorT>(
+ begin, split, old_base, levels, axis));
+ }
+ if (split != end) {
+ status_q.push(EncodingStatus<RandomAccessIteratorT>(split, end, new_base,
+ levels, axis));
+ }
+ }
+}
+
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 0>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 1>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 2>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 3>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 4>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 5>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 6>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 7>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 8>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 9>;
+extern template class IntegerPointsKdTreeEncoder<Point3ui, 10>;
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_INTEGER_POINTS_KD_TREE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_compression_method.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_compression_method.h
new file mode 100644
index 0000000..9541c96
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_compression_method.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_POINT_CLOUD_COMPRESSION_METHOD_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_POINT_CLOUD_COMPRESSION_METHOD_H_
+
+namespace draco {
+
+// Enum indicating the used compression method, used by Encoder and Decoder.
+enum PointCloudCompressionMethod {
+ RESERVED_POINT_CLOUD_METHOD_0 = 0, // Reserved for internal use.
+ // Generalized version of Encoding using the Octree method by Olivier
+ // Devillers to d dimensions.
+ // "Progressive lossless compression of arbitrary simplicial complexes"
+ // https://doi.org/10.1145/566570.566591
+ KDTREE = 1,
+ RESERVED_POINT_CLOUD_METHOD_2 = 2, // Reserved for internal use.
+  RESERVED_POINT_CLOUD_METHOD_3 = 3, // Reserved for internal use.
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_POINT_CLOUD_COMPRESSION_METHOD_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_types.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_types.h
new file mode 100644
index 0000000..893efbe
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/point_cloud_types.h
@@ -0,0 +1,76 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_POINT_CLOUD_TYPES_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_POINT_CLOUD_TYPES_H_
+
+#include <inttypes.h>
+
+#include <vector>
+
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// Using Eigen as this is favored by project Cartographer.
+typedef Vector3f Point3f;
+typedef Vector4f Point4f;
+typedef Vector3ui Point3ui;
+typedef Vector4ui Point4ui;
+typedef Vector5ui Point5ui;
+typedef Vector6ui Point6ui;
+typedef Vector7ui Point7ui;
+
+typedef std::vector<Point3f> PointCloud3f;
+
+template <class PointDT>
+struct PointDLess;
+
+template <class CoeffT, int dimension_t>
+struct PointDLess<VectorD<CoeffT, dimension_t>> {
+ bool operator()(const VectorD<CoeffT, dimension_t> &a,
+ const VectorD<CoeffT, dimension_t> &b) const {
+ return a < b;
+ }
+};
+
+template <class PointDT>
+class PointTraits {};
+
+template <class CoordinateTypeT, int dimension_t>
+class PointTraits<VectorD<CoordinateTypeT, dimension_t>> {
+ public:
+ typedef VectorD<CoordinateTypeT, dimension_t> PointD;
+ typedef CoordinateTypeT CoordinateType;
+
+ static constexpr uint32_t Dimension() { return dimension_t; }
+ static PointD Origin() {
+ PointD origin;
+ for (uint32_t i = 0; i < dimension_t; i++) {
+ origin(i) = 0;
+ }
+ return origin;
+ }
+ static std::array<uint32_t, dimension_t> ZeroArray() {
+ std::array<uint32_t, dimension_t> zero;
+ for (uint32_t i = 0; i < dimension_t; i++) {
+ zero[i] = 0;
+ }
+ return zero;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_POINT_CLOUD_TYPES_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/quantize_points_3.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/quantize_points_3.h
new file mode 100644
index 0000000..01943ad
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/quantize_points_3.h
@@ -0,0 +1,84 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_QUANTIZE_POINTS_3_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_QUANTIZE_POINTS_3_H_
+
+#include <inttypes.h>
+
+#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
+#include "draco/core/quantization_utils.h"
+
+namespace draco {
+
+// TODO(hemmer): Make this a stable bounding box.
+struct QuantizationInfo {
+ uint32_t quantization_bits;
+ float range;
+};
+
+template <class PointIterator, class OutputIterator>
+OutputIterator QuantizePoints3(const PointIterator &begin,
+ const PointIterator &end, QuantizationInfo *info,
+ OutputIterator oit) {
+ DRACO_DCHECK_GE(info->quantization_bits, 0);
+
+ float max_range = 0;
+ for (auto it = begin; it != end; ++it) {
+ max_range = std::max(std::fabs((*it)[0]), max_range);
+ max_range = std::max(std::fabs((*it)[1]), max_range);
+ max_range = std::max(std::fabs((*it)[2]), max_range);
+ }
+
+ const uint32_t max_quantized_value((1 << info->quantization_bits) - 1);
+ Quantizer quantize;
+ quantize.Init(max_range, max_quantized_value);
+ info->range = max_range;
+
+ Point3ui qpoint;
+ for (auto it = begin; it != end; ++it) {
+ // Quantize and all positive.
+ qpoint[0] = quantize((*it)[0]) + max_quantized_value;
+ qpoint[1] = quantize((*it)[1]) + max_quantized_value;
+ qpoint[2] = quantize((*it)[2]) + max_quantized_value;
+ *oit++ = (qpoint);
+ }
+
+ return oit;
+}
+
+template <class QPointIterator, class OutputIterator>
+void DequantizePoints3(const QPointIterator &begin, const QPointIterator &end,
+ const QuantizationInfo &info, OutputIterator &oit) {
+ DRACO_DCHECK_GE(info.quantization_bits, 0);
+ DRACO_DCHECK_GE(info.range, 0);
+
+ const uint32_t quantization_bits = info.quantization_bits;
+ const float range = info.range;
+ const uint32_t max_quantized_value((1 << quantization_bits) - 1);
+ Dequantizer dequantize;
+ dequantize.Init(range, max_quantized_value);
+
+ for (auto it = begin; it != end; ++it) {
+ const float x = dequantize((*it)[0] - max_quantized_value);
+ const float y = dequantize((*it)[1] - max_quantized_value);
+ const float z = dequantize((*it)[2] - max_quantized_value);
+ *oit = Point3f(x, y, z);
+ ++oit;
+ }
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_QUANTIZE_POINTS_3_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/queuing_policy.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/queuing_policy.h
new file mode 100644
index 0000000..2db0ea2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/algorithms/queuing_policy.h
@@ -0,0 +1,75 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File defining a coherent interface for different queuing strategies.
+
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_QUEUING_POLICY_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_QUEUING_POLICY_H_
+
+#include <queue>
+#include <stack>
+#include <utility>
+
+namespace draco {
+
+template <class T>
+class Queue {
+ public:
+ bool empty() const { return q_.empty(); }
+ typename std::queue<T>::size_type size() const { return q_.size(); }
+ void clear() { return q_.clear(); }
+ void push(const T &value) { q_.push(value); }
+ void push(T &&value) { q_.push(std::move(value)); }
+ void pop() { q_.pop(); }
+ typename std::queue<T>::const_reference front() const { return q_.front(); }
+
+ private:
+ std::queue<T> q_;
+};
+
+template <class T>
+class Stack {
+ public:
+ bool empty() const { return s_.empty(); }
+ typename std::stack<T>::size_type size() const { return s_.size(); }
+ void clear() { return s_.clear(); }
+ void push(const T &value) { s_.push(value); }
+ void push(T &&value) { s_.push(std::move(value)); }
+ void pop() { s_.pop(); }
+ typename std::stack<T>::const_reference front() const { return s_.top(); }
+
+ private:
+ std::stack<T> s_;
+};
+
+template <class T, class Compare = std::less<T> >
+class PriorityQueue {
+ typedef std::priority_queue<T, std::vector<T>, Compare> QType;
+
+ public:
+ bool empty() const { return s_.empty(); }
+ typename QType::size_type size() const { return s_.size(); }
+ void clear() { return s_.clear(); }
+ void push(const T &value) { s_.push(value); }
+ void push(T &&value) { s_.push(std::move(value)); }
+ void pop() { s_.pop(); }
+ typename QType::const_reference front() const { return s_.top(); }
+
+ private:
+ QType s_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_QUEUING_POLICY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.cc
new file mode 100644
index 0000000..85f7bc9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.cc
@@ -0,0 +1,199 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+
+#include "draco/metadata/metadata_decoder.h"
+
+namespace draco {
+
+PointCloudDecoder::PointCloudDecoder()
+ : point_cloud_(nullptr),
+ buffer_(nullptr),
+ version_major_(0),
+ version_minor_(0),
+ options_(nullptr) {}
+
+Status PointCloudDecoder::DecodeHeader(DecoderBuffer *buffer,
+ DracoHeader *out_header) {
+ constexpr char kIoErrorMsg[] = "Failed to parse Draco header.";
+ if (!buffer->Decode(out_header->draco_string, 5)) {
+ return Status(Status::IO_ERROR, kIoErrorMsg);
+ }
+ if (memcmp(out_header->draco_string, "DRACO", 5) != 0) {
+ return Status(Status::DRACO_ERROR, "Not a Draco file.");
+ }
+ if (!buffer->Decode(&(out_header->version_major))) {
+ return Status(Status::IO_ERROR, kIoErrorMsg);
+ }
+ if (!buffer->Decode(&(out_header->version_minor))) {
+ return Status(Status::IO_ERROR, kIoErrorMsg);
+ }
+ if (!buffer->Decode(&(out_header->encoder_type))) {
+ return Status(Status::IO_ERROR, kIoErrorMsg);
+ }
+ if (!buffer->Decode(&(out_header->encoder_method))) {
+ return Status(Status::IO_ERROR, kIoErrorMsg);
+ }
+ if (!buffer->Decode(&(out_header->flags))) {
+ return Status(Status::IO_ERROR, kIoErrorMsg);
+ }
+ return OkStatus();
+}
+
+Status PointCloudDecoder::DecodeMetadata() {
+ std::unique_ptr<GeometryMetadata> metadata =
+ std::unique_ptr<GeometryMetadata>(new GeometryMetadata());
+ MetadataDecoder metadata_decoder;
+ if (!metadata_decoder.DecodeGeometryMetadata(buffer_, metadata.get())) {
+ return Status(Status::DRACO_ERROR, "Failed to decode metadata.");
+ }
+ point_cloud_->AddMetadata(std::move(metadata));
+ return OkStatus();
+}
+
+Status PointCloudDecoder::Decode(const DecoderOptions &options,
+ DecoderBuffer *in_buffer,
+ PointCloud *out_point_cloud) {
+ options_ = &options;
+ buffer_ = in_buffer;
+ point_cloud_ = out_point_cloud;
+ DracoHeader header;
+ DRACO_RETURN_IF_ERROR(DecodeHeader(buffer_, &header))
+  // Sanity check that we are really using the right decoder (mostly for cases
+  // where the Decode method was called manually outside of our main API).
+ if (header.encoder_type != GetGeometryType()) {
+ return Status(Status::DRACO_ERROR,
+ "Using incompatible decoder for the input geometry.");
+ }
+ // TODO(ostava): We should check the method as well, but currently decoders
+ // don't expose the decoding method id.
+ version_major_ = header.version_major;
+ version_minor_ = header.version_minor;
+
+ const uint8_t max_supported_major_version =
+ header.encoder_type == POINT_CLOUD ? kDracoPointCloudBitstreamVersionMajor
+ : kDracoMeshBitstreamVersionMajor;
+ const uint8_t max_supported_minor_version =
+ header.encoder_type == POINT_CLOUD ? kDracoPointCloudBitstreamVersionMinor
+ : kDracoMeshBitstreamVersionMinor;
+
+ // Check for version compatibility.
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+ if (version_major_ < 1 || version_major_ > max_supported_major_version) {
+ return Status(Status::UNKNOWN_VERSION, "Unknown major version.");
+ }
+ if (version_major_ == max_supported_major_version &&
+ version_minor_ > max_supported_minor_version) {
+ return Status(Status::UNKNOWN_VERSION, "Unknown minor version.");
+ }
+#else
+ if (version_major_ != max_supported_major_version) {
+ return Status(Status::UNKNOWN_VERSION, "Unsupported major version.");
+ }
+ if (version_minor_ != max_supported_minor_version) {
+ return Status(Status::UNKNOWN_VERSION, "Unsupported minor version.");
+ }
+#endif
+ buffer_->set_bitstream_version(
+ DRACO_BITSTREAM_VERSION(version_major_, version_minor_));
+
+ if (bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 3) &&
+ (header.flags & METADATA_FLAG_MASK)) {
+ DRACO_RETURN_IF_ERROR(DecodeMetadata())
+ }
+ if (!InitializeDecoder()) {
+ return Status(Status::DRACO_ERROR, "Failed to initialize the decoder.");
+ }
+ if (!DecodeGeometryData()) {
+ return Status(Status::DRACO_ERROR, "Failed to decode geometry data.");
+ }
+ if (!DecodePointAttributes()) {
+ return Status(Status::DRACO_ERROR, "Failed to decode point attributes.");
+ }
+ return OkStatus();
+}
+
+bool PointCloudDecoder::DecodePointAttributes() {
+ uint8_t num_attributes_decoders;
+ if (!buffer_->Decode(&num_attributes_decoders)) {
+ return false;
+ }
+ // Create all attribute decoders. This is implementation specific and the
+ // derived classes can use any data encoded in the
+ // PointCloudEncoder::EncodeAttributesEncoderIdentifier() call.
+ for (int i = 0; i < num_attributes_decoders; ++i) {
+ if (!CreateAttributesDecoder(i)) {
+ return false;
+ }
+ }
+
+ // Initialize all attributes decoders. No data is decoded here.
+ for (auto &att_dec : attributes_decoders_) {
+ if (!att_dec->Init(this, point_cloud_)) {
+ return false;
+ }
+ }
+
+ // Decode any data needed by the attribute decoders.
+ for (int i = 0; i < num_attributes_decoders; ++i) {
+ if (!attributes_decoders_[i]->DecodeAttributesDecoderData(buffer_)) {
+ return false;
+ }
+ }
+
+ // Create map between attribute and decoder ids.
+ for (int i = 0; i < num_attributes_decoders; ++i) {
+ const int32_t num_attributes = attributes_decoders_[i]->GetNumAttributes();
+ for (int j = 0; j < num_attributes; ++j) {
+ int att_id = attributes_decoders_[i]->GetAttributeId(j);
+ if (att_id >= attribute_to_decoder_map_.size()) {
+ attribute_to_decoder_map_.resize(att_id + 1);
+ }
+ attribute_to_decoder_map_[att_id] = i;
+ }
+ }
+
+ // Decode the actual attributes using the created attribute decoders.
+ if (!DecodeAllAttributes()) {
+ return false;
+ }
+
+ if (!OnAttributesDecoded()) {
+ return false;
+ }
+ return true;
+}
+
+bool PointCloudDecoder::DecodeAllAttributes() {
+ for (auto &att_dec : attributes_decoders_) {
+ if (!att_dec->DecodeAttributes(buffer_)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+const PointAttribute *PointCloudDecoder::GetPortableAttribute(
+ int32_t parent_att_id) {
+ if (parent_att_id < 0 || parent_att_id >= point_cloud_->num_attributes()) {
+ return nullptr;
+ }
+ const int32_t parent_att_decoder_id =
+ attribute_to_decoder_map_[parent_att_id];
+ return attributes_decoders_[parent_att_decoder_id]->GetPortableAttribute(
+ parent_att_id);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.h
new file mode 100644
index 0000000..4af7f5c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_decoder.h
@@ -0,0 +1,118 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_DECODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_DECODER_H_
+
+#include "draco/compression/attributes/attributes_decoder_interface.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/decoder_options.h"
+#include "draco/core/status.h"
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
// Abstract base class for all point cloud and mesh decoders. It provides a
// basic functionality that is shared between different decoders.
class PointCloudDecoder {
 public:
  PointCloudDecoder();
  virtual ~PointCloudDecoder() = default;

  virtual EncodedGeometryType GetGeometryType() const { return POINT_CLOUD; }

  // Decodes a Draco header into the provided |out_header|.
  // Returns an error Status on failure.
  static Status DecodeHeader(DecoderBuffer *buffer, DracoHeader *out_header);

  // The main entry point for point cloud decoding. Decodes |in_buffer| into
  // |out_point_cloud| using the provided |options|.
  Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer,
                PointCloud *out_point_cloud);

  // Registers |decoder| under the slot |att_decoder_id|, growing the decoder
  // array as needed. Returns false for a negative id. Takes ownership of
  // |decoder|; an existing decoder in the same slot is replaced.
  bool SetAttributesDecoder(
      int att_decoder_id, std::unique_ptr<AttributesDecoderInterface> decoder) {
    if (att_decoder_id < 0) {
      return false;
    }
    if (att_decoder_id >= static_cast<int>(attributes_decoders_.size())) {
      attributes_decoders_.resize(att_decoder_id + 1);
    }
    attributes_decoders_[att_decoder_id] = std::move(decoder);
    return true;
  }

  // Returns an attribute containing decoded data in their portable form that
  // is guaranteed to be the same for both encoder and decoder. I.e., it returns
  // an attribute before it was transformed back into its final form which may
  // be slightly different (non-portable) across platforms. For example, for
  // attributes encoded with quantization, this method returns an attribute
  // that contains the quantized values (before the dequantization step).
  const PointAttribute *GetPortableAttribute(int32_t point_attribute_id);

  // Bitstream version of the encoder that produced the data currently being
  // decoded (valid after the header has been decoded).
  uint16_t bitstream_version() const {
    return DRACO_BITSTREAM_VERSION(version_major_, version_minor_);
  }

  const AttributesDecoderInterface *attributes_decoder(int dec_id) {
    return attributes_decoders_[dec_id].get();
  }
  int32_t num_attributes_decoders() const {
    return static_cast<int32_t>(attributes_decoders_.size());
  }

  // Get a mutable pointer to the decoded point cloud. This is intended to be
  // used mostly by other decoder subsystems.
  PointCloud *point_cloud() { return point_cloud_; }
  const PointCloud *point_cloud() const { return point_cloud_; }

  DecoderBuffer *buffer() { return buffer_; }
  const DecoderOptions *options() const { return options_; }

 protected:
  // Can be implemented by derived classes to perform any custom initialization
  // of the decoder. Called in the Decode() method.
  virtual bool InitializeDecoder() { return true; }

  // Creates an attribute decoder for the given slot. Must be implemented by
  // derived classes.
  virtual bool CreateAttributesDecoder(int32_t att_decoder_id) = 0;
  // Decodes any global geometry data (such as the number of points).
  virtual bool DecodeGeometryData() { return true; }
  virtual bool DecodePointAttributes();

  virtual bool DecodeAllAttributes();
  // Hook invoked after all attributes have been decoded.
  virtual bool OnAttributesDecoded() { return true; }

  Status DecodeMetadata();

 private:
  // Point cloud that is being filled in by the decoder. Not owned.
  PointCloud *point_cloud_;

  std::vector<std::unique_ptr<AttributesDecoderInterface>> attributes_decoders_;

  // Map between attribute id and decoder id.
  std::vector<int32_t> attribute_to_decoder_map_;

  // Input buffer holding the encoded data. Not owned.
  DecoderBuffer *buffer_;

  // Bit-stream version of the encoder that encoded the input data.
  uint8_t version_major_;
  uint8_t version_minor_;

  // Decoding options supplied to Decode(). Not owned.
  const DecoderOptions *options_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.cc
new file mode 100644
index 0000000..a1fda8d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.cc
@@ -0,0 +1,306 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+
+#include "draco/metadata/metadata_encoder.h"
+
+namespace draco {
+
+PointCloudEncoder::PointCloudEncoder()
+ : point_cloud_(nullptr), buffer_(nullptr), num_encoded_points_(0) {}
+
// Stores a non-owning pointer to the point cloud that subsequent Encode()
// calls will compress. The caller must keep |pc| alive while encoding.
void PointCloudEncoder::SetPointCloud(const PointCloud &pc) {
  point_cloud_ = &pc;
}
+
// Top-level encoding driver. Emits, in bitstream order: header, optional
// metadata, subclass-specific data, geometry data, and point attributes.
// Returns an error Status describing the first failing stage.
Status PointCloudEncoder::Encode(const EncoderOptions &options,
                                 EncoderBuffer *out_buffer) {
  options_ = &options;
  buffer_ = out_buffer;

  // Cleanup from previous runs.
  attributes_encoders_.clear();
  attribute_to_encoder_map_.clear();
  attributes_encoder_ids_order_.clear();

  if (!point_cloud_) {
    return Status(Status::DRACO_ERROR, "Invalid input geometry.");
  }
  // NOTE(review): the next two macro invocations carry no trailing semicolon
  // while the one after EncodeGeometryData() does — presumably the macro
  // expands to a full statement either way; confirm against its definition.
  DRACO_RETURN_IF_ERROR(EncodeHeader())
  DRACO_RETURN_IF_ERROR(EncodeMetadata())
  if (!InitializeEncoder()) {
    return Status(Status::DRACO_ERROR, "Failed to initialize encoder.");
  }
  if (!EncodeEncoderData()) {
    return Status(Status::DRACO_ERROR, "Failed to encode internal data.");
  }
  DRACO_RETURN_IF_ERROR(EncodeGeometryData());
  if (!EncodePointAttributes()) {
    return Status(Status::DRACO_ERROR, "Failed to encode point attributes.");
  }
  // Optionally record how many points were actually encoded (opt-in because
  // the computation may not be free for all encoders).
  if (options.GetGlobalBool("store_number_of_encoded_points", false)) {
    ComputeNumberOfEncodedPoints();
  }
  return OkStatus();
}
+
+Status PointCloudEncoder::EncodeHeader() {
+ // Encode the header according to our v1 specification.
+ // Five bytes for Draco format.
+ buffer_->Encode("DRACO", 5);
+ // Version (major, minor).
+ const uint8_t encoder_type = GetGeometryType();
+ uint8_t version_major, version_minor;
+ version_major = encoder_type == POINT_CLOUD
+ ? kDracoPointCloudBitstreamVersionMajor
+ : kDracoMeshBitstreamVersionMajor;
+ version_minor = encoder_type == POINT_CLOUD
+ ? kDracoPointCloudBitstreamVersionMinor
+ : kDracoMeshBitstreamVersionMinor;
+
+ buffer_->Encode(version_major);
+ buffer_->Encode(version_minor);
+ // Type of the encoder (point cloud, mesh, ...).
+ buffer_->Encode(encoder_type);
+ // Unique identifier for the selected encoding method (edgebreaker, etc...).
+ buffer_->Encode(GetEncodingMethod());
+ // Reserved for flags.
+ uint16_t flags = 0;
+ // First bit of |flags| is reserved for metadata.
+ if (point_cloud_->GetMetadata()) {
+ flags |= METADATA_FLAG_MASK;
+ }
+ buffer_->Encode(flags);
+ return OkStatus();
+}
+
+Status PointCloudEncoder::EncodeMetadata() {
+ if (!point_cloud_->GetMetadata()) {
+ return OkStatus();
+ }
+ MetadataEncoder metadata_encoder;
+ if (!metadata_encoder.EncodeGeometryMetadata(buffer_,
+ point_cloud_->GetMetadata())) {
+ return Status(Status::DRACO_ERROR, "Failed to encode metadata.");
+ }
+ return OkStatus();
+}
+
// Encodes all point attributes. The emission order below (count, identifiers,
// per-encoder data, attribute values) is part of the bitstream format and
// must mirror the decoder's reading order exactly.
bool PointCloudEncoder::EncodePointAttributes() {
  if (!GenerateAttributesEncoders()) {
    return false;
  }

  // Encode the number of attribute encoders.
  buffer_->Encode(static_cast<uint8_t>(attributes_encoders_.size()));

  // Initialize all the encoders (this is used for example to init attribute
  // dependencies, no data is encoded in this step).
  for (auto &att_enc : attributes_encoders_) {
    if (!att_enc->Init(this, point_cloud_)) {
      return false;
    }
  }

  // Rearrange attributes to respect dependencies between individual attributes.
  if (!RearrangeAttributesEncoders()) {
    return false;
  }

  // Encode any data that is necessary to create the corresponding attribute
  // decoder. Note: all subsequent loops iterate in the rearranged
  // (dependency-respecting) encoder order, not creation order.
  for (int att_encoder_id : attributes_encoder_ids_order_) {
    if (!EncodeAttributesEncoderIdentifier(att_encoder_id)) {
      return false;
    }
  }

  // Also encode any attribute encoder data (such as the info about encoded
  // attributes).
  for (int att_encoder_id : attributes_encoder_ids_order_) {
    if (!attributes_encoders_[att_encoder_id]->EncodeAttributesEncoderData(
            buffer_)) {
      return false;
    }
  }

  // Lastly encode all the attributes using the provided attribute encoders.
  if (!EncodeAllAttributes()) {
    return false;
  }
  return true;
}
+
+bool PointCloudEncoder::GenerateAttributesEncoders() {
+ for (int i = 0; i < point_cloud_->num_attributes(); ++i) {
+ if (!GenerateAttributesEncoder(i)) {
+ return false;
+ }
+ }
+ attribute_to_encoder_map_.resize(point_cloud_->num_attributes());
+ for (uint32_t i = 0; i < attributes_encoders_.size(); ++i) {
+ for (uint32_t j = 0; j < attributes_encoders_[i]->num_attributes(); ++j) {
+ attribute_to_encoder_map_[attributes_encoders_[i]->GetAttributeId(j)] = i;
+ }
+ }
+ return true;
+}
+
+bool PointCloudEncoder::EncodeAllAttributes() {
+ for (int att_encoder_id : attributes_encoder_ids_order_) {
+ if (!attributes_encoders_[att_encoder_id]->EncodeAttributes(buffer_)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool PointCloudEncoder::MarkParentAttribute(int32_t parent_att_id) {
+ if (parent_att_id < 0 || parent_att_id >= point_cloud_->num_attributes()) {
+ return false;
+ }
+ const int32_t parent_att_encoder_id =
+ attribute_to_encoder_map_[parent_att_id];
+ if (!attributes_encoders_[parent_att_encoder_id]->MarkParentAttribute(
+ parent_att_id)) {
+ return false;
+ }
+ return true;
+}
+
+const PointAttribute *PointCloudEncoder::GetPortableAttribute(
+ int32_t parent_att_id) {
+ if (parent_att_id < 0 || parent_att_id >= point_cloud_->num_attributes()) {
+ return nullptr;
+ }
+ const int32_t parent_att_encoder_id =
+ attribute_to_encoder_map_[parent_att_id];
+ return attributes_encoders_[parent_att_encoder_id]->GetPortableAttribute(
+ parent_att_id);
+}
+
+bool PointCloudEncoder::RearrangeAttributesEncoders() {
+ // Find the encoding order of the attribute encoders that is determined by
+ // the parent dependencies between individual encoders. Instead of traversing
+ // a graph we encode the attributes in multiple iterations where encoding of
+ // attributes that depend on other attributes may get postponed until the
+ // parent attributes are processed.
+ // This is simpler to implement than graph traversal and it automatically
+ // detects any cycles in the dependency graph.
+ // TODO(ostava): Current implementation needs to encode all attributes of a
+ // single encoder to be encoded in a single "chunk", therefore we need to sort
+ // attribute encoders before we sort individual attributes. This requirement
+ // can be lifted for encoders that can encode individual attributes separately
+ // but it will require changes in the current API.
+ attributes_encoder_ids_order_.resize(attributes_encoders_.size());
+ std::vector<bool> is_encoder_processed(attributes_encoders_.size(), false);
+ uint32_t num_processed_encoders = 0;
+ while (num_processed_encoders < attributes_encoders_.size()) {
+ // Flagged when any of the encoder get processed.
+ bool encoder_processed = false;
+ for (uint32_t i = 0; i < attributes_encoders_.size(); ++i) {
+ if (is_encoder_processed[i]) {
+ continue; // Encoder already processed.
+ }
+ // Check if all parent encoders are already processed.
+ bool can_be_processed = true;
+ for (uint32_t p = 0; p < attributes_encoders_[i]->num_attributes(); ++p) {
+ const int32_t att_id = attributes_encoders_[i]->GetAttributeId(p);
+ for (int ap = 0;
+ ap < attributes_encoders_[i]->NumParentAttributes(att_id); ++ap) {
+ const uint32_t parent_att_id =
+ attributes_encoders_[i]->GetParentAttributeId(att_id, ap);
+ const int32_t parent_encoder_id =
+ attribute_to_encoder_map_[parent_att_id];
+ if (parent_att_id != i && !is_encoder_processed[parent_encoder_id]) {
+ can_be_processed = false;
+ break;
+ }
+ }
+ }
+ if (!can_be_processed) {
+ continue; // Try to process the encoder in the next iteration.
+ }
+ // Encoder can be processed. Update the encoding order.
+ attributes_encoder_ids_order_[num_processed_encoders++] = i;
+ is_encoder_processed[i] = true;
+ encoder_processed = true;
+ }
+ if (!encoder_processed &&
+ num_processed_encoders < attributes_encoders_.size()) {
+ // No encoder was processed but there are still some remaining unprocessed
+ // encoders.
+ return false;
+ }
+ }
+
+ // Now for every encoder, reorder the attributes to satisfy their
+ // dependencies (an attribute may still depend on other attributes within an
+ // encoder).
+ std::vector<int32_t> attribute_encoding_order;
+ std::vector<bool> is_attribute_processed(point_cloud_->num_attributes(),
+ false);
+ int num_processed_attributes;
+ for (uint32_t ae_order = 0; ae_order < attributes_encoders_.size();
+ ++ae_order) {
+ const int ae = attributes_encoder_ids_order_[ae_order];
+ const int32_t num_encoder_attributes =
+ attributes_encoders_[ae]->num_attributes();
+ if (num_encoder_attributes < 2) {
+ continue; // No need to resolve dependencies for a single attribute.
+ }
+ num_processed_attributes = 0;
+ attribute_encoding_order.resize(num_encoder_attributes);
+ while (num_processed_attributes < num_encoder_attributes) {
+ // Flagged when any of the attributes get processed.
+ bool attribute_processed = false;
+ for (int i = 0; i < num_encoder_attributes; ++i) {
+ const int32_t att_id = attributes_encoders_[ae]->GetAttributeId(i);
+ if (is_attribute_processed[i]) {
+ continue; // Attribute already processed.
+ }
+ // Check if all parent attributes are already processed.
+ bool can_be_processed = true;
+ for (int p = 0;
+ p < attributes_encoders_[ae]->NumParentAttributes(att_id); ++p) {
+ const int32_t parent_att_id =
+ attributes_encoders_[ae]->GetParentAttributeId(att_id, p);
+ if (!is_attribute_processed[parent_att_id]) {
+ can_be_processed = false;
+ break;
+ }
+ }
+ if (!can_be_processed) {
+ continue; // Try to process the attribute in the next iteration.
+ }
+ // Attribute can be processed. Update the encoding order.
+ attribute_encoding_order[num_processed_attributes++] = i;
+ is_attribute_processed[i] = true;
+ attribute_processed = true;
+ }
+ if (!attribute_processed &&
+ num_processed_attributes < num_encoder_attributes) {
+ // No attribute was processed but there are still some remaining
+ // unprocessed attributes.
+ return false;
+ }
+ }
+ // Update the order of the attributes within the encoder.
+ attributes_encoders_[ae]->SetAttributeIds(attribute_encoding_order);
+ }
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.h
new file mode 100644
index 0000000..8883f17
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_encoder.h
@@ -0,0 +1,158 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_ENCODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_ENCODER_H_
+
+#include "draco/compression/attributes/attributes_encoder.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/encoder_options.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/core/status.h"
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
// Abstract base class for all point cloud and mesh encoders. It provides a
// basic functionality that's shared between different encoders.
class PointCloudEncoder {
 public:
  PointCloudEncoder();
  virtual ~PointCloudEncoder() = default;

  // Sets the point cloud that is going be encoded. Must be called before the
  // Encode() method. The encoder does not take ownership of |pc|.
  void SetPointCloud(const PointCloud &pc);

  // The main entry point that encodes provided point cloud.
  Status Encode(const EncoderOptions &options, EncoderBuffer *out_buffer);

  virtual EncodedGeometryType GetGeometryType() const { return POINT_CLOUD; }

  // Returns the unique identifier of the encoding method (such as Edgebreaker
  // for mesh compression).
  virtual uint8_t GetEncodingMethod() const = 0;

  // Returns the number of points that were encoded during the last Encode()
  // function call. Valid only if "store_number_of_encoded_points" flag was set
  // in the provided EncoderOptions.
  size_t num_encoded_points() const { return num_encoded_points_; }

  int num_attributes_encoders() const {
    return static_cast<int>(attributes_encoders_.size());
  }
  AttributesEncoder *attributes_encoder(int i) {
    return attributes_encoders_[i].get();
  }

  // Adds a new attribute encoder, returning its id. Takes ownership of
  // |att_enc|.
  int AddAttributesEncoder(std::unique_ptr<AttributesEncoder> att_enc) {
    attributes_encoders_.push_back(std::move(att_enc));
    return static_cast<int>(attributes_encoders_.size() - 1);
  }

  // Marks one attribute as a parent of another attribute. Must be called after
  // all attribute encoders are created (usually in the
  // AttributeEncoder::Init() method).
  bool MarkParentAttribute(int32_t parent_att_id);

  // Returns an attribute containing portable version of the attribute data that
  // is guaranteed to be encoded losslessly. This attribute can be used safely
  // as predictor for other attributes.
  const PointAttribute *GetPortableAttribute(int32_t point_attribute_id);

  EncoderBuffer *buffer() { return buffer_; }
  const EncoderOptions *options() const { return options_; }
  const PointCloud *point_cloud() const { return point_cloud_; }

 protected:
  // Can be implemented by derived classes to perform any custom initialization
  // of the encoder. Called in the Encode() method.
  virtual bool InitializeEncoder() { return true; }

  // Should be used to encode any encoder-specific data.
  virtual bool EncodeEncoderData() { return true; }

  // Encodes any global geometry data (such as the number of points).
  virtual Status EncodeGeometryData() { return OkStatus(); }

  // Encodes all attribute values. The attribute encoders are sorted to resolve
  // any attribute dependencies and all the encoded data is stored into the
  // |buffer_|.
  // Returns false if the encoding failed.
  virtual bool EncodePointAttributes();

  // Generate attribute encoders that are going to be used for encoding
  // point attribute data. Calls GenerateAttributesEncoder() for every attribute
  // of the encoded PointCloud.
  virtual bool GenerateAttributesEncoders();

  // Creates attribute encoder for a specific point attribute. This function
  // needs to be implemented by the derived classes. The derived classes need
  // to either 1. Create a new attribute encoder and add it using the
  // AddAttributeEncoder method, or 2. add the attribute to an existing
  // attribute encoder (using AttributesEncoder::AddAttributeId() method).
  virtual bool GenerateAttributesEncoder(int32_t att_id) = 0;

  // Encodes any data that is necessary to recreate a given attribute encoder.
  // Note: this is called in order in which the attribute encoders are going to
  // be encoded.
  virtual bool EncodeAttributesEncoderIdentifier(int32_t /* att_encoder_id */) {
    return true;
  }

  // Encodes all the attribute data using the created attribute encoders.
  virtual bool EncodeAllAttributes();

  // Computes and sets the num_encoded_points_ for the encoder.
  virtual void ComputeNumberOfEncodedPoints() = 0;

  void set_num_encoded_points(size_t num_points) {
    num_encoded_points_ = num_points;
  }

 private:
  // Encodes Draco header that is the same for all encoders.
  Status EncodeHeader();

  // Encodes the point cloud metadata, if present.
  Status EncodeMetadata();

  // Rearranges attribute encoders and their attributes to reflect the
  // underlying attribute dependencies. This ensures that the attributes are
  // encoded in the correct order (parent attributes before their children).
  bool RearrangeAttributesEncoders();

  // Input geometry set by SetPointCloud(). Not owned.
  const PointCloud *point_cloud_;
  std::vector<std::unique_ptr<AttributesEncoder>> attributes_encoders_;

  // Map between attribute id and encoder id.
  std::vector<int32_t> attribute_to_encoder_map_;

  // Encoding order of individual attribute encoders (i.e., the order in which
  // they are processed during encoding that may be different from the order
  // in which they were created because of attribute dependencies).
  std::vector<int32_t> attributes_encoder_ids_order_;

  // This buffer holds the final encoded data. Not owned.
  EncoderBuffer *buffer_;

  // Options supplied to Encode(). Not owned.
  const EncoderOptions *options_;

  size_t num_encoded_points_;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.cc
new file mode 100644
index 0000000..2deebbc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_kd_tree_decoder.h"
+
+#include "draco/compression/attributes/kd_tree_attributes_decoder.h"
+
+namespace draco {
+
+bool PointCloudKdTreeDecoder::DecodeGeometryData() {
+ int32_t num_points;
+ if (!buffer()->Decode(&num_points)) {
+ return false;
+ }
+ if (num_points < 0) {
+ return false;
+ }
+ point_cloud()->set_num_points(num_points);
+ return true;
+}
+
// Installs a KdTreeAttributesDecoder in the requested slot; the kd-tree
// bitstream always uses this single decoder type.
bool PointCloudKdTreeDecoder::CreateAttributesDecoder(int32_t att_decoder_id) {
  // Always create the basic attribute decoder.
  return SetAttributesDecoder(
      att_decoder_id,
      std::unique_ptr<AttributesDecoder>(new KdTreeAttributesDecoder()));
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.h
new file mode 100644
index 0000000..6e192f2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_decoder.h
@@ -0,0 +1,31 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_KD_TREE_DECODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_KD_TREE_DECODER_H_
+
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+
+namespace draco {
+
// Decodes PointCloud encoded with the PointCloudKdTreeEncoder.
class PointCloudKdTreeDecoder : public PointCloudDecoder {
 protected:
  // Reads the encoded point count and sizes the output point cloud.
  bool DecodeGeometryData() override;
  // Creates a KdTreeAttributesDecoder for the given slot.
  bool CreateAttributesDecoder(int32_t att_decoder_id) override;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_KD_TREE_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.cc
new file mode 100644
index 0000000..92b6c84
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.cc
@@ -0,0 +1,43 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_kd_tree_encoder.h"
+
+#include "draco/compression/attributes/kd_tree_attributes_encoder.h"
+
+namespace draco {
+
+Status PointCloudKdTreeEncoder::EncodeGeometryData() {
+ const int32_t num_points = point_cloud()->num_points();
+ buffer()->Encode(num_points);
+ return OkStatus();
+}
+
+bool PointCloudKdTreeEncoder::GenerateAttributesEncoder(int32_t att_id) {
+ if (num_attributes_encoders() == 0) {
+ // Create a new attribute encoder only for the first attribute.
+ AddAttributesEncoder(std::unique_ptr<AttributesEncoder>(
+ new KdTreeAttributesEncoder(att_id)));
+ return true;
+ }
+ // Add a new attribute to the attribute encoder.
+ attributes_encoder(0)->AddAttributeId(att_id);
+ return true;
+}
+
// The kd-tree encoder encodes every point of the input cloud, so the encoded
// point count equals the point cloud's point count.
void PointCloudKdTreeEncoder::ComputeNumberOfEncodedPoints() {
  set_num_encoded_points(point_cloud()->num_points());
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.h
new file mode 100644
index 0000000..6acbb94
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.h
@@ -0,0 +1,45 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_KD_TREE_ENCODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_KD_TREE_ENCODER_H_
+
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+
+namespace draco {
+
// Encodes a PointCloud using one of the available Kd-tree compression methods.
// See FloatPointsKdTreeEncoder and DynamicIntegerPointsKdTreeEncoder for more
// details. Currently, the input PointCloud must satisfy the following
// requirements to use this encoder:
//   1. PointCloud has only one attribute of type GeometryAttribute::POSITION.
//   2. The position attribute has three components (x,y,z).
//   3. The position values are stored as either DT_FLOAT32 or DT_UINT32.
//   4. If the position values are stored as DT_FLOAT32, quantization needs to
//      be enabled for the position attribute.
class PointCloudKdTreeEncoder : public PointCloudEncoder {
 public:
  uint8_t GetEncodingMethod() const override {
    return POINT_CLOUD_KD_TREE_ENCODING;
  }

 protected:
  // Writes the point count into the bitstream.
  Status EncodeGeometryData() override;
  // Routes every attribute into a single shared kd-tree attributes encoder.
  bool GenerateAttributesEncoder(int32_t att_id) override;
  // All input points are encoded; reports the full point count.
  void ComputeNumberOfEncodedPoints() override;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_KD_TREE_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc
new file mode 100644
index 0000000..2249bb0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc
@@ -0,0 +1,458 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_kd_tree_decoder.h"
+#include "draco/compression/point_cloud/point_cloud_kd_tree_encoder.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/core/vector_d.h"
+#include "draco/io/obj_decoder.h"
+#include "draco/point_cloud/point_cloud_builder.h"
+
+namespace draco {
+
+class PointCloudKdTreeEncodingTest : public ::testing::Test {
+ protected:
+ void ComparePointClouds(const PointCloud &p0, const PointCloud &p1) const {
+ ASSERT_EQ(p0.num_points(), p1.num_points());
+ ASSERT_EQ(p0.num_attributes(), p1.num_attributes());
+ // Currently works only with one attribute.
+ ASSERT_EQ(p0.num_attributes(), p1.num_attributes());
+ for (auto index = 0; index < p0.num_attributes(); index += 1) {
+ ASSERT_EQ(p0.attribute(index)->num_components(),
+ p1.attribute(index)->num_components());
+ std::vector<double> points_0, points_1;
+ std::vector<double> att_entry_0(p0.attribute(index)->num_components());
+ std::vector<double> att_entry_1(p0.attribute(index)->num_components());
+ for (PointIndex i(0); i < p0.num_points(); ++i) {
+ p0.attribute(index)->ConvertValue(p0.attribute(index)->mapped_index(i),
+ &att_entry_0[0]);
+ p1.attribute(index)->ConvertValue(p1.attribute(index)->mapped_index(i),
+ &att_entry_1[0]);
+ for (int d = 0; d < p0.attribute(index)->num_components(); ++d) {
+ points_0.push_back(att_entry_0[d]);
+ points_1.push_back(att_entry_1[d]);
+ }
+ }
+ // To compare the point clouds we sort points components from both inputs
+ // separately, and then we compare all matching coordinates one by one.
+ // TODO(ostava): Note that this is not guaranteed to work for quantized
+ // point clouds because the order of points may actually change because
+ // of the quantization. The test should be make more robust to handle such
+ // case.
+ std::sort(points_0.begin(), points_0.end());
+ std::sort(points_1.begin(), points_1.end());
+ for (uint32_t i = 0; i < points_0.size(); ++i) {
+ ASSERT_LE(std::fabs(points_0[i] - points_1[i]), 1e-2);
+ }
+ }
+ }
+
+ void TestKdTreeEncoding(const PointCloud &pc) {
+ EncoderBuffer buffer;
+ PointCloudKdTreeEncoder encoder;
+ EncoderOptions options = EncoderOptions::CreateDefaultOptions();
+ options.SetGlobalInt("quantization_bits", 16);
+ for (int compression_level = 0; compression_level <= 6;
+ ++compression_level) {
+ options.SetSpeed(10 - compression_level, 10 - compression_level);
+ encoder.SetPointCloud(pc);
+ ASSERT_TRUE(encoder.Encode(options, &buffer).ok());
+
+ DecoderBuffer dec_buffer;
+ dec_buffer.Init(buffer.data(), buffer.size());
+ PointCloudKdTreeDecoder decoder;
+
+ std::unique_ptr<PointCloud> out_pc(new PointCloud());
+ DecoderOptions dec_options;
+ ASSERT_TRUE(decoder.Decode(dec_options, &dec_buffer, out_pc.get()).ok());
+
+ ComparePointClouds(pc, *out_pc);
+ }
+ }
+
+ void TestFloatEncoding(const std::string &file_name) {
+ std::unique_ptr<PointCloud> pc = ReadPointCloudFromTestFile(file_name);
+ ASSERT_NE(pc, nullptr);
+
+ TestKdTreeEncoding(*pc);
+ }
+};
+
// Round-trips a float point cloud loaded from an .obj test file through the
// kd-tree encoder.
TEST_F(PointCloudKdTreeEncodingTest, TestFloatKdTreeEncoding) {
  TestFloatEncoding("cube_subd.obj");
}
+
+TEST_F(PointCloudKdTreeEncodingTest, TestIntKdTreeEncoding) {
+ constexpr int num_points = 120;
+ std::vector<std::array<uint32_t, 3>> points(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint32_t, 3> pos;
+ // Generate some pseudo-random points.
+ pos[0] = 8 * ((i * 7) % 127);
+ pos[1] = 13 * ((i * 3) % 321);
+ pos[2] = 29 * ((i * 19) % 450);
+ points[i] = pos;
+ }
+
+ PointCloudBuilder builder;
+ builder.Start(num_points);
+ const int att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id, PointIndex(i),
+ &(points[i.value()])[0]);
+ }
+ std::unique_ptr<PointCloud> pc = builder.Finalize(false);
+ ASSERT_NE(pc, nullptr);
+
+ TestKdTreeEncoding(*pc);
+}
+
+// test higher dimensions with more attributes
+TEST_F(PointCloudKdTreeEncodingTest, TestIntKdTreeEncodingHigherDimension) {
+ constexpr int num_points = 120;
+ std::vector<std::array<uint32_t, 3>> points3(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint32_t, 3> pos;
+ // Generate some pseudo-random points.
+ pos[0] = 8 * ((i * 7) % 127);
+ pos[1] = 13 * ((i * 3) % 321);
+ pos[2] = 29 * ((i * 19) % 450);
+ points3[i] = pos;
+ }
+ std::vector<std::array<uint32_t, 2>> points2(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint32_t, 2> pos;
+ // Generate some pseudo-random points.
+ pos[0] = 8 * ((i * 7) % 127) + 1;
+ pos[1] = 13 * ((i * 3) % 321) + 1;
+ points2[i] = pos;
+ }
+ std::vector<std::array<uint32_t, 1>> points1(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint32_t, 1> pos;
+ // Generate some pseudo-random points.
+ pos[0] = 8 * ((i * 7) % 127) + 11;
+ points1[i] = pos;
+ }
+
+ PointCloudBuilder builder;
+ builder.Start(num_points);
+ const int att_id3 =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id3, PointIndex(i),
+ &(points3[i.value()])[0]);
+ }
+ const int att_id2 =
+ builder.AddAttribute(GeometryAttribute::POSITION, 2, DT_UINT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id2, PointIndex(i),
+ &(points2[i.value()])[0]);
+ }
+ const int att_id1 =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_UINT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id1, PointIndex(i),
+ &(points1[i.value()])[0]);
+ }
+
+ std::unique_ptr<PointCloud> pc = builder.Finalize(false);
+ ASSERT_NE(pc, nullptr);
+
+ TestKdTreeEncoding(*pc);
+}
+
// Test 16 and 8 bit encoding.
TEST_F(PointCloudKdTreeEncodingTest,
       TestIntKdTreeEncodingHigherDimensionVariedTypes) {
  constexpr int num_points = 120;
  // 32-bit, three-component position attribute.
  std::vector<std::array<uint32_t, 3>> points3(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint32_t, 3> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127);
    pos[1] = 13 * ((i * 3) % 321);
    pos[2] = 29 * ((i * 19) % 450);
    points3[i] = pos;
  }
  // 16-bit, two-component position attribute.
  std::vector<std::array<uint16_t, 2>> points2(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint16_t, 2> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127) + 1;
    pos[1] = 13 * ((i * 3) % 321) + 1;
    points2[i] = pos;
  }
  // 8-bit, one-component generic attribute.
  // NOTE(review): the expression below can exceed the uint8_t range and wrap
  // around — presumably intentional for generating test data; confirm.
  std::vector<std::array<uint8_t, 1>> points1(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint8_t, 1> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127) + 11;
    points1[i] = pos;
  }

  PointCloudBuilder builder;
  builder.Start(num_points);
  const int att_id3 =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id3, PointIndex(i),
                                      &(points3[i.value()])[0]);
  }
  const int att_id2 =
      builder.AddAttribute(GeometryAttribute::POSITION, 2, DT_UINT16);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id2, PointIndex(i),
                                      &(points2[i.value()])[0]);
  }
  const int att_id1 =
      builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_UINT8);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id1, PointIndex(i),
                                      &(points1[i.value()])[0]);
  }

  std::unique_ptr<PointCloud> pc = builder.Finalize(false);
  ASSERT_NE(pc, nullptr);

  TestKdTreeEncoding(*pc);
}
+
+// Test 16 only encoding for one attribute.
+TEST_F(PointCloudKdTreeEncodingTest, TestIntKdTreeEncoding16Bit) {
+ constexpr int num_points = 120;
+ std::vector<std::array<uint16_t, 3>> points3(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint16_t, 3> pos;
+ // Generate some pseudo-random points.
+ pos[0] = 8 * ((i * 7) % 127);
+ pos[1] = 13 * ((i * 3) % 321);
+ pos[2] = 29 * ((i * 19) % 450);
+ points3[i] = pos;
+ }
+
+ PointCloudBuilder builder;
+ builder.Start(num_points);
+ const int att_id3 =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT16);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id3, PointIndex(i),
+ &(points3[i.value()])[0]);
+ }
+
+ std::unique_ptr<PointCloud> pc = builder.Finalize(false);
+ ASSERT_NE(pc, nullptr);
+
+ TestKdTreeEncoding(*pc);
+}
+
// Test 16 and 8 bit encoding with size bigger than 32bit encoding.
TEST_F(PointCloudKdTreeEncodingTest,
       TestIntKdTreeEncodingHigherDimensionVariedTypesBig16BitEncoding) {
  constexpr int num_points = 120;
  // 32-bit, three-component position attribute.
  std::vector<std::array<uint32_t, 3>> points3(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint32_t, 3> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127);
    pos[1] = 13 * ((i * 3) % 321);
    pos[2] = 29 * ((i * 19) % 450);
    points3[i] = pos;
  }
  // The total size of the 16bit encoding must be bigger than the total size of
  // the 32bit encoding.
  std::vector<std::array<uint16_t, 7>> points7(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint16_t, 7> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127) + 1;
    pos[1] = 13 * ((i * 3) % 321) + 1;
    pos[2] = pos[0] + 13;
    pos[3] = pos[2] + 13;
    pos[4] = pos[3] + 13;
    pos[5] = pos[4] + 13;
    pos[6] = pos[5] + 13;
    points7[i] = pos;
  }
  // 8-bit, one-component generic attribute.
  // NOTE(review): the expression below can exceed the uint8_t range and wrap
  // around — presumably intentional for generating test data; confirm.
  std::vector<std::array<uint8_t, 1>> points1(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint8_t, 1> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127) + 11;
    points1[i] = pos;
  }

  PointCloudBuilder builder;
  builder.Start(num_points);
  const int att_id3 =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id3, PointIndex(i),
                                      &(points3[i.value()])[0]);
  }
  const int att_id2 =
      builder.AddAttribute(GeometryAttribute::POSITION, 7, DT_UINT16);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id2, PointIndex(i),
                                      &(points7[i.value()])[0]);
  }
  const int att_id1 =
      builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_UINT8);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id1, PointIndex(i),
                                      &(points1[i.value()])[0]);
  }

  std::unique_ptr<PointCloud> pc = builder.Finalize(false);
  ASSERT_NE(pc, nullptr);

  TestKdTreeEncoding(*pc);
}
+
+// Test encoding of quantized values.
+TEST_F(PointCloudKdTreeEncodingTest,
+ TestIntKdTreeEncodingHigherDimensionFloatTypes) {
+ constexpr int num_points = 130;
+ std::vector<std::array<uint32_t, 3>> points3(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint32_t, 3> pos;
+ // Generate some pseudo-random points.
+ pos[0] = 8 * ((i * 7) % 125);
+ pos[1] = 13 * ((i * 3) % 334);
+ pos[2] = 29 * ((i * 19) % 470);
+ points3[i] = pos;
+ }
+ std::vector<std::array<float, 2>> points_float(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<float, 2> pos;
+ // Generate some pseudo-random points.
+ pos[0] = static_cast<float>(8 * ((i * 7) % 127) + 1) / 2.5f;
+ pos[1] = static_cast<float>(13 * ((i * 3) % 321) + 1) / 3.2f;
+ points_float[i] = pos;
+ }
+
+ PointCloudBuilder builder;
+ builder.Start(num_points);
+ const int att_id3 =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id3, PointIndex(i),
+ &(points3[i.value()])[0]);
+ }
+ const int att_id_float =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 2, DT_FLOAT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id_float, PointIndex(i),
+ &(points_float[i.value()])[0]);
+ }
+
+ std::unique_ptr<PointCloud> pc = builder.Finalize(false);
+ ASSERT_NE(pc, nullptr);
+
+ TestKdTreeEncoding(*pc);
+}
+
// Test encoding of signed integer values
TEST_F(PointCloudKdTreeEncodingTest, TestIntKdTreeEncodingSignedTypes) {
  constexpr int num_points = 120;
  // Unsigned 32-bit, three-component position attribute.
  std::vector<std::array<uint32_t, 3>> points3(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<uint32_t, 3> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127);
    pos[1] = 13 * ((i * 3) % 321);
    pos[2] = 29 * ((i * 19) % 450);
    points3[i] = pos;
  }
  // Signed 32-bit, two-component attribute; every third x value is negated to
  // exercise negative inputs.
  std::vector<std::array<int32_t, 2>> points2(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<int32_t, 2> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127) + 1;
    if (i % 3 == 0) {
      pos[0] = -pos[0];
    }
    pos[1] = 13 * ((i * 3) % 321) + 1;
    points2[i] = pos;
  }
  // Signed 16-bit, one-component attribute; every fifth value is negated.
  std::vector<std::array<int16_t, 1>> points1(num_points);
  for (int i = 0; i < num_points; ++i) {
    std::array<int16_t, 1> pos;
    // Generate some pseudo-random points.
    pos[0] = 8 * ((i * 7) % 127) + 11;
    if (i % 5 == 0) {
      pos[0] = -pos[0];
    }
    points1[i] = pos;
  }

  PointCloudBuilder builder;
  builder.Start(num_points);
  const int att_id3 =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id3, PointIndex(i),
                                      &(points3[i.value()])[0]);
  }
  const int att_id2 =
      builder.AddAttribute(GeometryAttribute::POSITION, 2, DT_INT32);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id2, PointIndex(i),
                                      &(points2[i.value()])[0]);
  }

  const int att_id1 =
      builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_INT16);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id1, PointIndex(i),
                                      &(points1[i.value()])[0]);
  }

  std::unique_ptr<PointCloud> pc = builder.Finalize(false);
  ASSERT_NE(pc, nullptr);

  TestKdTreeEncoding(*pc);
}
+
+// Test encoding of integer point clouds with > 16 dimensions.
+TEST_F(PointCloudKdTreeEncodingTest, TestIntKdTreeEncodingHighDimensional) {
+ constexpr int num_points = 120;
+ constexpr int num_dims = 42;
+ std::vector<std::array<uint32_t, num_dims>> points(num_points);
+ for (int i = 0; i < num_points; ++i) {
+ std::array<uint32_t, num_dims> pos;
+ // Generate some pseudo-random points.
+ for (int d = 0; d < num_dims; ++d) {
+ pos[d] = 8 * ((i + d) * (7 + (d % 4)) % (127 + d % 3));
+ }
+ points[i] = pos;
+ }
+ PointCloudBuilder builder;
+ builder.Start(num_points);
+ const int att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, num_dims, DT_UINT32);
+ for (PointIndex i(0); i < num_points; ++i) {
+ builder.SetAttributeValueForPoint(att_id, PointIndex(i),
+ &(points[i.value()])[0]);
+ }
+
+ std::unique_ptr<PointCloud> pc = builder.Finalize(false);
+ ASSERT_NE(pc, nullptr);
+
+ TestKdTreeEncoding(*pc);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.cc
new file mode 100644
index 0000000..b9382d3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
+
+#include "draco/compression/attributes/linear_sequencer.h"
+#include "draco/compression/attributes/sequential_attribute_decoders_controller.h"
+
+namespace draco {
+
+bool PointCloudSequentialDecoder::DecodeGeometryData() {
+ int32_t num_points;
+ if (!buffer()->Decode(&num_points)) {
+ return false;
+ }
+ point_cloud()->set_num_points(num_points);
+ return true;
+}
+
+bool PointCloudSequentialDecoder::CreateAttributesDecoder(
+ int32_t att_decoder_id) {
+ // Always create the basic attribute decoder.
+ return SetAttributesDecoder(
+ att_decoder_id,
+ std::unique_ptr<AttributesDecoder>(
+ new SequentialAttributeDecodersController(
+ std::unique_ptr<PointsSequencer>(
+ new LinearSequencer(point_cloud()->num_points())))));
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.h
new file mode 100644
index 0000000..9968dc2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_decoder.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_SEQUENTIAL_DECODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_SEQUENTIAL_DECODER_H_
+
+#include "draco/compression/point_cloud/point_cloud_decoder.h"
+
+namespace draco {
+
// Point cloud decoder for data encoded by the PointCloudSequentialEncoder.
// All attribute values are decoded using an identity mapping between point ids
// and attribute value ids.
class PointCloudSequentialDecoder : public PointCloudDecoder {
 protected:
  // Reads the number of encoded points from the buffer.
  bool DecodeGeometryData() override;
  // Creates the attributes decoder, driven by a linear (identity) point
  // sequencer.
  bool CreateAttributesDecoder(int32_t att_decoder_id) override;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_SEQUENTIAL_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.cc
new file mode 100644
index 0000000..fa7b6fd
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.cc
@@ -0,0 +1,49 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
+
+#include "draco/compression/attributes/linear_sequencer.h"
+#include "draco/compression/attributes/sequential_attribute_encoders_controller.h"
+
+namespace draco {
+
+Status PointCloudSequentialEncoder::EncodeGeometryData() {
+ const int32_t num_points = point_cloud()->num_points();
+ buffer()->Encode(num_points);
+ return OkStatus();
+}
+
+bool PointCloudSequentialEncoder::GenerateAttributesEncoder(int32_t att_id) {
+ // Create only one attribute encoder that is going to encode all points in a
+ // linear sequence.
+ if (att_id == 0) {
+ // Create a new attribute encoder only for the first attribute.
+ AddAttributesEncoder(std::unique_ptr<AttributesEncoder>(
+ new SequentialAttributeEncodersController(
+ std::unique_ptr<PointsSequencer>(
+ new LinearSequencer(point_cloud()->num_points())),
+ att_id)));
+ } else {
+ // Reuse the existing attribute encoder for other attributes.
+ attributes_encoder(0)->AddAttributeId(att_id);
+ }
+ return true;
+}
+
// Every input point is encoded, so the encoded-point count equals the input
// point count.
void PointCloudSequentialEncoder::ComputeNumberOfEncodedPoints() {
  set_num_encoded_points(point_cloud()->num_points());
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.h b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.h
new file mode 100644
index 0000000..40d8edc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoder.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_SEQUENTIAL_ENCODER_H_
+#define DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_SEQUENTIAL_ENCODER_H_
+
+#include "draco/compression/point_cloud/point_cloud_encoder.h"
+
+namespace draco {
+
// A basic point cloud encoder that iterates over all points and encodes all
// attribute values for every visited point. The attribute values encoding
// can be controlled using provided encoding option to enable features such
// as quantization or prediction schemes.
// This encoder preserves the order and the number of input points, but the
// mapping between point ids and attribute values may be different for the
// decoded point cloud.
class PointCloudSequentialEncoder : public PointCloudEncoder {
 public:
  // Identifies this encoding method in the encoded bitstream header.
  uint8_t GetEncodingMethod() const override {
    return POINT_CLOUD_SEQUENTIAL_ENCODING;
  }

 protected:
  // Writes the number of points to the output buffer.
  Status EncodeGeometryData() override;
  // Creates one attributes encoder (for att_id 0) and registers every other
  // attribute with it.
  bool GenerateAttributesEncoder(int32_t att_id) override;
  // Sets the encoded-point count to the input point count.
  void ComputeNumberOfEncodedPoints() override;
};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_POINT_CLOUD_POINT_CLOUD_SEQUENTIAL_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoding_test.cc b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoding_test.cc
new file mode 100644
index 0000000..32be120
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/compression/point_cloud/point_cloud_sequential_encoding_test.cc
@@ -0,0 +1,92 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
+#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/obj_decoder.h"
+
+namespace draco {
+
+class PointCloudSequentialEncodingTest : public ::testing::Test {
+ protected:
+ std::unique_ptr<PointCloud> EncodeAndDecodePointCloud(const PointCloud *pc) {
+ EncoderBuffer buffer;
+ PointCloudSequentialEncoder encoder;
+ EncoderOptions options = EncoderOptions::CreateDefaultOptions();
+ encoder.SetPointCloud(*pc);
+ if (!encoder.Encode(options, &buffer).ok()) {
+ return nullptr;
+ }
+
+ DecoderBuffer dec_buffer;
+ dec_buffer.Init(buffer.data(), buffer.size());
+ PointCloudSequentialDecoder decoder;
+
+ std::unique_ptr<PointCloud> out_pc(new PointCloud());
+ DecoderOptions dec_options;
+ if (!decoder.Decode(dec_options, &dec_buffer, out_pc.get()).ok()) {
+ return nullptr;
+ }
+ return out_pc;
+ }
+
+ void TestEncoding(const std::string &file_name) {
+ std::unique_ptr<PointCloud> pc = ReadPointCloudFromTestFile(file_name);
+ ASSERT_NE(pc, nullptr);
+
+ std::unique_ptr<PointCloud> decoded_pc =
+ EncodeAndDecodePointCloud(pc.get());
+ ASSERT_NE(decoded_pc.get(), nullptr);
+ ASSERT_EQ(decoded_pc->num_points(), pc->num_points());
+ }
+};
+
// Round-trips a point cloud loaded from an .obj test file and verifies the
// point count survives the trip.
TEST_F(PointCloudSequentialEncodingTest, DoesEncodeAndDecode) {
  TestEncoding("test_nm.obj");
}
+
+TEST_F(PointCloudSequentialEncodingTest, EncodingPointCloudWithMetadata) {
+ std::unique_ptr<PointCloud> pc = ReadPointCloudFromTestFile("test_nm.obj");
+ ASSERT_NE(pc, nullptr);
+ // Add metadata to point cloud.
+ std::unique_ptr<GeometryMetadata> metadata =
+ std::unique_ptr<GeometryMetadata>(new GeometryMetadata());
+ const uint32_t pos_att_id =
+ pc->GetNamedAttributeId(GeometryAttribute::POSITION);
+ std::unique_ptr<AttributeMetadata> pos_metadata =
+ std::unique_ptr<AttributeMetadata>(new AttributeMetadata());
+ pos_metadata->AddEntryString("name", "position");
+ pc->AddAttributeMetadata(pos_att_id, std::move(pos_metadata));
+
+ std::unique_ptr<PointCloud> decoded_pc = EncodeAndDecodePointCloud(pc.get());
+ ASSERT_NE(decoded_pc.get(), nullptr);
+
+ const GeometryMetadata *const pc_metadata = decoded_pc->GetMetadata();
+ ASSERT_NE(pc_metadata, nullptr);
+ // Test getting attribute metadata by id.
+ ASSERT_NE(pc->GetAttributeMetadataByAttributeId(pos_att_id), nullptr);
+ // Test getting attribute metadata by entry name value pair.
+ const AttributeMetadata *const requested_att_metadata =
+ pc_metadata->GetAttributeMetadataByStringEntry("name", "position");
+ ASSERT_NE(requested_att_metadata, nullptr);
+ ASSERT_EQ(requested_att_metadata->att_unique_id(),
+ pc->attribute(pos_att_id)->unique_id());
+}
+
+// TODO(ostava): Test the reusability of a single instance of the encoder and
+// decoder class.
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/bit_utils.cc b/libs/assimp/contrib/draco/src/draco/core/bit_utils.cc
new file mode 100644
index 0000000..37119a7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/bit_utils.cc
@@ -0,0 +1,36 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/bit_utils.h"
+
+namespace draco {
+
+void ConvertSignedIntsToSymbols(const int32_t *in, int in_values,
+ uint32_t *out) {
+ // Convert the quantized values into a format more suitable for entropy
+ // encoding.
+ // Put the sign bit into LSB pos and shift the rest one bit left.
+ for (int i = 0; i < in_values; ++i) {
+ out[i] = ConvertSignedIntToSymbol(in[i]);
+ }
+}
+
+void ConvertSymbolsToSignedInts(const uint32_t *in, int in_values,
+ int32_t *out) {
+ for (int i = 0; i < in_values; ++i) {
+ out[i] = ConvertSymbolToSignedInt(in[i]);
+ }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/bit_utils.h b/libs/assimp/contrib/draco/src/draco/core/bit_utils.h
new file mode 100644
index 0000000..a102095
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/bit_utils.h
@@ -0,0 +1,124 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File containing a basic set of bit manipulation utilities used within the
+// Draco library.
+
+#ifndef DRACO_CORE_BIT_UTILS_H_
+#define DRACO_CORE_BIT_UTILS_H_
+
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <type_traits>
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif // defined(_MSC_VER)
+
+namespace draco {
+
// Returns the number of '1' bits within the input 32 bit integer.
inline int CountOneBits32(uint32_t n) {
  // Kernighan's method: each iteration clears the lowest set bit, so the
  // loop runs once per set bit.
  int count = 0;
  while (n != 0) {
    n &= n - 1;
    ++count;
  }
  return count;
}
+
// Returns |n| with its 32 bits in reversed order (bit 0 becomes bit 31).
inline uint32_t ReverseBits32(uint32_t n) {
  // Shift bits out of |n| one at a time and into |reversed| from the other
  // end (32 iterations).
  uint32_t reversed = 0;
  for (int i = 0; i < 32; ++i) {
    reversed = (reversed << 1) | (n & 1);
    n >>= 1;
  }
  return reversed;
}
+
// Copies the |nbits| from the src integer into the |dst| integer using the
// provided bit offsets |dst_offset| and |src_offset|. Bits of |*dst| outside
// the destination range are preserved. |nbits| must be in [1, 32].
inline void CopyBits32(uint32_t *dst, int dst_offset, uint32_t src,
                       int src_offset, int nbits) {
  // Mask selecting the destination bit range [dst_offset, dst_offset+nbits).
  const uint32_t field_mask = ((~static_cast<uint32_t>(0)) >> (32 - nbits))
                              << dst_offset;
  // Source bits moved so they line up with the destination range.
  const uint32_t aligned_src = (src >> src_offset) << dst_offset;
  *dst = (*dst & ~field_mask) | (aligned_src & field_mask);
}
+
// Returns the location of the most significant bit in the input integer |n|.
// The functionality is not defined for |n == 0|.
inline int MostSignificantBit(uint32_t n) {
#if defined(__GNUC__)
  // __builtin_clz counts leading zeros; XOR with 31 converts that count into
  // the index of the highest set bit.
  return 31 ^ __builtin_clz(n);
#elif defined(_MSC_VER)
  unsigned long where;
  _BitScanReverse(&where, n);
  // static_cast instead of a C-style cast, per style; |where| fits in int.
  return static_cast<int>(where);
#else
  // TODO(fgalligan): Optimize this code.
  int msb = -1;
  while (n != 0) {
    msb++;
    n >>= 1;
  }
  return msb;
#endif
}
+
+// Helper function that converts signed integer values into unsigned integer
+// symbols that can be encoded using an entropy encoder.
+void ConvertSignedIntsToSymbols(const int32_t *in, int in_values,
+ uint32_t *out);
+
+// Converts unsigned integer symbols encoded with an entropy encoder back to
+// signed values.
+void ConvertSymbolsToSignedInts(const uint32_t *in, int in_values,
+ int32_t *out);
+
// Helper function that converts a single signed integer value into an unsigned
// integer symbol that can be encoded using an entropy encoder. Non-negative
// values map to even symbols (v -> 2v); negative values map to odd symbols
// (-1 -> 1, -2 -> 3, ...).
template <class IntTypeT>
typename std::make_unsigned<IntTypeT>::type ConvertSignedIntToSymbol(
    IntTypeT val) {
  static_assert(std::is_integral<IntTypeT>::value, "IntTypeT is not integral.");
  typedef typename std::make_unsigned<IntTypeT>::type UnsignedType;
  if (val >= 0) {
    return static_cast<UnsignedType>(val) << 1;
  }
  // The +1 before negation keeps the arithmetic inside the signed range
  // (avoids overflow for the most negative value).
  const UnsignedType magnitude = static_cast<UnsignedType>(-(val + 1));
  return (magnitude << 1) | 1;
}
+
// Converts a single unsigned integer symbol encoded with an entropy encoder
// back to a signed value. Inverse of ConvertSignedIntToSymbol: even symbols
// are non-negative values, odd symbols are negative.
template <class IntTypeT>
typename std::make_signed<IntTypeT>::type ConvertSymbolToSignedInt(
    IntTypeT val) {
  static_assert(std::is_integral<IntTypeT>::value, "IntTypeT is not integral.");
  typedef typename std::make_signed<IntTypeT>::type SignedType;
  // The LSB carries the sign; the remaining bits carry the magnitude.
  const bool negative = (val & 1) != 0;
  const SignedType magnitude = static_cast<SignedType>(val >> 1);
  return negative ? static_cast<SignedType>(-magnitude - 1) : magnitude;
}
+
+} // namespace draco
+
+#endif // DRACO_CORE_BIT_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/bounding_box.cc b/libs/assimp/contrib/draco/src/draco/core/bounding_box.cc
new file mode 100644
index 0000000..8a07096
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/bounding_box.cc
@@ -0,0 +1,30 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "draco/core/bounding_box.h"
+
+namespace draco {
+
+// Creates an "empty" bounding box: the minimum corner is set to the largest
+// and the maximum corner to the smallest representable float, so the first
+// call to Update() will snap the box onto the supplied point.
+BoundingBox::BoundingBox()
+    : BoundingBox(Vector3f(std::numeric_limits<float>::max(),
+                           std::numeric_limits<float>::max(),
+                           std::numeric_limits<float>::max()),
+                  Vector3f(-std::numeric_limits<float>::max(),
+                           -std::numeric_limits<float>::max(),
+                           -std::numeric_limits<float>::max())) {}
+
+// Creates a bounding box spanning |min_point| to |max_point|.
+BoundingBox::BoundingBox(const Vector3f &min_point, const Vector3f &max_point)
+    : min_point_(min_point), max_point_(max_point) {}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/bounding_box.h b/libs/assimp/contrib/draco/src/draco/core/bounding_box.h
new file mode 100644
index 0000000..31ba2d6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/bounding_box.h
@@ -0,0 +1,72 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_BOUNDING_BOX_H_
+#define DRACO_CORE_BOUNDING_BOX_H_
+
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
+// Class for computing the bounding box of points in 3D space.
+class BoundingBox {
+ public:
+  // Creates bounding box object with minimum and maximum points initialized to
+  // the largest positive and the smallest negative values, respectively. The
+  // resulting abstract bounding box effectively has no points and can be
+  // updated by providing any point to Update() method.
+  BoundingBox();
+
+  // Creates bounding box object with minimum and maximum points initialized to
+  // |min_point| and |max_point|, respectively.
+  BoundingBox(const Vector3f &min_point, const Vector3f &max_point);
+
+  // Returns the minimum point of the bounding box.
+  inline const Vector3f &GetMinPoint() const { return min_point_; }
+
+  // Returns the maximum point of the bounding box.
+  inline const Vector3f &GetMaxPoint() const { return max_point_; }
+
+  // Conditionally updates the bounding box with a given |new_point|: each
+  // coordinate of the min/max corners is extended so the box contains the
+  // point.
+  void Update(const Vector3f &new_point) {
+    for (int i = 0; i < 3; i++) {
+      if (new_point[i] < min_point_[i]) {
+        min_point_[i] = new_point[i];
+      }
+      if (new_point[i] > max_point_[i]) {
+        max_point_[i] = new_point[i];
+      }
+    }
+  }
+
+  // Updates bounding box with minimum and maximum points of the |other|
+  // bounding box.
+  void Update(const BoundingBox &other) {
+    Update(other.GetMinPoint());
+    Update(other.GetMaxPoint());
+  }
+
+  // Returns the size of the bounding box along each axis. Note that the
+  // components are negative for a default-constructed (empty) box.
+  Vector3f Size() const { return max_point_ - min_point_; }
+
+  // Returns the center of the bounding box.
+  Vector3f Center() const { return (min_point_ + max_point_) / 2; }
+
+ private:
+  Vector3f min_point_;
+  Vector3f max_point_;
+};
+} // namespace draco
+
+#endif // DRACO_CORE_BOUNDING_BOX_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/buffer_bit_coding_test.cc b/libs/assimp/contrib/draco/src/draco/core/buffer_bit_coding_test.cc
new file mode 100644
index 0000000..892b35b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/buffer_bit_coding_test.cc
@@ -0,0 +1,115 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Test fixture exposing the bit coder types nested inside the encoder and
+// decoder buffers (DecoderBuffer declares this fixture as a friend).
+class BufferBitCodingTest : public ::testing::Test {
+ public:
+  typedef DecoderBuffer::BitDecoder BitDecoder;
+  typedef EncoderBuffer::BitEncoder BitEncoder;
+};
+
+// Verifies that whole bytes written through BitEncoder decode back unchanged
+// when read on byte-aligned boundaries.
+TEST_F(BufferBitCodingTest, TestBitCodersByteAligned) {
+  constexpr int buffer_size = 32;
+  char buffer[buffer_size];
+  BitEncoder encoder(buffer);
+  const uint8_t data[] = {0x76, 0x54, 0x32, 0x10, 0x76, 0x54, 0x32, 0x10};
+  const int bytes_to_encode = sizeof(data);
+
+  for (int i = 0; i < bytes_to_encode; ++i) {
+    encoder.PutBits(data[i], sizeof(data[i]) * 8);
+    // The encoder's bit count should grow by exactly 8 per byte written.
+    ASSERT_EQ((i + 1) * sizeof(data[i]) * 8, encoder.Bits());
+  }
+
+  BitDecoder decoder;
+  decoder.reset(static_cast<const void *>(buffer), bytes_to_encode);
+  for (int i = 0; i < bytes_to_encode; ++i) {
+    uint32_t x = 0;
+    ASSERT_TRUE(decoder.GetBits(8, &x));
+    ASSERT_EQ(x, data[i]);
+  }
+
+  ASSERT_EQ(bytes_to_encode * 8u, decoder.BitsDecoded());
+}
+
+// Encodes 51 bits -- not a whole number of bytes -- and verifies that the
+// partial final byte decodes correctly.
+TEST_F(BufferBitCodingTest, TestBitCodersNonByte) {
+  constexpr int buffer_size = 32;
+  char buffer[buffer_size];
+  BitEncoder encoder(buffer);
+  const uint8_t data[] = {0x76, 0x54, 0x32, 0x10, 0x76, 0x54, 0x32, 0x10};
+  const uint32_t bits_to_encode = 51;
+  const int bytes_to_encode = (bits_to_encode / 8) + 1;
+
+  for (int i = 0; i < bytes_to_encode; ++i) {
+    // The last iteration writes only the remaining 51 % 8 bits.
+    const int num_bits = (encoder.Bits() + 8 <= bits_to_encode)
+                             ? 8
+                             : bits_to_encode - encoder.Bits();
+    encoder.PutBits(data[i], num_bits);
+  }
+
+  BitDecoder decoder;
+  decoder.reset(static_cast<const void *>(buffer), bytes_to_encode);
+  int64_t bits_to_decode = encoder.Bits();
+  for (int i = 0; i < bytes_to_encode; ++i) {
+    uint32_t x = 0;
+    const int num_bits = (bits_to_decode > 8) ? 8 : bits_to_decode;
+    ASSERT_TRUE(decoder.GetBits(num_bits, &x));
+    // Mask the source byte down to only the bits that were actually encoded.
+    const int bits_to_shift = 8 - num_bits;
+    const uint8_t test_byte =
+        ((data[i] << bits_to_shift) & 0xff) >> bits_to_shift;
+    ASSERT_EQ(x, test_byte);
+    bits_to_decode -= 8;
+  }
+
+  ASSERT_EQ(bits_to_encode, decoder.BitsDecoded());
+}
+
+// 0xaaaa yields alternating bits (0, 1, 0, 1, ...) when read LSB-first.
+TEST_F(BufferBitCodingTest, TestSingleBits) {
+  const int data = 0xaaaa;
+
+  BitDecoder decoder;
+  decoder.reset(static_cast<const void *>(&data), sizeof(data));
+
+  for (uint32_t i = 0; i < 16; ++i) {
+    uint32_t x = 0;
+    ASSERT_TRUE(decoder.GetBits(1, &x));
+    ASSERT_EQ(x, (i % 2));
+  }
+
+  ASSERT_EQ(16u, decoder.BitsDecoded());
+}
+
+// Reads 16-bit values spanning byte boundaries; bits are consumed LSB-first,
+// so the byte pair 0x76, 0x54 reads back as the 16-bit value 0x5476.
+TEST_F(BufferBitCodingTest, TestMultipleBits) {
+  const uint8_t data[] = {0x76, 0x54, 0x32, 0x10, 0x76, 0x54, 0x32, 0x10};
+
+  BitDecoder decoder;
+  decoder.reset(static_cast<const void *>(data), sizeof(data));
+
+  uint32_t x = 0;
+  for (uint32_t i = 0; i < 2; ++i) {
+    ASSERT_TRUE(decoder.GetBits(16, &x));
+    ASSERT_EQ(x, 0x5476u);
+    ASSERT_EQ(16 + (i * 32), decoder.BitsDecoded());
+
+    ASSERT_TRUE(decoder.GetBits(16, &x));
+    ASSERT_EQ(x, 0x1032u);
+    ASSERT_EQ(32 + (i * 32), decoder.BitsDecoded());
+  }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/cycle_timer.cc b/libs/assimp/contrib/draco/src/draco/core/cycle_timer.cc
new file mode 100644
index 0000000..58df4df
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/cycle_timer.cc
@@ -0,0 +1,49 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/cycle_timer.h"
+
+namespace draco {
+// Captures the current time as the start of the measured interval.
+void DracoTimer::Start() {
+#ifdef _WIN32
+  QueryPerformanceCounter(&tv_start_);
+#else
+  gettimeofday(&tv_start_, nullptr);
+#endif
+}
+
+// Captures the current time as the end of the measured interval.
+void DracoTimer::Stop() {
+#ifdef _WIN32
+  QueryPerformanceCounter(&tv_end_);
+#else
+  gettimeofday(&tv_end_, nullptr);
+#endif
+}
+
+// Returns the elapsed time between Start() and Stop() in milliseconds.
+int64_t DracoTimer::GetInMs() {
+#ifdef _WIN32
+  // Convert performance-counter ticks to milliseconds using the counter
+  // frequency (ticks per second).
+  LARGE_INTEGER elapsed = {0};
+  elapsed.QuadPart = tv_end_.QuadPart - tv_start_.QuadPart;
+
+  LARGE_INTEGER frequency = {0};
+  QueryPerformanceFrequency(&frequency);
+  return elapsed.QuadPart * 1000 / frequency.QuadPart;
+#else
+  // Combine the whole-second and microsecond deltas into milliseconds.
+  const int64_t seconds = (tv_end_.tv_sec - tv_start_.tv_sec) * 1000;
+  const int64_t milliseconds = (tv_end_.tv_usec - tv_start_.tv_usec) / 1000;
+  return seconds + milliseconds;
+#endif
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/cycle_timer.h b/libs/assimp/contrib/draco/src/draco/core/cycle_timer.h
new file mode 100644
index 0000000..f480cc9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/cycle_timer.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_CYCLE_TIMER_H_
+#define DRACO_CORE_CYCLE_TIMER_H_
+
+#ifdef _WIN32
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+typedef LARGE_INTEGER DracoTimeVal;
+#else
+#include <sys/time.h>
+typedef timeval DracoTimeVal;
+#endif
+
+#include <cinttypes>
+#include <cstddef>
+
+namespace draco {
+
+// Simple interval timer: Start()/Stop() capture timestamps and GetInMs()
+// reports the elapsed time between them.
+class DracoTimer {
+ public:
+  DracoTimer() {}
+  ~DracoTimer() {}
+  // Captures the current time as the interval start.
+  void Start();
+  // Captures the current time as the interval end.
+  void Stop();
+  // Returns the time elapsed between Start() and Stop() in milliseconds.
+  int64_t GetInMs();
+
+ private:
+  DracoTimeVal tv_start_;
+  DracoTimeVal tv_end_;
+};
+
+typedef DracoTimer CycleTimer;
+
+} // namespace draco
+
+#endif // DRACO_CORE_CYCLE_TIMER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/data_buffer.cc b/libs/assimp/contrib/draco/src/draco/core/data_buffer.cc
new file mode 100644
index 0000000..f0b43d6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/data_buffer.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/data_buffer.h"
+
+#include <algorithm>
+
+namespace draco {
+
+DataBuffer::DataBuffer() {}
+
+// Overwrites buffer content from its beginning (offset 0).
+bool DataBuffer::Update(const void *data, int64_t size) {
+  const int64_t offset = 0;
+  return this->Update(data, size, offset);
+}
+
+// Writes |size| bytes of |data| at |offset|, growing the internal storage if
+// the write runs past the current end. A null |data| only resizes the buffer
+// to |size + offset|. Returns false when the requested range is negative.
+bool DataBuffer::Update(const void *data, int64_t size, int64_t offset) {
+  if (data == nullptr) {
+    if (size + offset < 0) {
+      return false;
+    }
+    // If no data is provided, just resize the buffer.
+    data_.resize(size + offset);
+  } else {
+    if (size < 0) {
+      return false;
+    }
+    // Grow the buffer so the whole [offset, offset + size) range is valid.
+    if (size + offset > static_cast<int64_t>(data_.size())) {
+      data_.resize(size + offset);
+    }
+    const uint8_t *const byte_data = static_cast<const uint8_t *>(data);
+    std::copy(byte_data, byte_data + size, data_.data() + offset);
+  }
+  // Bump the update counter so consumers can detect the change.
+  descriptor_.buffer_update_count++;
+  return true;
+}
+
+// Resizes the underlying storage; counts as a buffer update.
+void DataBuffer::Resize(int64_t size) {
+  data_.resize(size);
+  descriptor_.buffer_update_count++;
+}
+
+// Writes the entire buffer content to |stream|. No-op for an empty buffer.
+void DataBuffer::WriteDataToStream(std::ostream &stream) {
+  if (data_.size() == 0) {
+    return;
+  }
+  stream.write(reinterpret_cast<char *>(data_.data()), data_.size());
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/data_buffer.h b/libs/assimp/contrib/draco/src/draco/core/data_buffer.h
new file mode 100644
index 0000000..8ee6905
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/data_buffer.h
@@ -0,0 +1,82 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DATA_BUFFER_H_
+#define DRACO_CORE_DATA_BUFFER_H_
+
+#include <cstring>
+#include <ostream>
+#include <vector>
+
+#include "draco/core/draco_types.h"
+
+namespace draco {
+
+// Buffer descriptor serves as a unique identifier of a buffer.
+struct DataBufferDescriptor {
+  DataBufferDescriptor() : buffer_id(0), buffer_update_count(0) {}
+  // Id of the data buffer.
+  int64_t buffer_id;
+  // The number of times the buffer content was updated.
+  int64_t buffer_update_count;
+};
+
+// Class used for storing raw buffer data.
+class DataBuffer {
+ public:
+  DataBuffer();
+  // Replaces buffer content starting at the beginning of the buffer.
+  bool Update(const void *data, int64_t size);
+  // Writes |size| bytes of |data| at |offset|, growing the buffer if needed.
+  bool Update(const void *data, int64_t size, int64_t offset);
+
+  // Reallocate the buffer storage to a new size keeping the data unchanged.
+  void Resize(int64_t new_size);
+  void WriteDataToStream(std::ostream &stream);
+  // Reads data from the buffer. Potentially unsafe, caller needs to ensure
+  // the accessed memory is valid.
+  void Read(int64_t byte_pos, void *out_data, size_t data_size) const {
+    memcpy(out_data, data() + byte_pos, data_size);
+  }
+
+  // Writes data to the buffer. Unsafe, caller must ensure the accessed memory
+  // is valid.
+  void Write(int64_t byte_pos, const void *in_data, size_t data_size) {
+    memcpy(const_cast<uint8_t *>(data()) + byte_pos, in_data, data_size);
+  }
+
+  // Copies data from another buffer to this buffer.
+  void Copy(int64_t dst_offset, const DataBuffer *src_buf, int64_t src_offset,
+            int64_t size) {
+    memcpy(const_cast<uint8_t *>(data()) + dst_offset,
+           src_buf->data() + src_offset, size);
+  }
+
+  void set_update_count(int64_t buffer_update_count) {
+    descriptor_.buffer_update_count = buffer_update_count;
+  }
+  int64_t update_count() const { return descriptor_.buffer_update_count; }
+  size_t data_size() const { return data_.size(); }
+  const uint8_t *data() const { return data_.data(); }
+  uint8_t *data() { return &data_[0]; }
+  int64_t buffer_id() const { return descriptor_.buffer_id; }
+  void set_buffer_id(int64_t buffer_id) { descriptor_.buffer_id = buffer_id; }
+
+ private:
+  std::vector<uint8_t> data_;
+  // Counter incremented by Update() calls.
+  DataBufferDescriptor descriptor_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_DATA_BUFFER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/decoder_buffer.cc b/libs/assimp/contrib/draco/src/draco/core/decoder_buffer.cc
new file mode 100644
index 0000000..4e8ed61
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/decoder_buffer.cc
@@ -0,0 +1,72 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/decoder_buffer.h"
+
+#include "draco/core/macros.h"
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
+DecoderBuffer::DecoderBuffer()
+    : data_(nullptr),
+      data_size_(0),
+      pos_(0),
+      bit_mode_(false),
+      bitstream_version_(0) {}
+
+// Initializes the buffer, keeping the previously set bitstream version.
+void DecoderBuffer::Init(const char *data, size_t data_size) {
+  Init(data, data_size, bitstream_version_);
+}
+
+// Initializes the buffer and resets the parsing position. |data| is not
+// copied; the caller keeps ownership.
+void DecoderBuffer::Init(const char *data, size_t data_size, uint16_t version) {
+  data_ = data;
+  data_size_ = data_size;
+  bitstream_version_ = version;
+  pos_ = 0;
+}
+
+// Switches the buffer into bit-decoding mode. When |decode_size| is set, the
+// encoded size of the bit sequence is read first and returned in |out_size|.
+bool DecoderBuffer::StartBitDecoding(bool decode_size, uint64_t *out_size) {
+  if (decode_size) {
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
+    // Bitstreams older than 2.2 stored the size as a raw value instead of a
+    // varint.
+    if (bitstream_version_ < DRACO_BITSTREAM_VERSION(2, 2)) {
+      if (!Decode(out_size)) {
+        return false;
+      }
+    } else
+#endif
+    {
+      if (!DecodeVarint(out_size, this)) {
+        return false;
+      }
+    }
+  }
+  bit_mode_ = true;
+  bit_decoder_.reset(data_head(), remaining_size());
+  return true;
+}
+
+// Leaves bit-decoding mode and advances the byte position past the consumed
+// bits, rounding up to the next whole byte.
+void DecoderBuffer::EndBitDecoding() {
+  bit_mode_ = false;
+  const uint64_t bits_decoded = bit_decoder_.BitsDecoded();
+  const uint64_t bytes_decoded = (bits_decoded + 7) / 8;
+  pos_ += bytes_decoded;
+}
+
+DecoderBuffer::BitDecoder::BitDecoder()
+    : bit_buffer_(nullptr), bit_buffer_end_(nullptr), bit_offset_(0) {}
+
+DecoderBuffer::BitDecoder::~BitDecoder() {}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/decoder_buffer.h b/libs/assimp/contrib/draco/src/draco/core/decoder_buffer.h
new file mode 100644
index 0000000..0559abb
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/decoder_buffer.h
@@ -0,0 +1,216 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DECODER_BUFFER_H_
+#define DRACO_CORE_DECODER_BUFFER_H_
+
+#include <stdint.h>
+
+#include <cstring>
+#include <memory>
+
+#include "draco/core/macros.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Class is a wrapper around input data used by MeshDecoder. It provides a
+// basic interface for decoding either typed or variable-bit sized data.
+class DecoderBuffer {
+ public:
+  DecoderBuffer();
+  DecoderBuffer(const DecoderBuffer &buf) = default;
+
+  DecoderBuffer &operator=(const DecoderBuffer &buf) = default;
+
+  // Sets the buffer's internal data. Note that no copy of the input data is
+  // made so the data owner needs to keep the data valid and unchanged for
+  // runtime of the decoder.
+  void Init(const char *data, size_t data_size);
+
+  // Sets the buffer's internal data. |version| is the Draco bitstream version.
+  void Init(const char *data, size_t data_size, uint16_t version);
+
+  // Starts decoding a bit sequence.
+  // decode_size must be true if the size of the encoded bit data was included
+  // during encoding. The size is then returned to out_size.
+  // Returns false on error.
+  bool StartBitDecoding(bool decode_size, uint64_t *out_size);
+
+  // Ends the decoding of the bit sequence and return to the default
+  // byte-aligned decoding.
+  void EndBitDecoding();
+
+  // Decodes up to 32 bits into out_val. Can be called only in between
+  // StartBitDecoding and EndBitDecoding. Otherwise returns false.
+  bool DecodeLeastSignificantBits32(int nbits, uint32_t *out_value) {
+    if (!bit_decoder_active()) {
+      return false;
+    }
+    bit_decoder_.GetBits(nbits, out_value);
+    return true;
+  }
+
+  // Decodes an arbitrary data type.
+  // Can be used only when we are not decoding a bit-sequence.
+  // Returns false on error.
+  template <typename T>
+  bool Decode(T *out_val) {
+    if (!Peek(out_val)) {
+      return false;
+    }
+    pos_ += sizeof(T);
+    return true;
+  }
+
+  // Decodes |size_to_decode| raw bytes into |out_data|. Returns false when
+  // the read would run past the end of the buffer.
+  bool Decode(void *out_data, size_t size_to_decode) {
+    if (data_size_ < static_cast<int64_t>(pos_ + size_to_decode)) {
+      return false;  // Buffer overflow.
+    }
+    memcpy(out_data, (data_ + pos_), size_to_decode);
+    pos_ += size_to_decode;
+    return true;
+  }
+
+  // Decodes an arbitrary data, but does not advance the reading position.
+  template <typename T>
+  bool Peek(T *out_val) {
+    const size_t size_to_decode = sizeof(T);
+    if (data_size_ < static_cast<int64_t>(pos_ + size_to_decode)) {
+      return false;  // Buffer overflow.
+    }
+    memcpy(out_val, (data_ + pos_), size_to_decode);
+    return true;
+  }
+
+  // Reads |size_to_peek| raw bytes without advancing the reading position.
+  bool Peek(void *out_data, size_t size_to_peek) {
+    if (data_size_ < static_cast<int64_t>(pos_ + size_to_peek)) {
+      return false;  // Buffer overflow.
+    }
+    memcpy(out_data, (data_ + pos_), size_to_peek);
+    return true;
+  }
+
+  // Discards #bytes from the input buffer.
+  void Advance(int64_t bytes) { pos_ += bytes; }
+
+  // Moves the parsing position to a specific offset from the beginning of the
+  // input data.
+  void StartDecodingFrom(int64_t offset) { pos_ = offset; }
+
+  void set_bitstream_version(uint16_t version) { bitstream_version_ = version; }
+
+  // Returns the data array at the current decoder position.
+  const char *data_head() const { return data_ + pos_; }
+  int64_t remaining_size() const { return data_size_ - pos_; }
+  int64_t decoded_size() const { return pos_; }
+  bool bit_decoder_active() const { return bit_mode_; }
+
+  // Returns the bitstream version associated with the data. Returns 0 if
+  // unknown.
+  uint16_t bitstream_version() const { return bitstream_version_; }
+
+ private:
+  // Internal helper class to decode bits from a bit buffer.
+  class BitDecoder {
+   public:
+    BitDecoder();
+    ~BitDecoder();
+
+    // Sets the bit buffer to |b|. |s| is the size of |b| in bytes.
+    inline void reset(const void *b, size_t s) {
+      bit_offset_ = 0;
+      bit_buffer_ = static_cast<const uint8_t *>(b);
+      bit_buffer_end_ = bit_buffer_ + s;
+    }
+
+    // Returns number of bits decoded so far.
+    inline uint64_t BitsDecoded() const {
+      return static_cast<uint64_t>(bit_offset_);
+    }
+
+    // Return number of bits available for decoding
+    inline uint64_t AvailBits() const {
+      return ((bit_buffer_end_ - bit_buffer_) * 8) - bit_offset_;
+    }
+
+    // Returns the next |k| bits (k <= 24) without consuming them.
+    inline uint32_t EnsureBits(int k) {
+      DRACO_DCHECK_LE(k, 24);
+      DRACO_DCHECK_LE(static_cast<uint64_t>(k), AvailBits());
+
+      uint32_t buf = 0;
+      for (int i = 0; i < k; ++i) {
+        buf |= PeekBit(i) << i;
+      }
+      return buf;  // Okay to return extra bits
+    }
+
+    // Advances the bit position by |k| bits (pairs with EnsureBits()).
+    inline void ConsumeBits(int k) { bit_offset_ += k; }
+
+    // Returns |nbits| bits in |x|, consumed LSB-first.
+    inline bool GetBits(int32_t nbits, uint32_t *x) {
+      DRACO_DCHECK_GE(nbits, 0);
+      DRACO_DCHECK_LE(nbits, 32);
+      uint32_t value = 0;
+      for (int32_t bit = 0; bit < nbits; ++bit) {
+        value |= GetBit() << bit;
+      }
+      *x = value;
+      return true;
+    }
+
+   private:
+    // TODO(fgalligan): Add support for error reporting on range check.
+    // Returns one bit from the bit buffer. Reads past the end return 0.
+    inline int GetBit() {
+      const size_t off = bit_offset_;
+      const size_t byte_offset = off >> 3;
+      const int bit_shift = static_cast<int>(off & 0x7);
+      if (bit_buffer_ + byte_offset < bit_buffer_end_) {
+        const int bit = (bit_buffer_[byte_offset] >> bit_shift) & 1;
+        bit_offset_ = off + 1;
+        return bit;
+      }
+      return 0;
+    }
+
+    // Returns the bit |offset| positions ahead without consuming anything.
+    inline int PeekBit(int offset) {
+      const size_t off = bit_offset_ + offset;
+      const size_t byte_offset = off >> 3;
+      const int bit_shift = static_cast<int>(off & 0x7);
+      if (bit_buffer_ + byte_offset < bit_buffer_end_) {
+        const int bit = (bit_buffer_[byte_offset] >> bit_shift) & 1;
+        return bit;
+      }
+      return 0;
+    }
+
+    const uint8_t *bit_buffer_;
+    const uint8_t *bit_buffer_end_;
+    size_t bit_offset_;
+  };
+  // The test fixture accesses the private BitDecoder type.
+  friend class BufferBitCodingTest;
+
+  // Pointer to the input data; not owned by this class.
+  const char *data_;
+  int64_t data_size_;
+
+  // Current parsing position of the decoder.
+  int64_t pos_;
+  BitDecoder bit_decoder_;
+  // True while in between StartBitDecoding() and EndBitDecoding().
+  bool bit_mode_;
+  // Draco bitstream version of the data; 0 if unknown.
+  uint16_t bitstream_version_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_DECODER_BUFFER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/divide.cc b/libs/assimp/contrib/draco/src/draco/core/divide.cc
new file mode 100644
index 0000000..6d2e571
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/divide.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is based off libvpx's divide.c.
+
+#include "draco/core/divide.h"
+
+namespace draco {
+
+// Precomputed {multiplier, shift} constants for fastdiv(); entry |y| encodes
+// the constants used to divide an unsigned 32-bit value by y.
+const struct fastdiv_elem vp10_fastdiv_tab[256] = {
+    {0, 0}, {0, 0}, {0, 1}, {1431655766, 2},
+    {0, 2}, {2576980378, 3}, {1431655766, 3}, {613566757, 3},
+    {0, 3}, {3340530120, 4}, {2576980378, 4}, {1952257862, 4},
+    {1431655766, 4}, {991146300, 4}, {613566757, 4}, {286331154, 4},
+    {0, 4}, {3789677026, 5}, {3340530120, 5}, {2938661835, 5},
+    {2576980378, 5}, {2249744775, 5}, {1952257862, 5}, {1680639377, 5},
+    {1431655766, 5}, {1202590843, 5}, {991146300, 5}, {795364315, 5},
+    {613566757, 5}, {444306962, 5}, {286331154, 5}, {138547333, 5},
+    {0, 5}, {4034666248, 6}, {3789677026, 6}, {3558687189, 6},
+    {3340530120, 6}, {3134165325, 6}, {2938661835, 6}, {2753184165, 6},
+    {2576980378, 6}, {2409371898, 6}, {2249744775, 6}, {2097542168, 6},
+    {1952257862, 6}, {1813430637, 6}, {1680639377, 6}, {1553498810, 6},
+    {1431655766, 6}, {1314785907, 6}, {1202590843, 6}, {1094795586, 6},
+    {991146300, 6}, {891408307, 6}, {795364315, 6}, {702812831, 6},
+    {613566757, 6}, {527452125, 6}, {444306962, 6}, {363980280, 6},
+    {286331154, 6}, {211227900, 6}, {138547333, 6}, {68174085, 6},
+    {0, 6}, {4162814457, 7}, {4034666248, 7}, {3910343360, 7},
+    {3789677026, 7}, {3672508268, 7}, {3558687189, 7}, {3448072337, 7},
+    {3340530120, 7}, {3235934265, 7}, {3134165325, 7}, {3035110223, 7},
+    {2938661835, 7}, {2844718599, 7}, {2753184165, 7}, {2663967058, 7},
+    {2576980378, 7}, {2492141518, 7}, {2409371898, 7}, {2328596727, 7},
+    {2249744775, 7}, {2172748162, 7}, {2097542168, 7}, {2024065048, 7},
+    {1952257862, 7}, {1882064321, 7}, {1813430637, 7}, {1746305385, 7},
+    {1680639377, 7}, {1616385542, 7}, {1553498810, 7}, {1491936009, 7},
+    {1431655766, 7}, {1372618415, 7}, {1314785907, 7}, {1258121734, 7},
+    {1202590843, 7}, {1148159575, 7}, {1094795586, 7}, {1042467791, 7},
+    {991146300, 7}, {940802361, 7}, {891408307, 7}, {842937507, 7},
+    {795364315, 7}, {748664025, 7}, {702812831, 7}, {657787785, 7},
+    {613566757, 7}, {570128403, 7}, {527452125, 7}, {485518043, 7},
+    {444306962, 7}, {403800345, 7}, {363980280, 7}, {324829460, 7},
+    {286331154, 7}, {248469183, 7}, {211227900, 7}, {174592167, 7},
+    {138547333, 7}, {103079216, 7}, {68174085, 7}, {33818641, 7},
+    {0, 7}, {4228378656, 8}, {4162814457, 8}, {4098251237, 8},
+    {4034666248, 8}, {3972037425, 8}, {3910343360, 8}, {3849563281, 8},
+    {3789677026, 8}, {3730665024, 8}, {3672508268, 8}, {3615188300, 8},
+    {3558687189, 8}, {3502987511, 8}, {3448072337, 8}, {3393925206, 8},
+    {3340530120, 8}, {3287871517, 8}, {3235934265, 8}, {3184703642, 8},
+    {3134165325, 8}, {3084305374, 8}, {3035110223, 8}, {2986566663, 8},
+    {2938661835, 8}, {2891383213, 8}, {2844718599, 8}, {2798656110, 8},
+    {2753184165, 8}, {2708291480, 8}, {2663967058, 8}, {2620200175, 8},
+    {2576980378, 8}, {2534297473, 8}, {2492141518, 8}, {2450502814, 8},
+    {2409371898, 8}, {2368739540, 8}, {2328596727, 8}, {2288934667, 8},
+    {2249744775, 8}, {2211018668, 8}, {2172748162, 8}, {2134925265, 8},
+    {2097542168, 8}, {2060591247, 8}, {2024065048, 8}, {1987956292, 8},
+    {1952257862, 8}, {1916962805, 8}, {1882064321, 8}, {1847555765, 8},
+    {1813430637, 8}, {1779682582, 8}, {1746305385, 8}, {1713292966, 8},
+    {1680639377, 8}, {1648338801, 8}, {1616385542, 8}, {1584774030, 8},
+    {1553498810, 8}, {1522554545, 8}, {1491936009, 8}, {1461638086, 8},
+    {1431655766, 8}, {1401984144, 8}, {1372618415, 8}, {1343553873, 8},
+    {1314785907, 8}, {1286310003, 8}, {1258121734, 8}, {1230216764, 8},
+    {1202590843, 8}, {1175239808, 8}, {1148159575, 8}, {1121346142, 8},
+    {1094795586, 8}, {1068504060, 8}, {1042467791, 8}, {1016683080, 8},
+    {991146300, 8}, {965853890, 8}, {940802361, 8}, {915988286, 8},
+    {891408307, 8}, {867059126, 8}, {842937507, 8}, {819040276, 8},
+    {795364315, 8}, {771906565, 8}, {748664025, 8}, {725633745, 8},
+    {702812831, 8}, {680198441, 8}, {657787785, 8}, {635578121, 8},
+    {613566757, 8}, {591751050, 8}, {570128403, 8}, {548696263, 8},
+    {527452125, 8}, {506393524, 8}, {485518043, 8}, {464823301, 8},
+    {444306962, 8}, {423966729, 8}, {403800345, 8}, {383805589, 8},
+    {363980280, 8}, {344322273, 8}, {324829460, 8}, {305499766, 8},
+    {286331154, 8}, {267321616, 8}, {248469183, 8}, {229771913, 8},
+    {211227900, 8}, {192835267, 8}, {174592167, 8}, {156496785, 8},
+    {138547333, 8}, {120742053, 8}, {103079216, 8}, {85557118, 8},
+    {68174085, 8}, {50928466, 8}, {33818641, 8}, {16843010, 8},
+};
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/divide.h b/libs/assimp/contrib/draco/src/draco/core/divide.h
new file mode 100644
index 0000000..7e3838a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/divide.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DIVIDE_H_
+#define DRACO_CORE_DIVIDE_H_
+// An implementation of the divide by multiply algorithm
+// https://gmplib.org/~tege/divcnst-pldi94.pdf
+// This file is based off libvpx's divide.h.
+
+#include <stdint.h>
+
+#include <climits>
+
+namespace draco {
+
+// A {multiplier, shift} pair used by fastdiv() to replace division by a
+// constant with a multiply and shift.
+struct fastdiv_elem {
+  unsigned mult;
+  unsigned shift;
+};
+
+// Table of division constants; entry |y| holds the pair used to divide by y.
+extern const struct fastdiv_elem vp10_fastdiv_tab[256];
+
+// Computes x / y using the divide-by-multiply constants precomputed in
+// vp10_fastdiv_tab. |y| must index within the 256-entry table.
+static inline unsigned fastdiv(unsigned x, int y) {
+  // High 32 bits of the 64-bit product x * mult, then a correcting add and
+  // shift per the invariant-division algorithm.
+  unsigned t =
+      ((uint64_t)x * vp10_fastdiv_tab[y].mult) >> (sizeof(x) * CHAR_BIT);
+  return (t + x) >> vp10_fastdiv_tab[y].shift;
+}
+
+} // namespace draco
+
+#endif // DRACO_CORE_DIVIDE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_index_type.h b/libs/assimp/contrib/draco/src/draco/core/draco_index_type.h
new file mode 100644
index 0000000..d9dd3f6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_index_type.h
@@ -0,0 +1,183 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This files provides a basic framework for strongly typed indices that are
+// used within the Draco library. The motivation of using strongly typed indices
+// is to prevent bugs caused by mixing up incompatible indices, such as indexing
+// mesh faces with point indices and vice versa.
+//
+// Usage:
+// Define strongly typed index using macro:
+//
+// DEFINE_NEW_DRACO_INDEX_TYPE(value_type, name)
+//
+// where |value_type| is the data type of the index value (such as int32_t)
+// and |name| is a unique typename of the new index.
+//
+// E.g., we can define new index types as:
+//
+// DEFINE_NEW_DRACO_INDEX_TYPE(int, PointIndex)
+// DEFINE_NEW_DRACO_INDEX_TYPE(int, FaceIndex)
+//
+// The new types can then be used in the similar way as the regular weakly
+// typed indices (such as int32, int64, ...), but they cannot be
+// accidentally misassigned. E.g.:
+//
+// PointIndex point_index(10);
+// FaceIndex face_index;
+// face_index = point_index; // Compile error!
+//
+// One can still cast one type to another explicitly by accessing the index
+// value directly using the .value() method:
+//
+// face_index = FaceIndex(point_index.value()); // Compiles OK.
+//
+// Strongly typed indices support most of the common binary and unary
+// operators and support for additional operators can be added if
+// necessary.
+
+#ifndef DRACO_CORE_DRACO_INDEX_TYPE_H_
+#define DRACO_CORE_DRACO_INDEX_TYPE_H_
+
+#include <ostream>
+
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Defines a new strongly typed index named |name| over |value_type|. A unique
+// empty tag struct is generated so two indices sharing the same value type are
+// still distinct, mutually unassignable C++ types.
+#define DEFINE_NEW_DRACO_INDEX_TYPE(value_type, name) \
+  struct name##_tag_type_ {};                         \
+  typedef IndexType<value_type, name##_tag_type_> name;
+
+// Strongly typed wrapper around |ValueTypeT|. |TagT| is an empty tag type
+// that makes otherwise identical index types mutually incompatible.
+template <class ValueTypeT, class TagT>
+class IndexType {
+ public:
+  typedef IndexType<ValueTypeT, TagT> ThisIndexType;
+  typedef ValueTypeT ValueType;
+
+  constexpr IndexType() : value_(ValueTypeT()) {}
+  constexpr explicit IndexType(ValueTypeT value) : value_(value) {}
+
+  // Explicit accessor for the underlying weakly typed value.
+  constexpr ValueTypeT value() const { return value_; }
+
+  // Comparisons are provided against both the strongly typed index and the
+  // raw value type.
+  constexpr bool operator==(const IndexType &i) const {
+    return value_ == i.value_;
+  }
+  constexpr bool operator==(const ValueTypeT &val) const {
+    return value_ == val;
+  }
+  constexpr bool operator!=(const IndexType &i) const {
+    return value_ != i.value_;
+  }
+  constexpr bool operator!=(const ValueTypeT &val) const {
+    return value_ != val;
+  }
+  constexpr bool operator<(const IndexType &i) const {
+    return value_ < i.value_;
+  }
+  constexpr bool operator<(const ValueTypeT &val) const { return value_ < val; }
+  // operator<= added for consistency: <, > and >= were provided but <= was
+  // missing from the original operator set.
+  constexpr bool operator<=(const IndexType &i) const {
+    return value_ <= i.value_;
+  }
+  constexpr bool operator<=(const ValueTypeT &val) const {
+    return value_ <= val;
+  }
+  constexpr bool operator>(const IndexType &i) const {
+    return value_ > i.value_;
+  }
+  constexpr bool operator>(const ValueTypeT &val) const { return value_ > val; }
+  constexpr bool operator>=(const IndexType &i) const {
+    return value_ >= i.value_;
+  }
+  constexpr bool operator>=(const ValueTypeT &val) const {
+    return value_ >= val;
+  }
+
+  inline ThisIndexType &operator++() {
+    ++value_;
+    return *this;
+  }
+  inline ThisIndexType operator++(int) {
+    const ThisIndexType ret(value_);
+    ++value_;
+    return ret;
+  }
+
+  inline ThisIndexType &operator--() {
+    --value_;
+    return *this;
+  }
+  inline ThisIndexType operator--(int) {
+    const ThisIndexType ret(value_);
+    --value_;
+    return ret;
+  }
+
+  constexpr ThisIndexType operator+(const IndexType &i) const {
+    return ThisIndexType(value_ + i.value_);
+  }
+  constexpr ThisIndexType operator+(const ValueTypeT &val) const {
+    return ThisIndexType(value_ + val);
+  }
+  constexpr ThisIndexType operator-(const IndexType &i) const {
+    return ThisIndexType(value_ - i.value_);
+  }
+  constexpr ThisIndexType operator-(const ValueTypeT &val) const {
+    return ThisIndexType(value_ - val);
+  }
+
+  // Compound assignments return *this by reference. The |ValueTypeT|
+  // overloads previously returned a copy by value, which silently discarded
+  // writes made through the returned object and was inconsistent with the
+  // |IndexType| overloads; they now return a reference as well.
+  inline ThisIndexType &operator+=(const IndexType &i) {
+    value_ += i.value_;
+    return *this;
+  }
+  inline ThisIndexType &operator+=(const ValueTypeT &val) {
+    value_ += val;
+    return *this;
+  }
+  inline ThisIndexType &operator-=(const IndexType &i) {
+    value_ -= i.value_;
+    return *this;
+  }
+  inline ThisIndexType &operator-=(const ValueTypeT &val) {
+    value_ -= val;
+    return *this;
+  }
+  inline ThisIndexType &operator=(const ThisIndexType &i) {
+    value_ = i.value_;
+    return *this;
+  }
+  inline ThisIndexType &operator=(const ValueTypeT &val) {
+    value_ = val;
+    return *this;
+  }
+
+ private:
+  ValueTypeT value_;
+};
+
+// Stream operator << provided for logging purposes.
+template <class ValueTypeT, class TagT>
+std::ostream &operator<<(std::ostream &os, IndexType<ValueTypeT, TagT> index) {
+ return os << index.value();
+}
+
+} // namespace draco
+
+// Specialize std::hash for the strongly indexed types.
+namespace std {
+
+// Hashes a strongly typed index by forwarding its raw value, so index types
+// can be used directly as keys in unordered containers.
+template <class ValueTypeT, class TagT>
+struct hash<draco::IndexType<ValueTypeT, TagT>> {
+  size_t operator()(const draco::IndexType<ValueTypeT, TagT> &i) const {
+    return static_cast<size_t>(i.value());
+  }
+};
+
+} // namespace std
+
+#endif // DRACO_CORE_DRACO_INDEX_TYPE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_index_type_vector.h b/libs/assimp/contrib/draco/src/draco/core/draco_index_type_vector.h
new file mode 100644
index 0000000..aae1e7a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_index_type_vector.h
@@ -0,0 +1,83 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DRACO_INDEX_TYPE_VECTOR_H_
+#define DRACO_CORE_DRACO_INDEX_TYPE_VECTOR_H_
+
+#include <cstddef>
+#include <utility>
+#include <vector>
+
+#include "draco/core/draco_index_type.h"
+
+namespace draco {
+
+// A wrapper around the standard std::vector that supports indexing of the
+// vector entries using the strongly typed indices as defined in
+// draco_index_type.h .
+// TODO(ostava): Make the interface more complete. It's currently missing
+// features such as iterators.
+// TODO(vytyaz): Add more unit tests for this class.
+// A wrapper around the standard std::vector that supports indexing of the
+// vector entries using the strongly typed indices as defined in
+// draco_index_type.h .
+// TODO(ostava): Make the interface more complete. It's currently missing
+// features such as iterators.
+// TODO(vytyaz): Add more unit tests for this class.
+template <class IndexTypeT, class ValueTypeT>
+class IndexTypeVector {
+ public:
+  typedef typename std::vector<ValueTypeT>::const_reference const_reference;
+  typedef typename std::vector<ValueTypeT>::reference reference;
+
+  IndexTypeVector() {}
+  explicit IndexTypeVector(size_t size) : vector_(size) {}
+  IndexTypeVector(size_t size, const ValueTypeT &val) : vector_(size, val) {}
+
+  void clear() { vector_.clear(); }
+  void reserve(size_t size) { vector_.reserve(size); }
+  void resize(size_t size) { vector_.resize(size); }
+  void resize(size_t size, const ValueTypeT &val) { vector_.resize(size, val); }
+  void assign(size_t size, const ValueTypeT &val) { vector_.assign(size, val); }
+
+  void swap(IndexTypeVector<IndexTypeT, ValueTypeT> &arg) {
+    vector_.swap(arg.vector_);
+  }
+
+  size_t size() const { return vector_.size(); }
+  bool empty() const { return vector_.empty(); }
+
+  void push_back(const ValueTypeT &val) { vector_.push_back(val); }
+  void push_back(ValueTypeT &&val) { vector_.push_back(std::move(val)); }
+
+  template <typename... Args>
+  void emplace_back(Args &&...args) {
+    vector_.emplace_back(std::forward<Args>(args)...);
+  }
+
+  // Unchecked element access (mirrors std::vector::operator[]).
+  inline reference operator[](const IndexTypeT &index) {
+    return vector_[index.value()];
+  }
+  inline const_reference operator[](const IndexTypeT &index) const {
+    return vector_[index.value()];
+  }
+  // Checked element access. These previously performed unchecked indexing
+  // despite the std::vector-style at() name; they now throw
+  // std::out_of_range for invalid indices, matching the standard contract.
+  inline reference at(const IndexTypeT &index) {
+    return vector_.at(index.value());
+  }
+  inline const_reference at(const IndexTypeT &index) const {
+    return vector_.at(index.value());
+  }
+  const ValueTypeT *data() const { return vector_.data(); }
+
+ private:
+  std::vector<ValueTypeT> vector_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_DRACO_INDEX_TYPE_VECTOR_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_test_base.h b/libs/assimp/contrib/draco/src/draco/core/draco_test_base.h
new file mode 100644
index 0000000..f5c9d75
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_test_base.h
@@ -0,0 +1,11 @@
+// Wrapper for including googletest indirectly. Useful when the location of the
+// googletest sources must change depending on build environment and repository
+// source location.
+#ifndef DRACO_CORE_DRACO_TEST_BASE_H_
+#define DRACO_CORE_DRACO_TEST_BASE_H_
+
+// Internal linkage: each test translation unit gets its own zero-initialized
+// copy of this flag; it is referenced by test code that regenerates golden
+// files.
+static bool FLAGS_update_golden_files;
+#include "gtest/gtest.h"
+#include "testing/draco_test_config.h"
+
+#endif // DRACO_CORE_DRACO_TEST_BASE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_test_utils.cc b/libs/assimp/contrib/draco/src/draco/core/draco_test_utils.cc
new file mode 100644
index 0000000..edca985
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_test_utils.cc
@@ -0,0 +1,80 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/draco_test_utils.h"
+
+#include <fstream>
+
+#include "draco/core/macros.h"
+#include "draco/io/file_utils.h"
+#include "draco_test_base.h"
+
+namespace draco {
+
+namespace {
+// Directory paths are injected by the build via testing/draco_test_config.h.
+static constexpr char kTestDataDir[] = DRACO_TEST_DATA_DIR;
+static constexpr char kTestTempDir[] = DRACO_TEST_TEMP_DIR;
+}  // namespace
+
+// Resolves a bare test-file name against the test data directory.
+std::string GetTestFileFullPath(const std::string &file_name) {
+  std::string full_path(kTestDataDir);
+  full_path += '/';
+  full_path += file_name;
+  return full_path;
+}
+
+// Resolves a bare file name against the temporary directory used for files
+// generated while testing.
+std::string GetTestTempFileFullPath(const std::string &file_name) {
+  std::string full_path(kTestTempDir);
+  full_path += '/';
+  full_path += file_name;
+  return full_path;
+}
+
+// Writes |data_size| bytes of |data| to |golden_file_name| inside the test
+// data directory. Returns false if the file could not be written.
+bool GenerateGoldenFile(const std::string &golden_file_name, const void *data,
+                        int data_size) {
+  return WriteBufferToFile(data, data_size,
+                           GetTestFileFullPath(golden_file_name));
+}
+
+// Byte-compares |data| against the golden file, streaming the file in 1 KiB
+// chunks. Logs the first differing byte offset. Returns false on any
+// difference, size mismatch, or unreadable file.
+bool CompareGoldenFile(const std::string &golden_file_name, const void *data,
+                       int data_size) {
+  const std::string golden_path = GetTestFileFullPath(golden_file_name);
+  std::ifstream in_file(golden_path, std::ios::binary);
+  if (!in_file || data_size < 0) {
+    return false;
+  }
+  const char *const data_c8 = static_cast<const char *>(data);
+  constexpr int buffer_size = 1024;
+  char buffer[buffer_size];
+  size_t extracted_size = 0;
+  // NOTE(review): unsigned — the subtraction below can wrap when the golden
+  // file is longer than |data_size|; the final size comparison still reports
+  // the mismatch correctly, but keep that in mind when editing this loop.
+  size_t remaining_data_size = data_size;
+  int offset = 0;
+  while ((extracted_size = in_file.read(buffer, buffer_size).gcount()) > 0) {
+    if (remaining_data_size <= 0)
+      break;  // Input and golden sizes are different.
+    // Compare only up to the bytes that remain in |data|.
+    size_t size_to_check = extracted_size;
+    if (remaining_data_size < size_to_check)
+      size_to_check = remaining_data_size;
+    for (uint32_t i = 0; i < size_to_check; ++i) {
+      if (buffer[i] != data_c8[offset++]) {
+        LOG(INFO) << "Test output differed from golden file at byte "
+                  << offset - 1;
+        return false;
+      }
+    }
+    remaining_data_size -= extracted_size;
+  }
+  if (remaining_data_size != extracted_size) {
+    // Both of these values should be 0 at the end.
+    LOG(INFO) << "Test output size differed from golden file size";
+    return false;
+  }
+  return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_test_utils.h b/libs/assimp/contrib/draco/src/draco/core/draco_test_utils.h
new file mode 100644
index 0000000..fa548f5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_test_utils.h
@@ -0,0 +1,93 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DRACO_TEST_UTILS_H_
+#define DRACO_CORE_DRACO_TEST_UTILS_H_
+
+#include "draco/core/draco_test_base.h"
+#include "draco/io/mesh_io.h"
+#include "draco/io/point_cloud_io.h"
+
+namespace draco {
+
+// Returns the full path to a given file system entry, such as test file or test
+// directory.
+std::string GetTestFileFullPath(const std::string &entry_name);
+
+// Returns the full path to a given temporary file (a location where tests store
+// generated files).
+std::string GetTestTempFileFullPath(const std::string &file_name);
+
+// Generates a new golden file and saves it into the correct folder.
+// Returns false if the file couldn't be created.
+bool GenerateGoldenFile(const std::string &golden_file_name, const void *data,
+ int data_size);
+
+// Compare a golden file content with the input data.
+// Function will log the first byte position where the data differ.
+// Returns false if there are any differences.
+bool CompareGoldenFile(const std::string &golden_file_name, const void *data,
+ int data_size);
+
+// Loads a mesh / point cloud specified by a |file_name| that is going to be
+// automatically converted to the correct path available to the testing
+// instance.
+// Loads a mesh from |file_name| resolved against the test data directory.
+inline std::unique_ptr<Mesh> ReadMeshFromTestFile(
+    const std::string &file_name) {
+  return ReadMeshFromFile(GetTestFileFullPath(file_name)).value();
+}
+// Overload that forwards the |use_metadata| flag to the reader.
+inline std::unique_ptr<Mesh> ReadMeshFromTestFile(const std::string &file_name,
+                                                  bool use_metadata) {
+  return ReadMeshFromFile(GetTestFileFullPath(file_name), use_metadata)
+      .value();
+}
+// Overload that forwards reader |options|.
+inline std::unique_ptr<Mesh> ReadMeshFromTestFile(const std::string &file_name,
+                                                  const Options &options) {
+  return ReadMeshFromFile(GetTestFileFullPath(file_name), options).value();
+}
+
+// Loads a point cloud from |file_name| in the test data directory.
+inline std::unique_ptr<PointCloud> ReadPointCloudFromTestFile(
+    const std::string &file_name) {
+  return ReadPointCloudFromFile(GetTestFileFullPath(file_name)).value();
+}
+
+// Evaluates an expression that returns draco::Status. If the status is not OK,
+// the macro asserts and logs the error message.
+// The expansion is wrapped in a block so |_local_status| does not leak into
+// the caller's scope and the macro is safe inside unbraced if/else bodies.
+#define DRACO_ASSERT_OK(expression)                                      \
+  {                                                                      \
+    const draco::Status _local_status = (expression);                    \
+    ASSERT_TRUE(_local_status.ok()) << _local_status.error_msg_string(); \
+  }
+
+// In case StatusOr<T> is ok(), this macro assigns value stored in StatusOr<T>
+// to |lhs|, otherwise it asserts and logs the error message.
+//
+// DRACO_ASSIGN_OR_ASSERT(lhs, expression)
+//
+#define DRACO_ASSIGN_OR_ASSERT(lhs, expression)                           \
+  DRACO_ASSIGN_OR_ASSERT_IMPL_(DRACO_MACROS_IMPL_CONCAT_(_statusor,       \
+                                                         __LINE__),      \
+                               lhs, expression, _status)
+
+// The actual implementation of the above macro.
+// NOTE(review): expands to multiple statements — it cannot use a do/while
+// wrapper because |statusor| must be declared in the enclosing scope — so it
+// must not be used as the body of an unbraced if/else.
+#define DRACO_ASSIGN_OR_ASSERT_IMPL_(statusor, lhs, expression, error_expr) \
+  auto statusor = (expression);                                             \
+  ASSERT_TRUE(statusor.ok()) << statusor.status().error_msg_string();       \
+  lhs = std::move(statusor).value();
+
+} // namespace draco
+
+#endif // DRACO_CORE_DRACO_TEST_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_types.cc b/libs/assimp/contrib/draco/src/draco/core/draco_types.cc
new file mode 100644
index 0000000..9bde05f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_types.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/draco_types.h"
+
+namespace draco {
+
+// Returns the size of |dt| in bytes, or -1 for invalid/unknown types.
+int32_t DataTypeLength(DataType dt) {
+  switch (dt) {
+    // 1-byte types.
+    case DT_INT8:
+    case DT_UINT8:
+    case DT_BOOL:
+      return 1;
+    // 2-byte types.
+    case DT_INT16:
+    case DT_UINT16:
+      return 2;
+    // 4-byte types.
+    case DT_INT32:
+    case DT_UINT32:
+    case DT_FLOAT32:
+      return 4;
+    // 8-byte types.
+    case DT_INT64:
+    case DT_UINT64:
+    case DT_FLOAT64:
+      return 8;
+    default:
+      return -1;
+  }
+}
+
+// Equivalent to std::is_integral for draco::DataType: true for all signed and
+// unsigned integer types and DT_BOOL, false for floats and invalid values.
+bool IsDataTypeIntegral(DataType dt) {
+  switch (dt) {
+    case DT_BOOL:
+    case DT_INT8:
+    case DT_UINT8:
+    case DT_INT16:
+    case DT_UINT16:
+    case DT_INT32:
+    case DT_UINT32:
+    case DT_INT64:
+    case DT_UINT64:
+      return true;
+    default:
+      return false;
+  }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_types.h b/libs/assimp/contrib/draco/src/draco/core/draco_types.h
new file mode 100644
index 0000000..d14437a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_types.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DRACO_TYPES_H_
+#define DRACO_CORE_DRACO_TYPES_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Data types of attribute values.
+// NOTE(review): these enumerator values are presumably serialized into
+// encoded streams — confirm before reordering or renumbering.
+enum DataType {
+  // Not a legal value for DataType. Used to indicate a field has not been set.
+  DT_INVALID = 0,
+  DT_INT8,
+  DT_UINT8,
+  DT_INT16,
+  DT_UINT16,
+  DT_INT32,
+  DT_UINT32,
+  DT_INT64,
+  DT_UINT64,
+  DT_FLOAT32,
+  DT_FLOAT64,
+  DT_BOOL,
+  // Number of data types; keep this entry last.
+  DT_TYPES_COUNT
+};
+
+int32_t DataTypeLength(DataType dt);
+
+// Equivalent to std::is_integral for draco::DataType. Returns true for all
+// signed and unsigned integer types (including DT_BOOL). Returns false
+// otherwise.
+bool IsDataTypeIntegral(DataType dt);
+
+} // namespace draco
+
+#endif // DRACO_CORE_DRACO_TYPES_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/draco_version.h b/libs/assimp/contrib/draco/src/draco/core/draco_version.h
new file mode 100644
index 0000000..14a504a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/draco_version.h
@@ -0,0 +1,27 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_DRACO_VERSION_H_
+#define DRACO_CORE_DRACO_VERSION_H_
+
+namespace draco {
+
+// Draco version is comprised of <major>.<minor>.<revision>.
+static const char kDracoVersion[] = "1.4.1";
+
+// Returns the library version string. Marked inline: this header can be
+// included from multiple translation units, and a non-inline function
+// definition in a header violates the one-definition rule (duplicate-symbol
+// link errors).
+inline const char *Version() { return kDracoVersion; }
+
+}  // namespace draco
+
+#endif // DRACO_CORE_DRACO_VERSION_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/encoder_buffer.cc b/libs/assimp/contrib/draco/src/draco/core/encoder_buffer.cc
new file mode 100644
index 0000000..df98677
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/encoder_buffer.cc
@@ -0,0 +1,93 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/encoder_buffer.h"
+
+#include <cstring> // for memcpy
+
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+// Starts in byte-aligned mode: no bytes reserved for the bit encoder. The
+// reserved-bytes counter is an int64_t byte count (it also serves as the
+// "bit encoding active" flag), so it is initialized with 0 rather than the
+// type-mismatched |false| literal used previously.
+EncoderBuffer::EncoderBuffer()
+    : bit_encoder_reserved_bytes_(0), encode_bit_sequence_size_(false) {}
+
+// Discards all encoded data and leaves bit-encoding mode.
+void EncoderBuffer::Clear() {
+  bit_encoder_reserved_bytes_ = 0;
+  buffer_.clear();
+}
+
+// Resizes the underlying byte buffer to |nbytes|.
+void EncoderBuffer::Resize(int64_t nbytes) { buffer_.resize(nbytes); }
+
+// Enters bit-encoding mode with capacity for up to |required_bits| bits. When
+// |encode_size| is set, an extra sizeof(uint64_t) bytes are reserved in front
+// of the bit data so EndBitEncoding() can store the actual encoded size.
+// Returns false when bit encoding is already active or the size is invalid.
+bool EncoderBuffer::StartBitEncoding(int64_t required_bits, bool encode_size) {
+  if (bit_encoder_active()) {
+    return false;  // Bit encoding mode already active.
+  }
+  if (required_bits <= 0) {
+    return false;  // Invalid size.
+  }
+  encode_bit_sequence_size_ = encode_size;
+  // Round the requested bit count up to whole bytes.
+  const int64_t required_bytes = (required_bits + 7) / 8;
+  bit_encoder_reserved_bytes_ = required_bytes;
+  uint64_t buffer_start_size = buffer_.size();
+  if (encode_size) {
+    // Reserve memory for storing the encoded bit sequence size. It will be
+    // filled once the bit encoding ends.
+    buffer_start_size += sizeof(uint64_t);
+  }
+  // Resize buffer to fit the maximum size of encoded bit data.
+  buffer_.resize(buffer_start_size + required_bytes);
+  // Get the buffer data pointer for the bit encoder.
+  const char *const data = buffer_.data() + buffer_start_size;
+  bit_encoder_ =
+      std::unique_ptr<BitEncoder>(new BitEncoder(const_cast<char *>(data)));
+  return true;
+}
+
+// Leaves bit-encoding mode: optionally writes the varint-encoded byte size in
+// front of the bit data, then shrinks |buffer_| to the bytes actually used.
+void EncoderBuffer::EndBitEncoding() {
+  if (!bit_encoder_active()) {
+    return;
+  }
+  // Get the number of encoded bits and bytes (rounded up).
+  const uint64_t encoded_bits = bit_encoder_->Bits();
+  const uint64_t encoded_bytes = (encoded_bits + 7) / 8;
+  // Flush all cached bits that are not in the bit encoder's main buffer.
+  bit_encoder_->Flush(0);
+  // Encode size if needed.
+  if (encode_bit_sequence_size_) {
+    char *out_mem = const_cast<char *>(data() + size());
+    // Make the out_mem point to the memory reserved for storing the size.
+    out_mem = out_mem - (bit_encoder_reserved_bytes_ + sizeof(uint64_t));
+
+    EncoderBuffer var_size_buffer;
+    EncodeVarint(encoded_bytes, &var_size_buffer);
+    const uint32_t size_len = static_cast<uint32_t>(var_size_buffer.size());
+    // The varint is usually shorter than the reserved uint64_t slot, so the
+    // bit data is slid left to sit immediately after the encoded size.
+    char *const dst = out_mem + size_len;
+    const char *const src = out_mem + sizeof(uint64_t);
+    memmove(dst, src, encoded_bytes);
+
+    // Store the size of the encoded data.
+    memcpy(out_mem, var_size_buffer.data(), size_len);
+
+    // We need to account for the difference between the preallocated and actual
+    // storage needed for storing the encoded length. This will be used later to
+    // compute the correct size of |buffer_|.
+    bit_encoder_reserved_bytes_ += sizeof(uint64_t) - size_len;
+  }
+  // Resize the underlying buffer to match the number of encoded bits.
+  buffer_.resize(buffer_.size() - bit_encoder_reserved_bytes_ + encoded_bytes);
+  bit_encoder_reserved_bytes_ = 0;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/encoder_buffer.h b/libs/assimp/contrib/draco/src/draco/core/encoder_buffer.h
new file mode 100644
index 0000000..b153a62
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/encoder_buffer.h
@@ -0,0 +1,152 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_ENCODER_BUFFER_H_
+#define DRACO_CORE_ENCODER_BUFFER_H_
+
+#include <memory>
+#include <vector>
+
+#include "draco/core/bit_utils.h"
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// Class representing a buffer that can be used for either for byte-aligned
+// encoding of arbitrary data structures or for encoding of variable-length
+// bit data.
+class EncoderBuffer {
+ public:
+  EncoderBuffer();
+  void Clear();
+  void Resize(int64_t nbytes);
+
+  // Start encoding a bit sequence. A maximum size of the sequence needs to
+  // be known upfront.
+  // If encode_size is true, the size of encoded bit sequence is stored before
+  // the sequence. Decoder can then use this size to skip over the bit sequence
+  // if needed.
+  // Returns false on error.
+  bool StartBitEncoding(int64_t required_bits, bool encode_size);
+
+  // End the encoding of the bit sequence and return to the default byte-aligned
+  // encoding.
+  void EndBitEncoding();
+
+  // Encode up to 32 bits into the buffer. Can be called only in between
+  // StartBitEncoding and EndBitEncoding. Otherwise returns false.
+  bool EncodeLeastSignificantBits32(int nbits, uint32_t value) {
+    if (!bit_encoder_active()) {
+      return false;
+    }
+    bit_encoder_->PutBits(value, nbits);
+    return true;
+  }
+  // Encode an arbitrary data type.
+  // Can be used only when we are not encoding a bit-sequence.
+  // Returns false when the value couldn't be encoded.
+  // NOTE(review): copies the raw object bytes; only trivially copyable types
+  // are safe here.
+  template <typename T>
+  bool Encode(const T &data) {
+    if (bit_encoder_active()) {
+      return false;
+    }
+    const uint8_t *src_data = reinterpret_cast<const uint8_t *>(&data);
+    buffer_.insert(buffer_.end(), src_data, src_data + sizeof(T));
+    return true;
+  }
+  // Raw-byte variant of Encode(); appends |data_size| bytes from |data|.
+  bool Encode(const void *data, size_t data_size) {
+    if (bit_encoder_active()) {
+      return false;
+    }
+    const uint8_t *src_data = reinterpret_cast<const uint8_t *>(data);
+    buffer_.insert(buffer_.end(), src_data, src_data + data_size);
+    return true;
+  }
+
+  // The reserved-byte counter doubles as the bit-encoding-mode flag.
+  bool bit_encoder_active() const { return bit_encoder_reserved_bytes_ > 0; }
+  const char *data() const { return buffer_.data(); }
+  size_t size() const { return buffer_.size(); }
+  std::vector<char> *buffer() { return &buffer_; }
+
+ private:
+  // Internal helper class to encode bits to a bit buffer.
+  class BitEncoder {
+   public:
+    // |data| is the buffer to write the bits into. The caller guarantees the
+    // buffer is large enough for all bits that will be written.
+    explicit BitEncoder(char *data) : bit_buffer_(data), bit_offset_(0) {}
+
+    // Write |nbits| of |data| into the bit buffer, least significant bit
+    // first.
+    void PutBits(uint32_t data, int32_t nbits) {
+      DRACO_DCHECK_GE(nbits, 0);
+      DRACO_DCHECK_LE(nbits, 32);
+      for (int32_t bit = 0; bit < nbits; ++bit) {
+        PutBit((data >> bit) & 1);
+      }
+    }
+
+    // Return number of bits encoded so far.
+    uint64_t Bits() const { return static_cast<uint64_t>(bit_offset_); }
+
+    // TODO(fgalligan): Remove this function once we know we do not need the
+    // old API anymore.
+    // This is a function of an old API, that currently does nothing.
+    void Flush(int /* left_over_bit_value */) {}
+
+    // Return the number of bits required to store the given number
+    static uint32_t BitsRequired(uint32_t x) {
+      return static_cast<uint32_t>(MostSignificantBit(x));
+    }
+
+   private:
+    // Writes a single bit; clears the target bit first so the buffer does not
+    // need to be pre-zeroed.
+    void PutBit(uint8_t value) {
+      const int byte_size = 8;
+      const uint64_t off = static_cast<uint64_t>(bit_offset_);
+      const uint64_t byte_offset = off / byte_size;
+      const int bit_shift = off % byte_size;
+
+      // TODO(fgalligan): Check performance if we add a branch and only do one
+      // memory write if bit_shift is 7. Also try using a temporary variable to
+      // hold the bits before writing to the buffer.
+
+      bit_buffer_[byte_offset] &= ~(1 << bit_shift);
+      bit_buffer_[byte_offset] |= value << bit_shift;
+      bit_offset_++;
+    }
+
+    char *bit_buffer_;    // Destination buffer (not owned).
+    size_t bit_offset_;   // Number of bits written so far.
+  };
+  friend class BufferBitCodingTest;
+  // All data is stored in this vector.
+  std::vector<char> buffer_;
+
+  // Bit encoder is used when encoding variable-length bit data.
+  // TODO(ostava): Currently encoder needs to be recreated each time
+  // StartBitEncoding method is called. This is not necessary if BitEncoder
+  // supported reset function which can easily added but let's leave that for
+  // later.
+  std::unique_ptr<BitEncoder> bit_encoder_;
+
+  // The number of bytes reserved for bit encoder.
+  // Values > 0 indicate we are in the bit encoding mode.
+  int64_t bit_encoder_reserved_bytes_;
+
+  // Flag used indicating that we need to store the length of the currently
+  // processed bit sequence.
+  bool encode_bit_sequence_size_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_ENCODER_BUFFER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/hash_utils.cc b/libs/assimp/contrib/draco/src/draco/core/hash_utils.cc
new file mode 100644
index 0000000..fbbd653
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/hash_utils.cc
@@ -0,0 +1,58 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/hash_utils.h"
+
+#include <cstddef>
+#include <functional>
+#include <limits>
+
+namespace draco {
+
+// Will never return 1 or 0.
+// Computes a 64-bit fingerprint of |s| by packing it into 8-byte chunks and
+// folding each chunk into the running hash with HashCombine().
+uint64_t FingerprintString(const char *s, size_t len) {
+  const uint64_t seed = 0x87654321;
+  // One extra iteration covers the (possibly shorter) tail chunk.
+  const int hash_loop_count = static_cast<int>(len / 8) + 1;
+  uint64_t hash = seed;
+
+  for (int i = 0; i < hash_loop_count; ++i) {
+    const int off = i * 8;
+    const int num_chars_left = static_cast<int>(len) - off;
+    uint64_t new_hash = seed;
+
+    if (num_chars_left > 7) {
+      // Full 8-byte chunk: pack the bytes big-endian into |new_hash|.
+      // NOTE(review): char is sign-extended when negative, so bytes >= 0x80
+      // bleed into higher lanes. Casting through unsigned char would be
+      // cleaner but would change existing fingerprints, so it is documented
+      // rather than fixed.
+      const int off2 = i * 8;
+      new_hash = static_cast<uint64_t>(s[off2]) << 56 |
+                 static_cast<uint64_t>(s[off2 + 1]) << 48 |
+                 static_cast<uint64_t>(s[off2 + 2]) << 40 |
+                 static_cast<uint64_t>(s[off2 + 3]) << 32 |
+                 static_cast<uint64_t>(s[off2 + 4]) << 24 |
+                 static_cast<uint64_t>(s[off2 + 5]) << 16 |
+                 static_cast<uint64_t>(s[off2 + 6]) << 8 | s[off2 + 7];
+    } else {
+      // Tail chunk: OR the remaining bytes into the high end of |new_hash|
+      // (on top of the seed bits).
+      for (int j = 0; j < num_chars_left; ++j) {
+        new_hash |= static_cast<uint64_t>(s[off + j])
+                    << (64 - ((num_chars_left - j) * 8));
+      }
+    }
+
+    hash = HashCombine(new_hash, hash);
+  }
+
+  // Shift results 0 and 1 to 2 and 3 so the function never returns 0 or 1
+  // (values near max are already >= 2 and are left unchanged).
+  if (hash < std::numeric_limits<uint64_t>::max() - 1) {
+    hash += 2;
+  }
+  return hash;
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/hash_utils.h b/libs/assimp/contrib/draco/src/draco/core/hash_utils.h
new file mode 100644
index 0000000..aa61523
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/hash_utils.h
@@ -0,0 +1,64 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_HASH_UTILS_H_
+#define DRACO_CORE_HASH_UTILS_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+#include <functional>
+
+namespace draco {
+
+template <typename T1, typename T2>
+size_t HashCombine(T1 a, T2 b) {
+ const size_t hash1 = std::hash<T1>()(a);
+ const size_t hash2 = std::hash<T2>()(b);
+ return (hash1 << 2) ^ (hash2 << 1);
+}
+
+template <typename T>
+size_t HashCombine(T a, size_t hash) {
+ const size_t hasha = std::hash<T>()(a);
+ return (hash) ^ (hasha + 239);
+}
+
+inline uint64_t HashCombine(uint64_t a, uint64_t b) {
+ return (a + 1013) ^ (b + 107) << 1;
+}
+
+// Will never return 1 or 0.
+uint64_t FingerprintString(const char *s, size_t len);
+
+// Hash for std::array.
+template <typename T>
+struct HashArray {
+ size_t operator()(const T &a) const {
+ size_t hash = 79; // Magic number.
+ for (unsigned int i = 0; i < std::tuple_size<T>::value; ++i) {
+ hash = HashCombine(hash, ValueHash(a[i]));
+ }
+ return hash;
+ }
+
+ template <typename V>
+ size_t ValueHash(const V &val) const {
+ return std::hash<V>()(val);
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_HASH_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/macros.h b/libs/assimp/contrib/draco/src/draco/core/macros.h
new file mode 100644
index 0000000..147bbaa
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/macros.h
@@ -0,0 +1,119 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_MACROS_H_
+#define DRACO_CORE_MACROS_H_
+
+#include "assert.h"
+#include "draco/draco_features.h"
+
+#ifdef ANDROID_LOGGING
+#include <android/log.h>
+#define LOG_TAG "draco"
+#define DRACO_LOGI(...) \
+ __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
+#define DRACO_LOGE(...) \
+ __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
+#else
+#define DRACO_LOGI printf
+#define DRACO_LOGE printf
+#endif
+
+#include <iostream>
+namespace draco {
+
+#ifndef DISALLOW_COPY_AND_ASSIGN
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName &) = delete; \
+ void operator=(const TypeName &) = delete;
+#endif
+
+#ifndef FALLTHROUGH_INTENDED
+#if defined(__clang__) && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#endif
+#elif defined(__GNUC__) && __GNUC__ >= 7
+#define FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#endif
+
+// If FALLTHROUGH_INTENDED is still not defined, define it.
+#ifndef FALLTHROUGH_INTENDED
+#define FALLTHROUGH_INTENDED \
+ do { \
+ } while (0)
+#endif
+#endif
+
+#ifndef LOG
+#define LOG(...) std::cout
+#endif
+
+#ifndef VLOG
+#define VLOG(...) std::cout
+#endif
+
+} // namespace draco
+
+#ifdef DRACO_DEBUG
+#define DRACO_DCHECK(x) (assert(x));
+#define DRACO_DCHECK_EQ(a, b) assert((a) == (b));
+#define DRACO_DCHECK_NE(a, b) assert((a) != (b));
+#define DRACO_DCHECK_GE(a, b) assert((a) >= (b));
+#define DRACO_DCHECK_GT(a, b) assert((a) > (b));
+#define DRACO_DCHECK_LE(a, b) assert((a) <= (b));
+#define DRACO_DCHECK_LT(a, b) assert((a) < (b));
+#define DRACO_DCHECK_NOTNULL(x) assert((x) != NULL);
+#else
+#define DRACO_DCHECK(x)
+#define DRACO_DCHECK_EQ(a, b)
+#define DRACO_DCHECK_NE(a, b)
+#define DRACO_DCHECK_GE(a, b)
+#define DRACO_DCHECK_GT(a, b)
+#define DRACO_DCHECK_LE(a, b)
+#define DRACO_DCHECK_LT(a, b)
+#define DRACO_DCHECK_NOTNULL(x)
+#endif
+
+// Helper macros for concatenating macro values.
+#define DRACO_MACROS_IMPL_CONCAT_INNER_(x, y) x##y
+#define DRACO_MACROS_IMPL_CONCAT_(x, y) DRACO_MACROS_IMPL_CONCAT_INNER_(x, y)
+
+// Expand the n-th argument of the macro. Used to select an argument based on
+// the number of entries in a variadic macro argument. Example usage:
+//
+// #define FUNC_1(x) x
+// #define FUNC_2(x, y) x + y
+// #define FUNC_3(x, y, z) x + y + z
+//
+// #define VARIADIC_MACRO(...)
+// DRACO_SELECT_NTH_FROM_3(__VA_ARGS__, FUNC_3, FUNC_2, FUNC_1) __VA_ARGS__
+//
+#define DRACO_SELECT_NTH_FROM_2(_1, _2, NAME) NAME
+#define DRACO_SELECT_NTH_FROM_3(_1, _2, _3, NAME) NAME
+#define DRACO_SELECT_NTH_FROM_4(_1, _2, _3, _4, NAME) NAME
+
+// Macro that converts the Draco bit-stream into one uint16_t number.
+// Useful mostly when checking version numbers.
+#define DRACO_BITSTREAM_VERSION(MAJOR, MINOR) \
+ ((static_cast<uint16_t>(MAJOR) << 8) | MINOR)
+
+// Macro that converts the uint16_t Draco bit-stream number into the major
+// and minor components respectively.
+#define DRACO_BISTREAM_VERSION_MAJOR(VERSION) \
+ (static_cast<uint8_t>(VERSION >> 8))
+#define DRACO_BISTREAM_VERSION_MINOR(VERSION) \
+ (static_cast<uint8_t>(VERSION & 0xFF))
+
+#endif // DRACO_CORE_MACROS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/math_utils.h b/libs/assimp/contrib/draco/src/draco/core/math_utils.h
new file mode 100644
index 0000000..7f382fa
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/math_utils.h
@@ -0,0 +1,55 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_MATH_UTILS_H_
+#define DRACO_CORE_MATH_UTILS_H_
+
+#include <inttypes.h>
+
+#include "draco/core/vector_d.h"
+
+#define DRACO_INCREMENT_MOD(I, M) (((I) == ((M)-1)) ? 0 : ((I) + 1))
+
+// Returns floor(sqrt(x)) where x is an integer number. The main intent of this
+// function is to provide a cross platform and deterministic implementation of
+// square root for integer numbers. This function is not intended to be a
+// replacement for std::sqrt() for general cases. IntSqrt is in fact about 3X
+// slower compared to most implementations of std::sqrt().
+inline uint64_t IntSqrt(uint64_t number) {
+ if (number == 0) {
+ return 0;
+ }
+ // First estimate good initial value of the square root as log2(number).
+ uint64_t act_number = number;
+ uint64_t square_root = 1;
+ while (act_number >= 2) {
+ // Double the square root until |square_root * square_root > number|.
+ square_root *= 2;
+ act_number /= 4;
+ }
+ // Perform Newton's (or Babylonian) method to find the true floor(sqrt()).
+ do {
+ // New |square_root| estimate is computed as the average between
+ // |square_root| and |number / square_root|.
+ square_root = (square_root + number / square_root) / 2;
+
+ // Note that after the first iteration, the estimate is always going to be
+ // larger or equal to the true square root value. Therefore to check
+ // convergence, we can simply detect condition when the square of the
+ // estimated square root is larger than the input.
+ } while (square_root * square_root > number);
+ return square_root;
+}
+
+#endif // DRACO_CORE_MATH_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/math_utils_test.cc b/libs/assimp/contrib/draco/src/draco/core/math_utils_test.cc
new file mode 100644
index 0000000..8c255d0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/math_utils_test.cc
@@ -0,0 +1,22 @@
+#include "draco/core/math_utils.h"
+
+#include <cmath>
+#include <random>
+
+#include "draco/core/draco_test_base.h"
+
+using draco::Vector3f;
+
+TEST(MathUtils, Mod) { EXPECT_EQ(DRACO_INCREMENT_MOD(1, 1 << 1), 0); }
+
+TEST(MathUtils, IntSqrt) {
+ ASSERT_EQ(IntSqrt(0), 0);
+ // 64-bit pseudo random number generator seeded with a predefined number.
+ std::mt19937_64 generator(109);
+ std::uniform_int_distribution<uint64_t> distribution(0, 1ull << 60);
+
+ for (int i = 0; i < 10000; ++i) {
+ const uint64_t number = distribution(generator);
+ ASSERT_EQ(IntSqrt(number), static_cast<uint64_t>(floor(std::sqrt(number))));
+ }
+}
diff --git a/libs/assimp/contrib/draco/src/draco/core/options.cc b/libs/assimp/contrib/draco/src/draco/core/options.cc
new file mode 100644
index 0000000..9b81db4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/options.cc
@@ -0,0 +1,94 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/options.h"
+
+#include <cstdlib>
+#include <string>
+#include <utility>
+
+namespace draco {
+
+Options::Options() {}
+
+void Options::MergeAndReplace(const Options &other_options) {
+ for (const auto &item : other_options.options_) {
+ options_[item.first] = item.second;
+ }
+}
+
+void Options::SetInt(const std::string &name, int val) {
+ options_[name] = std::to_string(val);
+}
+
+void Options::SetFloat(const std::string &name, float val) {
+ options_[name] = std::to_string(val);
+}
+
+void Options::SetBool(const std::string &name, bool val) {
+ options_[name] = std::to_string(val ? 1 : 0);
+}
+
+void Options::SetString(const std::string &name, const std::string &val) {
+ options_[name] = val;
+}
+
+int Options::GetInt(const std::string &name) const { return GetInt(name, -1); }
+
+int Options::GetInt(const std::string &name, int default_val) const {
+ const auto it = options_.find(name);
+ if (it == options_.end()) {
+ return default_val;
+ }
+ return std::atoi(it->second.c_str());
+}
+
+float Options::GetFloat(const std::string &name) const {
+ return GetFloat(name, -1);
+}
+
+float Options::GetFloat(const std::string &name, float default_val) const {
+ const auto it = options_.find(name);
+ if (it == options_.end()) {
+ return default_val;
+ }
+ return static_cast<float>(std::atof(it->second.c_str()));
+}
+
+bool Options::GetBool(const std::string &name) const {
+ return GetBool(name, false);
+}
+
+bool Options::GetBool(const std::string &name, bool default_val) const {
+ const int ret = GetInt(name, -1);
+ if (ret == -1) {
+ return default_val;
+ }
+ return static_cast<bool>(ret);
+}
+
+std::string Options::GetString(const std::string &name) const {
+ return GetString(name, "");
+}
+
+std::string Options::GetString(const std::string &name,
+ const std::string &default_val) const {
+ const auto it = options_.find(name);
+ if (it == options_.end()) {
+ return default_val;
+ }
+ return it->second;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/options.h b/libs/assimp/contrib/draco/src/draco/core/options.h
new file mode 100644
index 0000000..1bc4dc0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/options.h
@@ -0,0 +1,150 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_OPTIONS_H_
+#define DRACO_CORE_OPTIONS_H_
+
+#include <cstdlib>
+#include <map>
+#include <string>
+
+namespace draco {
+
+// Class for storing generic options as a <name, value> pair in a string map.
+// The API provides helper methods for directly storing values of various types
+// such as ints and bools. One named option should be set with only a single
+// data type.
+class Options {
+ public:
+ Options();
+
+ // Merges |other_options| on top of the existing options of this instance
+ // replacing all entries that are present in both options instances.
+ void MergeAndReplace(const Options &other_options);
+
+ void SetInt(const std::string &name, int val);
+ void SetFloat(const std::string &name, float val);
+ void SetBool(const std::string &name, bool val);
+ void SetString(const std::string &name, const std::string &val);
+ template <class VectorT>
+ void SetVector(const std::string &name, const VectorT &vec) {
+ SetVector(name, &vec[0], VectorT::dimension);
+ }
+ template <typename DataTypeT>
+ void SetVector(const std::string &name, const DataTypeT *vec, int num_dims);
+
+ // Getters will return a default value if the entry is not found. The default
+ // value can be specified in the overloaded version of each function.
+ int GetInt(const std::string &name) const;
+ int GetInt(const std::string &name, int default_val) const;
+ float GetFloat(const std::string &name) const;
+ float GetFloat(const std::string &name, float default_val) const;
+ bool GetBool(const std::string &name) const;
+ bool GetBool(const std::string &name, bool default_val) const;
+ std::string GetString(const std::string &name) const;
+ std::string GetString(const std::string &name,
+ const std::string &default_val) const;
+ template <class VectorT>
+ VectorT GetVector(const std::string &name, const VectorT &default_val) const;
+ // Unlike other Get functions, this function returns false if the option does
+ // not exist, otherwise it fills |out_val| with the vector values. If a
+ // default value is needed, it can be set in |out_val|.
+ template <typename DataTypeT>
+ bool GetVector(const std::string &name, int num_dims,
+ DataTypeT *out_val) const;
+
+ bool IsOptionSet(const std::string &name) const {
+ return options_.count(name) > 0;
+ }
+
+ private:
+ // All entries are internally stored as strings and converted to the desired
+ // return type based on the used Get* method.
+ // TODO(ostava): Consider adding type safety mechanism that would prevent
+ // unsafe operations such as a conversion from vector to int.
+ std::map<std::string, std::string> options_;
+};
+
+template <typename DataTypeT>
+void Options::SetVector(const std::string &name, const DataTypeT *vec,
+ int num_dims) {
+ std::string out;
+ for (int i = 0; i < num_dims; ++i) {
+ if (i > 0) {
+ out += " ";
+ }
+
+// GNU STL on android doesn't include a proper std::to_string, but the libc++
+// version does
+#if defined(ANDROID) && !defined(_LIBCPP_VERSION)
+ out += to_string(vec[i]);
+#else
+ out += std::to_string(vec[i]);
+#endif
+ }
+ options_[name] = out;
+}
+
+template <class VectorT>
+VectorT Options::GetVector(const std::string &name,
+ const VectorT &default_val) const {
+ VectorT ret = default_val;
+ GetVector(name, VectorT::dimension, &ret[0]);
+ return ret;
+}
+
+template <typename DataTypeT>
+bool Options::GetVector(const std::string &name, int num_dims,
+ DataTypeT *out_val) const {
+ const auto it = options_.find(name);
+ if (it == options_.end()) {
+ return false;
+ }
+ const std::string value = it->second;
+ if (value.length() == 0) {
+ return true; // Option set but no data is present
+ }
+ const char *act_str = value.c_str();
+ char *next_str;
+ for (int i = 0; i < num_dims; ++i) {
+ if (std::is_integral<DataTypeT>::value) {
+#ifdef ANDROID
+ const int val = strtol(act_str, &next_str, 10);
+#else
+ const int val = static_cast<int>(std::strtol(act_str, &next_str, 10));
+#endif
+ if (act_str == next_str) {
+ return true; // End reached.
+ }
+ act_str = next_str;
+ out_val[i] = static_cast<DataTypeT>(val);
+ } else {
+#ifdef ANDROID
+ const float val = strtof(act_str, &next_str);
+#else
+ const float val = std::strtof(act_str, &next_str);
+#endif
+ if (act_str == next_str) {
+ return true; // End reached.
+ }
+ act_str = next_str;
+ out_val[i] = static_cast<DataTypeT>(val);
+ }
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_CORE_OPTIONS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/quantization_utils.cc b/libs/assimp/contrib/draco/src/draco/core/quantization_utils.cc
new file mode 100644
index 0000000..58dcf5c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/quantization_utils.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/quantization_utils.h"
+
+namespace draco {
+
+Quantizer::Quantizer() : inverse_delta_(1.f) {}
+
+void Quantizer::Init(float range, int32_t max_quantized_value) {
+ inverse_delta_ = static_cast<float>(max_quantized_value) / range;
+}
+
+void Quantizer::Init(float delta) { inverse_delta_ = 1.f / delta; }
+
+Dequantizer::Dequantizer() : delta_(1.f) {}
+
+bool Dequantizer::Init(float range, int32_t max_quantized_value) {
+ if (max_quantized_value <= 0) {
+ return false;
+ }
+ delta_ = range / static_cast<float>(max_quantized_value);
+ return true;
+}
+
+bool Dequantizer::Init(float delta) {
+ delta_ = delta;
+ return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/quantization_utils.h b/libs/assimp/contrib/draco/src/draco/core/quantization_utils.h
new file mode 100644
index 0000000..0f60f1e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/quantization_utils.h
@@ -0,0 +1,82 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// A set of classes for quantizing and dequantizing of floating point values
+// into integers.
+// The quantization works on all floating point numbers within (-range, +range)
+// interval producing integers in range
+// (-max_quantized_value, +max_quantized_value).
+
+#ifndef DRACO_CORE_QUANTIZATION_UTILS_H_
+#define DRACO_CORE_QUANTIZATION_UTILS_H_
+
+#include <stdint.h>
+
+#include <cmath>
+
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// Class for quantizing single precision floating point values. The values
+// should be centered around zero and be within interval (-range, +range), where
+// the range is specified in the Init() method. Alternatively, the quantization
+// can be defined by |delta| that specifies the distance between two quantized
+// values. Note that the quantizer always snaps the values to the nearest
+// integer value. E.g. for |delta| == 1.f, values -0.4f and 0.4f would be
+// both quantized to 0 while value 0.6f would be quantized to 1. If a value
+// lies exactly between two quantized states, it is always rounded up. E.g.,
+// for |delta| == 1.f, value -0.5f would be quantized to 0 while 0.5f would be
+// quantized to 1.
+class Quantizer {
+ public:
+ Quantizer();
+ void Init(float range, int32_t max_quantized_value);
+ void Init(float delta);
+ inline int32_t QuantizeFloat(float val) const {
+ val *= inverse_delta_;
+ return static_cast<int32_t>(floor(val + 0.5f));
+ }
+ inline int32_t operator()(float val) const { return QuantizeFloat(val); }
+
+ private:
+ float inverse_delta_;
+};
+
+// Class for dequantizing values that were previously quantized using the
+// Quantizer class.
+class Dequantizer {
+ public:
+ Dequantizer();
+
+ // Initializes the dequantizer. Both parameters must correspond to the values
+ // provided to the initializer of the Quantizer class.
+ // Returns false when the initialization fails.
+ bool Init(float range, int32_t max_quantized_value);
+
+ // Initializes the dequantizer using the |delta| between two quantized values.
+ bool Init(float delta);
+
+ inline float DequantizeFloat(int32_t val) const {
+ return static_cast<float>(val) * delta_;
+ }
+ inline float operator()(int32_t val) const { return DequantizeFloat(val); }
+
+ private:
+ float delta_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_QUANTIZATION_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/quantization_utils_test.cc b/libs/assimp/contrib/draco/src/draco/core/quantization_utils_test.cc
new file mode 100644
index 0000000..b4f0473
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/quantization_utils_test.cc
@@ -0,0 +1,91 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/quantization_utils.h"
+
+#include "draco/core/draco_test_base.h"
+
+namespace draco {
+
+class QuantizationUtilsTest : public ::testing::Test {};
+
+TEST_F(QuantizationUtilsTest, TestQuantizer) {
+ Quantizer quantizer;
+ quantizer.Init(10.f, 255);
+ EXPECT_EQ(quantizer.QuantizeFloat(0.f), 0);
+ EXPECT_EQ(quantizer.QuantizeFloat(10.f), 255);
+ EXPECT_EQ(quantizer.QuantizeFloat(-10.f), -255);
+ EXPECT_EQ(quantizer.QuantizeFloat(4.999f), 127);
+ EXPECT_EQ(quantizer.QuantizeFloat(5.f), 128);
+ EXPECT_EQ(quantizer.QuantizeFloat(-4.9999f), -127);
+ // Note: Both -5.f and +5.f lie exactly on the boundary between two
+ // quantized values (127.5f and -127.5f). Due to rounding, both values are
+ // then converted to 128 and -127 respectively.
+ EXPECT_EQ(quantizer.QuantizeFloat(-5.f), -127);
+ EXPECT_EQ(quantizer.QuantizeFloat(-5.0001f), -128);
+
+ // Out of range quantization.
+ // The behavior is technically undefined, but both quantizer and dequantizer
+ // should still work correctly unless the quantized values overflow.
+ EXPECT_LT(quantizer.QuantizeFloat(-15.f), -255);
+ EXPECT_GT(quantizer.QuantizeFloat(15.f), 255);
+}
+
+TEST_F(QuantizationUtilsTest, TestDequantizer) {
+ Dequantizer dequantizer;
+ ASSERT_TRUE(dequantizer.Init(10.f, 255));
+ EXPECT_EQ(dequantizer.DequantizeFloat(0), 0.f);
+ EXPECT_EQ(dequantizer.DequantizeFloat(255), 10.f);
+ EXPECT_EQ(dequantizer.DequantizeFloat(-255), -10.f);
+ EXPECT_EQ(dequantizer.DequantizeFloat(128), 10.f * (128.f / 255.f));
+
+ // Test that the dequantizer fails to initialize with invalid input
+ // parameters.
+ ASSERT_FALSE(dequantizer.Init(1.f, 0));
+ ASSERT_FALSE(dequantizer.Init(1.f, -4));
+}
+
+TEST_F(QuantizationUtilsTest, TestDeltaQuantization) {
+ // Test verifies that the quantizer and dequantizer work correctly when
+ // initialized with a delta value.
+ Quantizer quantizer_delta;
+ quantizer_delta.Init(0.5f);
+
+ Quantizer quantizer_range;
+ quantizer_range.Init(50.f, 100);
+
+ EXPECT_EQ(quantizer_delta.QuantizeFloat(1.2f), 2);
+ EXPECT_EQ(quantizer_delta.QuantizeFloat(10.f),
+ quantizer_range.QuantizeFloat(10.f));
+ EXPECT_EQ(quantizer_delta.QuantizeFloat(-3.3f),
+ quantizer_range.QuantizeFloat(-3.3f));
+ EXPECT_EQ(quantizer_delta.QuantizeFloat(0.25f),
+ quantizer_range.QuantizeFloat(0.25f));
+
+ Dequantizer dequantizer_delta;
+ dequantizer_delta.Init(0.5f);
+
+ Dequantizer dequantizer_range;
+ dequantizer_range.Init(50.f, 100);
+
+ EXPECT_EQ(dequantizer_delta.DequantizeFloat(2), 1.f);
+ EXPECT_EQ(dequantizer_delta.DequantizeFloat(-4),
+ dequantizer_range.DequantizeFloat(-4));
+ EXPECT_EQ(dequantizer_delta.DequantizeFloat(9),
+ dequantizer_range.DequantizeFloat(9));
+ EXPECT_EQ(dequantizer_delta.DequantizeFloat(0),
+ dequantizer_range.DequantizeFloat(0));
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/core/status.h b/libs/assimp/contrib/draco/src/draco/core/status.h
new file mode 100644
index 0000000..449ad85
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/status.h
@@ -0,0 +1,77 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_STATUS_H_
+#define DRACO_CORE_STATUS_H_
+
+#include <string>
+
+namespace draco {
+
+// Class encapsulating a return status of an operation with an optional error
+// message. Intended to be used as a return type for functions instead of bool.
+class Status {
+ public:
+ enum Code {
+ OK = 0,
+ DRACO_ERROR = -1, // Used for general errors.
+ IO_ERROR = -2, // Error when handling input or output stream.
+ INVALID_PARAMETER = -3, // Invalid parameter passed to a function.
+ UNSUPPORTED_VERSION = -4, // Input not compatible with the current version.
+ UNKNOWN_VERSION = -5, // Input was created with an unknown version of
+ // the library.
+ UNSUPPORTED_FEATURE = -6, // Input contains feature that is not supported.
+ };
+
+ Status() : code_(OK) {}
+ Status(const Status &status) = default;
+ Status(Status &&status) = default;
+ explicit Status(Code code) : code_(code) {}
+ Status(Code code, const std::string &error_msg)
+ : code_(code), error_msg_(error_msg) {}
+
+ Code code() const { return code_; }
+ const std::string &error_msg_string() const { return error_msg_; }
+ const char *error_msg() const { return error_msg_.c_str(); }
+
+ bool operator==(Code code) const { return code == code_; }
+ bool ok() const { return code_ == OK; }
+
+ Status &operator=(const Status &) = default;
+
+ private:
+ Code code_;
+ std::string error_msg_;
+};
+
+inline std::ostream &operator<<(std::ostream &os, const Status &status) {
+ os << status.error_msg_string();
+ return os;
+}
+
+inline Status OkStatus() { return Status(Status::OK); }
+
+// Evaluates an expression that returns draco::Status. If the status is not OK,
+// the macro returns the status object.
+#define DRACO_RETURN_IF_ERROR(expression) \
+ { \
+ const draco::Status _local_status = (expression); \
+ if (!_local_status.ok()) { \
+ return _local_status; \
+ } \
+ }
+
+} // namespace draco
+
+#endif // DRACO_CORE_STATUS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/status_or.h b/libs/assimp/contrib/draco/src/draco/core/status_or.h
new file mode 100644
index 0000000..156b9bc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/status_or.h
@@ -0,0 +1,81 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_STATUS_OR_H_
+#define DRACO_CORE_STATUS_OR_H_
+
+#include "draco/core/macros.h"
+#include "draco/core/status.h"
+
+namespace draco {
+
+// Class StatusOr is used to wrap a Status along with a value of a specified
+// type |T|. StatusOr is intended to be returned from functions in situations
+// where it is desirable to carry over more information about the potential
+// errors encountered during the function execution. If there are no errors,
+// the caller can simply use the return value, otherwise the Status object
+// provides more info about the encountered problem.
+template <class T>
+class StatusOr {
+ public:
+ StatusOr() {}
+ // Note: Constructors are intentionally not explicit to allow returning
+ // Status or the return value directly from functions.
+ StatusOr(const StatusOr &) = default;
+ StatusOr(StatusOr &&) = default;
+ StatusOr(const Status &status) : status_(status) {}
+ StatusOr(const T &value) : status_(OkStatus()), value_(value) {}
+ StatusOr(T &&value) : status_(OkStatus()), value_(std::move(value)) {}
+ StatusOr(const Status &status, const T &value)
+ : status_(status), value_(value) {}
+
+ const Status &status() const { return status_; }
+ const T &value() const & { return value_; }
+ const T &&value() const && { return std::move(value_); }
+ T &&value() && { return std::move(value_); }
+
+ // For consistency with existing Google StatusOr API we also include
+ // ValueOrDie() that currently returns the value().
+ const T &ValueOrDie() const & { return value(); }
+ T &&ValueOrDie() && { return std::move(value()); }
+
+ bool ok() const { return status_.ok(); }
+
+ private:
+ Status status_;
+ T value_;
+};
+
+// In case StatusOr<T> is ok(), this macro assigns value stored in StatusOr<T>
+// to |lhs|, otherwise it returns the error Status.
+//
+// DRACO_ASSIGN_OR_RETURN(lhs, expression)
+//
+#define DRACO_ASSIGN_OR_RETURN(lhs, expression) \
+ DRACO_ASSIGN_OR_RETURN_IMPL_(DRACO_MACROS_IMPL_CONCAT_(_statusor, __LINE__), \
+ lhs, expression, _status)
+
+// The actual implementation of the above macro.
+#define DRACO_ASSIGN_OR_RETURN_IMPL_(statusor, lhs, expression, error_expr) \
+ auto statusor = (expression); \
+ if (!statusor.ok()) { \
+ auto _status = std::move(statusor.status()); \
+ (void)_status; /* error_expression may not use it */ \
+ return error_expr; \
+ } \
+ lhs = std::move(statusor).value();
+
+} // namespace draco
+
+#endif // DRACO_CORE_STATUS_OR_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/status_test.cc b/libs/assimp/contrib/draco/src/draco/core/status_test.cc
new file mode 100644
index 0000000..c1ad4ab
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/status_test.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/status.h"
+
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
+class StatusTest : public ::testing::Test {
+ protected:
+ StatusTest() {}
+};
+
TEST_F(StatusTest, TestStatusOutput) {
  // Tests that the Status can be stored in a provided std::ostream.
  const draco::Status status(draco::Status::DRACO_ERROR, "Error msg.");
  ASSERT_EQ(status.code(), draco::Status::DRACO_ERROR);

  // Streaming a Status is expected to emit only its error message text.
  std::stringstream str;
  str << status;
  ASSERT_EQ(str.str(), "Error msg.");
}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/core/varint_decoding.h b/libs/assimp/contrib/draco/src/draco/core/varint_decoding.h
new file mode 100644
index 0000000..cff47e9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/varint_decoding.h
@@ -0,0 +1,81 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_VARINT_DECODING_H_
+#define DRACO_CORE_VARINT_DECODING_H_
+
+#include <type_traits>
+
+#include "draco/core/bit_utils.h"
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+namespace {
+
+// Decodes a specified unsigned integer as varint. |depth| is the current
+// recursion call depth. The first call to the function must be 1.
+template <typename IntTypeT>
+bool DecodeVarintUnsigned(int depth, IntTypeT *out_val, DecoderBuffer *buffer) {
+ constexpr IntTypeT max_depth = sizeof(IntTypeT) + 1 + (sizeof(IntTypeT) >> 3);
+ if (depth > max_depth) {
+ return false;
+ }
+ // Coding of unsigned values.
+ // 0-6 bit - data
+ // 7 bit - next byte?
+ uint8_t in;
+ if (!buffer->Decode(&in)) {
+ return false;
+ }
+ if (in & (1 << 7)) {
+ // Next byte is available, decode it first.
+ if (!DecodeVarintUnsigned<IntTypeT>(depth + 1, out_val, buffer)) {
+ return false;
+ }
+ // Append decoded info from this byte.
+ *out_val <<= 7;
+ *out_val |= in & ((1 << 7) - 1);
+ } else {
+ // Last byte reached
+ *out_val = in;
+ }
+ return true;
+}
+
+} // namespace
+
+// Decodes a specified integer as varint. Note that the IntTypeT must be the
+// same as the one used in the corresponding EncodeVarint() call.
+// out_val is undefined if this returns false.
// Decodes a specified integer as varint. Note that the IntTypeT must be the
// same as the one used in the corresponding EncodeVarint() call.
// out_val is undefined if this returns false.
template <typename IntTypeT>
bool DecodeVarint(IntTypeT *out_val, DecoderBuffer *buffer) {
  if (std::is_unsigned<IntTypeT>::value) {
    // Unsigned values are decoded directly.
    if (!DecodeVarintUnsigned<IntTypeT>(1, out_val, buffer)) {
      return false;
    }
  } else {
    // IntTypeT is a signed value. Decode the symbol and convert to signed.
    // ConvertSymbolToSignedInt() (from bit_utils.h) undoes the
    // signed-to-unsigned mapping applied by EncodeVarint().
    typename std::make_unsigned<IntTypeT>::type symbol;
    if (!DecodeVarintUnsigned(1, &symbol, buffer)) {
      return false;
    }
    *out_val = ConvertSymbolToSignedInt(symbol);
  }
  return true;
}
+
+} // namespace draco
+
+#endif // DRACO_CORE_VARINT_DECODING_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/varint_encoding.h b/libs/assimp/contrib/draco/src/draco/core/varint_encoding.h
new file mode 100644
index 0000000..9a8a539
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/varint_encoding.h
@@ -0,0 +1,61 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_VARINT_ENCODING_H_
+#define DRACO_CORE_VARINT_ENCODING_H_
+
+#include <type_traits>
+
+#include "draco/core/bit_utils.h"
+#include "draco/core/encoder_buffer.h"
+
+namespace draco {
+
+// Encodes a specified integer as varint. Note that different coding is used
+// when IntTypeT is an unsigned data type.
+template <typename IntTypeT>
+bool EncodeVarint(IntTypeT val, EncoderBuffer *out_buffer) {
+ if (std::is_unsigned<IntTypeT>::value) {
+ // Coding of unsigned values.
+ // 0-6 bit - data
+ // 7 bit - next byte?
+ uint8_t out = 0;
+ out |= val & ((1 << 7) - 1);
+ if (val >= (1 << 7)) {
+ out |= (1 << 7);
+ if (!out_buffer->Encode(out)) {
+ return false;
+ }
+ if (!EncodeVarint<IntTypeT>(val >> 7, out_buffer)) {
+ return false;
+ }
+ return true;
+ }
+ if (!out_buffer->Encode(out)) {
+ return false;
+ }
+ } else {
+ // IntTypeT is a signed value. Convert to unsigned symbol and encode.
+ const typename std::make_unsigned<IntTypeT>::type symbol =
+ ConvertSignedIntToSymbol(val);
+ if (!EncodeVarint(symbol, out_buffer)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_CORE_VARINT_ENCODING_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/vector_d.h b/libs/assimp/contrib/draco/src/draco/core/vector_d.h
new file mode 100644
index 0000000..a3c46a4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/vector_d.h
@@ -0,0 +1,355 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_CORE_VECTOR_D_H_
+#define DRACO_CORE_VECTOR_D_H_
+
#include <inttypes.h>

#include <algorithm>
#include <array>
#include <cmath>
#include <limits>
#include <ostream>
#include <type_traits>

#include "draco/core/macros.h"
+
+namespace draco {
+// D-dimensional vector class with basic operations.
+template <class ScalarT, int dimension_t>
+class VectorD {
+ public:
+ static constexpr int dimension = dimension_t;
+
+ typedef ScalarT Scalar;
+ typedef VectorD<Scalar, dimension_t> Self;
+
+ // TODO(hemmer): Deprecate.
+ typedef ScalarT CoefficientType;
+
+ VectorD() {
+ for (int i = 0; i < dimension; ++i) {
+ (*this)[i] = Scalar(0);
+ }
+ }
+
+ // The following constructor does not compile in opt mode, which for now led
+ // to the constructors further down, which is not ideal.
+ // TODO(hemmer): fix constructor below and remove others.
+ // template <typename... Args>
+ // explicit VectorD(Args... args) : v_({args...}) {}
+
+ VectorD(const Scalar &c0, const Scalar &c1) : v_({{c0, c1}}) {
+ DRACO_DCHECK_EQ(dimension, 2);
+ v_[0] = c0;
+ v_[1] = c1;
+ }
+
+ VectorD(const Scalar &c0, const Scalar &c1, const Scalar &c2)
+ : v_({{c0, c1, c2}}) {
+ DRACO_DCHECK_EQ(dimension, 3);
+ }
+
+ VectorD(const Scalar &c0, const Scalar &c1, const Scalar &c2,
+ const Scalar &c3)
+ : v_({{c0, c1, c2, c3}}) {
+ DRACO_DCHECK_EQ(dimension, 4);
+ }
+
+ VectorD(const Scalar &c0, const Scalar &c1, const Scalar &c2,
+ const Scalar &c3, const Scalar &c4)
+ : v_({{c0, c1, c2, c3, c4}}) {
+ DRACO_DCHECK_EQ(dimension, 5);
+ }
+
+ VectorD(const Scalar &c0, const Scalar &c1, const Scalar &c2,
+ const Scalar &c3, const Scalar &c4, const Scalar &c5)
+ : v_({{c0, c1, c2, c3, c4, c5}}) {
+ DRACO_DCHECK_EQ(dimension, 6);
+ }
+
+ VectorD(const Scalar &c0, const Scalar &c1, const Scalar &c2,
+ const Scalar &c3, const Scalar &c4, const Scalar &c5,
+ const Scalar &c6)
+ : v_({{c0, c1, c2, c3, c4, c5, c6}}) {
+ DRACO_DCHECK_EQ(dimension, 7);
+ }
+
+ VectorD(const Self &o) {
+ for (int i = 0; i < dimension; ++i) {
+ (*this)[i] = o[i];
+ }
+ }
+
+ // Constructs the vector from another vector with a different data type or a
+ // different number of components. If the |src_vector| has more components
+ // than |this| vector, the excess components are truncated. If the
+ // |src_vector| has fewer components than |this| vector, the remaining
+ // components are padded with 0.
+ // Note that the constructor is intentionally explicit to avoid accidental
+ // conversions between different vector types.
+ template <class OtherScalarT, int other_dimension_t>
+ explicit VectorD(const VectorD<OtherScalarT, other_dimension_t> &src_vector) {
+ for (int i = 0; i < dimension; ++i) {
+ if (i < other_dimension_t) {
+ v_[i] = Scalar(src_vector[i]);
+ } else {
+ v_[i] = Scalar(0);
+ }
+ }
+ }
+
+ Scalar &operator[](int i) { return v_[i]; }
+ const Scalar &operator[](int i) const { return v_[i]; }
+ // TODO(hemmer): remove.
+ // Similar to interface of Eigen library.
+ Scalar &operator()(int i) { return v_[i]; }
+ const Scalar &operator()(int i) const { return v_[i]; }
+
+ // Unary operators.
+ Self operator-() const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = -(*this)[i];
+ }
+ return ret;
+ }
+
+ // Binary operators.
+ Self operator+(const Self &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] + o[i];
+ }
+ return ret;
+ }
+
+ Self operator-(const Self &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] - o[i];
+ }
+ return ret;
+ }
+
+ Self operator*(const Self &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] * o[i];
+ }
+ return ret;
+ }
+
+ Self &operator+=(const Self &o) {
+ for (int i = 0; i < dimension; ++i) {
+ (*this)[i] += o[i];
+ }
+ return *this;
+ }
+
+ Self &operator-=(const Self &o) {
+ for (int i = 0; i < dimension; ++i) {
+ (*this)[i] -= o[i];
+ }
+ return *this;
+ }
+
+ Self &operator*=(const Self &o) {
+ for (int i = 0; i < dimension; ++i) {
+ (*this)[i] *= o[i];
+ }
+ return *this;
+ }
+
+ Self operator*(const Scalar &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] * o;
+ }
+ return ret;
+ }
+
+ Self operator/(const Scalar &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] / o;
+ }
+ return ret;
+ }
+
+ Self operator+(const Scalar &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] + o;
+ }
+ return ret;
+ }
+
+ Self operator-(const Scalar &o) const {
+ Self ret;
+ for (int i = 0; i < dimension; ++i) {
+ ret[i] = (*this)[i] - o;
+ }
+ return ret;
+ }
+
+ bool operator==(const Self &o) const {
+ for (int i = 0; i < dimension; ++i) {
+ if ((*this)[i] != o[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const Self &x) const { return !((*this) == x); }
+
+ bool operator<(const Self &x) const {
+ for (int i = 0; i < dimension - 1; ++i) {
+ if (v_[i] < x.v_[i]) {
+ return true;
+ }
+ if (v_[i] > x.v_[i]) {
+ return false;
+ }
+ }
+ // Only one check needed for the last dimension.
+ if (v_[dimension - 1] < x.v_[dimension - 1]) {
+ return true;
+ }
+ return false;
+ }
+
+ // Functions.
+ Scalar SquaredNorm() const { return this->Dot(*this); }
+
+ // Computes L1, the sum of absolute values of all entries.
+ Scalar AbsSum() const {
+ Scalar result(0);
+ for (int i = 0; i < dimension; ++i) {
+ Scalar next_value = std::abs(v_[i]);
+ if (result > std::numeric_limits<Scalar>::max() - next_value) {
+ // Return the max if adding would have caused an overflow.
+ return std::numeric_limits<Scalar>::max();
+ }
+ result += next_value;
+ }
+ return result;
+ }
+
+ Scalar Dot(const Self &o) const {
+ Scalar ret(0);
+ for (int i = 0; i < dimension; ++i) {
+ ret += (*this)[i] * o[i];
+ }
+ return ret;
+ }
+
+ void Normalize() {
+ const Scalar magnitude = std::sqrt(this->SquaredNorm());
+ if (magnitude == 0) {
+ return;
+ }
+ for (int i = 0; i < dimension; ++i) {
+ (*this)[i] /= magnitude;
+ }
+ }
+
+ Self GetNormalized() const {
+ Self ret(*this);
+ ret.Normalize();
+ return ret;
+ }
+
+ const Scalar &MaxCoeff() const {
+ return *std::max_element(v_.begin(), v_.end());
+ }
+
+ const Scalar &MinCoeff() const {
+ return *std::min_element(v_.begin(), v_.end());
+ }
+
+ Scalar *data() { return &(v_[0]); }
+ const Scalar *data() const { return &(v_[0]); }
+
+ private:
+ std::array<Scalar, dimension> v_;
+};
+
+// Scalar multiplication from the other side too.
+template <class ScalarT, int dimension_t>
+VectorD<ScalarT, dimension_t> operator*(
+ const ScalarT &o, const VectorD<ScalarT, dimension_t> &v) {
+ return v * o;
+}
+
+// Calculates the squared distance between two points.
+template <class ScalarT, int dimension_t>
+ScalarT SquaredDistance(const VectorD<ScalarT, dimension_t> &v1,
+ const VectorD<ScalarT, dimension_t> &v2) {
+ ScalarT difference;
+ ScalarT squared_distance = 0;
+ // Check each index separately so difference is never negative and underflow
+ // is avoided for unsigned types.
+ for (int i = 0; i < dimension_t; ++i) {
+ if (v1[i] >= v2[i]) {
+ difference = v1[i] - v2[i];
+ } else {
+ difference = v2[i] - v1[i];
+ }
+ squared_distance += (difference * difference);
+ }
+ return squared_distance;
+}
+
+// Global function computing the cross product of two 3D vectors.
+template <class ScalarT>
+VectorD<ScalarT, 3> CrossProduct(const VectorD<ScalarT, 3> &u,
+ const VectorD<ScalarT, 3> &v) {
+ // Preventing accidental use with uint32_t and the like.
+ static_assert(std::is_signed<ScalarT>::value,
+ "ScalarT must be a signed type. ");
+ VectorD<ScalarT, 3> r;
+ r[0] = (u[1] * v[2]) - (u[2] * v[1]);
+ r[1] = (u[2] * v[0]) - (u[0] * v[2]);
+ r[2] = (u[0] * v[1]) - (u[1] * v[0]);
+ return r;
+}
+
// Streams |vec| to |out| as space-separated components with no trailing
// space (e.g. "1 2 3").
template <class ScalarT, int dimension_t>
inline std::ostream &operator<<(
    std::ostream &out, const draco::VectorD<ScalarT, dimension_t> &vec) {
  for (int i = 0; i < dimension_t - 1; ++i) {
    out << vec[i] << " ";
  }
  out << vec[dimension_t - 1];
  return out;
}
+
// Convenience aliases for the commonly used float and uint32_t vectors.
typedef VectorD<float, 2> Vector2f;
typedef VectorD<float, 3> Vector3f;
typedef VectorD<float, 4> Vector4f;
typedef VectorD<float, 5> Vector5f;
typedef VectorD<float, 6> Vector6f;
typedef VectorD<float, 7> Vector7f;

typedef VectorD<uint32_t, 2> Vector2ui;
typedef VectorD<uint32_t, 3> Vector3ui;
typedef VectorD<uint32_t, 4> Vector4ui;
typedef VectorD<uint32_t, 5> Vector5ui;
typedef VectorD<uint32_t, 6> Vector6ui;
typedef VectorD<uint32_t, 7> Vector7ui;
+} // namespace draco
+
+#endif // DRACO_CORE_VECTOR_D_H_
diff --git a/libs/assimp/contrib/draco/src/draco/core/vector_d_test.cc b/libs/assimp/contrib/draco/src/draco/core/vector_d_test.cc
new file mode 100644
index 0000000..d66128f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/core/vector_d_test.cc
@@ -0,0 +1,306 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/core/vector_d.h"
+
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+
+namespace {
+
// Short aliases for the vector types exercised by these tests.
typedef draco::Vector2f Vector2f;
typedef draco::Vector3f Vector3f;
typedef draco::Vector4f Vector4f;
typedef draco::Vector5f Vector5f;
typedef draco::Vector2ui Vector2ui;
typedef draco::Vector3ui Vector3ui;
typedef draco::Vector4ui Vector4ui;
typedef draco::Vector5ui Vector5ui;

typedef draco::VectorD<int32_t, 3> Vector3i;
typedef draco::VectorD<int32_t, 4> Vector4i;

// Verifies SquaredDistance(v1, v2) equals |result| and is symmetric in its
// arguments.
template <class CoeffT, int dimension_t>
void TestSquaredDistance(const draco::VectorD<CoeffT, dimension_t> v1,
                         const draco::VectorD<CoeffT, dimension_t> v2,
                         const CoeffT result) {
  CoeffT squared_distance = SquaredDistance(v1, v2);
  ASSERT_EQ(squared_distance, result);
  squared_distance = SquaredDistance(v2, v1);
  ASSERT_EQ(squared_distance, result);
}
+
// Smoke test covering construction, comparison, arithmetic operators and
// normalization of VectorD.
TEST(VectorDTest, TestOperators) {
  {
    // Default construction zero-initializes all components.
    const Vector3f v;
    ASSERT_EQ(v[0], 0);
    ASSERT_EQ(v[1], 0);
    ASSERT_EQ(v[2], 0);
  }
  Vector3f v(1, 2, 3);
  ASSERT_EQ(v[0], 1);
  ASSERT_EQ(v[1], 2);
  ASSERT_EQ(v[2], 3);

  // Copy, equality and inequality.
  Vector3f w = v;
  ASSERT_TRUE(v == w);
  ASSERT_FALSE(v != w);
  ASSERT_EQ(w[0], 1);
  ASSERT_EQ(w[1], 2);
  ASSERT_EQ(w[2], 3);

  // Unary negation.
  w = -v;
  ASSERT_EQ(w[0], -1);
  ASSERT_EQ(w[1], -2);
  ASSERT_EQ(w[2], -3);

  // Component-wise addition and subtraction.
  w = v + v;
  ASSERT_EQ(w[0], 2);
  ASSERT_EQ(w[1], 4);
  ASSERT_EQ(w[2], 6);

  w = w - v;
  ASSERT_EQ(w[0], 1);
  ASSERT_EQ(w[1], 2);
  ASSERT_EQ(w[2], 3);

  // Scalar multiplication from left and right.
  w = v * 2.f;
  ASSERT_EQ(w[0], 2);
  ASSERT_EQ(w[1], 4);
  ASSERT_EQ(w[2], 6);
  w = 2.f * v;
  ASSERT_EQ(w[0], 2);
  ASSERT_EQ(w[1], 4);
  ASSERT_EQ(w[2], 6);

  ASSERT_EQ(v.SquaredNorm(), 14);
  ASSERT_EQ(v.Dot(v), 14);

  // Normalize() must yield a unit-length vector pointing the same way:
  // rescaling it by the original magnitude should recover v.
  Vector3f new_v = v;
  new_v.Normalize();
  const float tolerance = 1e-5;
  const float magnitude = std::sqrt(v.SquaredNorm());
  const float new_magnitude = std::sqrt(new_v.SquaredNorm());
  ASSERT_NEAR(new_magnitude, 1, tolerance);
  for (int i = 0; i < 3; ++i) {
    new_v[i] *= magnitude;
    ASSERT_NEAR(new_v[i], v[i], tolerance);
  }

  // Normalizing the zero vector must leave it unchanged (no div-by-zero).
  Vector3f x(0, 0, 0);
  x.Normalize();
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(0, x[i]);
  }
}
+
// Checks += including self-addition (w += w).
TEST(VectorDTest, TestAdditionAssignmentOperator) {
  Vector3ui v(1, 2, 3);
  Vector3ui w(4, 5, 6);

  w += v;
  ASSERT_EQ(w[0], 5);
  ASSERT_EQ(w[1], 7);
  ASSERT_EQ(w[2], 9);

  w += w;
  ASSERT_EQ(w[0], 10);
  ASSERT_EQ(w[1], 14);
  ASSERT_EQ(w[2], 18);
}

// Checks -= including self-subtraction (w -= w yields zero).
TEST(VectorDTest, TestSubtractionAssignmentOperator) {
  Vector3ui v(1, 2, 3);
  Vector3ui w(4, 6, 8);

  w -= v;
  ASSERT_EQ(w[0], 3);
  ASSERT_EQ(w[1], 4);
  ASSERT_EQ(w[2], 5);

  w -= w;
  ASSERT_EQ(w[0], 0);
  ASSERT_EQ(w[1], 0);
  ASSERT_EQ(w[2], 0);
}

// Checks *= including self-multiplication (v *= v squares each component).
TEST(VectorDTest, TestMultiplicationAssignmentOperator) {
  Vector3ui v(1, 2, 3);
  Vector3ui w(4, 5, 6);

  w *= v;
  ASSERT_EQ(w[0], 4);
  ASSERT_EQ(w[1], 10);
  ASSERT_EQ(w[2], 18);

  v *= v;
  ASSERT_EQ(v[0], 1);
  ASSERT_EQ(v[1], 4);
  ASSERT_EQ(v[2], 9);
}
+
// GetNormalized() must return each component divided by the magnitude and
// must not modify the original (it is a const member).
TEST(VectorTest, TestGetNormalized) {
  const Vector3f original(2, 3, -4);
  const Vector3f normalized = original.GetNormalized();
  const float magnitude = sqrt(original.SquaredNorm());
  const float tolerance = 1e-5f;
  ASSERT_NEAR(normalized[0], original[0] / magnitude, tolerance);
  ASSERT_NEAR(normalized[1], original[1] / magnitude, tolerance);
  ASSERT_NEAR(normalized[2], original[2] / magnitude, tolerance);
}

// The zero vector normalizes to itself rather than dividing by zero.
TEST(VectorTest, TestGetNormalizedWithZeroLengthVector) {
  const Vector3f original(0, 0, 0);
  const Vector3f normalized = original.GetNormalized();
  ASSERT_EQ(normalized[0], 0);
  ASSERT_EQ(normalized[1], 0);
  ASSERT_EQ(normalized[2], 0);
}
+
// Exercises SquaredDistance() over float and uint32_t vectors of dimensions
// 2-5. NOTE(review): the float expectations use exact equality (ASSERT_EQ via
// the helper), which relies on float rounding being identical on both sides
// of the comparison for these particular inputs.
TEST(VectorDTest, TestSquaredDistance) {
  // Test Vector2f: float, 2D.
  Vector2f v1_2f(5.5, 10.5);
  Vector2f v2_2f(3.5, 15.5);
  float result_f = 29;
  TestSquaredDistance(v1_2f, v2_2f, result_f);

  // Test Vector3f: float, 3D.
  Vector3f v1_3f(5.5, 10.5, 2.3);
  Vector3f v2_3f(3.5, 15.5, 0);
  result_f = 34.29;
  TestSquaredDistance(v1_3f, v2_3f, result_f);

  // Test Vector4f: float, 4D.
  Vector4f v1_4f(5.5, 10.5, 2.3, 7.2);
  Vector4f v2_4f(3.5, 15.5, 0, 9.9);
  result_f = 41.58;
  TestSquaredDistance(v1_4f, v2_4f, result_f);

  // Test Vector5f: float, 5D.
  Vector5f v1_5f(5.5, 10.5, 2.3, 7.2, 1.0);
  Vector5f v2_5f(3.5, 15.5, 0, 9.9, 0.2);
  result_f = 42.22;
  TestSquaredDistance(v1_5f, v2_5f, result_f);

  // Test Vector 2ui: uint32_t, 2D.
  Vector2ui v1_2ui(5, 10);
  Vector2ui v2_2ui(3, 15);
  uint32_t result_ui = 29;
  TestSquaredDistance(v1_2ui, v2_2ui, result_ui);

  // Test Vector 3ui: uint32_t, 3D.
  Vector3ui v1_3ui(5, 10, 2);
  Vector3ui v2_3ui(3, 15, 0);
  result_ui = 33;
  TestSquaredDistance(v1_3ui, v2_3ui, result_ui);

  // Test Vector 4ui: uint32_t, 4D.
  Vector4ui v1_4ui(5, 10, 2, 7);
  Vector4ui v2_4ui(3, 15, 0, 9);
  result_ui = 37;
  TestSquaredDistance(v1_4ui, v2_4ui, result_ui);

  // Test Vector 5ui: uint32_t, 5D.
  Vector5ui v1_5ui(5, 10, 2, 7, 1);
  Vector5ui v2_5ui(3, 15, 0, 9, 12);
  result_ui = 158;
  TestSquaredDistance(v1_5ui, v2_5ui, result_ui);
}
+
// Verifies CrossProduct() against the basis-vector identities (e1 x e2 = e3
// and cyclic permutations), anti-commutativity, self-product = 0, and
// orthogonality of the result to both inputs.
TEST(VectorDTest, TestCrossProduct3D) {
  const Vector3i e1(1, 0, 0);
  const Vector3i e2(0, 1, 0);
  const Vector3i e3(0, 0, 1);
  const Vector3i o(0, 0, 0);
  ASSERT_EQ(e3, draco::CrossProduct(e1, e2));
  ASSERT_EQ(e1, draco::CrossProduct(e2, e3));
  ASSERT_EQ(e2, draco::CrossProduct(e3, e1));
  ASSERT_EQ(-e3, draco::CrossProduct(e2, e1));
  ASSERT_EQ(-e1, draco::CrossProduct(e3, e2));
  ASSERT_EQ(-e2, draco::CrossProduct(e1, e3));
  ASSERT_EQ(o, draco::CrossProduct(e1, e1));
  ASSERT_EQ(o, draco::CrossProduct(e2, e2));
  ASSERT_EQ(o, draco::CrossProduct(e3, e3));

  // Orthogonality of result for some general vectors.
  const Vector3i v1(123, -62, 223);
  const Vector3i v2(734, 244, -13);
  const Vector3i orth = draco::CrossProduct(v1, v2);
  ASSERT_EQ(0, v1.Dot(orth));
  ASSERT_EQ(0, v2.Dot(orth));
}
+
// AbsSum() is the L1 norm: sum of absolute component values.
TEST(VectorDTest, TestAbsSum) {
  // Testing const of function and zero.
  const Vector3i v(0, 0, 0);
  ASSERT_EQ(v.AbsSum(), 0);
  // Testing semantic.
  ASSERT_EQ(Vector3i(0, 0, 0).AbsSum(), 0);
  ASSERT_EQ(Vector3i(1, 2, 3).AbsSum(), 6);
  ASSERT_EQ(Vector3i(-1, -2, -3).AbsSum(), 6);
  ASSERT_EQ(Vector3i(-2, 4, -8).AbsSum(), 14);
  // Other dimension.
  ASSERT_EQ(Vector4i(-2, 4, -8, 3).AbsSum(), 17);
}

TEST(VectorDTest, TestMinMaxCoeff) {
  // Test verifies that MinCoeff() and MaxCoeff() functions work as intended.
  const Vector4i vi(-10, 5, 2, 3);
  ASSERT_EQ(vi.MinCoeff(), -10);
  ASSERT_EQ(vi.MaxCoeff(), 5);

  const Vector3f vf(6.f, 1000.f, -101.f);
  ASSERT_EQ(vf.MinCoeff(), -101.f);
  ASSERT_EQ(vf.MaxCoeff(), 1000.f);
}

TEST(VectorDTest, TestOstream) {
  // Tests that the vector can be stored in a provided std::ostream.
  // operator<< writes space-separated components with no trailing space.
  const draco::VectorD<int64_t, 3> vector(1, 2, 3);
  std::stringstream str;
  str << vector << " ";
  ASSERT_EQ(str.str(), "1 2 3 ");
}
+
// The explicit converting constructor truncates excess source components and
// zero-pads missing ones, converting the scalar type along the way.
TEST(VectorDTest, TestConvertConstructor) {
  // Tests that a vector can be constructed from another vector with a different
  // type.
  const draco::VectorD<int64_t, 3> vector(1, 2, 3);

  const draco::VectorD<float, 3> vector3f(vector);
  ASSERT_EQ(vector3f, draco::Vector3f(1.f, 2.f, 3.f));

  const draco::VectorD<float, 2> vector2f(vector);
  ASSERT_EQ(vector2f, draco::Vector2f(1.f, 2.f));

  const draco::VectorD<float, 4> vector4f(vector3f);
  ASSERT_EQ(vector4f, draco::Vector4f(1.f, 2.f, 3.f, 0.f));

  const draco::VectorD<double, 1> vector1d(vector3f);
  ASSERT_EQ(vector1d[0], 1.0);
}

TEST(VectorDTest, TestBinaryOps) {
  // Tests the binary (component-wise) multiplication operator of VectorD.
  const draco::Vector4f vector_0(1.f, 2.3f, 4.2f, -10.f);
  ASSERT_EQ(vector_0 * draco::Vector4f(1.f, 1.f, 1.f, 1.f), vector_0);
  ASSERT_EQ(vector_0 * draco::Vector4f(0.f, 0.f, 0.f, 0.f),
            draco::Vector4f(0.f, 0.f, 0.f, 0.f));
  ASSERT_EQ(vector_0 * draco::Vector4f(0.1f, 0.2f, 0.3f, 0.4f),
            draco::Vector4f(0.1f, 0.46f, 1.26f, -4.f));
}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_reader_factory.cc b/libs/assimp/contrib/draco/src/draco/io/file_reader_factory.cc
new file mode 100644
index 0000000..ac7b092
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_reader_factory.cc
@@ -0,0 +1,45 @@
+#include "draco/io/file_reader_factory.h"
+
+#include <vector>
+
+namespace draco {
+namespace {
+
// Logs |error_string| to stderr together with the source file, line and
// function for easier debugging (this TU has no logging dependency).
#define FILEREADER_LOG_ERROR(error_string)                             \
  do {                                                                 \
    fprintf(stderr, "%s:%d (%s): %s.\n", __FILE__, __LINE__, __func__, \
            error_string);                                             \
  } while (false)

// Returns the process-wide registry of reader open functions. The vector is
// allocated on first use and intentionally never freed, so it stays usable
// during static initialization (readers register from static initializers).
// NOTE(review): with std::nothrow this can be nullptr on allocation failure;
// callers dereference it unchecked.
std::vector<FileReaderFactory::OpenFunction> *GetFileReaderOpenFunctions() {
  static auto open_functions =
      new (std::nothrow) std::vector<FileReaderFactory::OpenFunction>();
  return open_functions;
}
+
+} // namespace
+
// Registers |open_function| with the global registry. Returns false for a
// nullptr function; otherwise returns true when the registry actually grew
// by one entry.
bool FileReaderFactory::RegisterReader(OpenFunction open_function) {
  if (open_function == nullptr) {
    return false;
  }
  auto open_functions = GetFileReaderOpenFunctions();
  const size_t num_readers = open_functions->size();
  open_functions->push_back(open_function);
  return open_functions->size() == num_readers + 1;
}
+
+std::unique_ptr<FileReaderInterface> FileReaderFactory::OpenReader(
+ const std::string &file_name) {
+ for (auto open_function : *GetFileReaderOpenFunctions()) {
+ auto reader = open_function(file_name);
+ if (reader == nullptr) {
+ continue;
+ }
+ return reader;
+ }
+ FILEREADER_LOG_ERROR("No file reader able to open input");
+ return nullptr;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_reader_factory.h b/libs/assimp/contrib/draco/src/draco/io/file_reader_factory.h
new file mode 100644
index 0000000..12bd7a5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_reader_factory.h
@@ -0,0 +1,34 @@
+#ifndef DRACO_IO_FILE_READER_FACTORY_H_
+#define DRACO_IO_FILE_READER_FACTORY_H_
+
+#include <memory>
+#include <string>
+
+#include "draco/io/file_reader_interface.h"
+
+namespace draco {
+
// Static registry of FileReaderInterface factory functions. The class is
// never instantiated; all members are static.
class FileReaderFactory {
 public:
  using OpenFunction =
      std::unique_ptr<FileReaderInterface> (*)(const std::string &file_name);

  FileReaderFactory() = delete;
  FileReaderFactory(const FileReaderFactory &) = delete;
  FileReaderFactory &operator=(const FileReaderFactory &) = delete;
  ~FileReaderFactory() = default;

  // Registers the OpenFunction for a FileReaderInterface and returns true when
  // registration succeeds. A nullptr |open_function| is rejected.
  static bool RegisterReader(OpenFunction open_function);

  // Passes |file_name| to each OpenFunction until one succeeds. Returns nullptr
  // when no reader is found for |file_name|. Otherwise a FileReaderInterface is
  // returned. Functions are tried in registration order.
  static std::unique_ptr<FileReaderInterface> OpenReader(
      const std::string &file_name);
};
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_READER_FACTORY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_reader_factory_test.cc b/libs/assimp/contrib/draco/src/draco/io/file_reader_factory_test.cc
new file mode 100644
index 0000000..d304d63
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_reader_factory_test.cc
@@ -0,0 +1,85 @@
+#include "draco/io/file_reader_factory.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/io/file_reader_interface.h"
+
+namespace draco {
+namespace {
+
// Test reader whose Open() always refuses the file (returns nullptr). Used
// to verify the factory falls through to the next registered reader.
class AlwaysFailFileReader : public FileReaderInterface {
 public:
  static std::unique_ptr<FileReaderInterface> Open(
      const std::string & /*file_name*/) {
    return nullptr;
  }

  AlwaysFailFileReader() = delete;
  AlwaysFailFileReader(const AlwaysFailFileReader &) = delete;
  AlwaysFailFileReader &operator=(const AlwaysFailFileReader &) = delete;
  // Note this isn't overridden as the class can never be instantiated. This
  // avoids an unused function warning.
  // ~AlwaysFailFileReader() override = default;

  bool ReadFileToBuffer(std::vector<char> * /*buffer*/) override {
    return false;
  }

  bool ReadFileToBuffer(std::vector<uint8_t> * /*buffer*/) override {
    return false;
  }

  size_t GetFileSize() override { return 0; }

 private:
  // Set by the static registration initializer below; its value is unused.
  static bool is_registered_;
};
+
// Test reader whose Open() always succeeds and whose reads are no-ops that
// report success.
class AlwaysOkFileReader : public FileReaderInterface {
 public:
  static std::unique_ptr<FileReaderInterface> Open(
      const std::string & /*file_name*/) {
    return std::unique_ptr<AlwaysOkFileReader>(new AlwaysOkFileReader());
  }

  AlwaysOkFileReader(const AlwaysOkFileReader &) = delete;
  AlwaysOkFileReader &operator=(const AlwaysOkFileReader &) = delete;
  ~AlwaysOkFileReader() override = default;

  bool ReadFileToBuffer(std::vector<char> * /*buffer*/) override {
    return true;
  }

  bool ReadFileToBuffer(std::vector<uint8_t> * /*buffer*/) override {
    return true;
  }

  size_t GetFileSize() override { return 0; }

 private:
  // Construction only via Open().
  AlwaysOkFileReader() = default;
  // Set by the static registration initializer below; its value is unused.
  static bool is_registered_;
};
+
// Static-initialization-time registration: evaluating these initializers
// registers both readers with the factory before main() runs, with the
// failing reader first.
bool AlwaysFailFileReader::is_registered_ =
    FileReaderFactory::RegisterReader(AlwaysFailFileReader::Open);

bool AlwaysOkFileReader::is_registered_ =
    FileReaderFactory::RegisterReader(AlwaysOkFileReader::Open);
+
// nullptr open functions must be rejected by RegisterReader().
TEST(FileReaderFactoryTest, RegistrationFail) {
  EXPECT_FALSE(FileReaderFactory::RegisterReader(nullptr));
}

// AlwaysFailFileReader is registered first and returns nullptr, so the
// factory must fall through and return an AlwaysOkFileReader instance.
TEST(FileReaderFactoryTest, OpenReader) {
  auto reader = FileReaderFactory::OpenReader("fake file");
  EXPECT_NE(reader, nullptr);
  std::vector<char> *buffer = nullptr;
  EXPECT_TRUE(reader->ReadFileToBuffer(buffer));
}
+
+} // namespace
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_reader_interface.h b/libs/assimp/contrib/draco/src/draco/io/file_reader_interface.h
new file mode 100644
index 0000000..a6e6a0d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_reader_interface.h
@@ -0,0 +1,32 @@
+#ifndef DRACO_IO_FILE_READER_INTERFACE_H_
+#define DRACO_IO_FILE_READER_INTERFACE_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace draco {
+
// Abstract interface for whole-file readers. Movable but not copyable;
// concrete implementations are produced via FileReaderFactory.
class FileReaderInterface {
 public:
  FileReaderInterface() = default;
  FileReaderInterface(const FileReaderInterface &) = delete;
  FileReaderInterface &operator=(const FileReaderInterface &) = delete;

  FileReaderInterface(FileReaderInterface &&) = default;
  FileReaderInterface &operator=(FileReaderInterface &&) = default;

  // Closes the file.
  virtual ~FileReaderInterface() = default;

  // Reads the entire contents of the input file into |buffer| and returns true.
  virtual bool ReadFileToBuffer(std::vector<char> *buffer) = 0;
  virtual bool ReadFileToBuffer(std::vector<uint8_t> *buffer) = 0;

  // Returns the size of the file.
  virtual size_t GetFileSize() = 0;
};
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_READER_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_reader_test_common.h b/libs/assimp/contrib/draco/src/draco/io/file_reader_test_common.h
new file mode 100644
index 0000000..0d07d4a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_reader_test_common.h
@@ -0,0 +1,13 @@
+#ifndef DRACO_IO_FILE_READER_TEST_COMMON_H_
+#define DRACO_IO_FILE_READER_TEST_COMMON_H_
+
+#include <cstddef>
+
+namespace draco {
+
// Expected byte sizes of shared test fixtures — presumably the car.drc and
// cube_pc.drc files in the Draco testdata directory (TODO confirm).
const size_t kFileSizeCarDrc = 69892;
const size_t kFileSizeCubePcDrc = 224;
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_READER_TEST_COMMON_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_utils.cc b/libs/assimp/contrib/draco/src/draco/io/file_utils.cc
new file mode 100644
index 0000000..f93cbd8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_utils.cc
@@ -0,0 +1,110 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/file_utils.h"
+
+#include "draco/io/file_reader_factory.h"
+#include "draco/io/file_reader_interface.h"
+#include "draco/io/file_writer_factory.h"
+#include "draco/io/file_writer_interface.h"
+#include "draco/io/file_writer_utils.h"
+#include "draco/io/parser_utils.h"
+
+namespace draco {
+
+void SplitPath(const std::string &full_path, std::string *out_folder_path,
+ std::string *out_file_name) {
+ SplitPathPrivate(full_path, out_folder_path, out_file_name);
+}
+
+std::string ReplaceFileExtension(const std::string &in_file_name,
+ const std::string &new_extension) {
+ const auto pos = in_file_name.find_last_of(".");
+ if (pos == std::string::npos) {
+ // No extension found.
+ return in_file_name + "." + new_extension;
+ }
+ return in_file_name.substr(0, pos + 1) + new_extension;
+}
+
+std::string LowercaseFileExtension(const std::string &filename) {
+ const size_t pos = filename.find_last_of('.');
+ if (pos == 0 || pos == std::string::npos || pos == filename.length() - 1) {
+ return "";
+ }
+ return parser::ToLower(filename.substr(pos + 1));
+}
+
+std::string GetFullPath(const std::string &input_file_relative_path,
+ const std::string &sibling_file_full_path) {
+ const auto pos = sibling_file_full_path.find_last_of("/\\");
+ std::string input_file_full_path;
+ if (pos != std::string::npos) {
+ input_file_full_path = sibling_file_full_path.substr(0, pos + 1);
+ }
+ input_file_full_path += input_file_relative_path;
+ return input_file_full_path;
+}
+
+bool ReadFileToBuffer(const std::string &file_name, std::vector<char> *buffer) {
+ std::unique_ptr<FileReaderInterface> file_reader =
+ FileReaderFactory::OpenReader(file_name);
+ if (file_reader == nullptr) {
+ return false;
+ }
+ return file_reader->ReadFileToBuffer(buffer);
+}
+
+bool ReadFileToBuffer(const std::string &file_name,
+ std::vector<uint8_t> *buffer) {
+ std::unique_ptr<FileReaderInterface> file_reader =
+ FileReaderFactory::OpenReader(file_name);
+ if (file_reader == nullptr) {
+ return false;
+ }
+ return file_reader->ReadFileToBuffer(buffer);
+}
+
+bool WriteBufferToFile(const char *buffer, size_t buffer_size,
+ const std::string &file_name) {
+ std::unique_ptr<FileWriterInterface> file_writer =
+ FileWriterFactory::OpenWriter(file_name);
+ if (file_writer == nullptr) {
+ return false;
+ }
+ return file_writer->Write(buffer, buffer_size);
+}
+
+bool WriteBufferToFile(const unsigned char *buffer, size_t buffer_size,
+ const std::string &file_name) {
+ return WriteBufferToFile(reinterpret_cast<const char *>(buffer), buffer_size,
+ file_name);
+}
+
+bool WriteBufferToFile(const void *buffer, size_t buffer_size,
+ const std::string &file_name) {
+ return WriteBufferToFile(reinterpret_cast<const char *>(buffer), buffer_size,
+ file_name);
+}
+
+size_t GetFileSize(const std::string &file_name) {
+ std::unique_ptr<FileReaderInterface> file_reader =
+ FileReaderFactory::OpenReader(file_name);
+ if (file_reader == nullptr) {
+ return 0;
+ }
+ return file_reader->GetFileSize();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_utils.h b/libs/assimp/contrib/draco/src/draco/io/file_utils.h
new file mode 100644
index 0000000..4b734e0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_utils.h
@@ -0,0 +1,73 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_FILE_UTILS_H_
+#define DRACO_IO_FILE_UTILS_H_
+
+#include <string>
+#include <vector>
+
+namespace draco {
+
+// Splits full path to a file into a folder path + file name.
+// |out_folder_path| will contain the path to the folder containing the file
+// excluding the final slash. If no folder is specified in the |full_path|, then
+// |out_folder_path| is set to "."
+void SplitPath(const std::string &full_path, std::string *out_folder_path,
+ std::string *out_file_name);
+
+// Replaces file extension in |in_file_name| with |new_extension|.
+// If |in_file_name| does not have any extension, the extension is appended.
+std::string ReplaceFileExtension(const std::string &in_file_name,
+ const std::string &new_extension);
+
+// Returns the file extension in lowercase if present, else "". Extension is
+// defined as the string after the last '.' character. If the file starts with
+// '.' (e.g. Linux hidden files), the first delimiter is ignored.
+std::string LowercaseFileExtension(const std::string &filename);
+
+// Given a path of the input file |input_file_relative_path| relative to the
+// parent directory of |sibling_file_full_path|, this function returns full path
+// to the input file. If |sibling_file_full_path| has no directory, the relative
+// path itself |input_file_relative_path| is returned. A common use case is for
+// the |input_file_relative_path| to be just a file name. See usage examples in
+// the unit test.
+std::string GetFullPath(const std::string &input_file_relative_path,
+ const std::string &sibling_file_full_path);
+
+// Convenience method. Uses draco::FileReaderFactory internally. Reads contents
+// of file referenced by |file_name| into |buffer| and returns true upon
+// success.
+bool ReadFileToBuffer(const std::string &file_name, std::vector<char> *buffer);
+bool ReadFileToBuffer(const std::string &file_name,
+ std::vector<uint8_t> *buffer);
+
+// Convenience method. Uses draco::FileWriterFactory internally. Writes contents
+// of |buffer| to file referred to by |file_name|. File is overwritten if it
+// exists. Returns true after successful write.
+bool WriteBufferToFile(const char *buffer, size_t buffer_size,
+ const std::string &file_name);
+bool WriteBufferToFile(const unsigned char *buffer, size_t buffer_size,
+ const std::string &file_name);
+bool WriteBufferToFile(const void *buffer, size_t buffer_size,
+ const std::string &file_name);
+
+// Convenience method. Uses draco::FileReaderFactory internally. Returns size of
+// file referenced by |file_name|. Returns 0 when referenced file is empty or
+// does not exist.
+size_t GetFileSize(const std::string &file_name);
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_utils_test.cc b/libs/assimp/contrib/draco/src/draco/io/file_utils_test.cc
new file mode 100644
index 0000000..4085ff0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_utils_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/file_utils.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+
+namespace {
+
+TEST(FileUtilsTest, SplitsPath) {
+ // Tests that the function SplitPath correctly splits a set of test paths.
+ std::string folder_path, file_name;
+ draco::SplitPath("file.x", &folder_path, &file_name);
+ ASSERT_EQ(folder_path, ".");
+ ASSERT_EQ(file_name, "file.x");
+
+ draco::SplitPath("a/b/file.y", &folder_path, &file_name);
+ ASSERT_EQ(folder_path, "a/b");
+ ASSERT_EQ(file_name, "file.y");
+
+ draco::SplitPath("//a/b/c/d/file.z", &folder_path, &file_name);
+ ASSERT_EQ(folder_path, "//a/b/c/d");
+ ASSERT_EQ(file_name, "file.z");
+}
+
+TEST(FileUtilsTest, ReplaceExtension) {
+ // Tests that the function ReplaceFileExtension correctly replaces extensions
+ // of specified files.
+ ASSERT_EQ(draco::ReplaceFileExtension("a.abc", "x"), "a.x");
+ ASSERT_EQ(draco::ReplaceFileExtension("abc", "x"), "abc.x"); // No extension
+ ASSERT_EQ(draco::ReplaceFileExtension("a/b/c.d", "xyz"), "a/b/c.xyz");
+}
+
+TEST(FileUtilsTest, LowercaseFileExtension) {
+ ASSERT_EQ(draco::LowercaseFileExtension("image.jpeg"), "jpeg");
+ ASSERT_EQ(draco::LowercaseFileExtension("image.JPEG"), "jpeg");
+ ASSERT_EQ(draco::LowercaseFileExtension("image.png"), "png");
+ ASSERT_EQ(draco::LowercaseFileExtension("image.pNg"), "png");
+ ASSERT_EQ(draco::LowercaseFileExtension("FILE.glb"), "glb");
+ ASSERT_EQ(draco::LowercaseFileExtension(".file.gltf"), "gltf");
+ ASSERT_EQ(draco::LowercaseFileExtension("the.file.gltf"), "gltf");
+ ASSERT_EQ(draco::LowercaseFileExtension("FILE_glb"), "");
+ ASSERT_EQ(draco::LowercaseFileExtension(""), "");
+ ASSERT_EQ(draco::LowercaseFileExtension("image."), "");
+}
+
+TEST(FileUtilsTest, GetFullPath) {
+ // Tests that full path is returned when a sibling file has full path.
+ ASSERT_EQ(draco::GetFullPath("xo.png", "/d/i/r/xo.gltf"), "/d/i/r/xo.png");
+ ASSERT_EQ(draco::GetFullPath("buf/01.bin", "dir/xo.gltf"), "dir/buf/01.bin");
+ ASSERT_EQ(draco::GetFullPath("xo.mtl", "/xo.obj"), "/xo.mtl");
+
+ // Tests that only file name is returned when a sibling file has no full path.
+ ASSERT_EQ(draco::GetFullPath("xo.mtl", "xo.obj"), "xo.mtl");
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_writer_factory.cc b/libs/assimp/contrib/draco/src/draco/io/file_writer_factory.cc
new file mode 100644
index 0000000..cb68516
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_writer_factory.cc
@@ -0,0 +1,45 @@
+#include "draco/io/file_writer_factory.h"
+
+#include <vector>
+
+namespace draco {
+namespace {
+
+#define FILEWRITER_LOG_ERROR(error_string) \
+ do { \
+ fprintf(stderr, "%s:%d (%s): %s.\n", __FILE__, __LINE__, __func__, \
+ error_string); \
+ } while (false)
+
+std::vector<FileWriterFactory::OpenFunction> *GetFileWriterOpenFunctions() {
+ static auto open_functions =
+ new (std::nothrow) std::vector<FileWriterFactory::OpenFunction>();
+ return open_functions;
+}
+
+} // namespace
+
+bool FileWriterFactory::RegisterWriter(OpenFunction open_function) {
+ if (open_function == nullptr) {
+ return false;
+ }
+ auto open_functions = GetFileWriterOpenFunctions();
+ const size_t num_writers = open_functions->size();
+ open_functions->push_back(open_function);
+ return open_functions->size() == num_writers + 1;
+}
+
+std::unique_ptr<FileWriterInterface> FileWriterFactory::OpenWriter(
+ const std::string &file_name) {
+ for (auto open_function : *GetFileWriterOpenFunctions()) {
+ auto writer = open_function(file_name);
+ if (writer == nullptr) {
+ continue;
+ }
+ return writer;
+ }
+ FILEWRITER_LOG_ERROR("No file writer able to open output");
+ return nullptr;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_writer_factory.h b/libs/assimp/contrib/draco/src/draco/io/file_writer_factory.h
new file mode 100644
index 0000000..ecf735d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_writer_factory.h
@@ -0,0 +1,34 @@
+#ifndef DRACO_IO_FILE_WRITER_FACTORY_H_
+#define DRACO_IO_FILE_WRITER_FACTORY_H_
+
+#include <memory>
+#include <string>
+
+#include "draco/io/file_writer_interface.h"
+
+namespace draco {
+
+class FileWriterFactory {
+ public:
+ using OpenFunction =
+ std::unique_ptr<FileWriterInterface> (*)(const std::string &file_name);
+
+ FileWriterFactory() = delete;
+ FileWriterFactory(const FileWriterFactory &) = delete;
+ FileWriterFactory &operator=(const FileWriterFactory &) = delete;
+ ~FileWriterFactory() = default;
+
+ // Registers the OpenFunction for a FileWriterInterface and returns true when
+ // registration succeeds.
+ static bool RegisterWriter(OpenFunction open_function);
+
+ // Passes |file_name| to each OpenFunction until one succeeds. Returns nullptr
+ // when no writer is found for |file_name|. Otherwise a FileWriterInterface is
+ // returned.
+ static std::unique_ptr<FileWriterInterface> OpenWriter(
+ const std::string &file_name);
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_WRITER_FACTORY_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_writer_factory_test.cc b/libs/assimp/contrib/draco/src/draco/io/file_writer_factory_test.cc
new file mode 100644
index 0000000..fbad5cf
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_writer_factory_test.cc
@@ -0,0 +1,70 @@
+#include "draco/io/file_writer_factory.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/io/file_writer_interface.h"
+
+namespace draco {
+namespace {
+
+class AlwaysFailFileWriter : public FileWriterInterface {
+ public:
+ static std::unique_ptr<FileWriterInterface> Open(
+ const std::string & /*file_name*/) {
+ return nullptr;
+ }
+
+ AlwaysFailFileWriter() = delete;
+ AlwaysFailFileWriter(const AlwaysFailFileWriter &) = delete;
+ AlwaysFailFileWriter &operator=(const AlwaysFailFileWriter &) = delete;
+ // Note this isn't overridden as the class can never be instantiated. This
+ // avoids an unused function warning.
+ // ~AlwaysFailFileWriter() override = default;
+
+ bool Write(const char * /*buffer*/, size_t /*size*/) override {
+ return false;
+ }
+
+ private:
+ static bool is_registered_;
+};
+
+class AlwaysOkFileWriter : public FileWriterInterface {
+ public:
+ static std::unique_ptr<FileWriterInterface> Open(
+ const std::string & /*file_name*/) {
+ return std::unique_ptr<AlwaysOkFileWriter>(new AlwaysOkFileWriter());
+ }
+
+ AlwaysOkFileWriter(const AlwaysOkFileWriter &) = delete;
+ AlwaysOkFileWriter &operator=(const AlwaysOkFileWriter &) = delete;
+ ~AlwaysOkFileWriter() override = default;
+
+ bool Write(const char * /*buffer*/, size_t /*size*/) override { return true; }
+
+ private:
+ AlwaysOkFileWriter() = default;
+ static bool is_registered_;
+};
+
+bool AlwaysFailFileWriter::is_registered_ =
+ FileWriterFactory::RegisterWriter(AlwaysFailFileWriter::Open);
+
+bool AlwaysOkFileWriter::is_registered_ =
+ FileWriterFactory::RegisterWriter(AlwaysOkFileWriter::Open);
+
+TEST(FileWriterFactoryTest, RegistrationFail) {
+ EXPECT_FALSE(FileWriterFactory::RegisterWriter(nullptr));
+}
+
+TEST(FileWriterFactoryTest, OpenWriter) {
+ auto writer = FileWriterFactory::OpenWriter("fake file");
+ EXPECT_NE(writer, nullptr);
+ EXPECT_TRUE(writer->Write(nullptr, 0u));
+}
+
+} // namespace
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_writer_interface.h b/libs/assimp/contrib/draco/src/draco/io/file_writer_interface.h
new file mode 100644
index 0000000..719f7cc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_writer_interface.h
@@ -0,0 +1,26 @@
+#ifndef DRACO_IO_FILE_WRITER_INTERFACE_H_
+#define DRACO_IO_FILE_WRITER_INTERFACE_H_
+
+#include <cstddef>
+
+namespace draco {
+
+class FileWriterInterface {
+ public:
+ FileWriterInterface() = default;
+ FileWriterInterface(const FileWriterInterface &) = delete;
+ FileWriterInterface &operator=(const FileWriterInterface &) = delete;
+
+ FileWriterInterface(FileWriterInterface &&) = default;
+ FileWriterInterface &operator=(FileWriterInterface &&) = default;
+
+ // Closes the file.
+ virtual ~FileWriterInterface() = default;
+
+ // Writes |size| bytes from |buffer| to file.
+ virtual bool Write(const char *buffer, size_t size) = 0;
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_WRITER_INTERFACE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_writer_utils.cc b/libs/assimp/contrib/draco/src/draco/io/file_writer_utils.cc
new file mode 100644
index 0000000..bcadccf
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_writer_utils.cc
@@ -0,0 +1,57 @@
+#include "draco/io/file_writer_utils.h"
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <string>
+
+#include "draco/draco_features.h"
+
+namespace draco {
+
+void SplitPathPrivate(const std::string &full_path,
+ std::string *out_folder_path,
+ std::string *out_file_name) {
+ const auto pos = full_path.find_last_of("/\\");
+ if (pos != std::string::npos) {
+ if (out_folder_path) {
+ *out_folder_path = full_path.substr(0, pos);
+ }
+ if (out_file_name) {
+ *out_file_name = full_path.substr(pos + 1, full_path.length());
+ }
+ } else {
+ if (out_folder_path) {
+ *out_folder_path = ".";
+ }
+ if (out_file_name) {
+ *out_file_name = full_path;
+ }
+ }
+}
+
+bool DirectoryExists(const std::string &path) {
+ struct stat path_stat;
+
+ // Check if |path| exists.
+ if (stat(path.c_str(), &path_stat) != 0) {
+ return false;
+ }
+
+ // Check if |path| is a directory.
+ if (path_stat.st_mode & S_IFDIR) {
+ return true;
+ }
+ return false;
+}
+
+bool CheckAndCreatePathForFile(const std::string &filename) {
+ std::string path;
+ std::string basename;
+ SplitPathPrivate(filename, &path, &basename);
+
+ const bool directory_exists = DirectoryExists(path);
+ return directory_exists;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/file_writer_utils.h b/libs/assimp/contrib/draco/src/draco/io/file_writer_utils.h
new file mode 100644
index 0000000..e5ba283
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/file_writer_utils.h
@@ -0,0 +1,38 @@
+// Copyright 2020 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_FILE_WRITER_UTILS_H_
+#define DRACO_IO_FILE_WRITER_UTILS_H_
+
+#include <string>
+
+namespace draco {
+
+// Splits full path to a file into a folder path + file name.
+// |out_folder_path| will contain the path to the folder containing the file
+// excluding the final slash. If no folder is specified in the |full_path|, then
+// |out_folder_path| is set to "."
+void SplitPathPrivate(const std::string &full_path,
+ std::string *out_folder_path, std::string *out_file_name);
+
+// Checks if |path| exists and whether it is a directory.
+bool DirectoryExists(const std::string &path);
+
+// Checks that the directory portion of |filename| exists. NOTE(review): unlike
+// upstream draco, this implementation does not attempt to create the path.
+bool CheckAndCreatePathForFile(const std::string &filename);
+
+} // namespace draco
+
+#endif // DRACO_IO_FILE_WRITER_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/mesh_io.cc b/libs/assimp/contrib/draco/src/draco/io/mesh_io.cc
new file mode 100644
index 0000000..e0dc69c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/mesh_io.cc
@@ -0,0 +1,87 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/mesh_io.h"
+
+#include <fstream>
+#include <string>
+
+#include "draco/io/file_utils.h"
+#include "draco/io/obj_decoder.h"
+#include "draco/io/ply_decoder.h"
+
+namespace draco {
+
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(const std::string &file_name) {
+ const Options options;
+ return ReadMeshFromFile(file_name, options, nullptr);
+}
+
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(const std::string &file_name,
+ bool use_metadata) {
+ Options options;
+ options.SetBool("use_metadata", use_metadata);
+ return ReadMeshFromFile(file_name, options, nullptr);
+}
+
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(const std::string &file_name,
+ const Options &options) {
+ return ReadMeshFromFile(file_name, options, nullptr);
+}
+
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(
+ const std::string &file_name, const Options &options,
+ std::vector<std::string> *mesh_files) {
+ std::unique_ptr<Mesh> mesh(new Mesh());
+ // Analyze file extension.
+ const std::string extension = LowercaseFileExtension(file_name);
+ if (extension != "gltf" && mesh_files) {
+ // The GLTF decoder will fill |mesh_files|, but for other file types we set
+ // the root file here to avoid duplicating code.
+ mesh_files->push_back(file_name);
+ }
+ if (extension == "obj") {
+ // Wavefront OBJ file format.
+ ObjDecoder obj_decoder;
+ obj_decoder.set_use_metadata(options.GetBool("use_metadata", false));
+ const Status obj_status = obj_decoder.DecodeFromFile(file_name, mesh.get());
+ if (!obj_status.ok()) {
+ return obj_status;
+ }
+ return std::move(mesh);
+ }
+ if (extension == "ply") {
+    // Stanford PLY file format.
+ PlyDecoder ply_decoder;
+ DRACO_RETURN_IF_ERROR(ply_decoder.DecodeFromFile(file_name, mesh.get()));
+ return std::move(mesh);
+ }
+
+  // Not an obj or ply file. Assume the file was encoded with one of the
+ // draco encoding methods.
+ std::vector<char> file_data;
+ if (!ReadFileToBuffer(file_name, &file_data)) {
+ return Status(Status::DRACO_ERROR, "Unable to read input file.");
+ }
+ DecoderBuffer buffer;
+ buffer.Init(file_data.data(), file_data.size());
+ Decoder decoder;
+ auto statusor = decoder.DecodeMeshFromBuffer(&buffer);
+ if (!statusor.ok() || statusor.value() == nullptr) {
+ return Status(Status::DRACO_ERROR, "Error decoding input.");
+ }
+ return std::move(statusor).value();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/mesh_io.h b/libs/assimp/contrib/draco/src/draco/io/mesh_io.h
new file mode 100644
index 0000000..9af178c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/mesh_io.h
@@ -0,0 +1,107 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_MESH_IO_H_
+#define DRACO_IO_MESH_IO_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/decode.h"
+#include "draco/compression/expert_encode.h"
+#include "draco/core/options.h"
+
+namespace draco {
+
+template <typename OutStreamT>
+OutStreamT WriteMeshIntoStream(const Mesh *mesh, OutStreamT &&os,
+ MeshEncoderMethod method,
+ const EncoderOptions &options) {
+ EncoderBuffer buffer;
+ EncoderOptions local_options = options;
+ ExpertEncoder encoder(*mesh);
+ encoder.Reset(local_options);
+ encoder.SetEncodingMethod(method);
+ if (!encoder.EncodeToBuffer(&buffer).ok()) {
+ os.setstate(std::ios_base::badbit);
+ return os;
+ }
+
+ os.write(static_cast<const char *>(buffer.data()), buffer.size());
+
+ return os;
+}
+
+template <typename OutStreamT>
+OutStreamT WriteMeshIntoStream(const Mesh *mesh, OutStreamT &&os,
+ MeshEncoderMethod method) {
+ const EncoderOptions options = EncoderOptions::CreateDefaultOptions();
+ return WriteMeshIntoStream(mesh, os, method, options);
+}
+
+template <typename OutStreamT>
+OutStreamT &WriteMeshIntoStream(const Mesh *mesh, OutStreamT &&os) {
+ return WriteMeshIntoStream(mesh, os, MESH_EDGEBREAKER_ENCODING);
+}
+
+template <typename InStreamT>
+InStreamT &ReadMeshFromStream(std::unique_ptr<Mesh> *mesh, InStreamT &&is) {
+  // Determine the size of the stream and read its contents into a vector.
+ const auto start_pos = is.tellg();
+ is.seekg(0, std::ios::end);
+ const std::streampos is_size = is.tellg() - start_pos;
+ is.seekg(start_pos);
+ std::vector<char> data(is_size);
+ is.read(&data[0], is_size);
+
+ // Create a mesh from that data.
+ DecoderBuffer buffer;
+ buffer.Init(&data[0], data.size());
+ Decoder decoder;
+ auto statusor = decoder.DecodeMeshFromBuffer(&buffer);
+ *mesh = std::move(statusor).value();
+ if (!statusor.ok() || *mesh == nullptr) {
+ is.setstate(std::ios_base::badbit);
+ }
+
+ return is;
+}
+
+// Reads a mesh from a file. The function automatically chooses the correct
+// decoder based on the extension of the files. Currently, .obj and .ply files
+// are supported. Other file extensions are processed by the default
+// draco::MeshDecoder.
+// Returns nullptr with an error status if the decoding failed.
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(const std::string &file_name);
+
+// Reads a mesh from a file. The function does the same thing as the previous
+// one except using metadata to encode additional information when
+// |use_metadata| is set to true.
+// Returns nullptr with an error status if the decoding failed.
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(const std::string &file_name,
+ bool use_metadata);
+
+// Reads a mesh from a file. Reading is configured with |options|:
+// use_metadata : Read obj file info like material names and object names into
+// metadata. Default is false.
+// The second form returns the files associated with the mesh via the
+// |mesh_files| argument.
+// Returns nullptr with an error status if the decoding failed.
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(const std::string &file_name,
+ const Options &options);
+StatusOr<std::unique_ptr<Mesh>> ReadMeshFromFile(
+ const std::string &file_name, const Options &options,
+ std::vector<std::string> *mesh_files);
+
+} // namespace draco
+
+#endif // DRACO_IO_MESH_IO_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/obj_decoder.cc b/libs/assimp/contrib/draco/src/draco/io/obj_decoder.cc
new file mode 100644
index 0000000..9b4eab6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/obj_decoder.cc
@@ -0,0 +1,708 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/obj_decoder.h"
+
+#include <cctype>
+#include <cmath>
+
+#include "draco/io/file_utils.h"
+#include "draco/io/parser_utils.h"
+#include "draco/metadata/geometry_metadata.h"
+
+namespace draco {
+
+// Constructs a decoder in counting mode with all element counters zeroed,
+// all attribute ids unset (-1), input-value deduplication enabled, metadata
+// disabled, and no output geometry attached yet.
+ObjDecoder::ObjDecoder()
+    : counting_mode_(true),
+      num_obj_faces_(0),
+      num_positions_(0),
+      num_tex_coords_(0),
+      num_normals_(0),
+      num_materials_(0),
+      last_sub_obj_id_(0),
+      pos_att_id_(-1),
+      tex_att_id_(-1),
+      norm_att_id_(-1),
+      material_att_id_(-1),
+      sub_obj_att_id_(-1),
+      deduplicate_input_values_(true),
+      last_material_id_(0),
+      use_metadata_(false),
+      out_mesh_(nullptr),
+      out_point_cloud_(nullptr) {}
+
+// Decodes |file_name| into |out_mesh|. Records the mesh target and delegates
+// to the PointCloud overload, which performs the actual decoding.
+Status ObjDecoder::DecodeFromFile(const std::string &file_name,
+                                  Mesh *out_mesh) {
+  out_mesh_ = out_mesh;
+  return DecodeFromFile(file_name, static_cast<PointCloud *>(out_mesh));
+}
+
+// Reads the whole file into an in-memory buffer and decodes it from there.
+// |input_file_name_| is kept so relative material-library paths can be
+// resolved later (see ParseMaterialFile).
+Status ObjDecoder::DecodeFromFile(const std::string &file_name,
+                                  PointCloud *out_point_cloud) {
+  std::vector<char> buffer;
+  if (!ReadFileToBuffer(file_name, &buffer)) {
+    return Status(Status::DRACO_ERROR, "Unable to read input file.");
+  }
+  buffer_.Init(buffer.data(), buffer.size());
+
+  out_point_cloud_ = out_point_cloud;
+  input_file_name_ = file_name;
+  return DecodeInternal();
+}
+
+// Decodes the obj data in |buffer| into |out_mesh|; delegates to the
+// PointCloud overload after recording the mesh target.
+Status ObjDecoder::DecodeFromBuffer(DecoderBuffer *buffer, Mesh *out_mesh) {
+  out_mesh_ = out_mesh;
+  return DecodeFromBuffer(buffer, static_cast<PointCloud *>(out_mesh));
+}
+
+// Decodes the obj data in |buffer| into |out_point_cloud|. The decoder works
+// on its own internal buffer initialized from the remaining bytes of the
+// caller's buffer.
+Status ObjDecoder::DecodeFromBuffer(DecoderBuffer *buffer,
+                                    PointCloud *out_point_cloud) {
+  out_point_cloud_ = out_point_cloud;
+  buffer_.Init(buffer->data_head(), buffer->remaining_size());
+  return DecodeInternal();
+}
+
+// Runs the two-pass decode: the first pass counts elements and is used to
+// allocate attributes; the second pass re-parses the same buffer and fills
+// in the attribute values and point mappings.
+Status ObjDecoder::DecodeInternal() {
+  // In the first pass, count the number of different elements in the geometry.
+  // In case the desired output is just a point cloud (i.e., when
+  // out_mesh_ == nullptr) the decoder will ignore all information about the
+  // connectivity that may be included in the source data.
+  counting_mode_ = true;
+  ResetCounters();
+  material_name_to_id_.clear();
+  last_sub_obj_id_ = 0;
+  // Parse all lines.
+  Status status(Status::OK);
+  while (ParseDefinition(&status) && status.ok()) {
+  }
+  if (!status.ok()) {
+    return status;
+  }
+
+  bool use_identity_mapping = false;
+  if (num_obj_faces_ == 0) {
+    // Mesh has no faces. In this case we try to read the geometry as a point
+    // cloud where every attribute entry is a point.
+
+    // Ensure the number of all entries is same for all attributes.
+    if (num_positions_ == 0) {
+      return Status(Status::DRACO_ERROR, "No position attribute");
+    }
+    if (num_tex_coords_ > 0 && num_tex_coords_ != num_positions_) {
+      return Status(Status::DRACO_ERROR,
+                    "Invalid number of texture coordinates for a point cloud");
+    }
+    if (num_normals_ > 0 && num_normals_ != num_positions_) {
+      return Status(Status::DRACO_ERROR,
+                    "Invalid number of normals for a point cloud");
+    }
+
+    out_mesh_ = nullptr;  // Treat the output geometry as a point cloud.
+    use_identity_mapping = true;
+  }
+
+  // Initialize point cloud and mesh properties.
+  if (out_mesh_) {
+    // Start decoding a mesh with the given number of faces. For point clouds we
+    // silently ignore all data about the mesh connectivity.
+    out_mesh_->SetNumFaces(num_obj_faces_);
+  }
+  // Every (triangulated) face contributes three corner points; faceless
+  // inputs get one point per position entry.
+  if (num_obj_faces_ > 0) {
+    out_point_cloud_->set_num_points(3 * num_obj_faces_);
+  } else {
+    out_point_cloud_->set_num_points(num_positions_);
+  }
+
+  // Add attributes if they are present in the input data.
+  if (num_positions_ > 0) {
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::POSITION, nullptr, 3, DT_FLOAT32, false,
+            sizeof(float) * 3, 0);
+    pos_att_id_ = out_point_cloud_->AddAttribute(va, use_identity_mapping,
+                                                 num_positions_);
+  }
+  if (num_tex_coords_ > 0) {
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::TEX_COORD, nullptr, 2, DT_FLOAT32, false,
+            sizeof(float) * 2, 0);
+    tex_att_id_ = out_point_cloud_->AddAttribute(va, use_identity_mapping,
+                                                 num_tex_coords_);
+  }
+  if (num_normals_ > 0) {
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::NORMAL, nullptr, 3, DT_FLOAT32, false,
+            sizeof(float) * 3, 0);
+    norm_att_id_ =
+        out_point_cloud_->AddAttribute(va, use_identity_mapping, num_normals_);
+  }
+  if (num_materials_ > 0 && num_obj_faces_ > 0) {
+    GeometryAttribute va;
+    const auto geometry_attribute_type = GeometryAttribute::GENERIC;
+    // Pick the narrowest integer type that can hold all material ids.
+    if (num_materials_ < 256) {
+      va.Init(geometry_attribute_type, nullptr, 1, DT_UINT8, false, 1, 0);
+    } else if (num_materials_ < (1 << 16)) {
+      va.Init(geometry_attribute_type, nullptr, 1, DT_UINT16, false, 2, 0);
+    } else {
+      va.Init(geometry_attribute_type, nullptr, 1, DT_UINT32, false, 4, 0);
+    }
+    material_att_id_ =
+        out_point_cloud_->AddAttribute(va, false, num_materials_);
+
+    // Fill the material entries.
+    // NOTE(review): this copies the low-order bytes of the int |i| into a
+    // 1/2/4-byte attribute entry, which presumes little-endian layout —
+    // confirm on big-endian targets.
+    for (int i = 0; i < num_materials_; ++i) {
+      const AttributeValueIndex avi(i);
+      out_point_cloud_->attribute(material_att_id_)->SetAttributeValue(avi, &i);
+    }
+
+    if (use_metadata_) {
+      // Use metadata to store the name of materials.
+      std::unique_ptr<AttributeMetadata> material_metadata =
+          std::unique_ptr<AttributeMetadata>(new AttributeMetadata());
+      material_metadata->AddEntryString("name", "material");
+      // Add all material names.
+      for (const auto &itr : material_name_to_id_) {
+        material_metadata->AddEntryInt(itr.first, itr.second);
+      }
+      if (!material_file_name_.empty()) {
+        material_metadata->AddEntryString("file_name", material_file_name_);
+      }
+
+      out_point_cloud_->AddAttributeMetadata(material_att_id_,
+                                             std::move(material_metadata));
+    }
+  }
+  if (!obj_name_to_id_.empty() && num_obj_faces_ > 0) {
+    GeometryAttribute va;
+    if (obj_name_to_id_.size() < 256) {
+      va.Init(GeometryAttribute::GENERIC, nullptr, 1, DT_UINT8, false, 1, 0);
+    } else if (obj_name_to_id_.size() < (1 << 16)) {
+      va.Init(GeometryAttribute::GENERIC, nullptr, 1, DT_UINT16, false, 2, 0);
+    } else {
+      va.Init(GeometryAttribute::GENERIC, nullptr, 1, DT_UINT32, false, 4, 0);
+    }
+    sub_obj_att_id_ = out_point_cloud_->AddAttribute(
+        va, false, static_cast<uint32_t>(obj_name_to_id_.size()));
+    // Fill the sub object id entries.
+    for (const auto &itr : obj_name_to_id_) {
+      const AttributeValueIndex i(itr.second);
+      out_point_cloud_->attribute(sub_obj_att_id_)->SetAttributeValue(i, &i);
+    }
+    if (use_metadata_) {
+      // Use metadata to store the name of materials.
+      std::unique_ptr<AttributeMetadata> sub_obj_metadata =
+          std::unique_ptr<AttributeMetadata>(new AttributeMetadata());
+      sub_obj_metadata->AddEntryString("name", "sub_obj");
+      // Add all sub object names.
+      for (const auto &itr : obj_name_to_id_) {
+        const AttributeValueIndex i(itr.second);
+        sub_obj_metadata->AddEntryInt(itr.first, itr.second);
+      }
+      out_point_cloud_->AddAttributeMetadata(sub_obj_att_id_,
+                                             std::move(sub_obj_metadata));
+    }
+  }
+
+  // Perform a second iteration of parsing and fill all the data.
+  counting_mode_ = false;
+  ResetCounters();
+  // Start parsing from the beginning of the buffer again.
+  buffer()->StartDecodingFrom(0);
+  while (ParseDefinition(&status) && status.ok()) {
+  }
+  if (!status.ok()) {
+    return status;
+  }
+  if (out_mesh_) {
+    // Add faces with identity mapping between vertex and corner indices.
+    // Duplicate vertices will get removed later.
+    Mesh::Face face;
+    for (FaceIndex i(0); i < num_obj_faces_; ++i) {
+      for (int c = 0; c < 3; ++c) {
+        face[c] = 3 * i.value() + c;
+      }
+      out_mesh_->SetFace(i, face);
+    }
+  }
+
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+  if (deduplicate_input_values_) {
+    out_point_cloud_->DeduplicateAttributeValues();
+  }
+#endif
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+  out_point_cloud_->DeduplicatePointIds();
+#endif
+  return status;
+}
+
+// Zeroes the per-pass element counters and the "last seen" ids. Called before
+// each of the two parsing passes so both passes index attribute values from
+// zero.
+void ObjDecoder::ResetCounters() {
+  num_obj_faces_ = 0;
+  num_positions_ = 0;
+  num_tex_coords_ = 0;
+  num_normals_ = 0;
+  last_material_id_ = 0;
+  last_sub_obj_id_ = 0;
+}
+
+// Dispatches one input line to the specialized Parse* handlers in order.
+// Each handler returns true once it has consumed the line (even on a parse
+// error, which is reported through |status|). Unrecognized lines are
+// skipped. Returns false only when the end of the buffer is reached.
+bool ObjDecoder::ParseDefinition(Status *status) {
+  char c;
+  parser::SkipWhitespace(buffer());
+  if (!buffer()->Peek(&c)) {
+    // End of file reached.
+    return false;
+  }
+  if (c == '#') {
+    // Comment, ignore the line.
+    parser::SkipLine(buffer());
+    return true;
+  }
+  if (ParseVertexPosition(status)) {
+    return true;
+  }
+  if (ParseNormal(status)) {
+    return true;
+  }
+  if (ParseTexCoord(status)) {
+    return true;
+  }
+  if (ParseFace(status)) {
+    return true;
+  }
+  if (ParseMaterial(status)) {
+    return true;
+  }
+  if (ParseMaterialLib(status)) {
+    return true;
+  }
+  if (ParseObject(status)) {
+    return true;
+  }
+  // No known definition was found. Ignore the line.
+  parser::SkipLine(buffer());
+  return true;
+}
+
+// Parses a "v" (vertex position) definition. In counting mode only the
+// position counter is incremented; otherwise three floats are parsed and
+// stored in the position attribute. Returns false when the line is not a
+// vertex definition; returns true once the line has been consumed, with any
+// parse error reported through |status|.
+bool ObjDecoder::ParseVertexPosition(Status *status) {
+  std::array<char, 2> c;
+  if (!buffer()->Peek(&c)) {
+    return false;
+  }
+  // Accept both "v " and "v\t": the OBJ format separates the statement
+  // keyword from its arguments with arbitrary whitespace, and tab-separated
+  // files were previously (and silently) ignored. '\t' cannot collide with
+  // the "vn" / "vt" keywords.
+  if (c[0] != 'v' || (c[1] != ' ' && c[1] != '\t')) {
+    return false;
+  }
+  // Vertex definition found!
+  buffer()->Advance(2);
+  if (!counting_mode_) {
+    // Parse three float numbers for vertex position coordinates.
+    float val[3];
+    for (int i = 0; i < 3; ++i) {
+      parser::SkipWhitespace(buffer());
+      if (!parser::ParseFloat(buffer(), val + i)) {
+        *status = Status(Status::DRACO_ERROR, "Failed to parse a float number");
+        // The definition is processed so return true.
+        return true;
+      }
+    }
+    out_point_cloud_->attribute(pos_att_id_)
+        ->SetAttributeValue(AttributeValueIndex(num_positions_), val);
+  }
+  ++num_positions_;
+  parser::SkipLine(buffer());
+  return true;
+}
+
+// Parses a "vn" (normal) definition. In counting mode only the normal
+// counter is incremented; otherwise three floats are parsed and stored in
+// the normal attribute. Returns false when the line is not a normal
+// definition.
+bool ObjDecoder::ParseNormal(Status *status) {
+  std::array<char, 2> c;
+  if (!buffer()->Peek(&c)) {
+    return false;
+  }
+  if (c[0] != 'v' || c[1] != 'n') {
+    return false;
+  }
+  // Normal definition found!
+  buffer()->Advance(2);
+  if (!counting_mode_) {
+    // Parse three float numbers for the normal vector.
+    float val[3];
+    for (int i = 0; i < 3; ++i) {
+      parser::SkipWhitespace(buffer());
+      if (!parser::ParseFloat(buffer(), val + i)) {
+        *status = Status(Status::DRACO_ERROR, "Failed to parse a float number");
+        // The definition is processed so return true.
+        return true;
+      }
+    }
+    out_point_cloud_->attribute(norm_att_id_)
+        ->SetAttributeValue(AttributeValueIndex(num_normals_), val);
+  }
+  ++num_normals_;
+  parser::SkipLine(buffer());
+  return true;
+}
+
+// Parses a "vt" (texture coordinate) definition. In counting mode only the
+// counter is incremented; otherwise two floats are parsed and stored in the
+// texture-coordinate attribute. Returns false when the line is not a "vt"
+// definition.
+bool ObjDecoder::ParseTexCoord(Status *status) {
+  std::array<char, 2> prefix;
+  if (!buffer()->Peek(&prefix)) {
+    return false;
+  }
+  if (prefix[0] != 'v' || prefix[1] != 't') {
+    return false;
+  }
+  // Texture coord definition found; consume the "vt" keyword.
+  buffer()->Advance(2);
+  if (!counting_mode_) {
+    // Second pass: read the two float components and store them.
+    float uv[2];
+    int component = 0;
+    while (component < 2) {
+      parser::SkipWhitespace(buffer());
+      if (!parser::ParseFloat(buffer(), &uv[component])) {
+        *status = Status(Status::DRACO_ERROR, "Failed to parse a float number");
+        // The definition is processed so return true.
+        return true;
+      }
+      ++component;
+    }
+    out_point_cloud_->attribute(tex_att_id_)
+        ->SetAttributeValue(AttributeValueIndex(num_tex_coords_), uv);
+  }
+  ++num_tex_coords_;
+  parser::SkipLine(buffer());
+  return true;
+}
+
+// Parses an "f" (face) definition. In counting mode the line is scanned for
+// the number of index groups (3 or 4 allowed) to determine how many
+// triangles it contributes; otherwise the index triplets are parsed and
+// mapped to attribute entries. Quads are split into two triangles.
+bool ObjDecoder::ParseFace(Status *status) {
+  char c;
+  if (!buffer()->Peek(&c)) {
+    return false;
+  }
+  if (c != 'f') {
+    return false;
+  }
+  // Face definition found!
+  buffer()->Advance(1);
+  if (!counting_mode_) {
+    std::array<int32_t, 3> indices[4];
+    // Parse face indices (we try to look for up to four to support quads).
+    int num_valid_indices = 0;
+    for (int i = 0; i < 4; ++i) {
+      if (!ParseVertexIndices(&indices[i])) {
+        if (i == 3) {
+          break;  // It's OK if there is no fourth vertex index.
+        }
+        *status = Status(Status::DRACO_ERROR, "Failed to parse vertex indices");
+        return true;
+      }
+      ++num_valid_indices;
+    }
+    // Process the first face.
+    for (int i = 0; i < 3; ++i) {
+      const PointIndex vert_id(3 * num_obj_faces_ + i);
+      MapPointToVertexIndices(vert_id, indices[i]);
+    }
+    ++num_obj_faces_;
+    if (num_valid_indices == 4) {
+      // Add an additional triangle for the quad.
+      //
+      //   3----2
+      //   | /  |
+      //   |/   |
+      //   0----1
+      //
+      const PointIndex vert_id(3 * num_obj_faces_);
+      MapPointToVertexIndices(vert_id, indices[0]);
+      MapPointToVertexIndices(vert_id + 1, indices[2]);
+      MapPointToVertexIndices(vert_id + 2, indices[3]);
+      ++num_obj_faces_;
+    }
+  } else {
+    // We are in the counting mode.
+    // We need to determine how many triangles are in the obj face.
+    // Go over the line and check how many gaps there are between non-empty
+    // sub-strings.
+    parser::SkipWhitespace(buffer());
+    int num_indices = 0;
+    bool is_end = false;
+    while (buffer()->Peek(&c) && c != '\n') {
+      if (parser::PeekWhitespace(buffer(), &is_end)) {
+        buffer()->Advance(1);
+      } else {
+        // Non-whitespace reached.. assume it's index declaration, skip it.
+        num_indices++;
+        while (!parser::PeekWhitespace(buffer(), &is_end) && !is_end) {
+          buffer()->Advance(1);
+        }
+      }
+    }
+    // Only triangles and quads are supported; anything else is an error.
+    if (num_indices < 3 || num_indices > 4) {
+      *status =
+          Status(Status::DRACO_ERROR, "Invalid number of indices on a face");
+      return false;
+    }
+    // Either one or two new triangles.
+    num_obj_faces_ += num_indices - 2;
+  }
+  parser::SkipLine(buffer());
+  return true;
+}
+
+// Parses a "mtllib" definition and loads the referenced material file.
+// Problems while reading the material file itself are silently ignored.
+bool ObjDecoder::ParseMaterialLib(Status *status) {
+  // Allow only one material library per file for now.
+  if (!material_name_to_id_.empty()) {
+    return false;
+  }
+  std::array<char, 6> c;
+  if (!buffer()->Peek(&c)) {
+    return false;
+  }
+  if (std::memcmp(&c[0], "mtllib", 6) != 0) {
+    return false;
+  }
+  buffer()->Advance(6);
+  DecoderBuffer line_buffer = parser::ParseLineIntoDecoderBuffer(buffer());
+  parser::SkipWhitespace(&line_buffer);
+  material_file_name_.clear();
+  if (!parser::ParseString(&line_buffer, &material_file_name_)) {
+    *status = Status(Status::DRACO_ERROR, "Failed to parse material file name");
+    return true;
+  }
+  parser::SkipLine(&line_buffer);
+
+  if (!material_file_name_.empty()) {
+    if (!ParseMaterialFile(material_file_name_, status)) {
+      // Silently ignore problems with material files for now.
+      return true;
+    }
+  }
+  return true;
+}
+
+// Parses a "usemtl" definition and updates |last_material_id_| so that
+// subsequently parsed faces are tagged with this material. Materials used in
+// the obj but missing from the .mtl file are appended to the material map.
+bool ObjDecoder::ParseMaterial(Status * /* status */) {
+  // In second pass, skip when we don't use materials.
+  if (!counting_mode_ && material_att_id_ < 0) {
+    return false;
+  }
+  std::array<char, 6> c;
+  if (!buffer()->Peek(&c)) {
+    return false;
+  }
+  if (std::memcmp(&c[0], "usemtl", 6) != 0) {
+    return false;
+  }
+  buffer()->Advance(6);
+  DecoderBuffer line_buffer = parser::ParseLineIntoDecoderBuffer(buffer());
+  parser::SkipWhitespace(&line_buffer);
+  std::string mat_name;
+  parser::ParseLine(&line_buffer, &mat_name);
+  if (mat_name.length() == 0) {
+    return false;
+  }
+  auto it = material_name_to_id_.find(mat_name);
+  if (it == material_name_to_id_.end()) {
+    // In first pass, materials found in obj that's not in the .mtl file
+    // will be added to the list.
+    last_material_id_ = num_materials_;
+    material_name_to_id_[mat_name] = num_materials_++;
+
+    return true;
+  }
+  last_material_id_ = it->second;
+  return true;
+}
+
+// Parses an "o" (sub-object) definition and updates |last_sub_obj_id_| so
+// that subsequently parsed faces are tagged with this sub-object. New names
+// are assigned ids in order of first appearance.
+bool ObjDecoder::ParseObject(Status *status) {
+  std::array<char, 2> c;
+  if (!buffer()->Peek(&c)) {
+    return false;
+  }
+  if (std::memcmp(&c[0], "o ", 2) != 0) {
+    return false;
+  }
+  // Advance past the 'o'; the following whitespace is skipped below.
+  buffer()->Advance(1);
+  DecoderBuffer line_buffer = parser::ParseLineIntoDecoderBuffer(buffer());
+  parser::SkipWhitespace(&line_buffer);
+  std::string obj_name;
+  if (!parser::ParseString(&line_buffer, &obj_name)) {
+    return false;
+  }
+  if (obj_name.length() == 0) {
+    return true;  // Ignore empty name entries.
+  }
+  auto it = obj_name_to_id_.find(obj_name);
+  if (it == obj_name_to_id_.end()) {
+    const int num_obj = static_cast<int>(obj_name_to_id_.size());
+    obj_name_to_id_[obj_name] = num_obj;
+    last_sub_obj_id_ = num_obj;
+  } else {
+    last_sub_obj_id_ = it->second;
+  }
+  return true;
+}
+
+// Parses one POS[/TEX[/NORM]] index group from the buffer into
+// |out_indices|. Missing texture/normal indices are reported as 0; parsed
+// indices keep the obj file's 1-based (or negative, relative) convention —
+// translation happens in MapPointToVertexIndices. Returns false on a
+// malformed group.
+bool ObjDecoder::ParseVertexIndices(std::array<int32_t, 3> *out_indices) {
+  // Parsed attribute indices can be in format:
+  // 1. POS_INDEX
+  // 2. POS_INDEX/TEX_COORD_INDEX
+  // 3. POS_INDEX/TEX_COORD_INDEX/NORMAL_INDEX
+  // 4. POS_INDEX//NORMAL_INDEX
+  parser::SkipCharacters(buffer(), " \t");
+  if (!parser::ParseSignedInt(buffer(), &(*out_indices)[0]) ||
+      (*out_indices)[0] == 0) {
+    return false;  // Position index must be present and valid.
+  }
+  (*out_indices)[1] = (*out_indices)[2] = 0;
+  char ch;
+  if (!buffer()->Peek(&ch)) {
+    return true;  // It may be OK if we cannot read any more characters.
+  }
+  if (ch != '/') {
+    return true;
+  }
+  buffer()->Advance(1);
+  // Check if we should skip texture index or not.
+  if (!buffer()->Peek(&ch)) {
+    return false;  // Here, we should be always able to read the next char.
+  }
+  if (ch != '/') {
+    // Must be texture coord index.
+    if (!parser::ParseSignedInt(buffer(), &(*out_indices)[1]) ||
+        (*out_indices)[1] == 0) {
+      return false;  // Texture index must be present and valid.
+    }
+  }
+  if (!buffer()->Peek(&ch)) {
+    return true;
+  }
+  if (ch == '/') {
+    buffer()->Advance(1);
+    // Read normal index.
+    if (!parser::ParseSignedInt(buffer(), &(*out_indices)[2]) ||
+        (*out_indices)[2] == 0) {
+      return false;  // Normal index must be present and valid.
+    }
+  }
+  return true;
+}
+
+// Maps the point |vert_id| to the attribute values selected by |indices|
+// (position / tex coord / normal, in obj index convention), and tags the
+// point with the current material and sub-object ids when those attributes
+// exist.
+void ObjDecoder::MapPointToVertexIndices(
+    PointIndex vert_id, const std::array<int32_t, 3> &indices) {
+  // Use face entries to store mapping between vertex and attribute indices
+  // (positions, texture coordinates and normal indices).
+  // Any given index is used when indices[x] != 0. For positive values, the
+  // point is mapped directly to the specified attribute index. Negative input
+  // indices indicate addressing from the last element (e.g. -1 is the last
+  // attribute value of a given type, -2 the second last, etc.).
+  if (indices[0] > 0) {
+    out_point_cloud_->attribute(pos_att_id_)
+        ->SetPointMapEntry(vert_id, AttributeValueIndex(indices[0] - 1));
+  } else if (indices[0] < 0) {
+    out_point_cloud_->attribute(pos_att_id_)
+        ->SetPointMapEntry(vert_id,
+                           AttributeValueIndex(num_positions_ + indices[0]));
+  }
+
+  if (tex_att_id_ >= 0) {
+    if (indices[1] > 0) {
+      out_point_cloud_->attribute(tex_att_id_)
+          ->SetPointMapEntry(vert_id, AttributeValueIndex(indices[1] - 1));
+    } else if (indices[1] < 0) {
+      out_point_cloud_->attribute(tex_att_id_)
+          ->SetPointMapEntry(vert_id,
+                             AttributeValueIndex(num_tex_coords_ + indices[1]));
+    } else {
+      // Texture index not provided but expected. Insert 0 entry as the
+      // default value.
+      out_point_cloud_->attribute(tex_att_id_)
+          ->SetPointMapEntry(vert_id, AttributeValueIndex(0));
+    }
+  }
+
+  if (norm_att_id_ >= 0) {
+    if (indices[2] > 0) {
+      out_point_cloud_->attribute(norm_att_id_)
+          ->SetPointMapEntry(vert_id, AttributeValueIndex(indices[2] - 1));
+    } else if (indices[2] < 0) {
+      out_point_cloud_->attribute(norm_att_id_)
+          ->SetPointMapEntry(vert_id,
+                             AttributeValueIndex(num_normals_ + indices[2]));
+    } else {
+      // Normal index not provided but expected. Insert 0 entry as the default
+      // value.
+      out_point_cloud_->attribute(norm_att_id_)
+          ->SetPointMapEntry(vert_id, AttributeValueIndex(0));
+    }
+  }
+
+  // Assign material index to the point if it is available.
+  if (material_att_id_ >= 0) {
+    out_point_cloud_->attribute(material_att_id_)
+        ->SetPointMapEntry(vert_id, AttributeValueIndex(last_material_id_));
+  }
+
+  // Assign sub-object index to the point if it is available.
+  if (sub_obj_att_id_ >= 0) {
+    out_point_cloud_->attribute(sub_obj_att_id_)
+        ->SetPointMapEntry(vert_id, AttributeValueIndex(last_sub_obj_id_));
+  }
+}
+
+// Loads and parses the .mtl file |file_name| (resolved relative to the input
+// obj file), filling |material_name_to_id_|. The decoder's buffer is swapped
+// out for the material file's contents and restored afterwards. Returns
+// false only if the file cannot be read.
+bool ObjDecoder::ParseMaterialFile(const std::string &file_name,
+                                   Status *status) {
+  const std::string full_path = GetFullPath(file_name, input_file_name_);
+  std::vector<char> buffer;
+  if (!ReadFileToBuffer(full_path, &buffer)) {
+    return false;
+  }
+
+  // Backup the original decoder buffer.
+  DecoderBuffer old_buffer = buffer_;
+
+  buffer_.Init(buffer.data(), buffer.size());
+
+  // Material ids restart from zero; the map is rebuilt from this file.
+  num_materials_ = 0;
+  while (ParseMaterialFileDefinition(status)) {
+  }
+
+  // Restore the original buffer.
+  buffer_ = old_buffer;
+  return true;
+}
+
+// Parses one statement of a .mtl file. Only "newmtl" entries are recorded
+// (mapping material name to a sequential id); everything else is skipped.
+// Returns false at end of file or on a malformed statement.
+bool ObjDecoder::ParseMaterialFileDefinition(Status * /* status */) {
+  char c;
+  parser::SkipWhitespace(buffer());
+  if (!buffer()->Peek(&c)) {
+    // End of file reached.
+    return false;
+  }
+  if (c == '#') {
+    // Comment, ignore the line.
+    parser::SkipLine(buffer());
+    return true;
+  }
+  std::string str;
+  if (!parser::ParseString(buffer(), &str)) {
+    return false;
+  }
+  if (str == "newmtl") {
+    parser::SkipWhitespace(buffer());
+    parser::ParseLine(buffer(), &str);
+    if (str.empty()) {
+      return false;
+    }
+    // Add new material to our map.
+    material_name_to_id_[str] = num_materials_++;
+  }
+  return true;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/obj_decoder.h b/libs/assimp/contrib/draco/src/draco/io/obj_decoder.h
new file mode 100644
index 0000000..baeab5b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/obj_decoder.h
@@ -0,0 +1,129 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_OBJ_DECODER_H_
+#define DRACO_IO_OBJ_DECODER_H_
+
+#include <string>
+#include <unordered_map>
+
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/status.h"
+#include "draco/draco_features.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Decodes a Wavefront OBJ file into draco::Mesh (or draco::PointCloud if the
+// connectivity data is not needed). This decoder can handle decoding of
+// positions, texture coordinates, normals and triangular faces.
+// All other geometry properties are ignored.
+class ObjDecoder {
+ public:
+  ObjDecoder();
+
+  // Decodes an obj file stored in the input file.
+  // Returns nullptr if the decoding failed.
+  Status DecodeFromFile(const std::string &file_name, Mesh *out_mesh);
+  Status DecodeFromFile(const std::string &file_name,
+                        PointCloud *out_point_cloud);
+
+  // Decodes an obj from the given buffer instead of a file.
+  Status DecodeFromBuffer(DecoderBuffer *buffer, Mesh *out_mesh);
+  Status DecodeFromBuffer(DecoderBuffer *buffer, PointCloud *out_point_cloud);
+
+  // Flag that can be used to turn on/off deduplication of input values.
+  // This should be disabled only when we are sure that the input data does not
+  // contain any duplicate entries.
+  // Default: true
+  void set_deduplicate_input_values(bool v) { deduplicate_input_values_ = v; }
+  // Flag for whether using metadata to record other information in the obj
+  // file, e.g. material names, object names.
+  void set_use_metadata(bool flag) { use_metadata_ = flag; }
+
+ protected:
+  Status DecodeInternal();
+  DecoderBuffer *buffer() { return &buffer_; }
+
+ private:
+  // Resets internal counters for attributes and faces.
+  void ResetCounters();
+
+  // Parses the next mesh property definition (position, tex coord, normal, or
+  // face). If the parsed data is unrecognized, it will be skipped.
+  // Returns false when the end of file was reached.
+  bool ParseDefinition(Status *status);
+
+  // Attempts to parse definition of position, normal, tex coord, or face
+  // respectively.
+  // Returns false when the parsed data didn't contain the given definition.
+  bool ParseVertexPosition(Status *status);
+  bool ParseNormal(Status *status);
+  bool ParseTexCoord(Status *status);
+  bool ParseFace(Status *status);
+  bool ParseMaterialLib(Status *status);
+  bool ParseMaterial(Status *status);
+  bool ParseObject(Status *status);
+
+  // Parses triplet of position, tex coords and normal indices.
+  // Returns false on error.
+  bool ParseVertexIndices(std::array<int32_t, 3> *out_indices);
+
+  // Maps specified point index to the parsed vertex indices (triplet of
+  // position, texture coordinate, and normal indices) .
+  void MapPointToVertexIndices(PointIndex vert_id,
+                               const std::array<int32_t, 3> &indices);
+
+  // Parses material file definitions from a separate file.
+  bool ParseMaterialFile(const std::string &file_name, Status *status);
+  bool ParseMaterialFileDefinition(Status *status);
+
+  // If set to true, the parser will count the number of various definitions
+  // but it will not parse the actual data or add any new entries to the mesh.
+  bool counting_mode_;
+  // Element counters gathered by the counting pass and re-used as running
+  // indices during the data-filling pass.
+  int num_obj_faces_;
+  int num_positions_;
+  int num_tex_coords_;
+  int num_normals_;
+  int num_materials_;
+  // Id of the sub-object most recently selected by an "o" statement.
+  int last_sub_obj_id_;
+
+  // Attribute ids in the output geometry; -1 when the attribute is absent.
+  int pos_att_id_;
+  int tex_att_id_;
+  int norm_att_id_;
+  int material_att_id_;
+  int sub_obj_att_id_;  // Attribute id for storing sub-objects.
+
+  bool deduplicate_input_values_;
+
+  // Id of the material most recently selected by a "usemtl" statement.
+  int last_material_id_;
+  // Name of the .mtl file referenced by the "mtllib" statement, if any.
+  std::string material_file_name_;
+
+  // Path of the input obj file; used to resolve relative .mtl paths.
+  std::string input_file_name_;
+
+  // Name-to-id maps for materials and sub-objects.
+  std::unordered_map<std::string, int> material_name_to_id_;
+  std::unordered_map<std::string, int> obj_name_to_id_;
+
+  bool use_metadata_;
+
+  // Buffer holding the obj data currently being parsed.
+  DecoderBuffer buffer_;
+
+  // Data structure that stores the decoded data. |out_point_cloud_| must be
+  // always set but |out_mesh_| is optional.
+  Mesh *out_mesh_;
+  PointCloud *out_point_cloud_;
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_OBJ_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/obj_decoder_test.cc b/libs/assimp/contrib/draco/src/draco/io/obj_decoder_test.cc
new file mode 100644
index 0000000..b19fe6e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/obj_decoder_test.cc
@@ -0,0 +1,193 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/obj_decoder.h"
+
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+
+namespace draco {
+
+// Test fixture providing helpers that decode a named test file into either a
+// Mesh or a PointCloud (the Geometry template parameter).
+class ObjDecoderTest : public ::testing::Test {
+ protected:
+  // Decodes |file_name| with input-value deduplication disabled.
+  template <class Geometry>
+  std::unique_ptr<Geometry> DecodeObj(const std::string &file_name) const {
+    return DecodeObj<Geometry>(file_name, false);
+  }
+
+  // Decodes |file_name|, optionally deduplicating input values.
+  // Returns nullptr when decoding fails.
+  template <class Geometry>
+  std::unique_ptr<Geometry> DecodeObj(const std::string &file_name,
+                                      bool deduplicate_input_values) const {
+    const std::string path = GetTestFileFullPath(file_name);
+    ObjDecoder decoder;
+    decoder.set_deduplicate_input_values(deduplicate_input_values);
+    std::unique_ptr<Geometry> geometry(new Geometry());
+    if (!decoder.DecodeFromFile(path, geometry.get()).ok()) {
+      return nullptr;
+    }
+    return geometry;
+  }
+
+  // Decodes |file_name| with metadata recording enabled.
+  // Returns nullptr when decoding fails.
+  template <class Geometry>
+  std::unique_ptr<Geometry> DecodeObjWithMetadata(
+      const std::string &file_name) const {
+    const std::string path = GetTestFileFullPath(file_name);
+    ObjDecoder decoder;
+    decoder.set_use_metadata(true);
+    std::unique_ptr<Geometry> geometry(new Geometry());
+    if (!decoder.DecodeFromFile(path, geometry.get()).ok()) {
+      return nullptr;
+    }
+    return geometry;
+  }
+
+  // Asserts that |file_name| decodes successfully both as a mesh (with at
+  // least one face) and as a point cloud (with at least one point).
+  void test_decoding(const std::string &file_name) {
+    const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name));
+    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+    ASSERT_GT(mesh->num_faces(), 0);
+
+    const std::unique_ptr<PointCloud> pc(DecodeObj<PointCloud>(file_name));
+    ASSERT_NE(pc, nullptr) << "Failed to load test model " << file_name;
+    ASSERT_GT(pc->num_points(), 0);
+  }
+};
+
+// Tests decoding of extra_vertex.obj as both a mesh and a point cloud.
+TEST_F(ObjDecoderTest, ExtraVertexOBJ) {
+  const std::string file_name = "extra_vertex.obj";
+  test_decoding(file_name);
+}
+
+// Tests decoding of cube_att_partial.obj as both a mesh and a point cloud.
+TEST_F(ObjDecoderTest, PartialAttributesOBJ) {
+  const std::string file_name = "cube_att_partial.obj";
+  test_decoding(file_name);
+}
+
+// Tests loading an Obj with sub objects ("o" statements) and checks the
+// resulting generic sub-object attribute.
+TEST_F(ObjDecoderTest, SubObjects) {
+  // Tests loading an Obj with sub objects.
+  const std::string file_name = "cube_att_sub_o.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+  ASSERT_GT(mesh->num_faces(), 0);
+
+  // A sub object attribute should be the fourth attribute of the mesh (in this
+  // case).
+  ASSERT_EQ(mesh->num_attributes(), 4);
+  ASSERT_EQ(mesh->attribute(3)->attribute_type(), GeometryAttribute::GENERIC);
+  // There should be 3 different sub objects used in the model.
+  ASSERT_EQ(mesh->attribute(3)->size(), 3);
+  // Verify that the sub object attribute has unique id == 3.
+  ASSERT_EQ(mesh->attribute(3)->unique_id(), 3);
+}
+
+// Tests loading an Obj with sub objects when metadata recording is enabled,
+// and checks that sub-object names are retrievable from attribute metadata.
+TEST_F(ObjDecoderTest, SubObjectsWithMetadata) {
+  // Tests loading an Obj with sub objects.
+  const std::string file_name = "cube_att_sub_o.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObjWithMetadata<Mesh>(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+  ASSERT_GT(mesh->num_faces(), 0);
+
+  ASSERT_EQ(mesh->num_attributes(), 4);
+  ASSERT_EQ(mesh->attribute(3)->attribute_type(), GeometryAttribute::GENERIC);
+  // There should be 3 different sub objects used in the model.
+  ASSERT_EQ(mesh->attribute(3)->size(), 3);
+
+  // Test sub-object names stored in metadata.
+  ASSERT_NE(mesh->GetMetadata(), nullptr);
+  ASSERT_NE(mesh->GetAttributeMetadataByAttributeId(3), nullptr);
+  int32_t sub_obj_id = 0;
+  ASSERT_TRUE(mesh->GetAttributeMetadataByAttributeId(3)->GetEntryInt(
+      "obj2", &sub_obj_id));
+  ASSERT_EQ(sub_obj_id, 2);
+}
+
+// Tests loading an Obj with quad faces; each quad is split into two
+// triangles by the decoder.
+TEST_F(ObjDecoderTest, QuadOBJ) {
+  // Tests loading an Obj with quad faces.
+  const std::string file_name = "cube_quads.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name));
+  ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+  ASSERT_EQ(mesh->num_faces(), 12);
+
+  ASSERT_EQ(mesh->num_attributes(), 3);
+  ASSERT_EQ(mesh->num_points(), 4 * 6);  // Four points per quad face.
+}
+
+// Tests that decoding an obj with a complex (more than 4 vertex) polygon
+// fails, since only triangles and quads are supported.
+TEST_F(ObjDecoderTest, ComplexPolyOBJ) {
+  // Tests that we fail to load an obj with complex polygon (expected failure).
+  const std::string file_name = "invalid/complex_poly.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name));
+  ASSERT_EQ(mesh, nullptr);
+}
+
+// Tests that an obj containing a sub-object with an empty name still decodes
+// (empty "o" entries are ignored by the decoder).
+TEST_F(ObjDecoderTest, EmptyNameOBJ) {
+  // Tests that we load an obj file that has an sub-object defined with an empty
+  // name.
+  const std::string file_name = "empty_name.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name));
+  ASSERT_NE(mesh, nullptr);
+  ASSERT_EQ(mesh->num_attributes(), 1);
+  // Three valid entries in the attribute are expected.
+  ASSERT_EQ(mesh->attribute(0)->size(), 3);
+}
+
+// Tests that a faceless obj file is decoded as a point cloud (zero faces,
+// one attribute entry per vertex).
+TEST_F(ObjDecoderTest, PointCloudOBJ) {
+  // Tests that we load an obj file that does not contain any faces.
+  const std::string file_name = "test_lines.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name, false));
+  ASSERT_NE(mesh, nullptr);
+  ASSERT_EQ(mesh->num_faces(), 0);
+  ASSERT_EQ(mesh->num_attributes(), 1);
+  ASSERT_EQ(mesh->attribute(0)->size(), 484);
+}
+
+// Tests that invalid attribute index mappings in an obj are ignored rather
+// than causing the whole decode to fail.
+TEST_F(ObjDecoderTest, WrongAttributeMapping) {
+  // Tests that we load an obj file that contains invalid mapping between
+  // attribute indices and values. In such case the invalid indices should be
+  // ignored.
+  const std::string file_name = "test_wrong_attribute_mapping.obj";
+  const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name, false));
+  ASSERT_NE(mesh, nullptr);
+  ASSERT_EQ(mesh->num_faces(), 1);
+  ASSERT_EQ(mesh->num_attributes(), 1);
+  ASSERT_EQ(mesh->attribute(0)->size(), 3);
+}
+
+// Smoke test: every obj in the test folder should decode successfully as
+// both a mesh and a point cloud.
+TEST_F(ObjDecoderTest, TestObjDecodingAll) {
+  // test if we can read all obj that are currently in test folder.
+  test_decoding("bunny_norm.obj");
+  // test_decoding("complex_poly.obj"); // not supported see test above
+  test_decoding("cube_att.obj");
+  test_decoding("cube_att_partial.obj");
+  test_decoding("cube_att_sub_o.obj");
+  test_decoding("cube_quads.obj");
+  test_decoding("cube_subd.obj");
+  test_decoding("eof_test.obj");
+  test_decoding("extra_vertex.obj");
+  test_decoding("mat_test.obj");
+  test_decoding("one_face_123.obj");
+  test_decoding("one_face_312.obj");
+  test_decoding("one_face_321.obj");
+  test_decoding("sphere.obj");
+  test_decoding("test_nm.obj");
+  test_decoding("test_nm_trans.obj");
+  test_decoding("test_sphere.obj");
+  test_decoding("three_faces_123.obj");
+  test_decoding("three_faces_312.obj");
+  test_decoding("two_faces_123.obj");
+  test_decoding("two_faces_312.obj");
+  test_decoding("inf_nan.obj");
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/obj_encoder.cc b/libs/assimp/contrib/draco/src/draco/io/obj_encoder.cc
new file mode 100644
index 0000000..29c6ca8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/obj_encoder.cc
@@ -0,0 +1,346 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/obj_encoder.h"
+
+#include <memory>
+
+#include "draco/io/file_writer_factory.h"
+#include "draco/io/file_writer_interface.h"
+#include "draco/metadata/geometry_metadata.h"
+
+namespace draco {
+
// Initializes all attribute and I/O pointers to null and the running
// sub-object/material ids to -1, meaning "nothing emitted yet" (any real id
// then triggers an "o"/"usemtl" line on the first face).
ObjEncoder::ObjEncoder()
    : pos_att_(nullptr),
      tex_coord_att_(nullptr),
      normal_att_(nullptr),
      material_att_(nullptr),
      sub_obj_att_(nullptr),
      out_buffer_(nullptr),
      in_point_cloud_(nullptr),
      in_mesh_(nullptr),
      current_sub_obj_id_(-1),
      current_material_id_(-1) {}
+
+bool ObjEncoder::EncodeToFile(const PointCloud &pc,
+ const std::string &file_name) {
+ std::unique_ptr<FileWriterInterface> file =
+ FileWriterFactory::OpenWriter(file_name);
+ if (!file) {
+ return false; // File could not be opened.
+ }
+ file_name_ = file_name;
+ // Encode the mesh into a buffer.
+ EncoderBuffer buffer;
+ if (!EncodeToBuffer(pc, &buffer)) {
+ return false;
+ }
+ // Write the buffer into the file.
+ file->Write(buffer.data(), buffer.size());
+ return true;
+}
+
// Mesh overload: remembers the mesh so EncodeInternal() also emits "f"
// (face) lines, then delegates to the point-cloud file path.
bool ObjEncoder::EncodeToFile(const Mesh &mesh, const std::string &file_name) {
  in_mesh_ = &mesh;
  return EncodeToFile(static_cast<const PointCloud &>(mesh), file_name);
}
+
+bool ObjEncoder::EncodeToBuffer(const PointCloud &pc,
+ EncoderBuffer *out_buffer) {
+ in_point_cloud_ = &pc;
+ out_buffer_ = out_buffer;
+ if (!EncodeInternal()) {
+ return ExitAndCleanup(false);
+ }
+ return ExitAndCleanup(true);
+}
+
// Mesh overload: remembers the mesh so EncodeInternal() also emits faces,
// then delegates to the point-cloud buffer path.
bool ObjEncoder::EncodeToBuffer(const Mesh &mesh, EncoderBuffer *out_buffer) {
  in_mesh_ = &mesh;
  return EncodeToBuffer(static_cast<const PointCloud &>(mesh), out_buffer);
}
+
+bool ObjEncoder::EncodeInternal() {
+ pos_att_ = nullptr;
+ tex_coord_att_ = nullptr;
+ normal_att_ = nullptr;
+ material_att_ = nullptr;
+ sub_obj_att_ = nullptr;
+ current_sub_obj_id_ = -1;
+ current_material_id_ = -1;
+ if (!GetSubObjects()) {
+ return false;
+ }
+ if (!EncodeMaterialFileName()) {
+ return false;
+ }
+ if (!EncodePositions()) {
+ return false;
+ }
+ if (!EncodeTextureCoordinates()) {
+ return false;
+ }
+ if (!EncodeNormals()) {
+ return false;
+ }
+ if (in_mesh_ && !EncodeFaces()) {
+ return false;
+ }
+ return true;
+}
+
// Resets all per-encode state so the encoder instance can be reused, then
// forwards |return_value| unchanged so call sites can simply write
// `return ExitAndCleanup(result);`.
bool ObjEncoder::ExitAndCleanup(bool return_value) {
  in_mesh_ = nullptr;
  in_point_cloud_ = nullptr;
  out_buffer_ = nullptr;
  pos_att_ = nullptr;
  tex_coord_att_ = nullptr;
  normal_att_ = nullptr;
  material_att_ = nullptr;
  sub_obj_att_ = nullptr;
  current_sub_obj_id_ = -1;
  current_material_id_ = -1;
  file_name_.clear();
  return return_value;
}
+
// Looks up the "sub_obj" attribute metadata (OBJ "o" groups) and builds the
// id -> name map used when emitting faces. Absent metadata is not an error;
// the function only fails when the metadata exists but the matching
// attribute is missing or empty.
bool ObjEncoder::GetSubObjects() {
  const GeometryMetadata *pc_metadata = in_point_cloud_->GetMetadata();
  if (!pc_metadata) {
    return true;  // No metadata at all -> nothing to do.
  }
  const AttributeMetadata *sub_obj_metadata =
      pc_metadata->GetAttributeMetadataByStringEntry("name", "sub_obj");
  if (!sub_obj_metadata) {
    return true;  // Model has no sub-objects.
  }
  sub_obj_id_to_name_.clear();
  for (const auto &entry : sub_obj_metadata->entries()) {
    // Sub-object id must be int; other entry types are skipped.
    int value = 0;
    if (!entry.second.GetValue(&value)) {
      continue;
    }
    sub_obj_id_to_name_[value] = entry.first;
  }
  sub_obj_att_ = in_point_cloud_->GetAttributeByUniqueId(
      sub_obj_metadata->att_unique_id());
  // Metadata referenced an attribute that is missing or empty -> error.
  if (sub_obj_att_ == nullptr || sub_obj_att_->size() == 0) {
    return false;
  }
  return true;
}
+
+bool ObjEncoder::EncodeMaterialFileName() {
+ const GeometryMetadata *pc_metadata = in_point_cloud_->GetMetadata();
+ const AttributeMetadata *material_metadata = nullptr;
+ if (pc_metadata) {
+ material_metadata =
+ pc_metadata->GetAttributeMetadataByStringEntry("name", "material");
+ }
+ std::string material_file_name;
+ std::string material_full_path;
+ if (!material_metadata) {
+ return true;
+ }
+ if (!material_metadata->GetEntryString("file_name", &material_file_name))
+ return false;
+ buffer()->Encode("mtllib ", 7);
+ buffer()->Encode(material_file_name.c_str(), material_file_name.size());
+ buffer()->Encode("\n", 1);
+ material_id_to_name_.clear();
+ for (const auto &entry : material_metadata->entries()) {
+ // Material id must be int.
+ int value = 0;
+ // Found entry that are not material id, e.g. file name as a string.
+ if (!entry.second.GetValue(&value)) {
+ continue;
+ }
+ material_id_to_name_[value] = entry.first;
+ }
+ material_att_ = in_point_cloud_->GetAttributeByUniqueId(
+ material_metadata->att_unique_id());
+ if (material_att_ == nullptr || material_att_->size() == 0) {
+ return false;
+ }
+ return true;
+}
+
// Writes one "v x y z" line per position value. Positions are mandatory for
// OBJ output, so a missing or empty attribute is an error.
bool ObjEncoder::EncodePositions() {
  const PointAttribute *const att =
      in_point_cloud_->GetNamedAttribute(GeometryAttribute::POSITION);
  if (att == nullptr || att->size() == 0) {
    return false;  // Position attribute must be valid.
  }
  std::array<float, 3> value;
  for (AttributeValueIndex i(0); i < static_cast<uint32_t>(att->size()); ++i) {
    // Convert the stored type to float; fails on incompatible attributes.
    if (!att->ConvertValue<float, 3>(i, &value[0])) {
      return false;
    }
    buffer()->Encode("v ", 2);
    EncodeFloatList(&value[0], 3);
    buffer()->Encode("\n", 1);
  }
  pos_att_ = att;  // Remember for index emission in EncodeFaceCorner().
  return true;
}
+
// Writes one "vt u v" line per texture-coordinate value; texture
// coordinates are optional, so an absent attribute is not an error.
bool ObjEncoder::EncodeTextureCoordinates() {
  const PointAttribute *const att =
      in_point_cloud_->GetNamedAttribute(GeometryAttribute::TEX_COORD);
  if (att == nullptr || att->size() == 0) {
    return true;  // It's OK if we don't have texture coordinates.
  }
  std::array<float, 2> value;
  for (AttributeValueIndex i(0); i < static_cast<uint32_t>(att->size()); ++i) {
    if (!att->ConvertValue<float, 2>(i, &value[0])) {
      return false;
    }
    buffer()->Encode("vt ", 3);
    EncodeFloatList(&value[0], 2);
    buffer()->Encode("\n", 1);
  }
  tex_coord_att_ = att;  // Remember for index emission in EncodeFaceCorner().
  return true;
}
+
// Writes one "vn x y z" line per normal value; normals are optional, so an
// absent attribute is not an error.
bool ObjEncoder::EncodeNormals() {
  const PointAttribute *const att =
      in_point_cloud_->GetNamedAttribute(GeometryAttribute::NORMAL);
  if (att == nullptr || att->size() == 0) {
    return true;  // It's OK if we don't have normals.
  }
  std::array<float, 3> value;
  for (AttributeValueIndex i(0); i < static_cast<uint32_t>(att->size()); ++i) {
    if (!att->ConvertValue<float, 3>(i, &value[0])) {
      return false;
    }
    buffer()->Encode("vn ", 3);
    EncodeFloatList(&value[0], 3);
    buffer()->Encode("\n", 1);
  }
  normal_att_ = att;  // Remember for index emission in EncodeFaceCorner().
  return true;
}
+
// Writes one "f" line per triangle, preceded where needed by "o" and
// "usemtl" lines when the face's sub-object or material changes.
bool ObjEncoder::EncodeFaces() {
  for (FaceIndex i(0); i < in_mesh_->num_faces(); ++i) {
    if (sub_obj_att_) {
      if (!EncodeSubObject(i)) {
        return false;
      }
    }
    if (material_att_) {
      if (!EncodeMaterial(i)) {
        return false;
      }
    }
    buffer()->Encode('f');
    // All faces are triangles; emit the three corner references.
    for (int j = 0; j < 3; ++j) {
      if (!EncodeFaceCorner(i, j)) {
        return false;
      }
    }
    buffer()->Encode("\n", 1);
  }
  return true;
}
+
// Emits a "usemtl <name>" line when |face_id|'s material differs from the
// last one written. Fails if the id cannot be read or has no name in the
// id -> name map built by EncodeMaterialFileName().
bool ObjEncoder::EncodeMaterial(FaceIndex face_id) {
  int material_id = 0;
  // Pick the first corner, all corners of a face should have same id.
  const PointIndex vert_index = in_mesh_->face(face_id)[0];
  const AttributeValueIndex index_id(material_att_->mapped_index(vert_index));
  if (!material_att_->ConvertValue<int>(index_id, &material_id)) {
    return false;
  }

  if (material_id != current_material_id_) {
    // Update material information.
    buffer()->Encode("usemtl ", 7);
    const auto mat_ptr = material_id_to_name_.find(material_id);
    // If the material id is not found.
    if (mat_ptr == material_id_to_name_.end()) {
      return false;
    }
    buffer()->Encode(mat_ptr->second.c_str(), mat_ptr->second.size());
    buffer()->Encode("\n", 1);
    current_material_id_ = material_id;
  }
  return true;
}
+
// Emits an "o <name>" line when |face_id|'s sub-object differs from the
// last one written. Fails if the id cannot be read or has no name in the
// id -> name map built by GetSubObjects().
bool ObjEncoder::EncodeSubObject(FaceIndex face_id) {
  int sub_obj_id = 0;
  // Pick the first corner, all corners of a face should have same id.
  const PointIndex vert_index = in_mesh_->face(face_id)[0];
  const AttributeValueIndex index_id(sub_obj_att_->mapped_index(vert_index));
  if (!sub_obj_att_->ConvertValue<int>(index_id, &sub_obj_id)) {
    return false;
  }
  if (sub_obj_id != current_sub_obj_id_) {
    buffer()->Encode("o ", 2);
    const auto sub_obj_ptr = sub_obj_id_to_name_.find(sub_obj_id);
    if (sub_obj_ptr == sub_obj_id_to_name_.end()) {
      return false;
    }
    buffer()->Encode(sub_obj_ptr->second.c_str(), sub_obj_ptr->second.size());
    buffer()->Encode("\n", 1);
    current_sub_obj_id_ = sub_obj_id;
  }
  return true;
}
+
// Writes one corner reference of an "f" line: " v", " v/vt", " v//vn" or
// " v/vt/vn" depending on which attributes are present.
bool ObjEncoder::EncodeFaceCorner(FaceIndex face_id, int local_corner_id) {
  buffer()->Encode(' ');
  const PointIndex vert_index = in_mesh_->face(face_id)[local_corner_id];
  // Note that in the OBJ format, all indices are encoded starting from index 1.
  // Encode position index.
  EncodeInt(pos_att_->mapped_index(vert_index).value() + 1);
  if (tex_coord_att_ || normal_att_) {
    // Encoding format is pos_index/tex_coord_index/normal_index.
    // If tex_coords are not present, we must encode pos_index//normal_index.
    buffer()->Encode('/');
    if (tex_coord_att_) {
      EncodeInt(tex_coord_att_->mapped_index(vert_index).value() + 1);
    }
    if (normal_att_) {
      buffer()->Encode('/');
      EncodeInt(normal_att_->mapped_index(vert_index).value() + 1);
    }
  }
  return true;
}
+
// Formats |val| with "%f" and appends it to the output buffer.
// NOTE(review): "%f" prints fixed notation with 6 decimals, so very small
// magnitudes lose precision and very large magnitudes can exceed the
// 20-byte num_buffer_ (snprintf truncates safely, but the written number
// would then be wrong) — confirm inputs stay in a moderate range.
void ObjEncoder::EncodeFloat(float val) {
  snprintf(num_buffer_, sizeof(num_buffer_), "%f", val);
  buffer()->Encode(num_buffer_, strlen(num_buffer_));
}
+
+void ObjEncoder::EncodeFloatList(float *vals, int num_vals) {
+ for (int i = 0; i < num_vals; ++i) {
+ if (i > 0) {
+ buffer()->Encode(' ');
+ }
+ EncodeFloat(vals[i]);
+ }
+}
+
// Formats |val| in decimal and appends it to the output buffer. A 32-bit
// int needs at most 11 characters plus NUL, which fits in num_buffer_.
void ObjEncoder::EncodeInt(int32_t val) {
  snprintf(num_buffer_, sizeof(num_buffer_), "%d", val);
  buffer()->Encode(num_buffer_, strlen(num_buffer_));
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/obj_encoder.h b/libs/assimp/contrib/draco/src/draco/io/obj_encoder.h
new file mode 100644
index 0000000..509d39b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/obj_encoder.h
@@ -0,0 +1,92 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_OBJ_ENCODER_H_
+#define DRACO_IO_OBJ_ENCODER_H_
+
+#include <unordered_map>
+
+#include "draco/core/encoder_buffer.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Class for encoding input draco::Mesh or draco::PointCloud into the Wavefront
+// OBJ format.
// Class for encoding input draco::Mesh or draco::PointCloud into the Wavefront
// OBJ format. The encoder is stateful during a single encode call but resets
// itself afterwards, so one instance can be reused for several inputs.
class ObjEncoder {
 public:
  ObjEncoder();

  // Encodes the mesh or a point cloud and saves it into a file.
  // Returns false when either the encoding failed or when the file couldn't be
  // opened.
  bool EncodeToFile(const PointCloud &pc, const std::string &file_name);
  bool EncodeToFile(const Mesh &mesh, const std::string &file_name);

  // Encodes the mesh or the point cloud into a buffer.
  bool EncodeToBuffer(const PointCloud &pc, EncoderBuffer *out_buffer);
  bool EncodeToBuffer(const Mesh &mesh, EncoderBuffer *out_buffer);

 protected:
  // Runs the actual encode pipeline over the stored input pointers.
  bool EncodeInternal();
  // Accessor for the current output buffer (valid only during an encode).
  EncoderBuffer *buffer() const { return out_buffer_; }
  // Resets all per-encode state and forwards |return_value|.
  bool ExitAndCleanup(bool return_value);

 private:
  // Pipeline stages, in OBJ section order.
  bool GetSubObjects();
  bool EncodeMaterialFileName();
  bool EncodePositions();
  bool EncodeTextureCoordinates();
  bool EncodeNormals();
  bool EncodeFaces();
  bool EncodeSubObject(FaceIndex face_id);
  bool EncodeMaterial(FaceIndex face_id);
  bool EncodeFaceCorner(FaceIndex face_id, int local_corner_id);

  // Low-level helpers that format numbers into the output buffer.
  void EncodeFloat(float val);
  void EncodeFloatList(float *vals, int num_vals);
  void EncodeInt(int32_t val);

  // Various attributes used by the encoder. If an attribute is not used, it is
  // set to nullptr.
  const PointAttribute *pos_att_;
  const PointAttribute *tex_coord_att_;
  const PointAttribute *normal_att_;
  const PointAttribute *material_att_;
  const PointAttribute *sub_obj_att_;

  // Buffer used for encoding float/int numbers.
  char num_buffer_[20];

  // Destination buffer; owned by the caller of EncodeToBuffer().
  EncoderBuffer *out_buffer_;

  // Input geometry; owned by the caller, valid only during an encode.
  const PointCloud *in_point_cloud_;
  const Mesh *in_mesh_;

  // Store sub object name for each value.
  std::unordered_map<int, std::string> sub_obj_id_to_name_;
  // Current sub object id of faces; -1 until the first "o" line is written.
  int current_sub_obj_id_;

  // Store material name for each value in material attribute.
  std::unordered_map<int, std::string> material_id_to_name_;
  // Current material id of faces; -1 until the first "usemtl" line.
  int current_material_id_;

  // Output file name, kept only for the duration of EncodeToFile().
  std::string file_name_;
};
+
+} // namespace draco
+
+#endif // DRACO_IO_OBJ_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/obj_encoder_test.cc b/libs/assimp/contrib/draco/src/draco/io/obj_encoder_test.cc
new file mode 100644
index 0000000..4838e56
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/obj_encoder_test.cc
@@ -0,0 +1,110 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/obj_encoder.h"
+
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/file_reader_factory.h"
+#include "draco/io/file_reader_interface.h"
+#include "draco/io/obj_decoder.h"
+
+namespace draco {
+
// Test fixture for ObjEncoder: round-trips meshes through encode + decode
// and compares the result against the original.
class ObjEncoderTest : public ::testing::Test {
 protected:
  // Asserts that two meshes agree on face count, attribute count, and the
  // size of every attribute (values themselves are not compared).
  void CompareMeshes(const Mesh *mesh0, const Mesh *mesh1) {
    ASSERT_EQ(mesh0->num_faces(), mesh1->num_faces());
    ASSERT_EQ(mesh0->num_attributes(), mesh1->num_attributes());
    for (size_t att_id = 0; att_id < mesh0->num_attributes(); ++att_id) {
      ASSERT_EQ(mesh0->attribute(att_id)->size(),
                mesh1->attribute(att_id)->size());
    }
  }

  // Encode a mesh using the ObjEncoder and then decode to verify the encoding.
  // Returns nullptr when either the encode or the decode step fails.
  std::unique_ptr<Mesh> EncodeAndDecodeMesh(const Mesh *mesh) {
    EncoderBuffer encoder_buffer;
    ObjEncoder encoder;
    if (!encoder.EncodeToBuffer(*mesh, &encoder_buffer)) {
      return nullptr;
    }

    DecoderBuffer decoder_buffer;
    decoder_buffer.Init(encoder_buffer.data(), encoder_buffer.size());
    std::unique_ptr<Mesh> decoded_mesh(new Mesh());
    ObjDecoder decoder;
    decoder.set_use_metadata(true);
    if (!decoder.DecodeFromBuffer(&decoder_buffer, decoded_mesh.get()).ok()) {
      return nullptr;
    }
    return decoded_mesh;
  }

  // Loads |file_name| from the test data folder, round-trips it through the
  // encoder, and compares the two meshes.
  void test_encoding(const std::string &file_name) {
    const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name, true));

    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
    ASSERT_GT(mesh->num_faces(), 0);

    const std::unique_ptr<Mesh> decoded_mesh = EncodeAndDecodeMesh(mesh.get());
    CompareMeshes(mesh.get(), decoded_mesh.get());
  }
};
+
// Round-trips a model that uses OBJ sub-objects ("o" groups).
TEST_F(ObjEncoderTest, HasSubObject) { test_encoding("cube_att_sub_o.obj"); }
+
// Round-trips a model with materials and checks the expected asymmetry in
// the material attribute size (see comment below).
TEST_F(ObjEncoderTest, HasMaterial) {
  const std::unique_ptr<Mesh> mesh0(ReadMeshFromTestFile("mat_test.obj", true));
  ASSERT_NE(mesh0, nullptr);
  const std::unique_ptr<Mesh> mesh1 = EncodeAndDecodeMesh(mesh0.get());
  ASSERT_NE(mesh1, nullptr);
  ASSERT_EQ(mesh0->num_faces(), mesh1->num_faces());
  ASSERT_EQ(mesh0->num_attributes(), mesh1->num_attributes());
  // Position attribute should be the same.
  ASSERT_EQ(mesh0->attribute(0)->size(), mesh1->attribute(0)->size());
  // Since |mesh1| is decoded from buffer, it has not material file. So the
  // size of material attribute is the number of materials used in the obj
  // file which is 7. The size of material attribute of |mesh0| decoded from
  // the obj file will be the number of materials defined in the .mtl file.
  ASSERT_EQ(mesh0->attribute(1)->size(), 29);
  ASSERT_EQ(mesh1->attribute(1)->size(), 7);
}
+
TEST_F(ObjEncoderTest, TestObjEncodingAll) {
  // Test decoded mesh from encoded obj file stays the same.
  test_encoding("bunny_norm.obj");
  test_encoding("cube_att.obj");
  test_encoding("cube_att_partial.obj");
  test_encoding("cube_quads.obj");
  test_encoding("cube_subd.obj");
  test_encoding("extra_vertex.obj");
  test_encoding("multiple_isolated_triangles.obj");
  test_encoding("multiple_tetrahedrons.obj");
  test_encoding("one_face_123.obj");
  test_encoding("one_face_312.obj");
  test_encoding("one_face_321.obj");
  test_encoding("sphere.obj");
  test_encoding("test_nm.obj");
  test_encoding("test_nm_trans.obj");
  test_encoding("test_sphere.obj");
  test_encoding("three_faces_123.obj");
  test_encoding("three_faces_312.obj");
  test_encoding("two_faces_123.obj");
  test_encoding("two_faces_312.obj");
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/parser_utils.cc b/libs/assimp/contrib/draco/src/draco/io/parser_utils.cc
new file mode 100644
index 0000000..12afacf
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/parser_utils.cc
@@ -0,0 +1,261 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/parser_utils.h"
+
+#include <algorithm>
+#include <cctype>
+#include <cmath>
+#include <iterator>
+#include <limits>
+
+namespace draco {
+namespace parser {
+
+void SkipCharacters(DecoderBuffer *buffer, const char *skip_chars) {
+ if (skip_chars == nullptr) {
+ return;
+ }
+ const int num_skip_chars = static_cast<int>(strlen(skip_chars));
+ char c;
+ while (buffer->Peek(&c)) {
+ // Check all characters in the pattern.
+ bool skip = false;
+ for (int i = 0; i < num_skip_chars; ++i) {
+ if (c == skip_chars[i]) {
+ skip = true;
+ break;
+ }
+ }
+ if (!skip) {
+ return;
+ }
+ buffer->Advance(1);
+ }
+}
+
// Advances |buffer| past any run of whitespace characters, stopping at the
// first non-whitespace character or at end of buffer.
void SkipWhitespace(DecoderBuffer *buffer) {
  bool end_reached = false;
  while (PeekWhitespace(buffer, &end_reached) && !end_reached) {
    // Skip the whitespace character
    buffer->Advance(1);
  }
}
+
// Returns true iff the next character is whitespace, without consuming it.
// Sets |*end_reached| only at end of buffer; callers must initialize it to
// false themselves, as it is never cleared here.
bool PeekWhitespace(DecoderBuffer *buffer, bool *end_reached) {
  uint8_t c;
  if (!buffer->Peek(&c)) {
    *end_reached = true;
    return false;  // eof reached.
  }
  // Peeking as uint8_t keeps the value non-negative, so isspace() is safe
  // even for bytes >= 0x80.
  if (!isspace(c)) {
    return false;  // Non-whitespace character reached.
  }
  return true;
}
+
// Skips the rest of the current line (including the line terminator).
void SkipLine(DecoderBuffer *buffer) { ParseLine(buffer, nullptr); }
+
// Parses a decimal floating point number (optionally signed, with optional
// fraction and exponent) or one of the literals inf/Inf/nan/NaN. Returns
// false on malformed input; on failure the buffer position is unspecified.
bool ParseFloat(DecoderBuffer *buffer, float *value) {
  // Read optional sign.
  char ch;
  if (!buffer->Peek(&ch)) {
    return false;
  }
  int sign = GetSignValue(ch);
  if (sign != 0) {
    buffer->Advance(1);
  } else {
    sign = 1;
  }

  // Parse integer component. Accumulating in double keeps more precision
  // than float for long digit strings.
  bool have_digits = false;
  double v = 0.0;
  while (buffer->Peek(&ch) && ch >= '0' && ch <= '9') {
    v *= 10.0;
    v += (ch - '0');
    buffer->Advance(1);
    have_digits = true;
  }
  // |ch| still holds the last character Peek reported (when the loop ended
  // at end of buffer, it keeps its previous value, which is never '.').
  if (ch == '.') {
    // Parse fractional component.
    buffer->Advance(1);
    double fraction = 1.0;
    while (buffer->Peek(&ch) && ch >= '0' && ch <= '9') {
      fraction *= 0.1;
      v += (ch - '0') * fraction;
      buffer->Advance(1);
      have_digits = true;
    }
  }

  if (!have_digits) {
    // Check for special constants (inf, nan, ...).
    std::string text;
    if (!ParseString(buffer, &text)) {
      return false;
    }
    if (text == "inf" || text == "Inf") {
      v = std::numeric_limits<double>::infinity();
    } else if (text == "nan" || text == "NaN") {
      v = nan("");
    } else {
      // Invalid string.
      return false;
    }
  } else {
    // Handle exponent if present.
    if (ch == 'e' || ch == 'E') {
      buffer->Advance(1);  // Skip 'e' marker.

      // Parse integer exponent.
      int32_t exponent = 0;
      if (!ParseSignedInt(buffer, &exponent)) {
        return false;
      }

      // Apply exponent scaling to value.
      v *= pow(static_cast<double>(10.0), exponent);
    }
  }

  // Apply the sign and narrow to float only at the very end.
  *value = (sign < 0) ? static_cast<float>(-v) : static_cast<float>(v);
  return true;
}
+
// Parses a decimal integer with an optional leading '-' or '+'.
// Returns false when no digits follow the (optional) sign.
bool ParseSignedInt(DecoderBuffer *buffer, int32_t *value) {
  // Parse any explicit sign and set the appropriate largest magnitude
  // value that can be represented without overflow.
  char ch;
  if (!buffer->Peek(&ch)) {
    return false;
  }
  const int sign = GetSignValue(ch);
  if (sign != 0) {
    buffer->Advance(1);
  }

  // Attempt to parse integer body.
  uint32_t v;
  if (!ParseUnsignedInt(buffer, &v)) {
    return false;
  }
  // Unsigned negation followed by narrowing to int32_t; well-defined for
  // magnitudes up to 2^31 (i.e. INT32_MIN parses correctly), values beyond
  // that are implementation-defined.
  *value = (sign < 0) ? -v : v;
  return true;
}
+
+bool ParseUnsignedInt(DecoderBuffer *buffer, uint32_t *value) {
+ // Parse the number until we run out of digits.
+ uint32_t v = 0;
+ char ch;
+ bool have_digits = false;
+ while (buffer->Peek(&ch) && ch >= '0' && ch <= '9') {
+ v *= 10;
+ v += (ch - '0');
+ buffer->Advance(1);
+ have_digits = true;
+ }
+ if (!have_digits) {
+ return false;
+ }
+ *value = v;
+ return true;
+}
+
// Maps an explicit sign character to its numeric value: '-' -> -1,
// '+' -> +1, anything else (including digits) -> 0 meaning "no sign".
int GetSignValue(char c) {
  switch (c) {
    case '-':
      return -1;
    case '+':
      return 1;
    default:
      return 0;
  }
}
+
// Reads one whitespace-delimited token: skips leading whitespace, then
// accumulates characters until the next whitespace or end of buffer.
// Returns false only when Decode fails mid-token; an empty token at end of
// buffer still returns true with an empty string.
bool ParseString(DecoderBuffer *buffer, std::string *out_string) {
  out_string->clear();
  SkipWhitespace(buffer);
  bool end_reached = false;
  while (!PeekWhitespace(buffer, &end_reached) && !end_reached) {
    char c;
    if (!buffer->Decode(&c)) {
      return false;
    }
    *out_string += c;
  }
  return true;
}
+
// Reads one line into |out_string| (which may be null to just skip it) and
// consumes the entire line-terminator run, so mixed "\r\n"/"\n" endings are
// handled. The buffer is left positioned at the first character of the next
// line.
void ParseLine(DecoderBuffer *buffer, std::string *out_string) {
  if (out_string) {
    out_string->clear();
  }
  char c;
  bool delim_reached = false;
  while (buffer->Peek(&c)) {
    // Check if |c| is a delimeter. We want to parse all delimeters until we
    // reach a non-delimeter symbol. (E.g. we want to ignore '\r\n' at the end
    // of the line).
    const bool is_delim = (c == '\r' || c == '\n');

    // If |c| is a delimeter or it is a non-delimeter symbol before any
    // delimeter was found, we advance the buffer to the next character.
    if (is_delim || !delim_reached) {
      buffer->Advance(1);
    }

    if (is_delim) {
      // Mark that we found a delimeter symbol.
      delim_reached = true;
      continue;
    }
    if (delim_reached) {
      // We reached a non-delimeter symbol after a delimeter was already found.
      // Stop the parsing.
      return;
    }
    // Otherwise we put the non-delimeter symbol into the output string.
    if (out_string) {
      out_string->push_back(c);
    }
  }
}
+
// Consumes one line from |buffer| and returns a sub-buffer viewing it.
// The returned buffer aliases |buffer|'s underlying storage (no copy), so
// it is only valid while the parent buffer's data stays alive. The span
// includes any '\r'/'\n' terminator bytes that were consumed.
DecoderBuffer ParseLineIntoDecoderBuffer(DecoderBuffer *buffer) {
  const char *const head = buffer->data_head();
  char c;
  while (buffer->Peek(&c)) {
    // Skip the character.
    buffer->Advance(1);
    if (c == '\n') {
      break;  // End of the line reached.
    }
    if (c == '\r') {
      continue;  // Ignore extra line ending characters.
    }
  }
  DecoderBuffer out_buffer;
  out_buffer.Init(head, buffer->data_head() - head);
  return out_buffer;
}
+
// Returns a copy of |str| with every character lower-cased via std::tolower
// (C locale semantics; ASCII-oriented).
std::string ToLower(const std::string &str) {
  std::string out;
  out.reserve(str.size());
  // Route through unsigned char: passing a negative char value (any byte
  // >= 0x80 on platforms with signed char) straight to tolower() is
  // undefined behavior.
  std::transform(str.begin(), str.end(), std::back_inserter(out),
                 [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
  return out;
}
+
+} // namespace parser
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/parser_utils.h b/libs/assimp/contrib/draco/src/draco/io/parser_utils.h
new file mode 100644
index 0000000..b83cd93
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/parser_utils.h
@@ -0,0 +1,66 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_PARSER_UTILS_H_
+#define DRACO_IO_PARSER_UTILS_H_
+
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+namespace parser {
+
+// Skips to first character not included in |skip_chars|.
+void SkipCharacters(DecoderBuffer *buffer, const char *skip_chars);
+
+// Skips any whitespace until a regular character is reached.
+void SkipWhitespace(DecoderBuffer *buffer);
+
+// Returns true if the next character is a whitespace.
+// |end_reached| is set to true when the end of the stream is reached.
+bool PeekWhitespace(DecoderBuffer *buffer, bool *end_reached);
+
+// Skips all characters until the end of the line.
+void SkipLine(DecoderBuffer *buffer);
+
+// Parses signed floating point number or returns false on error.
+bool ParseFloat(DecoderBuffer *buffer, float *value);
+
+// Parses a signed integer (can be preceded by '-' or '+' characters.
+bool ParseSignedInt(DecoderBuffer *buffer, int32_t *value);
+
+// Parses an unsigned integer. It cannot be preceded by '-' or '+'
+// characters.
+bool ParseUnsignedInt(DecoderBuffer *buffer, uint32_t *value);
+
+// Returns -1 if c == '-'.
+// Returns +1 if c == '+'.
+// Returns 0 otherwise.
+int GetSignValue(char c);
+
+// Parses a string until a whitespace or end of file is reached.
+bool ParseString(DecoderBuffer *buffer, std::string *out_string);
+
+// Parses the entire line into the buffer (excluding the new line characters).
+void ParseLine(DecoderBuffer *buffer, std::string *out_string);
+
+// Parses line and stores into a new decoder buffer.
+DecoderBuffer ParseLineIntoDecoderBuffer(DecoderBuffer *buffer);
+
+// Returns a string with all characters converted to lower case.
+std::string ToLower(const std::string &str);
+
+} // namespace parser
+} // namespace draco
+
+#endif // DRACO_IO_PARSER_UTILS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_decoder.cc b/libs/assimp/contrib/draco/src/draco/io/ply_decoder.cc
new file mode 100644
index 0000000..b78c056
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_decoder.cc
@@ -0,0 +1,320 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/ply_decoder.h"
+
+#include "draco/core/macros.h"
+#include "draco/core/status.h"
+#include "draco/io/file_utils.h"
+#include "draco/io/ply_property_reader.h"
+
+namespace draco {
+namespace {
+// Returns the total number of triangles produced by fan-triangulating every
+// polygon in |face_element|: a polygon with N >= 3 vertices yields N - 2
+// triangles. Polygons with fewer than three vertices are ignored.
+int64_t CountNumTriangles(const PlyElement &face_element,
+                          const PlyProperty &vertex_indices) {
+  int64_t num_triangles = 0;
+  for (int i = 0; i < face_element.num_entries(); ++i) {
+    const int64_t list_size = vertex_indices.GetListEntryNumValues(i);
+    if (list_size < 3) {
+      // Correctly encoded ply files don't have less than three vertices.
+      continue;
+    }
+    num_triangles += list_size - 2;
+  }
+  return num_triangles;
+}
+}  // namespace
+
+// Creates a decoder with no output geometry attached; the output pointers
+// are set later by the DecodeFrom*() entry points.
+PlyDecoder::PlyDecoder() : out_mesh_(nullptr), out_point_cloud_(nullptr) {}
+
+// Decodes a PLY file from |file_name| into |out_mesh|. Delegates to the
+// PointCloud overload; setting |out_mesh_| first makes DecodeInternal()
+// also decode the face (connectivity) data.
+Status PlyDecoder::DecodeFromFile(const std::string &file_name,
+                                  Mesh *out_mesh) {
+  out_mesh_ = out_mesh;
+  return DecodeFromFile(file_name, static_cast<PointCloud *>(out_mesh));
+}
+
+// Decodes a PLY file from |file_name| into |out_point_cloud| (attributes
+// only, no connectivity). The whole file is read into memory first; |data|
+// stays alive for the full decode since DecodeFromBuffer() completes before
+// this function returns.
+Status PlyDecoder::DecodeFromFile(const std::string &file_name,
+                                  PointCloud *out_point_cloud) {
+  std::vector<char> data;
+  if (!ReadFileToBuffer(file_name, &data)) {
+    return Status(Status::DRACO_ERROR, "Unable to read input file.");
+  }
+  buffer_.Init(data.data(), data.size());
+  return DecodeFromBuffer(&buffer_, out_point_cloud);
+}
+
+// Decodes a PLY mesh from |buffer|. Connectivity is decoded because
+// |out_mesh_| is set before delegating to the PointCloud overload.
+Status PlyDecoder::DecodeFromBuffer(DecoderBuffer *buffer, Mesh *out_mesh) {
+  out_mesh_ = out_mesh;
+  return DecodeFromBuffer(buffer, static_cast<PointCloud *>(out_mesh));
+}
+
+// Decodes a PLY point cloud from |buffer|. The decoder keeps its own view
+// over the remaining bytes of |buffer| (no copy), so the caller's data must
+// outlive the call.
+Status PlyDecoder::DecodeFromBuffer(DecoderBuffer *buffer,
+                                    PointCloud *out_point_cloud) {
+  out_point_cloud_ = out_point_cloud;
+  buffer_.Init(buffer->data_head(), buffer->remaining_size());
+  return DecodeInternal();
+}
+
+// Runs the actual decoding: parses the PLY header and element data via
+// PlyReader, then converts the "face" element (mesh targets only) and the
+// "vertex" element into the output geometry. Attribute deduplication is
+// applied only when a mesh with at least one face was produced.
+Status PlyDecoder::DecodeInternal() {
+  PlyReader ply_reader;
+  DRACO_RETURN_IF_ERROR(ply_reader.Read(buffer()));
+  // First, decode the connectivity data.
+  if (out_mesh_)
+    DRACO_RETURN_IF_ERROR(DecodeFaceData(ply_reader.GetElementByName("face")));
+  // Decode all attributes.
+  DRACO_RETURN_IF_ERROR(
+      DecodeVertexData(ply_reader.GetElementByName("vertex")));
+  // In case there are no faces this is just a point cloud which does
+  // not require deduplication.
+  if (out_mesh_ && out_mesh_->num_faces() != 0) {
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+    if (!out_point_cloud_->DeduplicateAttributeValues()) {
+      return Status(Status::DRACO_ERROR,
+                    "Could not deduplicate attribute values");
+    }
+#endif
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+    out_point_cloud_->DeduplicatePointIds();
+#endif
+  }
+  return OkStatus();
+}
+
+// Converts the "face" element into triangles on |out_mesh_|. Each polygon is
+// fan-triangulated around its first vertex (convexity is assumed); polygons
+// with fewer than three indices are skipped.
+Status PlyDecoder::DecodeFaceData(const PlyElement *face_element) {
+  // NOTE(review): the original comment here ("We accept point clouds now.")
+  // contradicts the error below — a null face element IS rejected. This path
+  // is only reached when a Mesh output was requested (see DecodeInternal()).
+  if (face_element == nullptr) {
+    return Status(Status::INVALID_PARAMETER, "face_element is null");
+  }
+  const PlyProperty *vertex_indices =
+      face_element->GetPropertyByName("vertex_indices");
+  if (vertex_indices == nullptr) {
+    // The property name may be named either "vertex_indices" or "vertex_index".
+    vertex_indices = face_element->GetPropertyByName("vertex_index");
+  }
+  if (vertex_indices == nullptr || !vertex_indices->is_list()) {
+    return Status(Status::DRACO_ERROR, "No faces defined");
+  }
+
+  // Allocate faces (upper bound; trimmed below after skipping degenerates).
+  out_mesh_->SetNumFaces(CountNumTriangles(*face_element, *vertex_indices));
+  const int64_t num_polygons = face_element->num_entries();
+
+  PlyPropertyReader<PointIndex::ValueType> vertex_index_reader(vertex_indices);
+  Mesh::Face face;
+  FaceIndex face_index(0);
+  for (int i = 0; i < num_polygons; ++i) {
+    const int64_t list_offset = vertex_indices->GetListEntryOffset(i);
+    const int64_t list_size = vertex_indices->GetListEntryNumValues(i);
+    if (list_size < 3) {
+      continue;  // All invalid polygons are skipped.
+    }
+
+    // Triangulate polygon assuming the polygon is convex.
+    const int64_t num_triangles = list_size - 2;
+    face[0] = vertex_index_reader.ReadValue(static_cast<int>(list_offset));
+    for (int64_t ti = 0; ti < num_triangles; ++ti) {
+      for (int64_t c = 1; c < 3; ++c) {
+        face[c] = vertex_index_reader.ReadValue(
+            static_cast<int>(list_offset + ti + c));
+      }
+      out_mesh_->SetFace(face_index, face);
+      face_index++;
+    }
+  }
+  // Trim the face count to the number actually written.
+  out_mesh_->SetNumFaces(face_index.value());
+  return OkStatus();
+}
+
+// Reads one value per property from each of the |properties| (all converted
+// to DataTypeT) and stores them as one attribute entry per vertex, i.e. the
+// attribute has |properties.size()| components. Always returns true.
+template <typename DataTypeT>
+bool PlyDecoder::ReadPropertiesToAttribute(
+    const std::vector<const PlyProperty *> &properties,
+    PointAttribute *attribute, int num_vertices) {
+  std::vector<std::unique_ptr<PlyPropertyReader<DataTypeT>>> readers;
+  readers.reserve(properties.size());
+  for (int prop = 0; prop < properties.size(); ++prop) {
+    readers.push_back(std::unique_ptr<PlyPropertyReader<DataTypeT>>(
+        new PlyPropertyReader<DataTypeT>(properties[prop])));
+  }
+  // Scratch buffer holding one multi-component value at a time.
+  std::vector<DataTypeT> memory(properties.size());
+  for (PointIndex::ValueType i = 0; i < static_cast<uint32_t>(num_vertices);
+       ++i) {
+    for (int prop = 0; prop < properties.size(); ++prop) {
+      memory[prop] = readers[prop]->ReadValue(i);
+    }
+    attribute->SetAttributeValue(AttributeValueIndex(i), memory.data());
+  }
+  return true;
+}
+
+// Decodes per-vertex attributes from the "vertex" element. Currently
+// supported: float32/int32 x,y,z positions (required), float32 nx,ny,nz
+// normals (optional) and uint8 red,green,blue,alpha colors (optional).
+// All other properties are ignored.
+Status PlyDecoder::DecodeVertexData(const PlyElement *vertex_element) {
+  if (vertex_element == nullptr) {
+    return Status(Status::INVALID_PARAMETER, "vertex_element is null");
+  }
+  // TODO(b/34330853): For now, try to load x,y,z vertices, red,green,blue,alpha
+  // colors, and nx,ny,nz normals. We need to add other properties later.
+  const PlyProperty *const x_prop = vertex_element->GetPropertyByName("x");
+  const PlyProperty *const y_prop = vertex_element->GetPropertyByName("y");
+  const PlyProperty *const z_prop = vertex_element->GetPropertyByName("z");
+  if (!x_prop || !y_prop || !z_prop) {
+    // Currently, we require 3 vertex coordinates (this should be generalized
+    // later on).
+    return Status(Status::INVALID_PARAMETER, "x, y, or z property is missing");
+  }
+  const PointIndex::ValueType num_vertices = vertex_element->num_entries();
+  out_point_cloud_->set_num_points(num_vertices);
+  // Decode vertex positions.
+  {
+    // All properties must have the same type.
+    if (x_prop->data_type() != y_prop->data_type() ||
+        y_prop->data_type() != z_prop->data_type()) {
+      return Status(Status::INVALID_PARAMETER,
+                    "x, y, and z properties must have the same type");
+    }
+    // TODO(ostava): For now assume the position types are float32 or int32.
+    const DataType dt = x_prop->data_type();
+    if (dt != DT_FLOAT32 && dt != DT_INT32) {
+      return Status(Status::INVALID_PARAMETER,
+                    "x, y, and z properties must be of type float32 or int32");
+    }
+
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::POSITION, nullptr, 3, dt, false,
+            DataTypeLength(dt) * 3, 0);
+    const int att_id = out_point_cloud_->AddAttribute(va, true, num_vertices);
+    std::vector<const PlyProperty *> properties;
+    properties.push_back(x_prop);
+    properties.push_back(y_prop);
+    properties.push_back(z_prop);
+    if (dt == DT_FLOAT32) {
+      ReadPropertiesToAttribute<float>(
+          properties, out_point_cloud_->attribute(att_id), num_vertices);
+    } else if (dt == DT_INT32) {
+      ReadPropertiesToAttribute<int32_t>(
+          properties, out_point_cloud_->attribute(att_id), num_vertices);
+    }
+  }
+
+  // Decode normals if present. Normals that are not float32 are silently
+  // ignored (no error is returned).
+  const PlyProperty *const n_x_prop = vertex_element->GetPropertyByName("nx");
+  const PlyProperty *const n_y_prop = vertex_element->GetPropertyByName("ny");
+  const PlyProperty *const n_z_prop = vertex_element->GetPropertyByName("nz");
+  if (n_x_prop != nullptr && n_y_prop != nullptr && n_z_prop != nullptr) {
+    // For now, all normal properties must be set and of type float32
+    if (n_x_prop->data_type() == DT_FLOAT32 &&
+        n_y_prop->data_type() == DT_FLOAT32 &&
+        n_z_prop->data_type() == DT_FLOAT32) {
+      PlyPropertyReader<float> x_reader(n_x_prop);
+      PlyPropertyReader<float> y_reader(n_y_prop);
+      PlyPropertyReader<float> z_reader(n_z_prop);
+      GeometryAttribute va;
+      va.Init(GeometryAttribute::NORMAL, nullptr, 3, DT_FLOAT32, false,
+              sizeof(float) * 3, 0);
+      const int att_id = out_point_cloud_->AddAttribute(va, true, num_vertices);
+      for (PointIndex::ValueType i = 0; i < num_vertices; ++i) {
+        std::array<float, 3> val;
+        val[0] = x_reader.ReadValue(i);
+        val[1] = y_reader.ReadValue(i);
+        val[2] = z_reader.ReadValue(i);
+        out_point_cloud_->attribute(att_id)->SetAttributeValue(
+            AttributeValueIndex(i), &val[0]);
+      }
+    }
+  }
+
+  // Decode color data if present. The color attribute gets one component per
+  // channel that is actually present in the file, filled in r,g,b,a order.
+  int num_colors = 0;
+  const PlyProperty *const r_prop = vertex_element->GetPropertyByName("red");
+  const PlyProperty *const g_prop = vertex_element->GetPropertyByName("green");
+  const PlyProperty *const b_prop = vertex_element->GetPropertyByName("blue");
+  const PlyProperty *const a_prop = vertex_element->GetPropertyByName("alpha");
+  if (r_prop) {
+    ++num_colors;
+  }
+  if (g_prop) {
+    ++num_colors;
+  }
+  if (b_prop) {
+    ++num_colors;
+  }
+  if (a_prop) {
+    ++num_colors;
+  }
+
+  if (num_colors) {
+    std::vector<std::unique_ptr<PlyPropertyReader<uint8_t>>> color_readers;
+    const PlyProperty *p;
+    if (r_prop) {
+      p = r_prop;
+      // TODO(ostava): For now ensure the data type of all components is uint8.
+      DRACO_DCHECK_EQ(true, p->data_type() == DT_UINT8);
+      if (p->data_type() != DT_UINT8) {
+        return Status(Status::INVALID_PARAMETER,
+                      "Type of 'red' property must be uint8");
+      }
+      color_readers.push_back(std::unique_ptr<PlyPropertyReader<uint8_t>>(
+          new PlyPropertyReader<uint8_t>(p)));
+    }
+    if (g_prop) {
+      p = g_prop;
+      // TODO(ostava): For now ensure the data type of all components is uint8.
+      DRACO_DCHECK_EQ(true, p->data_type() == DT_UINT8);
+      if (p->data_type() != DT_UINT8) {
+        return Status(Status::INVALID_PARAMETER,
+                      "Type of 'green' property must be uint8");
+      }
+      color_readers.push_back(std::unique_ptr<PlyPropertyReader<uint8_t>>(
+          new PlyPropertyReader<uint8_t>(p)));
+    }
+    if (b_prop) {
+      p = b_prop;
+      // TODO(ostava): For now ensure the data type of all components is uint8.
+      DRACO_DCHECK_EQ(true, p->data_type() == DT_UINT8);
+      if (p->data_type() != DT_UINT8) {
+        return Status(Status::INVALID_PARAMETER,
+                      "Type of 'blue' property must be uint8");
+      }
+      color_readers.push_back(std::unique_ptr<PlyPropertyReader<uint8_t>>(
+          new PlyPropertyReader<uint8_t>(p)));
+    }
+    if (a_prop) {
+      p = a_prop;
+      // TODO(ostava): For now ensure the data type of all components is uint8.
+      DRACO_DCHECK_EQ(true, p->data_type() == DT_UINT8);
+      if (p->data_type() != DT_UINT8) {
+        return Status(Status::INVALID_PARAMETER,
+                      "Type of 'alpha' property must be uint8");
+      }
+      color_readers.push_back(std::unique_ptr<PlyPropertyReader<uint8_t>>(
+          new PlyPropertyReader<uint8_t>(p)));
+    }
+
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::COLOR, nullptr, num_colors, DT_UINT8, true,
+            sizeof(uint8_t) * num_colors, 0);
+    const int32_t att_id =
+        out_point_cloud_->AddAttribute(va, true, num_vertices);
+    for (PointIndex::ValueType i = 0; i < num_vertices; ++i) {
+      std::array<uint8_t, 4> val;
+      for (int j = 0; j < num_colors; j++) {
+        val[j] = color_readers[j]->ReadValue(i);
+      }
+      out_point_cloud_->attribute(att_id)->SetAttributeValue(
+          AttributeValueIndex(i), &val[0]);
+    }
+  }
+
+  return OkStatus();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_decoder.h b/libs/assimp/contrib/draco/src/draco/io/ply_decoder.h
new file mode 100644
index 0000000..db1e480
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_decoder.h
@@ -0,0 +1,69 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_PLY_DECODER_H_
+#define DRACO_IO_PLY_DECODER_H_
+
+#include <string>
+
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/status.h"
+#include "draco/draco_features.h"
+#include "draco/io/ply_reader.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Decodes a PLY file into draco::Mesh (or draco::PointCloud if the
+// connectivity data is not needed).
+// TODO(b/34330853): The current implementation assumes that the input vertices
+// are defined with x, y, z properties. The decoder also reads uint8 red, green,
+// blue, alpha color information, float32 defined as nx, ny, nz properties, but
+// all other attributes are ignored for now.
+class PlyDecoder {
+ public:
+  PlyDecoder();
+
+  // Decodes a ply file stored in the input file.
+  // (The Mesh overloads also decode connectivity; the PointCloud overloads
+  // decode attributes only.)
+  Status DecodeFromFile(const std::string &file_name, Mesh *out_mesh);
+  Status DecodeFromFile(const std::string &file_name,
+                        PointCloud *out_point_cloud);
+
+  // Decodes a ply from the data in |buffer|. The buffer's data must outlive
+  // the call — the decoder reads it in place.
+  Status DecodeFromBuffer(DecoderBuffer *buffer, Mesh *out_mesh);
+  Status DecodeFromBuffer(DecoderBuffer *buffer, PointCloud *out_point_cloud);
+
+ protected:
+  // Shared decoding pipeline used by all public entry points.
+  Status DecodeInternal();
+  DecoderBuffer *buffer() { return &buffer_; }
+
+ private:
+  // Triangulates the "face" element into |out_mesh_| (mesh decoding only).
+  Status DecodeFaceData(const PlyElement *face_element);
+  // Decodes positions / normals / colors from the "vertex" element.
+  Status DecodeVertexData(const PlyElement *vertex_element);
+
+  // Reads |properties| (converted to DataTypeT) into a multi-component
+  // |attribute|, one entry per vertex.
+  template <typename DataTypeT>
+  bool ReadPropertiesToAttribute(
+      const std::vector<const PlyProperty *> &properties,
+      PointAttribute *attribute, int num_vertices);
+
+  DecoderBuffer buffer_;
+
+  // Data structure that stores the decoded data. |out_point_cloud_| must be
+  // always set but |out_mesh_| is optional.
+  Mesh *out_mesh_;
+  PointCloud *out_point_cloud_;
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_PLY_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_decoder_test.cc b/libs/assimp/contrib/draco/src/draco/io/ply_decoder_test.cc
new file mode 100644
index 0000000..97977c8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_decoder_test.cc
@@ -0,0 +1,93 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/ply_decoder.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+
+namespace draco {
+
+// Test fixture with helpers for decoding PLY test files as either a Mesh or
+// a PointCloud.
+class PlyDecoderTest : public ::testing::Test {
+ protected:
+  // Decodes |file_name| from the test data directory into a new Geometry
+  // (Mesh or PointCloud). Returns nullptr when decoding fails.
+  template <class Geometry>
+  std::unique_ptr<Geometry> DecodePly(const std::string &file_name) const {
+    const std::string path = GetTestFileFullPath(file_name);
+    PlyDecoder decoder;
+    std::unique_ptr<Geometry> geometry(new Geometry());
+    Status status = decoder.DecodeFromFile(path, geometry.get());
+    if (!status.ok()) {
+      LOG(ERROR) << "Failed to decode " << file_name << ": " << status;
+      return nullptr;
+    }
+    return geometry;
+  }
+
+  // Decodes |file_name| and verifies the expected face / point counts.
+  // When |num_faces| is 0 the input is treated as a point cloud and mesh
+  // decoding is skipped. The decoded mesh is returned through |out_mesh|
+  // when that pointer is non-null.
+  void test_decoding(const std::string &file_name, int num_faces,
+                     uint32_t num_points, std::unique_ptr<Mesh> *out_mesh) {
+    // Don't test mesh decoding when the input is point cloud.
+    if (num_faces > 0) {
+      std::unique_ptr<Mesh> mesh(DecodePly<Mesh>(file_name));
+      ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+      ASSERT_EQ(mesh->num_faces(), num_faces);
+      if (out_mesh) {
+        *out_mesh = std::move(mesh);
+      }
+    }
+
+    const std::unique_ptr<PointCloud> pc(DecodePly<PointCloud>(file_name));
+    ASSERT_NE(pc, nullptr) << "Failed to load test model " << file_name;
+    ASSERT_EQ(pc->num_points(), num_points);
+  }
+  // Overload that only checks the file decodes to a non-empty mesh and
+  // point cloud, without pinning exact counts.
+  void test_decoding(const std::string &file_name) {
+    const std::unique_ptr<Mesh> mesh(DecodePly<Mesh>(file_name));
+    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
+    ASSERT_GT(mesh->num_faces(), 0);
+
+    const std::unique_ptr<PointCloud> pc(DecodePly<PointCloud>(file_name));
+    ASSERT_NE(pc, nullptr) << "Failed to load test model " << file_name;
+    ASSERT_GT(pc->num_points(), 0);
+  }
+};
+
+// Verifies exact face/point counts for a binary PLY with positions + colors.
+TEST_F(PlyDecoderTest, TestPlyDecoding) {
+  const std::string file_name = "test_pos_color.ply";
+  test_decoding(file_name, 224, 114, nullptr);
+}
+
+// Verifies that normals are decoded and deduplicated for a cube model.
+TEST_F(PlyDecoderTest, TestPlyNormals) {
+  const std::string file_name = "cube_att.ply";
+  std::unique_ptr<Mesh> mesh;
+  test_decoding(file_name, 12, 3 * 8, &mesh);
+  ASSERT_NE(mesh, nullptr);
+  const int att_id = mesh->GetNamedAttributeId(GeometryAttribute::NORMAL);
+  ASSERT_GE(att_id, 0);
+  const PointAttribute *const att = mesh->attribute(att_id);
+  ASSERT_EQ(att->size(), 6);  // 6 unique normal values.
+}
+
+// Smoke test: every PLY file currently in the test folder must decode.
+TEST_F(PlyDecoderTest, TestPlyDecodingAll) {
+  // test if we can read all ply that are currently in test folder.
+  test_decoding("bun_zipper.ply");
+  // test_decoding("cube_att.ply"); // tested
+  test_decoding("test_extra_whitespace.ply");
+  test_decoding("test_more_datatypes.ply");
+  test_decoding("test_pos_color_ascii.ply");
+  test_decoding("int_point_cloud.ply", 0, 16, nullptr);
+  // test_decoding("test_pos_color.ply"); // tested
+  test_decoding("cube_quads.ply");
+  test_decoding("Box.ply");
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_encoder.cc b/libs/assimp/contrib/draco/src/draco/io/ply_encoder.cc
new file mode 100644
index 0000000..2f6a1a2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_encoder.cc
@@ -0,0 +1,211 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/ply_encoder.h"
+
+#include <memory>
+#include <sstream>
+
+#include "draco/io/file_writer_factory.h"
+#include "draco/io/file_writer_interface.h"
+
+namespace draco {
+
+// Creates an encoder with no input geometry or output buffer attached; all
+// pointers are bound per-call by the EncodeTo*() methods.
+PlyEncoder::PlyEncoder()
+    : out_buffer_(nullptr), in_point_cloud_(nullptr), in_mesh_(nullptr) {}
+
+// Encodes |pc| to PLY and writes the result to |file_name|. Returns false
+// when the file cannot be opened or encoding fails.
+bool PlyEncoder::EncodeToFile(const PointCloud &pc,
+                              const std::string &file_name) {
+  std::unique_ptr<FileWriterInterface> file =
+      FileWriterFactory::OpenWriter(file_name);
+  if (!file) {
+    return false;  // File couldn't be opened.
+  }
+  // Encode the mesh into a buffer.
+  EncoderBuffer buffer;
+  if (!EncodeToBuffer(pc, &buffer)) {
+    return false;
+  }
+  // Write the buffer into the file.
+  // NOTE(review): the result of Write() is not checked here — a failed disk
+  // write still returns true. Verify against FileWriterInterface's contract.
+  file->Write(buffer.data(), buffer.size());
+  return true;
+}
+
+// Encodes |mesh| (connectivity + attributes) to a PLY file. Setting
+// |in_mesh_| first makes EncodeInternal() emit the face element as well.
+bool PlyEncoder::EncodeToFile(const Mesh &mesh, const std::string &file_name) {
+  in_mesh_ = &mesh;
+  return EncodeToFile(static_cast<const PointCloud &>(mesh), file_name);
+}
+
+// Encodes |pc| into |out_buffer|. The input/output pointers are cleared via
+// ExitAndCleanup() on both the success and failure paths.
+bool PlyEncoder::EncodeToBuffer(const PointCloud &pc,
+                                EncoderBuffer *out_buffer) {
+  in_point_cloud_ = &pc;
+  out_buffer_ = out_buffer;
+  if (!EncodeInternal()) {
+    return ExitAndCleanup(false);
+  }
+  return ExitAndCleanup(true);
+}
+
+// Encodes |mesh| (including faces) into |out_buffer|.
+bool PlyEncoder::EncodeToBuffer(const Mesh &mesh, EncoderBuffer *out_buffer) {
+  in_mesh_ = &mesh;
+  return EncodeToBuffer(static_cast<const PointCloud &>(mesh), out_buffer);
+}
+// Writes the PLY header followed by binary little-endian vertex data (and
+// face data when encoding a mesh). Requires a POSITION attribute; normals
+// are emitted only when 3-component, texture coordinates only when
+// 2-component (written per-corner in the face element's property list).
+bool PlyEncoder::EncodeInternal() {
+  // Write PLY header.
+  // TODO(ostava): Currently works only for xyz positions and rgb(a) colors.
+  std::stringstream out;
+  out << "ply" << std::endl;
+  out << "format binary_little_endian 1.0" << std::endl;
+  out << "element vertex " << in_point_cloud_->num_points() << std::endl;
+
+  const int pos_att_id =
+      in_point_cloud_->GetNamedAttributeId(GeometryAttribute::POSITION);
+  int normal_att_id =
+      in_point_cloud_->GetNamedAttributeId(GeometryAttribute::NORMAL);
+  int tex_coord_att_id =
+      in_point_cloud_->GetNamedAttributeId(GeometryAttribute::TEX_COORD);
+  const int color_att_id =
+      in_point_cloud_->GetNamedAttributeId(GeometryAttribute::COLOR);
+
+  // Positions are mandatory.
+  if (pos_att_id < 0) {
+    return false;
+  }
+
+  // Ensure normals are 3 component. Don't encode them otherwise.
+  if (normal_att_id >= 0 &&
+      in_point_cloud_->attribute(normal_att_id)->num_components() != 3) {
+    normal_att_id = -1;
+  }
+
+  // Ensure texture coordinates have only 2 components. Don't encode them
+  // otherwise. TODO(ostava): Add support for 3 component normals (uvw).
+  if (tex_coord_att_id >= 0 &&
+      in_point_cloud_->attribute(tex_coord_att_id)->num_components() != 2) {
+    tex_coord_att_id = -1;
+  }
+
+  out << "property " << GetAttributeDataType(pos_att_id) << " x" << std::endl;
+  out << "property " << GetAttributeDataType(pos_att_id) << " y" << std::endl;
+  out << "property " << GetAttributeDataType(pos_att_id) << " z" << std::endl;
+  if (normal_att_id >= 0) {
+    out << "property " << GetAttributeDataType(normal_att_id) << " nx"
+        << std::endl;
+    out << "property " << GetAttributeDataType(normal_att_id) << " ny"
+        << std::endl;
+    out << "property " << GetAttributeDataType(normal_att_id) << " nz"
+        << std::endl;
+  }
+  if (color_att_id >= 0) {
+    // Emit one channel declaration per color component, in r,g,b,a order.
+    const auto *const attribute = in_point_cloud_->attribute(color_att_id);
+    if (attribute->num_components() > 0) {
+      out << "property " << GetAttributeDataType(color_att_id) << " red"
+          << std::endl;
+    }
+    if (attribute->num_components() > 1) {
+      out << "property " << GetAttributeDataType(color_att_id) << " green"
+          << std::endl;
+    }
+    if (attribute->num_components() > 2) {
+      out << "property " << GetAttributeDataType(color_att_id) << " blue"
+          << std::endl;
+    }
+    if (attribute->num_components() > 3) {
+      out << "property " << GetAttributeDataType(color_att_id) << " alpha"
+          << std::endl;
+    }
+  }
+  if (in_mesh_) {
+    out << "element face " << in_mesh_->num_faces() << std::endl;
+    out << "property list uchar int vertex_indices" << std::endl;
+    if (tex_coord_att_id >= 0) {
+      // Texture coordinates are usually encoded in the property list (one value
+      // per corner).
+      out << "property list uchar " << GetAttributeDataType(tex_coord_att_id)
+          << " texcoord" << std::endl;
+    }
+  }
+  out << "end_header" << std::endl;
+
+  // Not very efficient but the header should be small so just copy the stream
+  // to a string.
+  const std::string header_str = out.str();
+  buffer()->Encode(header_str.data(), header_str.length());
+
+  // Store point attributes.
+  for (PointIndex v(0); v < in_point_cloud_->num_points(); ++v) {
+    const auto *const pos_att = in_point_cloud_->attribute(pos_att_id);
+    buffer()->Encode(pos_att->GetAddress(pos_att->mapped_index(v)),
+                     pos_att->byte_stride());
+    if (normal_att_id >= 0) {
+      const auto *const normal_att = in_point_cloud_->attribute(normal_att_id);
+      buffer()->Encode(normal_att->GetAddress(normal_att->mapped_index(v)),
+                       normal_att->byte_stride());
+    }
+    if (color_att_id >= 0) {
+      const auto *const color_att = in_point_cloud_->attribute(color_att_id);
+      buffer()->Encode(color_att->GetAddress(color_att->mapped_index(v)),
+                       color_att->byte_stride());
+    }
+  }
+
+  if (in_mesh_) {
+    // Write face data.
+    for (FaceIndex i(0); i < in_mesh_->num_faces(); ++i) {
+      // Write the number of face indices (always 3).
+      buffer()->Encode(static_cast<uint8_t>(3));
+
+      const auto &f = in_mesh_->face(i);
+      buffer()->Encode(f[0]);
+      buffer()->Encode(f[1]);
+      buffer()->Encode(f[2]);
+
+      if (tex_coord_att_id >= 0) {
+        // Two coordinates for every corner -> 6.
+        buffer()->Encode(static_cast<uint8_t>(6));
+
+        const auto *const tex_att =
+            in_point_cloud_->attribute(tex_coord_att_id);
+        for (int c = 0; c < 3; ++c) {
+          buffer()->Encode(tex_att->GetAddress(tex_att->mapped_index(f[c])),
+                           tex_att->byte_stride());
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Clears the per-call state (input geometry and output buffer pointers) and
+// forwards |return_value| so callers can `return ExitAndCleanup(ok);`.
+bool PlyEncoder::ExitAndCleanup(bool return_value) {
+  in_mesh_ = nullptr;
+  in_point_cloud_ = nullptr;
+  out_buffer_ = nullptr;
+  return return_value;
+}
+
+// Maps the data type of attribute |attribute| to its PLY type name.
+// Returns nullptr for unsupported types (which a caller would stream as the
+// literal text "0" via operator<< on a null char* — undefined behavior in
+// practice; unsupported attributes should be filtered out before this).
+const char *PlyEncoder::GetAttributeDataType(int attribute) {
+  // TODO(ostava): Add support for more types.
+  switch (in_point_cloud_->attribute(attribute)->data_type()) {
+    case DT_FLOAT32:
+      return "float";
+    case DT_UINT8:
+      return "uchar";
+    case DT_INT32:
+      return "int";
+    default:
+      break;
+  }
+  return nullptr;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_encoder.h b/libs/assimp/contrib/draco/src/draco/io/ply_encoder.h
new file mode 100644
index 0000000..242bbd6
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_encoder.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_PLY_ENCODER_H_
+#define DRACO_IO_PLY_ENCODER_H_
+
+#include "draco/core/encoder_buffer.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Class for encoding draco::Mesh or draco::PointCloud into the PLY file format.
+// Class for encoding draco::Mesh or draco::PointCloud into the PLY file
+// format (binary little-endian).
+class PlyEncoder {
+ public:
+  PlyEncoder();
+
+  // Encodes the mesh or a point cloud and saves it into a file.
+  // Returns false when either the encoding failed or when the file couldn't be
+  // opened.
+  bool EncodeToFile(const PointCloud &pc, const std::string &file_name);
+  bool EncodeToFile(const Mesh &mesh, const std::string &file_name);
+
+  // Encodes the mesh or the point cloud into a buffer.
+  bool EncodeToBuffer(const PointCloud &pc, EncoderBuffer *out_buffer);
+  bool EncodeToBuffer(const Mesh &mesh, EncoderBuffer *out_buffer);
+
+ protected:
+  // Emits the header and geometry data into |out_buffer_|.
+  bool EncodeInternal();
+  EncoderBuffer *buffer() const { return out_buffer_; }
+  // Resets per-call state and forwards |return_value|.
+  bool ExitAndCleanup(bool return_value);
+
+ private:
+  // Returns the PLY type name for the given attribute, or nullptr when the
+  // attribute's data type is unsupported.
+  const char *GetAttributeDataType(int attribute);
+
+  EncoderBuffer *out_buffer_;
+
+  // Non-owning pointers to the geometry being encoded; valid only for the
+  // duration of a single EncodeTo*() call.
+  const PointCloud *in_point_cloud_;
+  const Mesh *in_mesh_;
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_PLY_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_property_reader.h b/libs/assimp/contrib/draco/src/draco/io/ply_property_reader.h
new file mode 100644
index 0000000..efb8a3a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_property_reader.h
@@ -0,0 +1,96 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_PLY_PROPERTY_READER_H_
+#define DRACO_IO_PLY_PROPERTY_READER_H_
+
+#include <functional>
+
+#include "draco/io/ply_reader.h"
+
+namespace draco {
+
+// Class for reading PlyProperty with a given type, performing data conversion
+// if necessary.
+// Class for reading PlyProperty with a given type, performing data conversion
+// if necessary.
+template <typename ReadTypeT>
+class PlyPropertyReader {
+ public:
+  // Binds the reader to |property| and selects a conversion functor from the
+  // property's stored type to ReadTypeT.
+  // NOTE(review): for an unsupported data type the default branch leaves
+  // |convert_value_func_| empty, so a later ReadValue() would invoke an
+  // empty std::function (throws std::bad_function_call) — confirm callers
+  // validate the type first.
+  explicit PlyPropertyReader(const PlyProperty *property)
+      : property_(property) {
+    // Find the suitable function for converting values.
+    switch (property->data_type()) {
+      case DT_UINT8:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<uint8_t>(val_id);
+        };
+        break;
+      case DT_INT8:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<int8_t>(val_id);
+        };
+        break;
+      case DT_UINT16:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<uint16_t>(val_id);
+        };
+        break;
+      case DT_INT16:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<int16_t>(val_id);
+        };
+        break;
+      case DT_UINT32:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<uint32_t>(val_id);
+        };
+        break;
+      case DT_INT32:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<int32_t>(val_id);
+        };
+        break;
+      case DT_FLOAT32:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<float>(val_id);
+        };
+        break;
+      case DT_FLOAT64:
+        convert_value_func_ = [=](int val_id) {
+          return this->ConvertValue<double>(val_id);
+        };
+        break;
+      default:
+        break;
+    }
+  }
+
+  // Reads the value at |value_id|, converted to ReadTypeT.
+  ReadTypeT ReadValue(int value_id) const {
+    return convert_value_func_(value_id);
+  }
+
+ private:
+  // Reinterprets the raw bytes at |value_id| as SourceTypeT, then casts to
+  // ReadTypeT.
+  template <typename SourceTypeT>
+  ReadTypeT ConvertValue(int value_id) const {
+    const void *const address = property_->GetDataEntryAddress(value_id);
+    const SourceTypeT src_val = *reinterpret_cast<const SourceTypeT *>(address);
+    return static_cast<ReadTypeT>(src_val);
+  }
+
+  // Non-owning; must outlive the reader.
+  const PlyProperty *property_;
+  std::function<ReadTypeT(int)> convert_value_func_;
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_PLY_PROPERTY_READER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_property_writer.h b/libs/assimp/contrib/draco/src/draco/io/ply_property_writer.h
new file mode 100644
index 0000000..4f243b2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_property_writer.h
@@ -0,0 +1,94 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_PLY_PROPERTY_WRITER_H_
+#define DRACO_IO_PLY_PROPERTY_WRITER_H_
+
+#include <functional>
+
+#include "draco/io/ply_reader.h"
+
+namespace draco {
+
+// Class for writing PlyProperty with a given type, performing data conversion
+// if necessary.
+// Class for writing PlyProperty with a given type, performing data conversion
+// if necessary.
+template <typename WriteTypeT>
+class PlyPropertyWriter {
+ public:
+  // Binds the writer to |property| and selects a conversion functor from
+  // WriteTypeT to the property's stored type.
+  // NOTE(review): same caveat as PlyPropertyReader — an unsupported data
+  // type leaves |convert_value_func_| empty, and PushBackValue() would then
+  // throw std::bad_function_call; confirm callers validate the type first.
+  explicit PlyPropertyWriter(PlyProperty *property) : property_(property) {
+    // Find the suitable function for converting values.
+    switch (property->data_type()) {
+      case DT_UINT8:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<uint8_t>(val);
+        };
+        break;
+      case DT_INT8:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<int8_t>(val);
+        };
+        break;
+      case DT_UINT16:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<uint16_t>(val);
+        };
+        break;
+      case DT_INT16:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<int16_t>(val);
+        };
+        break;
+      case DT_UINT32:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<uint32_t>(val);
+        };
+        break;
+      case DT_INT32:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<int32_t>(val);
+        };
+        break;
+      case DT_FLOAT32:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<float>(val);
+        };
+        break;
+      case DT_FLOAT64:
+        convert_value_func_ = [=](WriteTypeT val) {
+          return this->ConvertValue<double>(val);
+        };
+        break;
+      default:
+        break;
+    }
+  }
+
+  // Converts |value| to the property's stored type and appends it.
+  void PushBackValue(WriteTypeT value) const {
+    return convert_value_func_(value);
+  }
+
+ private:
+  // Casts |value| to SourceTypeT and appends the raw bytes to the property.
+  template <typename SourceTypeT>
+  void ConvertValue(WriteTypeT value) const {
+    const SourceTypeT src_val = static_cast<SourceTypeT>(value);
+    property_->push_back_value(&src_val);
+  }
+
+  // Non-owning; must outlive the writer.
+  PlyProperty *property_;
+  std::function<void(WriteTypeT)> convert_value_func_;
+};
+
+} // namespace draco
+
+#endif // DRACO_IO_PLY_PROPERTY_WRITER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_reader.cc b/libs/assimp/contrib/draco/src/draco/io/ply_reader.cc
new file mode 100644
index 0000000..ea7f268
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_reader.cc
@@ -0,0 +1,312 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/ply_reader.h"
+
#include <algorithm>
#include <array>
#include <cctype>
#include <cstring>
#include <regex>
+
+#include "draco/core/status.h"
+#include "draco/io/parser_utils.h"
+#include "draco/io/ply_property_writer.h"
+
+namespace draco {
+
// Constructs a property named |name| holding values of |data_type|. For list
// properties |list_type| is the type of the per-entry count; it is DT_INVALID
// for non-list properties.
PlyProperty::PlyProperty(const std::string &name, DataType data_type,
                         DataType list_type)
    : name_(name), data_type_(data_type), list_data_type_(list_type) {
  // Cache the byte sizes of both types for fast offset computation later.
  data_type_num_bytes_ = DataTypeLength(data_type);
  list_data_type_num_bytes_ = DataTypeLength(list_type);
}
+
// Creates an element (e.g. "vertex" or "face") that expects |num_entries|
// entries in the data section of the PLY file.
PlyElement::PlyElement(const std::string &name, int64_t num_entries)
    : name_(name), num_entries_(num_entries) {}
+
+PlyReader::PlyReader() : format_(kLittleEndian) {}
+
+Status PlyReader::Read(DecoderBuffer *buffer) {
+ std::string value;
+ // The first line needs to by "ply".
+ if (!parser::ParseString(buffer, &value) || value != "ply") {
+ return Status(Status::INVALID_PARAMETER, "Not a valid ply file");
+ }
+ parser::SkipLine(buffer);
+
+ // The second line needs to be the format of the ply file.
+ parser::ParseLine(buffer, &value);
+ std::string format, version;
+ const std::vector<std::string> words = SplitWords(value);
+ if (words.size() >= 3 && words[0] == "format") {
+ format = words[1];
+ version = words[2];
+ } else {
+ return Status(Status::INVALID_PARAMETER, "Missing or wrong format line");
+ }
+ if (version != "1.0") {
+ return Status(Status::UNSUPPORTED_VERSION, "Unsupported PLY version");
+ }
+ if (format == "binary_big_endian") {
+ return Status(Status::UNSUPPORTED_VERSION,
+ "Unsupported format. Currently we support only ascii and"
+ " binary_little_endian format.");
+ }
+ if (format == "ascii") {
+ format_ = kAscii;
+ } else {
+ format_ = kLittleEndian;
+ }
+ DRACO_RETURN_IF_ERROR(ParseHeader(buffer));
+ if (!ParsePropertiesData(buffer)) {
+ return Status(Status::INVALID_PARAMETER, "Couldn't parse properties");
+ }
+ return OkStatus();
+}
+
// Consumes header lines one at a time until "end_header". Each line is tried
// as an element declaration, then as a property declaration; anything else
// (e.g. "comment") is skipped.
Status PlyReader::ParseHeader(DecoderBuffer *buffer) {
  while (true) {
    DRACO_ASSIGN_OR_RETURN(bool end, ParseEndHeader(buffer));
    if (end) {
      break;
    }
    if (ParseElement(buffer)) {
      continue;
    }
    DRACO_ASSIGN_OR_RETURN(bool property_parsed, ParseProperty(buffer));
    if (property_parsed) {
      continue;
    }
    // Unrecognized header line; ignore it.
    parser::SkipLine(buffer);
  }
  return OkStatus();
}
+
// Returns true when the next header line is "end_header" (consuming it),
// false when it is some other line (leaving the line for the caller), or an
// error status when the buffer ends before the header is terminated.
StatusOr<bool> PlyReader::ParseEndHeader(DecoderBuffer *buffer) {
  parser::SkipWhitespace(buffer);
  // Peek (without advancing) exactly the 10 characters of "end_header".
  std::array<char, 10> c;
  if (!buffer->Peek(&c)) {
    return Status(Status::INVALID_PARAMETER,
                  "End of file reached before the end_header");
  }
  if (std::memcmp(&c[0], "end_header", 10) != 0) {
    return false;
  }
  parser::SkipLine(buffer);
  return true;
}
+
+bool PlyReader::ParseElement(DecoderBuffer *buffer) {
+ DecoderBuffer line_buffer(*buffer);
+ std::string line;
+ parser::ParseLine(&line_buffer, &line);
+
+ std::string element_name;
+ int64_t count;
+ const std::vector<std::string> words = SplitWords(line);
+ if (words.size() >= 3 && words[0] == "element") {
+ element_name = words[1];
+ const std::string count_str = words[2];
+ count = strtoll(count_str.c_str(), nullptr, 10);
+ } else {
+ return false;
+ }
+ element_index_[element_name] = static_cast<uint32_t>(elements_.size());
+ elements_.emplace_back(PlyElement(element_name, count));
+ *buffer = line_buffer;
+ return true;
+}
+
// Attempts to parse a "property <type> <name>" or a
// "property list <count_type> <type> <name>" declaration for the most
// recently declared element. Returns true when a property was added, false
// when the line is not a property declaration, or an error status when a
// type name is invalid. |buffer| is advanced only on success.
StatusOr<bool> PlyReader::ParseProperty(DecoderBuffer *buffer) {
  if (elements_.empty()) {
    return false;  // Ignore properties if there is no active element.
  }
  DecoderBuffer line_buffer(*buffer);
  std::string line;
  parser::ParseLine(&line_buffer, &line);

  std::string data_type_str, list_type_str, property_name;
  bool property_search = false;
  const std::vector<std::string> words = SplitWords(line);
  // Non-list form: "property <type> <name>".
  if (words.size() >= 3 && words[0] == "property" && words[1] != "list") {
    property_search = true;
    data_type_str = words[1];
    property_name = words[2];
  }

  // List form: "property list <count_type> <type> <name>".
  bool property_list_search = false;
  if (words.size() >= 5 && words[0] == "property" && words[1] == "list") {
    property_list_search = true;
    list_type_str = words[2];
    data_type_str = words[3];
    property_name = words[4];
  }
  if (!property_search && !property_list_search) {
    return false;
  }
  const DataType data_type = GetDataTypeFromString(data_type_str);
  if (data_type == DT_INVALID) {
    return Status(Status::INVALID_PARAMETER, "Wrong property data type");
  }
  DataType list_type = DT_INVALID;
  if (property_list_search) {
    list_type = GetDataTypeFromString(list_type_str);
    if (list_type == DT_INVALID) {
      return Status(Status::INVALID_PARAMETER, "Wrong property list type");
    }
  }
  elements_.back().AddProperty(
      PlyProperty(property_name, data_type, list_type));
  // Commit the consumed line to the caller's buffer.
  *buffer = line_buffer;
  return true;
}
+
+bool PlyReader::ParsePropertiesData(DecoderBuffer *buffer) {
+ for (int i = 0; i < static_cast<int>(elements_.size()); ++i) {
+ if (format_ == kLittleEndian) {
+ if (!ParseElementData(buffer, i)) {
+ return false;
+ }
+ } else if (format_ == kAscii) {
+ if (!ParseElementDataAscii(buffer, i)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool PlyReader::ParseElementData(DecoderBuffer *buffer, int element_index) {
+ PlyElement &element = elements_[element_index];
+ for (int entry = 0; entry < element.num_entries(); ++entry) {
+ for (int i = 0; i < element.num_properties(); ++i) {
+ PlyProperty &prop = element.property(i);
+ if (prop.is_list()) {
+ // Parse the number of entries for the list element.
+ int64_t num_entries = 0;
+ buffer->Decode(&num_entries, prop.list_data_type_num_bytes());
+ // Store offset to the main data entry.
+ prop.list_data_.push_back(prop.data_.size() /
+ prop.data_type_num_bytes_);
+ // Store the number of entries.
+ prop.list_data_.push_back(num_entries);
+ // Read and store the actual property data
+ const int64_t num_bytes_to_read =
+ prop.data_type_num_bytes() * num_entries;
+ prop.data_.insert(prop.data_.end(), buffer->data_head(),
+ buffer->data_head() + num_bytes_to_read);
+ buffer->Advance(num_bytes_to_read);
+ } else {
+ // Non-list property
+ prop.data_.insert(prop.data_.end(), buffer->data_head(),
+ buffer->data_head() + prop.data_type_num_bytes());
+ buffer->Advance(prop.data_type_num_bytes());
+ }
+ }
+ }
+ return true;
+}
+
// Parses the ascii-encoded data of one element. Each numeric token is parsed
// and then converted to the property's declared type via PlyPropertyWriter.
// Returns false on any token that fails to parse.
bool PlyReader::ParseElementDataAscii(DecoderBuffer *buffer,
                                      int element_index) {
  PlyElement &element = elements_[element_index];
  for (int entry = 0; entry < element.num_entries(); ++entry) {
    for (int i = 0; i < element.num_properties(); ++i) {
      PlyProperty &prop = element.property(i);
      // double is wide enough to round-trip any of the supported int types.
      PlyPropertyWriter<double> prop_writer(&prop);
      int32_t num_entries = 1;
      if (prop.is_list()) {
        parser::SkipWhitespace(buffer);
        // Parse the number of entries for the list element.
        if (!parser::ParseSignedInt(buffer, &num_entries)) {
          return false;
        }

        // Store offset to the main data entry.
        prop.list_data_.push_back(prop.data_.size() /
                                  prop.data_type_num_bytes_);
        // Store the number of entries.
        prop.list_data_.push_back(num_entries);
      }
      // Read and store the actual property data.
      for (int v = 0; v < num_entries; ++v) {
        parser::SkipWhitespace(buffer);
        if (prop.data_type() == DT_FLOAT32 || prop.data_type() == DT_FLOAT64) {
          // NOTE(review): values are parsed into a float even for DT_FLOAT64
          // properties, so double precision in ascii files is truncated —
          // confirm this is acceptable.
          float val;
          if (!parser::ParseFloat(buffer, &val)) {
            return false;
          }
          prop_writer.PushBackValue(val);
        } else {
          int32_t val;
          if (!parser::ParseSignedInt(buffer, &val)) {
            return false;
          }
          prop_writer.PushBackValue(val);
        }
      }
    }
  }
  return true;
}
+
+std::vector<std::string> PlyReader::SplitWords(const std::string &line) {
+ std::vector<std::string> output;
+ std::string::size_type start = 0;
+ std::string::size_type end = 0;
+
+ // Check for isspace chars.
+ while ((end = line.find_first_of(" \t\n\v\f\r", start)) !=
+ std::string::npos) {
+ const std::string word(line.substr(start, end - start));
+ if (!std::all_of(word.begin(), word.end(), isspace)) {
+ output.push_back(word);
+ }
+ start = end + 1;
+ }
+
+ const std::string last_word(line.substr(start));
+ if (!std::all_of(last_word.begin(), last_word.end(), isspace)) {
+ output.push_back(last_word);
+ }
+ return output;
+}
+
+DataType PlyReader::GetDataTypeFromString(const std::string &name) const {
+ if (name == "char" || name == "int8") {
+ return DT_INT8;
+ }
+ if (name == "uchar" || name == "uint8") {
+ return DT_UINT8;
+ }
+ if (name == "short" || name == "int16") {
+ return DT_INT16;
+ }
+ if (name == "ushort" || name == "uint16") {
+ return DT_UINT16;
+ }
+ if (name == "int" || name == "int32") {
+ return DT_INT32;
+ }
+ if (name == "uint" || name == "uint32") {
+ return DT_UINT32;
+ }
+ if (name == "float" || name == "float32") {
+ return DT_FLOAT32;
+ }
+ if (name == "double" || name == "float64") {
+ return DT_FLOAT64;
+ }
+ return DT_INVALID;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_reader.h b/libs/assimp/contrib/draco/src/draco/io/ply_reader.h
new file mode 100644
index 0000000..e0f15a3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_reader.h
@@ -0,0 +1,155 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File contains helper classes used for parsing of PLY files. The classes are
+// used by the PlyDecoder (ply_decoder.h) to read a point cloud or mesh from a
+// source PLY file.
+// TODO(ostava): Currently, we support only binary PLYs encoded in the little
+// endian format ("format binary_little_endian 1.0").
+
+#ifndef DRACO_IO_PLY_READER_H_
+#define DRACO_IO_PLY_READER_H_
+
+#include <map>
+#include <vector>
+
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/draco_types.h"
+#include "draco/core/status.h"
+#include "draco/core/status_or.h"
+
+namespace draco {
+
// A single PLY property of a given PLY element. For "vertex" element this can
// contain data such as "x", "y", or "z" coordinate of the vertex, while for
// "face" element this usually contains corner indices.
class PlyProperty {
 public:
  friend class PlyReader;

  // |list_type| is DT_INVALID for non-list properties.
  PlyProperty(const std::string &name, DataType data_type, DataType list_type);
  // Pre-allocates storage for |num_entries| fixed-size values.
  void ReserveData(int num_entries) {
    data_.reserve(DataTypeLength(data_type_) * num_entries);
  }

  // For list properties: index (in value units, not bytes) into the data
  // buffer where the values of |entry_id| begin.
  int64_t GetListEntryOffset(int entry_id) const {
    return list_data_[entry_id * 2];
  }
  // For list properties: number of values stored for |entry_id|.
  int64_t GetListEntryNumValues(int entry_id) const {
    return list_data_[entry_id * 2 + 1];
  }
  // Raw address of the value at |entry_id|; only valid until data_ grows.
  const void *GetDataEntryAddress(int entry_id) const {
    return data_.data() + entry_id * data_type_num_bytes_;
  }
  // Appends one value of data_type() read from the raw bytes at |data|.
  void push_back_value(const void *data) {
    data_.insert(data_.end(), static_cast<const uint8_t *>(data),
                 static_cast<const uint8_t *>(data) + data_type_num_bytes_);
  }

  const std::string &name() const { return name_; }
  bool is_list() const { return list_data_type_ != DT_INVALID; }
  DataType data_type() const { return data_type_; }
  int data_type_num_bytes() const { return data_type_num_bytes_; }
  DataType list_data_type() const { return list_data_type_; }
  int list_data_type_num_bytes() const { return list_data_type_num_bytes_; }

 private:
  std::string name_;
  // Raw bytes of all stored values, back to back.
  std::vector<uint8_t> data_;
  // List data contain pairs of <offset, number_of_values>
  std::vector<int64_t> list_data_;
  DataType data_type_;
  int data_type_num_bytes_;
  DataType list_data_type_;
  int list_data_type_num_bytes_;
};
+
// A single PLY element such as "vertex" or "face". Each element can store
// arbitrary properties such as vertex coordinates or face indices.
class PlyElement {
 public:
  PlyElement(const std::string &name, int64_t num_entries);
  // Registers |prop|; fixed-size (non-list) properties get their storage
  // pre-allocated for the expected number of entries.
  void AddProperty(const PlyProperty &prop) {
    property_index_[prop.name()] = static_cast<int>(properties_.size());
    properties_.emplace_back(prop);
    if (!properties_.back().is_list()) {
      properties_.back().ReserveData(static_cast<int>(num_entries_));
    }
  }

  // Returns the property named |name|, or nullptr when it does not exist.
  const PlyProperty *GetPropertyByName(const std::string &name) const {
    const auto it = property_index_.find(name);
    if (it != property_index_.end()) {
      return &properties_[it->second];
    }
    return nullptr;
  }

  int num_properties() const { return static_cast<int>(properties_.size()); }
  int num_entries() const { return static_cast<int>(num_entries_); }
  const PlyProperty &property(int prop_index) const {
    return properties_[prop_index];
  }
  PlyProperty &property(int prop_index) { return properties_[prop_index]; }

 private:
  std::string name_;
  int64_t num_entries_;
  std::vector<PlyProperty> properties_;
  // Maps a property name to its index in |properties_|.
  std::map<std::string, int> property_index_;
};
+
// Class responsible for parsing PLY data. It produces a list of PLY elements
// and their properties that can be used to construct a mesh or a point cloud.
class PlyReader {
 public:
  PlyReader();
  // Parses the entire PLY stream in |buffer| (header plus data section).
  Status Read(DecoderBuffer *buffer);

  // Returns the element named |name|, or nullptr when it does not exist.
  const PlyElement *GetElementByName(const std::string &name) const {
    const auto it = element_index_.find(name);
    if (it != element_index_.end()) {
      return &elements_[it->second];
    }
    return nullptr;
  }

  int num_elements() const { return static_cast<int>(elements_.size()); }
  const PlyElement &element(int element_index) const {
    return elements_[element_index];
  }

 private:
  // Supported encodings of the data section.
  enum Format { kLittleEndian = 0, kAscii };

  Status ParseHeader(DecoderBuffer *buffer);
  StatusOr<bool> ParseEndHeader(DecoderBuffer *buffer);
  bool ParseElement(DecoderBuffer *buffer);
  StatusOr<bool> ParseProperty(DecoderBuffer *buffer);
  bool ParsePropertiesData(DecoderBuffer *buffer);
  bool ParseElementData(DecoderBuffer *buffer, int element_index);
  bool ParseElementDataAscii(DecoderBuffer *buffer, int element_index);

  // Splits |line| by whitespace characters.
  std::vector<std::string> SplitWords(const std::string &line);
  DataType GetDataTypeFromString(const std::string &name) const;

  std::vector<PlyElement> elements_;
  // Maps an element name to its index in |elements_|.
  std::map<std::string, int> element_index_;
  // Encoding of the data section, set from the header's format line.
  Format format_;
};
+
+} // namespace draco
+
+#endif // DRACO_IO_PLY_READER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/ply_reader_test.cc b/libs/assimp/contrib/draco/src/draco/io/ply_reader_test.cc
new file mode 100644
index 0000000..05ff63d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/ply_reader_test.cc
@@ -0,0 +1,143 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/ply_reader.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/file_utils.h"
+#include "draco/io/ply_property_reader.h"
+
+namespace draco {
+
// Fixture that loads PLY test files from the test data directory.
class PlyReaderTest : public ::testing::Test {
 protected:
  // Returns the raw bytes of |file_name|; fails an EXPECT (and returns an
  // empty vector) when the file cannot be read.
  std::vector<char> ReadPlyFile(const std::string &file_name) const {
    const std::string path = GetTestFileFullPath(file_name);

    std::vector<char> data;
    EXPECT_TRUE(ReadFileToBuffer(path, &data));
    return data;
  }
};
+
// Parses the binary test file and checks the header layout plus value
// conversion of the "red" color property through different reader types.
TEST_F(PlyReaderTest, TestReader) {
  const std::string file_name = "test_pos_color.ply";
  const std::vector<char> data = ReadPlyFile(file_name);
  DecoderBuffer buf;
  buf.Init(data.data(), data.size());
  PlyReader reader;
  Status status = reader.Read(&buf);
  ASSERT_TRUE(status.ok()) << status;
  ASSERT_EQ(reader.num_elements(), 2);
  ASSERT_EQ(reader.element(0).num_properties(), 7);
  ASSERT_EQ(reader.element(1).num_properties(), 1);
  ASSERT_TRUE(reader.element(1).property(0).is_list());

  ASSERT_TRUE(reader.element(0).GetPropertyByName("red") != nullptr);
  const PlyProperty *const prop = reader.element(0).GetPropertyByName("red");
  PlyPropertyReader<uint8_t> reader_uint8(prop);
  PlyPropertyReader<uint32_t> reader_uint32(prop);
  PlyPropertyReader<float> reader_float(prop);
  // uint8 values must survive widening to uint32 and float unchanged.
  for (int i = 0; i < reader.element(0).num_entries(); ++i) {
    ASSERT_EQ(reader_uint8.ReadValue(i), reader_uint32.ReadValue(i));
    ASSERT_EQ(reader_uint8.ReadValue(i), reader_float.ReadValue(i));
  }
}
+
// Parses the binary and ascii variants of the same model and verifies that
// both produce the same structure and (near-)identical "x" values.
TEST_F(PlyReaderTest, TestReaderAscii) {
  const std::string file_name = "test_pos_color.ply";
  const std::vector<char> data = ReadPlyFile(file_name);
  ASSERT_NE(data.size(), 0u);
  DecoderBuffer buf;
  buf.Init(data.data(), data.size());
  PlyReader reader;
  Status status = reader.Read(&buf);
  ASSERT_TRUE(status.ok()) << status;

  const std::string file_name_ascii = "test_pos_color_ascii.ply";
  const std::vector<char> data_ascii = ReadPlyFile(file_name_ascii);
  buf.Init(data_ascii.data(), data_ascii.size());
  PlyReader reader_ascii;
  status = reader_ascii.Read(&buf);
  ASSERT_TRUE(status.ok()) << status;
  ASSERT_EQ(reader.num_elements(), reader_ascii.num_elements());
  ASSERT_EQ(reader.element(0).num_properties(),
            reader_ascii.element(0).num_properties());

  ASSERT_TRUE(reader.element(0).GetPropertyByName("x") != nullptr);
  const PlyProperty *const prop = reader.element(0).GetPropertyByName("x");
  const PlyProperty *const prop_ascii =
      reader_ascii.element(0).GetPropertyByName("x");
  PlyPropertyReader<float> reader_float(prop);
  PlyPropertyReader<float> reader_float_ascii(prop_ascii);
  // Ascii text round-trip may lose a little precision, hence ASSERT_NEAR.
  for (int i = 0; i < reader.element(0).num_entries(); ++i) {
    ASSERT_NEAR(reader_float.ReadValue(i), reader_float_ascii.ReadValue(i),
                1e-4f);
  }
}
+
// Verifies that extra whitespace in the header does not break parsing.
TEST_F(PlyReaderTest, TestReaderExtraWhitespace) {
  const std::string file_name = "test_extra_whitespace.ply";
  const std::vector<char> data = ReadPlyFile(file_name);
  ASSERT_NE(data.size(), 0u);
  DecoderBuffer buf;
  buf.Init(data.data(), data.size());
  PlyReader reader;
  Status status = reader.Read(&buf);
  ASSERT_TRUE(status.ok()) << status;

  ASSERT_EQ(reader.num_elements(), 2);
  ASSERT_EQ(reader.element(0).num_properties(), 7);
  ASSERT_EQ(reader.element(1).num_properties(), 1);
  ASSERT_TRUE(reader.element(1).property(0).is_list());

  ASSERT_TRUE(reader.element(0).GetPropertyByName("red") != nullptr);
  const PlyProperty *const prop = reader.element(0).GetPropertyByName("red");
  PlyPropertyReader<uint8_t> reader_uint8(prop);
  PlyPropertyReader<uint32_t> reader_uint32(prop);
  PlyPropertyReader<float> reader_float(prop);
  for (int i = 0; i < reader.element(0).num_entries(); ++i) {
    ASSERT_EQ(reader_uint8.ReadValue(i), reader_uint32.ReadValue(i));
    ASSERT_EQ(reader_uint8.ReadValue(i), reader_float.ReadValue(i));
  }
}
+
// Verifies parsing of a file that uses the less common PLY type spellings.
TEST_F(PlyReaderTest, TestReaderMoreDataTypes) {
  const std::string file_name = "test_more_datatypes.ply";
  const std::vector<char> data = ReadPlyFile(file_name);
  ASSERT_NE(data.size(), 0u);
  DecoderBuffer buf;
  buf.Init(data.data(), data.size());
  PlyReader reader;
  Status status = reader.Read(&buf);
  ASSERT_TRUE(status.ok()) << status;

  ASSERT_EQ(reader.num_elements(), 2);
  ASSERT_EQ(reader.element(0).num_properties(), 7);
  ASSERT_EQ(reader.element(1).num_properties(), 1);
  ASSERT_TRUE(reader.element(1).property(0).is_list());

  ASSERT_TRUE(reader.element(0).GetPropertyByName("red") != nullptr);
  const PlyProperty *const prop = reader.element(0).GetPropertyByName("red");
  PlyPropertyReader<uint8_t> reader_uint8(prop);
  PlyPropertyReader<uint32_t> reader_uint32(prop);
  PlyPropertyReader<float> reader_float(prop);
  for (int i = 0; i < reader.element(0).num_entries(); ++i) {
    ASSERT_EQ(reader_uint8.ReadValue(i), reader_uint32.ReadValue(i));
    ASSERT_EQ(reader_uint8.ReadValue(i), reader_float.ReadValue(i));
  }
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/point_cloud_io.cc b/libs/assimp/contrib/draco/src/draco/io/point_cloud_io.cc
new file mode 100644
index 0000000..643820b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/point_cloud_io.cc
@@ -0,0 +1,58 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/point_cloud_io.h"
+
+#include "draco/io/file_utils.h"
+#include "draco/io/obj_decoder.h"
+#include "draco/io/parser_utils.h"
+#include "draco/io/ply_decoder.h"
+
+namespace draco {
+
+StatusOr<std::unique_ptr<PointCloud>> ReadPointCloudFromFile(
+ const std::string &file_name) {
+ std::unique_ptr<PointCloud> pc(new PointCloud());
+ // Analyze file extension.
+ const std::string extension = parser::ToLower(
+ file_name.size() >= 4 ? file_name.substr(file_name.size() - 4)
+ : file_name);
+ if (extension == ".obj") {
+ // Wavefront OBJ file format.
+ ObjDecoder obj_decoder;
+ const Status obj_status = obj_decoder.DecodeFromFile(file_name, pc.get());
+ if (!obj_status.ok()) {
+ return obj_status;
+ }
+ return std::move(pc);
+ }
+ if (extension == ".ply") {
+ // Wavefront PLY file format.
+ PlyDecoder ply_decoder;
+ DRACO_RETURN_IF_ERROR(ply_decoder.DecodeFromFile(file_name, pc.get()));
+ return std::move(pc);
+ }
+
+ std::vector<char> buffer;
+ if (!ReadFileToBuffer(file_name, &buffer)) {
+ return Status(Status::DRACO_ERROR, "Unable to read input file.");
+ }
+ DecoderBuffer decoder_buffer;
+ decoder_buffer.Init(buffer.data(), buffer.size());
+ Decoder decoder;
+ auto status_or = decoder.DecodePointCloudFromBuffer(&decoder_buffer);
+ return std::move(status_or).value();
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/point_cloud_io.h b/libs/assimp/contrib/draco/src/draco/io/point_cloud_io.h
new file mode 100644
index 0000000..4e1eb35
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/point_cloud_io.h
@@ -0,0 +1,89 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_IO_POINT_CLOUD_IO_H_
+#define DRACO_IO_POINT_CLOUD_IO_H_
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/decode.h"
+#include "draco/compression/expert_encode.h"
+
+namespace draco {
+
// Encodes |pc| with |method| and |options| and writes the compressed bytes
// into |os|. On encoding failure the stream's badbit is set and nothing is
// written. Note: OutStreamT is a forwarding reference, so for an lvalue
// stream OutStreamT deduces to a reference type and the stream is
// effectively returned by reference.
template <typename OutStreamT>
OutStreamT WritePointCloudIntoStream(const PointCloud *pc, OutStreamT &&os,
                                     PointCloudEncodingMethod method,
                                     const EncoderOptions &options) {
  EncoderBuffer buffer;
  // Local copy of |options| handed to Reset().
  EncoderOptions local_options = options;
  ExpertEncoder encoder(*pc);
  encoder.Reset(local_options);
  encoder.SetEncodingMethod(method);
  if (!encoder.EncodeToBuffer(&buffer).ok()) {
    os.setstate(std::ios_base::badbit);
    return os;
  }

  os.write(static_cast<const char *>(buffer.data()), buffer.size());

  return os;
}
+
// Convenience overload using default encoder options.
template <typename OutStreamT>
OutStreamT WritePointCloudIntoStream(const PointCloud *pc, OutStreamT &&os,
                                     PointCloudEncodingMethod method) {
  const EncoderOptions options = EncoderOptions::CreateDefaultOptions();
  return WritePointCloudIntoStream(pc, os, method, options);
}
+
// Convenience overload using sequential encoding and default options.
template <typename OutStreamT>
OutStreamT &WritePointCloudIntoStream(const PointCloud *pc, OutStreamT &&os) {
  return WritePointCloudIntoStream(pc, os, POINT_CLOUD_SEQUENTIAL_ENCODING);
}
+
+template <typename InStreamT>
+InStreamT &ReadPointCloudFromStream(std::unique_ptr<PointCloud> *point_cloud,
+ InStreamT &&is) {
+ // Determine size of stream and write into a vector
+ const auto start_pos = is.tellg();
+ is.seekg(0, std::ios::end);
+ const std::streampos is_size = is.tellg() - start_pos;
+ is.seekg(start_pos);
+ std::vector<char> data(is_size);
+ is.read(&data[0], is_size);
+
+ // Create a point cloud from that data.
+ DecoderBuffer buffer;
+ buffer.Init(&data[0], data.size());
+ Decoder decoder;
+ auto statusor = decoder.DecodePointCloudFromBuffer(&buffer);
+ *point_cloud = std::move(statusor).value();
+ if (!statusor.ok() || *point_cloud == nullptr) {
+ is.setstate(std::ios_base::badbit);
+ }
+
+ return is;
+}
+
+// Reads a point cloud from a file. The function automatically chooses the
+// correct decoder based on the extension of the files. Currently, .obj and .ply
+// files are supported. Other file extensions are processed by the default
+// draco::PointCloudDecoder.
+// Returns nullptr with an error status if the decoding failed.
+StatusOr<std::unique_ptr<PointCloud>> ReadPointCloudFromFile(
+ const std::string &file_name);
+
+} // namespace draco
+
+#endif // DRACO_IO_POINT_CLOUD_IO_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/point_cloud_io_test.cc b/libs/assimp/contrib/draco/src/draco/io/point_cloud_io_test.cc
new file mode 100644
index 0000000..73674d0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/point_cloud_io_test.cc
@@ -0,0 +1,115 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/io/point_cloud_io.h"
+
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/obj_decoder.h"
+
+namespace draco {
+
// Fixture implementing an encode/decode round trip through a stringstream
// and verifying that attribute and point counts survive.
class IoPointCloudIoTest : public ::testing::Test {
 protected:
  // Loads |file_name|, encodes it with |method|, decodes the result, and
  // compares named-attribute and point counts against the original.
  void test_compression_method(PointCloudEncodingMethod method,
                               int expected_num_attributes,
                               const std::string &file_name) {
    const std::unique_ptr<PointCloud> encoded_pc =
        ReadPointCloudFromTestFile(file_name);
    ASSERT_NE(encoded_pc, nullptr) << "Failed to load test model " << file_name;
    ASSERT_GE(encoded_pc->num_attributes(), expected_num_attributes)
        << "Failed to load test model: " << file_name
        << " wrong number of attributes" << std::endl;

    // Set quantization.
    // NOTE(review): the loop bound uses <= NAMED_ATTRIBUTES_COUNT, touching
    // one index past the last named attribute type — confirm whether this is
    // intentional.
    EncoderOptions options = EncoderOptions::CreateDefaultOptions();
    for (int i = 0; i <= GeometryAttribute::NAMED_ATTRIBUTES_COUNT; i++) {
      options.SetAttributeInt(GeometryAttribute::Type(i), "quantization_bits",
                              14);
    }

    std::stringstream ss;
    WritePointCloudIntoStream(encoded_pc.get(), ss, method, options);
    ASSERT_TRUE(ss.good());

    std::unique_ptr<PointCloud> decoded_pc;
    ReadPointCloudFromStream(&decoded_pc, ss);
    ASSERT_TRUE(ss.good());

    for (int i = 0; i <= GeometryAttribute::NAMED_ATTRIBUTES_COUNT; i++) {
      ASSERT_EQ(encoded_pc->NumNamedAttributes(GeometryAttribute::Type(i)),
                decoded_pc->NumNamedAttributes(GeometryAttribute::Type(i)));
    }

    ASSERT_EQ(encoded_pc->num_points(), decoded_pc->num_points());
  }
};
+
// Round-trip tests for sequential point cloud encoding on OBJ/PLY inputs.
TEST_F(IoPointCloudIoTest, EncodeSequentialPointCloudTestNmObj) {
  test_compression_method(POINT_CLOUD_SEQUENTIAL_ENCODING, 2, "test_nm.obj");
}
TEST_F(IoPointCloudIoTest, EncodeSequentialPointCloudTestPosObj) {
  test_compression_method(POINT_CLOUD_SEQUENTIAL_ENCODING, 1,
                          "point_cloud_test_pos.obj");
}
TEST_F(IoPointCloudIoTest, EncodeSequentialPointCloudTestPosPly) {
  test_compression_method(POINT_CLOUD_SEQUENTIAL_ENCODING, 1,
                          "point_cloud_test_pos.ply");
}
TEST_F(IoPointCloudIoTest, EncodeSequentialPointCloudTestPosNormObj) {
  test_compression_method(POINT_CLOUD_SEQUENTIAL_ENCODING, 2,
                          "point_cloud_test_pos_norm.obj");
}
TEST_F(IoPointCloudIoTest, EncodeSequentialPointCloudTestPosNormPly) {
  test_compression_method(POINT_CLOUD_SEQUENTIAL_ENCODING, 2,
                          "point_cloud_test_pos_norm.ply");
}
+
// Round-trip tests for kd-tree point cloud encoding on position-only inputs.
TEST_F(IoPointCloudIoTest, EncodeKdTreePointCloudTestPosObj) {
  test_compression_method(POINT_CLOUD_KD_TREE_ENCODING, 1,
                          "point_cloud_test_pos.obj");
}
TEST_F(IoPointCloudIoTest, EncodeKdTreePointCloudTestPosPly) {
  test_compression_method(POINT_CLOUD_KD_TREE_ENCODING, 1,
                          "point_cloud_test_pos.ply");
}
+
TEST_F(IoPointCloudIoTest, ObjFileInput) {
  // Tests whether loading obj point clouds from files works as expected.
  const std::unique_ptr<PointCloud> pc =
      ReadPointCloudFromTestFile("test_nm.obj");
  ASSERT_NE(pc, nullptr) << "Failed to load the obj point cloud.";
  // 97 is the known vertex count of the test_nm.obj fixture.
  EXPECT_EQ(pc->num_points(), 97) << "Obj point cloud not loaded properly.";
}
+
// Test if we handle wrong input for all file extensions. A nonexistent file
// must yield a null point cloud rather than a crash.
TEST_F(IoPointCloudIoTest, WrongFileObj) {
  const std::unique_ptr<PointCloud> pc =
      ReadPointCloudFromTestFile("wrong_file_name.obj");
  ASSERT_EQ(pc, nullptr);
}
TEST_F(IoPointCloudIoTest, WrongFilePly) {
  const std::unique_ptr<PointCloud> pc =
      ReadPointCloudFromTestFile("wrong_file_name.ply");
  ASSERT_EQ(pc, nullptr);
}
TEST_F(IoPointCloudIoTest, WrongFile) {
  const std::unique_ptr<PointCloud> pc =
      ReadPointCloudFromTestFile("wrong_file_name");
  ASSERT_EQ(pc, nullptr);
}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.cc b/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.cc
new file mode 100644
index 0000000..a99c96f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.cc
@@ -0,0 +1,103 @@
+#include "draco/io/stdio_file_reader.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#if defined(_WIN32)
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+#include "draco/io/file_reader_factory.h"
+
+namespace draco {
+
// Logs |error_string| together with its source location. Wrapped in
// do/while(false) so the macro behaves like a single statement.
#define FILEREADER_LOG_ERROR(error_string)                             \
  do {                                                                 \
    fprintf(stderr, "%s:%d (%s): %s.\n", __FILE__, __LINE__, __func__, \
            error_string);                                             \
  } while (false)

// Self-registers Open() with the reader factory at static-init time.
bool StdioFileReader::registered_in_factory_ =
    FileReaderFactory::RegisterReader(StdioFileReader::Open);

// Closes the owned FILE handle. |file_| is never null here because instances
// are only constructed by Open() after a successful fopen().
StdioFileReader::~StdioFileReader() { fclose(file_); }
+
+std::unique_ptr<FileReaderInterface> StdioFileReader::Open(
+ const std::string &file_name) {
+ if (file_name.empty()) {
+ return nullptr;
+ }
+
+ FILE *raw_file_ptr = fopen(file_name.c_str(), "rb");
+
+ if (raw_file_ptr == nullptr) {
+ return nullptr;
+ }
+
+ std::unique_ptr<FileReaderInterface> file(new (std::nothrow)
+ StdioFileReader(raw_file_ptr));
+ if (file == nullptr) {
+ FILEREADER_LOG_ERROR("Out of memory");
+ fclose(raw_file_ptr);
+ return nullptr;
+ }
+
+ return file;
+}
+
+bool StdioFileReader::ReadFileToBuffer(std::vector<char> *buffer) {
+ if (buffer == nullptr) {
+ return false;
+ }
+ buffer->clear();
+
+ const size_t file_size = GetFileSize();
+ if (file_size == 0) {
+ FILEREADER_LOG_ERROR("Unable to obtain file size or file empty");
+ return false;
+ }
+
+ buffer->resize(file_size);
+ return fread(buffer->data(), 1, file_size, file_) == file_size;
+}
+
+bool StdioFileReader::ReadFileToBuffer(std::vector<uint8_t> *buffer) {
+ if (buffer == nullptr) {
+ return false;
+ }
+ buffer->clear();
+
+ const size_t file_size = GetFileSize();
+ if (file_size == 0) {
+ FILEREADER_LOG_ERROR("Unable to obtain file size or file empty");
+ return false;
+ }
+
+ buffer->resize(file_size);
+ return fread(buffer->data(), 1, file_size, file_) == file_size;
+}
+
+size_t StdioFileReader::GetFileSize() {
+ if (fseek(file_, SEEK_SET, SEEK_END) != 0) {
+ FILEREADER_LOG_ERROR("Seek to EoF failed");
+ return false;
+ }
+
+#if _FILE_OFFSET_BITS == 64
+ const size_t file_size = static_cast<size_t>(ftello(file_));
+#elif defined _WIN64
+ const size_t file_size = static_cast<size_t>(_ftelli64(file_));
+#else
+ const size_t file_size = static_cast<size_t>(ftell(file_));
+#endif
+
+ rewind(file_);
+
+ return file_size;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.h b/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.h
new file mode 100644
index 0000000..f822c89
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader.h
@@ -0,0 +1,48 @@
+#ifndef DRACO_IO_STDIO_FILE_READER_H_
+#define DRACO_IO_STDIO_FILE_READER_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "draco/io/file_reader_interface.h"
+
+namespace draco {
+
// FileReaderInterface implementation backed by C stdio (FILE*). Instances
// are created through Open() only; the FILE*-taking constructor is private.
class StdioFileReader : public FileReaderInterface {
 public:
  // Creates and returns a StdioFileReader that reads from |file_name|.
  // Returns nullptr when the file does not exist or cannot be read.
  static std::unique_ptr<FileReaderInterface> Open(
      const std::string &file_name);

  // Not default-constructible or copyable: each reader owns exactly one FILE*.
  StdioFileReader() = delete;
  StdioFileReader(const StdioFileReader &) = delete;
  StdioFileReader &operator=(const StdioFileReader &) = delete;

  // NOTE(review): the defaulted moves copy |file_| without nulling the
  // source, so a moved-from reader would fclose() the same handle again in
  // its destructor — verify these move operations are never actually used.
  StdioFileReader(StdioFileReader &&) = default;
  StdioFileReader &operator=(StdioFileReader &&) = default;

  // Closes |file_|.
  ~StdioFileReader() override;

  // Reads the entire contents of the input file into |buffer| and returns true.
  bool ReadFileToBuffer(std::vector<char> *buffer) override;
  bool ReadFileToBuffer(std::vector<uint8_t> *buffer) override;

  // Returns the size of the file.
  size_t GetFileSize() override;

 private:
  // Takes ownership of |file|; closed by the destructor.
  StdioFileReader(FILE *file) : file_(file) {}

  FILE *file_ = nullptr;
  // Set at static-init time by registering Open() with FileReaderFactory
  // (see stdio_file_reader.cc).
  static bool registered_in_factory_;
};
+
+} // namespace draco
+
+#endif // DRACO_IO_STDIO_FILE_READER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader_test.cc b/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader_test.cc
new file mode 100644
index 0000000..487819a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/stdio_file_reader_test.cc
@@ -0,0 +1,49 @@
+#include "draco/io/stdio_file_reader.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/file_reader_test_common.h"
+
+namespace draco {
+namespace {
+
TEST(StdioFileReaderTest, FailOpen) {
  // Empty and nonexistent paths must both yield nullptr.
  EXPECT_EQ(StdioFileReader::Open(""), nullptr);
  EXPECT_EQ(StdioFileReader::Open("fake file"), nullptr);
}

TEST(StdioFileReaderTest, Open) {
  // Known test assets must open successfully.
  EXPECT_NE(StdioFileReader::Open(GetTestFileFullPath("car.drc")), nullptr);
  EXPECT_NE(StdioFileReader::Open(GetTestFileFullPath("cube_pc.drc")), nullptr);
}

TEST(StdioFileReaderTest, FailRead) {
  auto reader = StdioFileReader::Open(GetTestFileFullPath("car.drc"));
  ASSERT_NE(reader, nullptr);
  // A null output buffer must be rejected.
  std::vector<char> *buffer = nullptr;
  EXPECT_FALSE(reader->ReadFileToBuffer(buffer));
}

TEST(StdioFileReaderTest, ReadFile) {
  std::vector<char> buffer;

  auto reader = StdioFileReader::Open(GetTestFileFullPath("car.drc"));
  ASSERT_NE(reader, nullptr);
  EXPECT_TRUE(reader->ReadFileToBuffer(&buffer));
  EXPECT_EQ(buffer.size(), kFileSizeCarDrc);

  // The same buffer is reused; ReadFileToBuffer() clears and resizes it.
  reader = StdioFileReader::Open(GetTestFileFullPath("cube_pc.drc"));
  ASSERT_NE(reader, nullptr);
  EXPECT_TRUE(reader->ReadFileToBuffer(&buffer));
  EXPECT_EQ(buffer.size(), kFileSizeCubePcDrc);
}

TEST(StdioFileReaderTest, GetFileSize) {
  // NOTE(review): |reader| is dereferenced without ASSERT_NE(reader, nullptr);
  // a missing test asset would crash the test instead of failing cleanly.
  auto reader = StdioFileReader::Open(GetTestFileFullPath("car.drc"));
  ASSERT_EQ(reader->GetFileSize(), kFileSizeCarDrc);
  reader = StdioFileReader::Open(GetTestFileFullPath("cube_pc.drc"));
  ASSERT_EQ(reader->GetFileSize(), kFileSizeCubePcDrc);
}
+
+} // namespace
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.cc b/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.cc
new file mode 100644
index 0000000..2467d07
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.cc
@@ -0,0 +1,59 @@
+#include "draco/io/stdio_file_writer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+#include <string>
+
+#include "draco/draco_features.h"
+#include "draco/io/file_writer_factory.h"
+#include "draco/io/file_writer_utils.h"
+
+namespace draco {
+
// Logs |error_string| together with its source location; do/while(false)
// makes the macro usable as a single statement.
#define FILEWRITER_LOG_ERROR(error_string)                             \
  do {                                                                 \
    fprintf(stderr, "%s:%d (%s): %s.\n", __FILE__, __LINE__, __func__, \
            error_string);                                             \
  } while (false)

// Self-registers Open() with the writer factory at static-init time.
bool StdioFileWriter::registered_in_factory_ =
    FileWriterFactory::RegisterWriter(StdioFileWriter::Open);

// Closes the owned FILE handle. |file_| is never null here because instances
// are only constructed by Open() after a successful fopen().
StdioFileWriter::~StdioFileWriter() { fclose(file_); }
+
+std::unique_ptr<FileWriterInterface> StdioFileWriter::Open(
+ const std::string &file_name) {
+ if (file_name.empty()) {
+ return nullptr;
+ }
+ if (!CheckAndCreatePathForFile(file_name)) {
+ return nullptr;
+ }
+
+ FILE *raw_file_ptr = fopen(file_name.c_str(), "wb");
+ if (raw_file_ptr == nullptr) {
+ return nullptr;
+ }
+
+ std::unique_ptr<StdioFileWriter> file(new (std::nothrow)
+ StdioFileWriter(raw_file_ptr));
+ if (file == nullptr) {
+ FILEWRITER_LOG_ERROR("Out of memory");
+ fclose(raw_file_ptr);
+ return nullptr;
+ }
+
+#ifndef DRACO_OLD_GCC
+ return file;
+#else
+ return std::move(file);
+#endif
+}
+
+bool StdioFileWriter::Write(const char *buffer, size_t size) {
+ return fwrite(buffer, 1, size, file_) == size;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.h b/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.h
new file mode 100644
index 0000000..4e39255
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer.h
@@ -0,0 +1,42 @@
+#ifndef DRACO_IO_STDIO_FILE_WRITER_H_
+#define DRACO_IO_STDIO_FILE_WRITER_H_
+
+#include <cstddef>
+#include <cstdio>
+#include <memory>
+#include <string>
+
+#include "draco/io/file_writer_interface.h"
+
+namespace draco {
+
// FileWriterInterface implementation backed by C stdio (FILE*). Instances are
// created through Open() only; the FILE*-taking constructor is private.
class StdioFileWriter : public FileWriterInterface {
 public:
  // Creates and returns a StdioFileWriter that writes to |file_name|.
  // Returns nullptr when |file_name| cannot be opened for writing.
  static std::unique_ptr<FileWriterInterface> Open(
      const std::string &file_name);

  // Not default-constructible or copyable: each writer owns exactly one FILE*.
  StdioFileWriter() = delete;
  StdioFileWriter(const StdioFileWriter &) = delete;
  StdioFileWriter &operator=(const StdioFileWriter &) = delete;

  // NOTE(review): the defaulted moves copy |file_| without nulling the
  // source, so a moved-from writer would fclose() the same handle again —
  // verify these move operations are never actually used.
  StdioFileWriter(StdioFileWriter &&) = default;
  StdioFileWriter &operator=(StdioFileWriter &&) = default;

  // Closes |file_|.
  ~StdioFileWriter() override;

  // Writes |size| bytes to |file_| from |buffer|. Returns true for success.
  bool Write(const char *buffer, size_t size) override;

 private:
  // Takes ownership of |file|; closed by the destructor.
  StdioFileWriter(FILE *file) : file_(file) {}

  FILE *file_ = nullptr;
  // Set at static-init time by registering Open() with FileWriterFactory
  // (see stdio_file_writer.cc).
  static bool registered_in_factory_;
};
+
+} // namespace draco
+
+#endif // DRACO_IO_STDIO_FILE_WRITER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer_test.cc b/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer_test.cc
new file mode 100644
index 0000000..ed607d1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/io/stdio_file_writer_test.cc
@@ -0,0 +1,38 @@
+#include "draco/io/stdio_file_writer.h"
+
+#include <cstdio>
+#include <cstring>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+
+namespace draco {
+namespace {
+
// Writes |data| to |filename| through StdioFileWriter, then reads the file
// back with plain stdio and asserts the bytes round-trip exactly.
void CheckFileWriter(const std::string &data, const std::string &filename) {
  auto writer = StdioFileWriter::Open(filename);
  ASSERT_NE(writer, nullptr);
  ASSERT_TRUE(writer->Write(data.data(), data.size()));
  // Destroy the writer first so the file is flushed and closed before reading.
  writer.reset();
  // unique_ptr with an fclose deleter closes the FILE even if an ASSERT fails.
  std::unique_ptr<FILE, decltype(&fclose)> file(fopen(filename.c_str(), "r"),
                                                fclose);
  ASSERT_NE(file, nullptr);
  std::string read_buffer(data.size(), ' ');
  ASSERT_EQ(fread(reinterpret_cast<void *>(&read_buffer[0]), 1, data.size(),
                  file.get()),
            data.size());
  ASSERT_EQ(read_buffer, data);
}
+
TEST(StdioFileWriterTest, FailOpen) {
  // An empty path must be rejected.
  EXPECT_EQ(StdioFileWriter::Open(""), nullptr);
}

TEST(StdioFileWriterTest, BasicWrite) {
  // Writes a small string to a temp file and verifies it round-trips.
  const std::string kWriteString = "Hello";
  const std::string kTempFilePath = GetTestTempFileFullPath("hello");
  CheckFileWriter(kWriteString, kTempFilePath);
}
+
+} // namespace
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.cc
new file mode 100644
index 0000000..7e9e6d1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.cc
@@ -0,0 +1,101 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/javascript/emscripten/animation_decoder_webidl_wrapper.h"
+
+#include <vector>
+
+#include "draco/compression/decode.h"
+#include "draco/mesh/mesh.h"
+#include "draco/mesh/mesh_stripifier.h"
+
+using draco::DecoderBuffer;
+using draco::PointAttribute;
+using draco::Status;
+
DracoFloat32Array::DracoFloat32Array() {}

// Returns the value at |index|. No bounds checking is performed.
float DracoFloat32Array::GetValue(int index) const { return values_[index]; }

// Copies |count| floats from |values|, or, when |values| is nullptr, only
// resizes to |count| entries for later SetValue() calls.
bool DracoFloat32Array::SetValues(const float *values, int count) {
  if (values) {
    values_.assign(values, values + count);
  } else {
    values_.resize(count);
  }
  return true;
}
+
AnimationDecoder::AnimationDecoder() {}

// Decodes animation data from the provided buffer. The returned pointer
// refers to |last_status_|, which stays valid until the next decode call on
// this object.
const draco::Status *AnimationDecoder::DecodeBufferToKeyframeAnimation(
    draco::DecoderBuffer *in_buffer, draco::KeyframeAnimation *animation) {
  draco::DecoderOptions dec_options;
  last_status_ = decoder_.Decode(dec_options, in_buffer, animation);
  return &last_status_;
}
+
+bool AnimationDecoder::GetTimestamps(const draco::KeyframeAnimation &animation,
+ DracoFloat32Array *timestamp) {
+ if (!timestamp) {
+ return false;
+ }
+ const int num_frames = animation.num_frames();
+ const draco::PointAttribute *timestamp_att = animation.timestamps();
+ // Timestamp attribute has only 1 component, so the number of components is
+ // equal to the number of frames.
+ timestamp->SetValues(nullptr, num_frames);
+ int entry_id = 0;
+ float timestamp_value = -1.0;
+ for (draco::PointIndex i(0); i < num_frames; ++i) {
+ const draco::AttributeValueIndex val_index = timestamp_att->mapped_index(i);
+ if (!timestamp_att->ConvertValue<float>(val_index, &timestamp_value)) {
+ return false;
+ }
+ timestamp->SetValue(entry_id++, timestamp_value);
+ }
+ return true;
+}
+
+bool AnimationDecoder::GetKeyframes(const draco::KeyframeAnimation &animation,
+ int keyframes_id,
+ DracoFloat32Array *animation_data) {
+ const int num_frames = animation.num_frames();
+ // Get animation data.
+ const draco::PointAttribute *animation_data_att =
+ animation.keyframes(keyframes_id);
+ if (!animation_data_att) {
+ return false;
+ }
+
+ const int components = animation_data_att->num_components();
+ const int num_entries = num_frames * components;
+ const int kMaxAttributeFloatValues = 4;
+
+ std::vector<float> values(components, -1.0);
+ int entry_id = 0;
+ animation_data->SetValues(nullptr, num_entries);
+ for (draco::PointIndex i(0); i < num_frames; ++i) {
+ const draco::AttributeValueIndex val_index =
+ animation_data_att->mapped_index(i);
+ if (!animation_data_att->ConvertValue<float>(val_index, &values[0])) {
+ return false;
+ }
+ for (int j = 0; j < components; ++j) {
+ animation_data->SetValue(entry_id++, values[j]);
+ }
+ }
+ return true;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.h b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.h
new file mode 100644
index 0000000..7486d15
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_decoder_webidl_wrapper.h
@@ -0,0 +1,73 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_JAVASCRIPT_EMSCRIPTEN_ANIMATION_DECODER_WEBIDL_WRAPPER_H_
+#define DRACO_JAVASCRIPT_EMSCRIPTEN_ANIMATION_DECODER_WEBIDL_WRAPPER_H_
+
+#include <vector>
+
+#include "draco/animation/keyframe_animation_decoder.h"
+#include "draco/attributes/attribute_transform_type.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/decode.h"
+#include "draco/core/decoder_buffer.h"
+
// Short aliases referenced by the WebIDL-generated glue code.
typedef draco::AttributeTransformType draco_AttributeTransformType;
typedef draco::GeometryAttribute draco_GeometryAttribute;
typedef draco_GeometryAttribute::Type draco_GeometryAttribute_Type;
typedef draco::EncodedGeometryType draco_EncodedGeometryType;
typedef draco::Status draco_Status;
typedef draco::Status::Code draco_StatusCode;
+
// Growable float array exposed to JavaScript through the WebIDL binder.
class DracoFloat32Array {
 public:
  DracoFloat32Array();
  // Returns the value at |index|; no bounds checking is performed.
  float GetValue(int index) const;

  // In case |values| is nullptr, the data is allocated but not initialized.
  bool SetValues(const float *values, int count);

  // Directly sets a value for a specific index. The array has to be already
  // allocated at this point (using SetValues() method).
  void SetValue(int index, float val) { values_[index] = val; }

  // Fix: explicit cast avoids the implicit size_t -> int narrowing of the
  // original; the WebIDL interface works with plain ints.
  int size() const { return static_cast<int>(values_.size()); }

 private:
  std::vector<float> values_;
};
+
// Class used by emscripten WebIDL Binder [1] to wrap calls to decode animation
// data.
class AnimationDecoder {
 public:
  AnimationDecoder();

  // Decodes animation data from the provided buffer. The returned pointer
  // refers to |last_status_| and stays valid until the next decode call.
  const draco::Status *DecodeBufferToKeyframeAnimation(
      draco::DecoderBuffer *in_buffer, draco::KeyframeAnimation *animation);

  // Copies the per-frame timestamps into |timestamp|; returns false when
  // |timestamp| is null or a value cannot be converted to float.
  static bool GetTimestamps(const draco::KeyframeAnimation &animation,
                            DracoFloat32Array *timestamp);

  // Copies keyframe attribute |keyframes_id| (num_frames * components floats)
  // into |animation_data|.
  static bool GetKeyframes(const draco::KeyframeAnimation &animation,
                           int keyframes_id, DracoFloat32Array *animation_data);

 private:
  draco::KeyframeAnimationDecoder decoder_;
  draco::Status last_status_;
};
+
+#endif // DRACO_JAVASCRIPT_EMSCRIPTEN_ANIMATION_DECODER_WEBIDL_WRAPPER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.cc
new file mode 100644
index 0000000..53a10e5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.cc
@@ -0,0 +1,89 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/javascript/emscripten/animation_encoder_webidl_wrapper.h"
+
+#include "draco/animation/keyframe_animation.h"
+#include "draco/animation/keyframe_animation_encoder.h"
+
DracoInt8Array::DracoInt8Array() {}

// Returns the value at |index|. No bounds checking is performed.
int DracoInt8Array::GetValue(int index) const { return values_[index]; }

// Copies |count| bytes from |values| into the (int-widened) storage.
bool DracoInt8Array::SetValues(const char *values, int count) {
  values_.assign(values, values + count);
  return true;
}
+
+AnimationBuilder::AnimationBuilder() {}
+
+bool AnimationBuilder::SetTimestamps(draco::KeyframeAnimation *animation,
+ long num_frames, const float *timestamps) {
+ if (!animation || !timestamps) {
+ return false;
+ }
+ std::vector<draco::KeyframeAnimation::TimestampType> timestamps_arr(
+ timestamps, timestamps + num_frames);
+ return animation->SetTimestamps(timestamps_arr);
+}
+
+int AnimationBuilder::AddKeyframes(draco::KeyframeAnimation *animation,
+ long num_frames, long num_components,
+ const float *animation_data) {
+ if (!animation || !animation_data) {
+ return -1;
+ }
+ std::vector<float> keyframes_arr(
+ animation_data, animation_data + num_frames * num_components);
+ return animation->AddKeyframes(draco::DT_FLOAT32, num_components,
+ keyframes_arr);
+}
+
// Quantization stays disabled (-1) until the corresponding setter is called;
// EncodeAnimationToDracoBuffer() only applies values > 0.
AnimationEncoder::AnimationEncoder()
    : timestamps_quantization_bits_(-1),
      keyframes_quantization_bits_(-1),
      options_(draco::EncoderOptions::CreateDefaultOptions()) {}

// Sets quantization bits for the timestamp attribute (attribute id 0).
void AnimationEncoder::SetTimestampsQuantization(long quantization_bits) {
  timestamps_quantization_bits_ = quantization_bits;
}

// Sets quantization bits applied to every keyframe attribute (ids >= 1).
void AnimationEncoder::SetKeyframesQuantization(long quantization_bits) {
  keyframes_quantization_bits_ = quantization_bits;
}
+
+int AnimationEncoder::EncodeAnimationToDracoBuffer(
+ draco::KeyframeAnimation *animation, DracoInt8Array *draco_buffer) {
+ if (!animation) {
+ return 0;
+ }
+ draco::EncoderBuffer buffer;
+
+ if (timestamps_quantization_bits_ > 0) {
+ options_.SetAttributeInt(0, "quantization_bits",
+ timestamps_quantization_bits_);
+ }
+ if (keyframes_quantization_bits_ > 0) {
+ for (int i = 1; i <= animation->num_animations(); ++i) {
+ options_.SetAttributeInt(i, "quantization_bits",
+ keyframes_quantization_bits_);
+ }
+ }
+ if (!encoder_.EncodeKeyframeAnimation(*animation, options_, &buffer).ok()) {
+ return 0;
+ }
+
+ draco_buffer->SetValues(buffer.data(), buffer.size());
+ return buffer.size();
+}
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.h b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.h
new file mode 100644
index 0000000..f2ac733
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/animation_encoder_webidl_wrapper.h
@@ -0,0 +1,66 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_JAVASCRIPT_EMSCRIPTEN_ANIMATION_ENCODER_WEBIDL_WRAPPER_H_
+#define DRACO_JAVASCRIPT_EMSCRIPTEN_ANIMATION_ENCODER_WEBIDL_WRAPPER_H_
+
+#include <vector>
+
+#include "draco/animation/keyframe_animation_encoder.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/encoder_options.h"
+#include "draco/compression/encode.h"
+
// Growable int array exposed to JavaScript; filled from raw encoded bytes by
// AnimationEncoder (see SetValues in the .cc file).
class DracoInt8Array {
 public:
  DracoInt8Array();
  // Returns the value at |index|; no bounds checking is performed.
  int GetValue(int index) const;
  // Copies |count| bytes from |values|.
  bool SetValues(const char *values, int count);

  // Fix: declared const so the size can be queried through const references.
  size_t size() const { return values_.size(); }

 private:
  std::vector<int> values_;
};
+
// Builder helpers that populate a KeyframeAnimation from raw float arrays
// handed over from JavaScript.
class AnimationBuilder {
 public:
  AnimationBuilder();

  // Copies |num_frames| timestamps into |animation|; false on null input.
  bool SetTimestamps(draco::KeyframeAnimation *animation, long num_frames,
                     const float *timestamps);

  // Adds num_frames * num_components float keyframes; returns the id from
  // KeyframeAnimation::AddKeyframes, or -1 on null input.
  int AddKeyframes(draco::KeyframeAnimation *animation, long num_frames,
                   long num_components, const float *animation_data);
};
+
// Encodes a KeyframeAnimation into a draco byte buffer for JavaScript.
class AnimationEncoder {
 public:
  AnimationEncoder();

  // Quantization bits for the timestamp attribute (id 0); values <= 0 leave
  // quantization unset.
  void SetTimestampsQuantization(long quantization_bits);
  // TODO: Use expert encoder to set per attribute quantization.
  // Quantization bits applied to every keyframe attribute (ids >= 1).
  void SetKeyframesQuantization(long quantization_bits);
  // Returns the encoded size in bytes, or 0 on failure.
  int EncodeAnimationToDracoBuffer(draco::KeyframeAnimation *animation,
                                   DracoInt8Array *draco_buffer);

 private:
  draco::KeyframeAnimationEncoder encoder_;
  long timestamps_quantization_bits_;
  long keyframes_quantization_bits_;
  draco::EncoderOptions options_;
};
+
+#endif // DRACO_JAVASCRIPT_EMSCRIPTEN_ANIMATION_ENCODER_WEBIDL_WRAPPER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_functions.js b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_functions.js
new file mode 100644
index 0000000..577900f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_functions.js
@@ -0,0 +1,33 @@
+// Copyright 2020 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Returns the encoded geometry type stored in |array|. In general, |array|
// should be a javascript Int8Array containing the encoded data. For backward
// compatibility, |array| can also represent a Module.DecoderBuffer object.
Module['Decoder'].prototype.GetEncodedGeometryType = function(array) {
  if (array.__class__ && array.__class__ === Module.DecoderBuffer) {
    // Legacy path: |array| is a DecoderBuffer. Use the deprecated function.
    return Module.Decoder.prototype.GetEncodedGeometryType_Deprecated(array);
  }
  if (array.byteLength < 8) {
    return Module.INVALID_GEOMETRY_TYPE;
  }
  // Byte 7 of the encoded header holds the geometry type.
  var geometryType = array[7];
  if (geometryType === 0) {
    return Module.POINT_CLOUD;
  }
  if (geometryType === 1) {
    return Module.TRIANGULAR_MESH;
  }
  return Module.INVALID_GEOMETRY_TYPE;
};
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.cc
new file mode 100644
index 0000000..66fe77d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.cc
@@ -0,0 +1,363 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/javascript/emscripten/decoder_webidl_wrapper.h"
+
+#include "draco/compression/decode.h"
+#include "draco/mesh/mesh.h"
+#include "draco/mesh/mesh_stripifier.h"
+
+using draco::DecoderBuffer;
+using draco::Mesh;
+using draco::Metadata;
+using draco::PointAttribute;
+using draco::PointCloud;
+using draco::Status;
+
// |entry_names_metadata_| caches which Metadata object |entry_names_| was
// built from (see GetEntryName()).
MetadataQuerier::MetadataQuerier() : entry_names_metadata_(nullptr) {}

// Returns true when |metadata| contains an entry named |entry_name|.
bool MetadataQuerier::HasEntry(const Metadata &metadata,
                               const char *entry_name) const {
  return metadata.entries().count(entry_name) > 0;
}
+
+long MetadataQuerier::GetIntEntry(const Metadata &metadata,
+ const char *entry_name) const {
+ int32_t value = 0;
+ const std::string name(entry_name);
+ metadata.GetEntryInt(name, &value);
+ return value;
+}
+
+void MetadataQuerier::GetIntEntryArray(const draco::Metadata &metadata,
+ const char *entry_name,
+ DracoInt32Array *out_values) const {
+ const std::string name(entry_name);
+ std::vector<int32_t> values;
+ metadata.GetEntryIntArray(name, &values);
+ out_values->MoveData(std::move(values));
+}
+
+double MetadataQuerier::GetDoubleEntry(const Metadata &metadata,
+ const char *entry_name) const {
+ double value = 0;
+ const std::string name(entry_name);
+ metadata.GetEntryDouble(name, &value);
+ return value;
+}
+
+const char *MetadataQuerier::GetStringEntry(const Metadata &metadata,
+ const char *entry_name) {
+ const std::string name(entry_name);
+ if (!metadata.GetEntryString(name, &last_string_returned_)) {
+ return nullptr;
+ }
+
+ const char *value = last_string_returned_.c_str();
+ return value;
+}
+
// Returns the number of entries stored in |metadata|.
long MetadataQuerier::NumEntries(const Metadata &metadata) const {
  return metadata.num_entries();
}
+
+const char *MetadataQuerier::GetEntryName(const Metadata &metadata,
+ int entry_id) {
+ if (entry_names_metadata_ != &metadata) {
+ entry_names_.clear();
+ entry_names_metadata_ = &metadata;
+ // Initialize the list of entry names.
+ for (auto &&entry : metadata.entries()) {
+ entry_names_.push_back(entry.first);
+ }
+ }
+ if (entry_id < 0 || entry_id >= entry_names_.size()) {
+ return nullptr;
+ }
+ return entry_names_[entry_id].c_str();
+}
+
Decoder::Decoder() {}

// Deprecated DecoderBuffer-based variant; the JS wrapper in
// decoder_functions.js inspects the raw byte array instead and only falls
// back to this function for DecoderBuffer inputs.
draco_EncodedGeometryType Decoder::GetEncodedGeometryType_Deprecated(
    DecoderBuffer *in_buffer) {
  return draco::Decoder::GetEncodedGeometryType(in_buffer).value();
}
+
// Decodes a point cloud from |in_buffer|. The returned Status points at
// |last_status_|, valid until the next decode call on this object.
const Status *Decoder::DecodeBufferToPointCloud(DecoderBuffer *in_buffer,
                                                PointCloud *out_point_cloud) {
  last_status_ = decoder_.DecodeBufferToGeometry(in_buffer, out_point_cloud);
  return &last_status_;
}

// Convenience overload that wraps |data| in a DecoderBuffer first.
const draco::Status *Decoder::DecodeArrayToPointCloud(
    const char *data, size_t data_size, PointCloud *out_point_cloud) {
  DecoderBuffer buffer;
  buffer.Init(data, data_size);
  return DecodeBufferToPointCloud(&buffer, out_point_cloud);
}

// Mesh counterpart of DecodeBufferToPointCloud().
const Status *Decoder::DecodeBufferToMesh(DecoderBuffer *in_buffer,
                                          Mesh *out_mesh) {
  last_status_ = decoder_.DecodeBufferToGeometry(in_buffer, out_mesh);
  return &last_status_;
}

// Mesh counterpart of DecodeArrayToPointCloud().
const draco::Status *Decoder::DecodeArrayToMesh(const char *data,
                                                size_t data_size,
                                                Mesh *out_mesh) {
  DecoderBuffer buffer;
  buffer.Init(data, data_size);
  return DecodeBufferToMesh(&buffer, out_mesh);
}
+
// Returns the id of the named attribute of |type| in |pc|.
long Decoder::GetAttributeId(const PointCloud &pc,
                             draco_GeometryAttribute_Type type) const {
  return pc.GetNamedAttributeId(type);
}

// Returns the attribute stored at |att_id|; the id is not validated here.
const PointAttribute *Decoder::GetAttribute(const PointCloud &pc, long att_id) {
  return pc.attribute(att_id);
}

// Looks the attribute up by its unique id.
const PointAttribute *Decoder::GetAttributeByUniqueId(const PointCloud &pc,
                                                      long unique_id) {
  return pc.GetAttributeByUniqueId(unique_id);
}

// Convenience wrapper: finds the attribute whose metadata entry "name"
// equals |attribute_name|.
long Decoder::GetAttributeIdByName(const PointCloud &pc,
                                   const char *attribute_name) {
  const std::string entry_value(attribute_name);
  return pc.GetAttributeIdByMetadataEntry("name", entry_value);
}

// Finds the attribute whose metadata entry |metadata_name| has value
// |metadata_value|.
long Decoder::GetAttributeIdByMetadataEntry(const PointCloud &pc,
                                            const char *metadata_name,
                                            const char *metadata_value) {
  const std::string entry_name(metadata_name);
  const std::string entry_value(metadata_value);
  return pc.GetAttributeIdByMetadataEntry(entry_name, entry_value);
}
+
+bool Decoder::GetFaceFromMesh(const Mesh &m,
+ draco::FaceIndex::ValueType face_id,
+ DracoInt32Array *out_values) {
+ const Mesh::Face &face = m.face(draco::FaceIndex(face_id));
+ const auto ptr = reinterpret_cast<const int32_t *>(face.data());
+ out_values->MoveData(std::vector<int32_t>({ptr, ptr + face.size()}));
+ return true;
+}
+
+long Decoder::GetTriangleStripsFromMesh(const Mesh &m,
+ DracoInt32Array *strip_values) {
+ draco::MeshStripifier stripifier;
+ std::vector<int32_t> strip_indices;
+ if (!stripifier.GenerateTriangleStripsWithDegenerateTriangles(
+ m, std::back_inserter(strip_indices))) {
+ return 0;
+ }
+ strip_values->MoveData(std::move(strip_indices));
+ return stripifier.num_strips();
+}
+
+template <typename T>
+bool GetTrianglesArray(const draco::Mesh &m, const int out_size,
+ T *out_values) {
+ const uint32_t num_faces = m.num_faces();
+ if (num_faces * 3 * sizeof(T) != out_size) {
+ return false;
+ }
+
+ for (uint32_t face_id = 0; face_id < num_faces; ++face_id) {
+ const Mesh::Face &face = m.face(draco::FaceIndex(face_id));
+ out_values[face_id * 3 + 0] = static_cast<T>(face[0].value());
+ out_values[face_id * 3 + 1] = static_cast<T>(face[1].value());
+ out_values[face_id * 3 + 2] = static_cast<T>(face[2].value());
+ }
+ return true;
+}
+
// uint16 variant; fails when any point index would not fit into 16 bits.
bool Decoder::GetTrianglesUInt16Array(const draco::Mesh &m, int out_size,
                                      void *out_values) {
  if (m.num_points() > std::numeric_limits<uint16_t>::max()) {
    return false;
  }
  return GetTrianglesArray<uint16_t>(m, out_size,
                                     reinterpret_cast<uint16_t *>(out_values));
}

// uint32 variant; no range check is needed for 32-bit indices.
bool Decoder::GetTrianglesUInt32Array(const draco::Mesh &m, int out_size,
                                      void *out_values) {
  return GetTrianglesArray<uint32_t>(m, out_size,
                                     reinterpret_cast<uint32_t *>(out_values));
}
+
+bool Decoder::GetAttributeFloat(const PointAttribute &pa,
+ draco::AttributeValueIndex::ValueType val_index,
+ DracoFloat32Array *out_values) {
+ const int kMaxAttributeFloatValues = 4;
+ const int components = pa.num_components();
+ float values[kMaxAttributeFloatValues] = {-2.0, -2.0, -2.0, -2.0};
+ if (!pa.ConvertValue<float>(draco::AttributeValueIndex(val_index), values))
+ return false;
+ out_values->MoveData({values, values + components});
+ return true;
+}
+
+bool Decoder::GetAttributeFloatForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoFloat32Array *out_values) {
+ const int components = pa.num_components();
+ const int num_points = pc.num_points();
+ const int num_entries = num_points * components;
+ const int kMaxAttributeFloatValues = 4;
+ float values[kMaxAttributeFloatValues] = {-2.0, -2.0, -2.0, -2.0};
+ int entry_id = 0;
+
+ out_values->Resize(num_entries);
+ for (draco::PointIndex i(0); i < num_points; ++i) {
+ const draco::AttributeValueIndex val_index = pa.mapped_index(i);
+ if (!pa.ConvertValue<float>(val_index, values)) {
+ return false;
+ }
+ for (int j = 0; j < components; ++j) {
+ out_values->SetValue(entry_id++, values[j]);
+ }
+ }
+ return true;
+}
+
+bool Decoder::GetAttributeFloatArrayForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ int out_size,
+ void *out_values) {
+ const int components = pa.num_components();
+ const int num_points = pc.num_points();
+ const int data_size = num_points * components * sizeof(float);
+ if (data_size != out_size) {
+ return false;
+ }
+ const bool requested_type_is_float = pa.data_type() == draco::DT_FLOAT32;
+ const int kMaxAttributeFloatValues = 4;
+ float values[kMaxAttributeFloatValues] = {-2.0, -2.0, -2.0, -2.0};
+ int entry_id = 0;
+ float *const floats = reinterpret_cast<float *>(out_values);
+
+ for (draco::PointIndex i(0); i < num_points; ++i) {
+ const draco::AttributeValueIndex val_index = pa.mapped_index(i);
+ if (requested_type_is_float) {
+ pa.GetValue(val_index, values);
+ } else {
+ if (!pa.ConvertValue<float>(val_index, values)) {
+ return false;
+ }
+ }
+ for (int j = 0; j < components; ++j) {
+ floats[entry_id++] = values[j];
+ }
+ }
+ return true;
+}
+
+bool Decoder::GetAttributeInt8ForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoInt8Array *out_values) {
+ return GetAttributeDataForAllPoints<DracoInt8Array, int8_t>(
+ pc, pa, draco::DT_INT8, draco::DT_UINT8, out_values);
+}
+
+bool Decoder::GetAttributeUInt8ForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoUInt8Array *out_values) {
+ return GetAttributeDataForAllPoints<DracoUInt8Array, uint8_t>(
+ pc, pa, draco::DT_INT8, draco::DT_UINT8, out_values);
+}
+
+bool Decoder::GetAttributeInt16ForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoInt16Array *out_values) {
+ return GetAttributeDataForAllPoints<DracoInt16Array, int16_t>(
+ pc, pa, draco::DT_INT16, draco::DT_UINT16, out_values);
+}
+
+bool Decoder::GetAttributeUInt16ForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoUInt16Array *out_values) {
+ return GetAttributeDataForAllPoints<DracoUInt16Array, uint16_t>(
+ pc, pa, draco::DT_INT16, draco::DT_UINT16, out_values);
+}
+
+bool Decoder::GetAttributeInt32ForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoInt32Array *out_values) {
+ return GetAttributeDataForAllPoints<DracoInt32Array, int32_t>(
+ pc, pa, draco::DT_INT32, draco::DT_UINT32, out_values);
+}
+
+bool Decoder::GetAttributeIntForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoInt32Array *out_values) {
+ return GetAttributeInt32ForAllPoints(pc, pa, out_values);
+}
+
+bool Decoder::GetAttributeUInt32ForAllPoints(const PointCloud &pc,
+ const PointAttribute &pa,
+ DracoUInt32Array *out_values) {
+ return GetAttributeDataForAllPoints<DracoUInt32Array, uint32_t>(
+ pc, pa, draco::DT_INT32, draco::DT_UINT32, out_values);
+}
+
+bool Decoder::GetAttributeDataArrayForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ draco_DataType data_type,
+ int out_size,
+ void *out_values) {
+ switch (data_type) {
+ case draco::DT_INT8:
+ return GetAttributeDataArrayForAllPoints<int8_t>(pc, pa, draco::DT_INT8,
+ out_size, out_values);
+ case draco::DT_INT16:
+ return GetAttributeDataArrayForAllPoints<int16_t>(pc, pa, draco::DT_INT16,
+ out_size, out_values);
+ case draco::DT_INT32:
+ return GetAttributeDataArrayForAllPoints<int32_t>(pc, pa, draco::DT_INT32,
+ out_size, out_values);
+ case draco::DT_UINT8:
+ return GetAttributeDataArrayForAllPoints<uint8_t>(pc, pa, draco::DT_UINT8,
+ out_size, out_values);
+ case draco::DT_UINT16:
+ return GetAttributeDataArrayForAllPoints<uint16_t>(
+ pc, pa, draco::DT_UINT16, out_size, out_values);
+ case draco::DT_UINT32:
+ return GetAttributeDataArrayForAllPoints<uint32_t>(
+ pc, pa, draco::DT_UINT32, out_size, out_values);
+ case draco::DT_FLOAT32:
+ return GetAttributeFloatArrayForAllPoints(pc, pa, out_size, out_values);
+ default:
+ return false;
+ }
+}
+
+void Decoder::SkipAttributeTransform(draco_GeometryAttribute_Type att_type) {
+ decoder_.SetSkipAttributeTransform(att_type);
+}
+
+const Metadata *Decoder::GetMetadata(const PointCloud &pc) const {
+ return pc.GetMetadata();
+}
+
+const Metadata *Decoder::GetAttributeMetadata(const PointCloud &pc,
+ long att_id) const {
+ return pc.GetAttributeMetadataByAttributeId(att_id);
+}
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.h b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.h
new file mode 100644
index 0000000..75ae76a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/decoder_webidl_wrapper.h
@@ -0,0 +1,330 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_JAVASCRIPT_EMSCRIPTEN_DECODER_WEBIDL_WRAPPER_H_
+#define DRACO_JAVASCRIPT_EMSCRIPTEN_DECODER_WEBIDL_WRAPPER_H_
+
+#include <vector>
+
+#include "draco/attributes/attribute_transform_type.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/decode.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/mesh/mesh.h"
+
+typedef draco::AttributeTransformType draco_AttributeTransformType;
+typedef draco::GeometryAttribute draco_GeometryAttribute;
+typedef draco_GeometryAttribute::Type draco_GeometryAttribute_Type;
+typedef draco::EncodedGeometryType draco_EncodedGeometryType;
+typedef draco::Status draco_Status;
+typedef draco::Status::Code draco_StatusCode;
+typedef draco::DataType draco_DataType;
+
+// To generate Draco JavaScript bindings you must have emscripten installed.
+// Then run make -f Makefile.emcc jslib.
+template <typename T>
+class DracoArray {
+ public:
+ T GetValue(int index) const { return values_[index]; }
+
+ void Resize(int size) { values_.resize(size); }
+ void MoveData(std::vector<T> &&values) { values_ = std::move(values); }
+
+ // Directly sets a value for a specific index. The array has to be already
+ // allocated at this point (using Resize() method).
+ void SetValue(int index, T val) { values_[index] = val; }
+
+ int size() const { return values_.size(); }
+
+ private:
+ std::vector<T> values_;
+};
+
+using DracoFloat32Array = DracoArray<float>;
+using DracoInt8Array = DracoArray<int8_t>;
+using DracoUInt8Array = DracoArray<uint8_t>;
+using DracoInt16Array = DracoArray<int16_t>;
+using DracoUInt16Array = DracoArray<uint16_t>;
+using DracoInt32Array = DracoArray<int32_t>;
+using DracoUInt32Array = DracoArray<uint32_t>;
+
+class MetadataQuerier {
+ public:
+ MetadataQuerier();
+
+ bool HasEntry(const draco::Metadata &metadata, const char *entry_name) const;
+
+ // This function does not guarantee that entry's type is long.
+ long GetIntEntry(const draco::Metadata &metadata,
+ const char *entry_name) const;
+
+ // This function does not guarantee that entry types are long.
+ void GetIntEntryArray(const draco::Metadata &metadata, const char *entry_name,
+ DracoInt32Array *out_values) const;
+
+ // This function does not guarantee that entry's type is double.
+ double GetDoubleEntry(const draco::Metadata &metadata,
+ const char *entry_name) const;
+
+ // This function does not guarantee that entry's type is char*.
+ const char *GetStringEntry(const draco::Metadata &metadata,
+ const char *entry_name);
+
+ long NumEntries(const draco::Metadata &metadata) const;
+ const char *GetEntryName(const draco::Metadata &metadata, int entry_id);
+
+ private:
+ // Cached values for metadata entries.
+ std::vector<std::string> entry_names_;
+ const draco::Metadata *entry_names_metadata_;
+
+ // Cached value for GetStringEntry() to avoid scoping issues.
+ std::string last_string_returned_;
+};
+
+// Class used by emscripten WebIDL Binder [1] to wrap calls to decode Draco
+// data.
+// [1]http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+class Decoder {
+ public:
+ Decoder();
+
+ // Returns the geometry type stored in the |in_buffer|. Return values can be
+ // INVALID_GEOMETRY_TYPE, POINT_CLOUD, or MESH.
+ // Deprecated: Use decoder.GetEncodedGeometryType(array), where |array| is
+ // an Int8Array containing the encoded data.
+ static draco_EncodedGeometryType GetEncodedGeometryType_Deprecated(
+ draco::DecoderBuffer *in_buffer);
+
+ // Decodes a point cloud from the provided buffer.
+ // Deprecated: Use DecodeArrayToPointCloud.
+ const draco::Status *DecodeBufferToPointCloud(
+ draco::DecoderBuffer *in_buffer, draco::PointCloud *out_point_cloud);
+
+ // Decodes a point cloud from the provided array.
+ const draco::Status *DecodeArrayToPointCloud(
+ const char *data, size_t data_size, draco::PointCloud *out_point_cloud);
+
+ // Decodes a triangular mesh from the provided buffer.
+ // Deprecated: Use DecodeArrayToMesh.
+ const draco::Status *DecodeBufferToMesh(draco::DecoderBuffer *in_buffer,
+ draco::Mesh *out_mesh);
+
+ // Decodes a mesh from the provided array.
+ const draco::Status *DecodeArrayToMesh(const char *data, size_t data_size,
+ draco::Mesh *out_mesh);
+
+ // Returns an attribute id for the first attribute of a given type.
+ long GetAttributeId(const draco::PointCloud &pc,
+ draco_GeometryAttribute_Type type) const;
+
+ // Returns an attribute id of an attribute that contains a valid metadata
+ // entry "name" with value |attribute_name|.
+ static long GetAttributeIdByName(const draco::PointCloud &pc,
+ const char *attribute_name);
+
+ // Returns an attribute id of an attribute with a specified metadata pair
+ // <|metadata_name|, |metadata_value|>.
+ static long GetAttributeIdByMetadataEntry(const draco::PointCloud &pc,
+ const char *metadata_name,
+ const char *metadata_value);
+
+ // Returns an attribute id of an attribute that has the unique id.
+ static const draco::PointAttribute *GetAttributeByUniqueId(
+ const draco::PointCloud &pc, long unique_id);
+
+ // Returns a PointAttribute pointer from |att_id| index.
+ static const draco::PointAttribute *GetAttribute(const draco::PointCloud &pc,
+ long att_id);
+
+ // Returns Mesh::Face values in |out_values| from |face_id| index.
+ static bool GetFaceFromMesh(const draco::Mesh &m,
+ draco::FaceIndex::ValueType face_id,
+ DracoInt32Array *out_values);
+
+ // Returns triangle strips for mesh |m|. If there's multiple strips,
+ // the strips will be separated by degenerate faces.
+ static long GetTriangleStripsFromMesh(const draco::Mesh &m,
+ DracoInt32Array *strip_values);
+
+ // Returns all faces as triangles. Fails if indices exceed the data range (in
+ // particular for uint16), or the output array size does not match.
+ // |out_size| is the size in bytes of |out_values|. |out_values| must be
+ // allocated before calling this function.
+ static bool GetTrianglesUInt16Array(const draco::Mesh &m, int out_size,
+ void *out_values);
+ static bool GetTrianglesUInt32Array(const draco::Mesh &m, int out_size,
+ void *out_values);
+
+ // Returns float attribute values in |out_values| from |entry_index| index.
+ static bool GetAttributeFloat(
+ const draco::PointAttribute &pa,
+ draco::AttributeValueIndex::ValueType entry_index,
+ DracoFloat32Array *out_values);
+
+ // Returns float attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeFloatForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoFloat32Array *out_values);
+
+ // Returns float attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeFloatArrayForAllPoints(
+ const draco::PointCloud &pc, const draco::PointAttribute &pa,
+ int out_size, void *out_values);
+
+ // Returns int8_t attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeInt8ForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoInt8Array *out_values);
+
+ // Returns uint8_t attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeUInt8ForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoUInt8Array *out_values);
+
+ // Returns int16_t attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeInt16ForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoInt16Array *out_values);
+
+ // Returns uint16_t attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeUInt16ForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoUInt16Array *out_values);
+
+ // Returns int32_t attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeInt32ForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoInt32Array *out_values);
+
+ // Deprecated: Use GetAttributeInt32ForAllPoints() instead.
+ static bool GetAttributeIntForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoInt32Array *out_values);
+
+ // Returns uint32_t attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ static bool GetAttributeUInt32ForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ DracoUInt32Array *out_values);
+
+ // Returns |data_type| attribute values for all point ids of the point cloud.
+ // I.e., the |out_values| is going to contain m.num_points() entries.
+ // |out_size| is the size in bytes of |out_values|. |out_values| must be
+ // allocated before calling this function.
+ static bool GetAttributeDataArrayForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ draco_DataType data_type,
+ int out_size, void *out_values);
+
+ // Tells the decoder to skip an attribute transform (e.g. dequantization) for
+ // an attribute of a given type.
+ void SkipAttributeTransform(draco_GeometryAttribute_Type att_type);
+
+ const draco::Metadata *GetMetadata(const draco::PointCloud &pc) const;
+ const draco::Metadata *GetAttributeMetadata(const draco::PointCloud &pc,
+ long att_id) const;
+
+ private:
+ template <class DracoArrayT, class ValueTypeT>
+ static bool GetAttributeDataForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ draco::DataType draco_signed_type,
+ draco::DataType draco_unsigned_type,
+ DracoArrayT *out_values) {
+ const int components = pa.num_components();
+ const int num_points = pc.num_points();
+ const int num_entries = num_points * components;
+
+ if ((pa.data_type() == draco_signed_type ||
+ pa.data_type() == draco_unsigned_type) &&
+ pa.is_mapping_identity()) {
+ // Copy values directly to the output vector.
+ const ValueTypeT *ptr = reinterpret_cast<const ValueTypeT *>(
+ pa.GetAddress(draco::AttributeValueIndex(0)));
+ out_values->MoveData({ptr, ptr + num_entries});
+ return true;
+ }
+
+ // Copy values one by one.
+ std::vector<ValueTypeT> values(components);
+ int entry_id = 0;
+
+ out_values->Resize(num_entries);
+ for (draco::PointIndex i(0); i < num_points; ++i) {
+ const draco::AttributeValueIndex val_index = pa.mapped_index(i);
+ if (!pa.ConvertValue<ValueTypeT>(val_index, &values[0])) {
+ return false;
+ }
+ for (int j = 0; j < components; ++j) {
+ out_values->SetValue(entry_id++, values[j]);
+ }
+ }
+ return true;
+ }
+
+ template <class T>
+ static bool GetAttributeDataArrayForAllPoints(const draco::PointCloud &pc,
+ const draco::PointAttribute &pa,
+ const draco::DataType type,
+ int out_size,
+ void *out_values) {
+ const int components = pa.num_components();
+ const int num_points = pc.num_points();
+ const int data_size = num_points * components * sizeof(T);
+ if (data_size != out_size) {
+ return false;
+ }
+ const bool requested_type_matches = pa.data_type() == type;
+ if (requested_type_matches && pa.is_mapping_identity()) {
+ // Copy values directly to the output vector.
+ const auto ptr = pa.GetAddress(draco::AttributeValueIndex(0));
+ ::memcpy(out_values, ptr, data_size);
+ return true;
+ }
+
+ // Copy values one by one.
+ std::vector<T> values(components);
+ int entry_id = 0;
+
+ T *const typed_output = reinterpret_cast<T *>(out_values);
+ for (draco::PointIndex i(0); i < num_points; ++i) {
+ const draco::AttributeValueIndex val_index = pa.mapped_index(i);
+ if (requested_type_matches) {
+ pa.GetValue(val_index, values.data());
+ } else {
+ if (!pa.ConvertValue<T>(val_index, values.data())) {
+ return false;
+ }
+ }
+ for (int j = 0; j < components; ++j) {
+ typed_output[entry_id++] = values[j];
+ }
+ }
+ return true;
+ }
+
+ draco::Decoder decoder_;
+ draco::Status last_status_;
+};
+
+#endif // DRACO_JAVASCRIPT_EMSCRIPTEN_DECODER_WEBIDL_WRAPPER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc
new file mode 100644
index 0000000..83ed98f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc
@@ -0,0 +1,28 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is used by emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+#include "draco/attributes/attribute_octahedron_transform.h"
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/decode.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/javascript/emscripten/animation_decoder_webidl_wrapper.h"
+#include "draco/mesh/mesh.h"
+#include "draco/point_cloud/point_cloud.h"
+
+// glue_animation_decoder.cpp is generated by Makefile.emcc build_glue target.
+#include "glue_animation_decoder.cpp"
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc
new file mode 100644
index 0000000..29e7ed3
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc
@@ -0,0 +1,25 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is used by emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/encode.h"
+#include "draco/javascript/emscripten/animation_encoder_webidl_wrapper.h"
+#include "draco/mesh/mesh.h"
+#include "draco/point_cloud/point_cloud.h"
+
+// glue_animation_encoder.cpp is generated by Makefile.emcc build_glue target.
+#include "glue_animation_encoder.cpp"
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_decoder.idl b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_decoder.idl
new file mode 100644
index 0000000..c9fe76b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_decoder.idl
@@ -0,0 +1,52 @@
+// Interface exposed to emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+[Prefix="draco::"]
+interface DecoderBuffer {
+ void DecoderBuffer();
+ void Init([Const] byte[] data, unsigned long data_size);
+};
+
+enum draco_StatusCode {
+ "draco_Status::OK",
+ "draco_Status::DRACO_ERROR",
+ "draco_Status::IO_ERROR",
+ "draco_Status::INVALID_PARAMETER",
+ "draco_Status::UNSUPPORTED_VERSION",
+ "draco_Status::UNKNOWN_VERSION",
+};
+
+[Prefix="draco::"]
+interface Status {
+ draco_StatusCode code();
+ boolean ok();
+ [Const] DOMString error_msg();
+};
+
+// Draco version of typed arrays. The memory of these arrays is allocated on the
+// emscripten heap.
+interface DracoFloat32Array {
+ void DracoFloat32Array();
+ float GetValue(long index);
+ long size();
+};
+
+[Prefix="draco::"]
+interface KeyframeAnimation {
+ void KeyframeAnimation();
+ long num_frames();
+ long num_animations();
+};
+
+interface AnimationDecoder {
+ void AnimationDecoder();
+
+ [Const] Status DecodeBufferToKeyframeAnimation(DecoderBuffer in_buffer,
+ KeyframeAnimation animation);
+
+ boolean GetTimestamps([Ref, Const] KeyframeAnimation animation,
+ DracoFloat32Array timestamp);
+
+ boolean GetKeyframes([Ref, Const] KeyframeAnimation animation,
+ long keyframes_id,
+ DracoFloat32Array animation_data);
+};
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_encoder.idl b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_encoder.idl
new file mode 100644
index 0000000..e74a4c9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_animation_web_encoder.idl
@@ -0,0 +1,34 @@
+// Interface exposed to emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+// Draco version of typed arrays. The memory of these arrays is allocated on the
+// emscripten heap.
+interface DracoInt8Array {
+ void DracoInt8Array();
+ long GetValue(long index);
+ long size();
+};
+
+[Prefix="draco::"]
+interface KeyframeAnimation {
+ void KeyframeAnimation();
+ long num_frames();
+};
+
+interface AnimationBuilder {
+ void AnimationBuilder();
+ boolean SetTimestamps(KeyframeAnimation animation, long num_frames,
+ [Const] float[] timestamps);
+
+ long AddKeyframes(KeyframeAnimation animation, long num_frames,
+ long num_components, [Const] float[] animation_data);
+};
+
+interface AnimationEncoder {
+ void AnimationEncoder();
+
+ void SetTimestampsQuantization(long quantization_bits);
+ void SetKeyframesQuantization(long quantization_bits);
+
+ long EncodeAnimationToDracoBuffer(KeyframeAnimation animation,
+ DracoInt8Array encoded_data);
+};
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_decoder_glue_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_decoder_glue_wrapper.cc
new file mode 100644
index 0000000..249d86a
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_decoder_glue_wrapper.cc
@@ -0,0 +1,28 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is used by emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+#include "draco/attributes/attribute_octahedron_transform.h"
+#include "draco/attributes/attribute_quantization_transform.h"
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/decode.h"
+#include "draco/core/decoder_buffer.h"
+#include "draco/javascript/emscripten/decoder_webidl_wrapper.h"
+#include "draco/mesh/mesh.h"
+#include "draco/point_cloud/point_cloud.h"
+
+// glue.cpp is generated by Makefile.emcc build_glue target.
+#include "glue_decoder.cpp"
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_encoder_glue_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_encoder_glue_wrapper.cc
new file mode 100644
index 0000000..05a0af2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_encoder_glue_wrapper.cc
@@ -0,0 +1,25 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is used by emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/encode.h"
+#include "draco/javascript/emscripten/encoder_webidl_wrapper.h"
+#include "draco/mesh/mesh.h"
+#include "draco/point_cloud/point_cloud.h"
+
+// glue.cpp is generated by Makefile.emcc build_glue target.
+#include "glue_encoder.cpp"
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_decoder.idl b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_decoder.idl
new file mode 100644
index 0000000..3666941
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_decoder.idl
@@ -0,0 +1,283 @@
+// Interface exposed to emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+
+// Deprecated: DecoderBuffer is no longer supported and will be removed in
+// future releases. Please refer to Decoder declaration below for
+// new decoding functions that do not use DecoderBuffer.
+[Prefix="draco::"]
+interface DecoderBuffer {
+ void DecoderBuffer();
+ void Init([Const] byte[] data, unsigned long data_size);
+};
+
+// TODO(fgalligan): Can we remove this?
+enum draco_AttributeTransformType {
+ "draco::ATTRIBUTE_INVALID_TRANSFORM",
+ "draco::ATTRIBUTE_NO_TRANSFORM",
+ "draco::ATTRIBUTE_QUANTIZATION_TRANSFORM",
+ "draco::ATTRIBUTE_OCTAHEDRON_TRANSFORM"
+};
+
+[Prefix="draco::"]
+interface AttributeTransformData {
+ void AttributeTransformData();
+ long transform_type();
+};
+
+enum draco_GeometryAttribute_Type {
+ "draco_GeometryAttribute::INVALID",
+ "draco_GeometryAttribute::POSITION",
+ "draco_GeometryAttribute::NORMAL",
+ "draco_GeometryAttribute::COLOR",
+ "draco_GeometryAttribute::TEX_COORD",
+ "draco_GeometryAttribute::GENERIC"
+};
+
+[Prefix="draco::"]
+interface GeometryAttribute {
+ void GeometryAttribute();
+};
+
+enum draco_EncodedGeometryType {
+ "draco::INVALID_GEOMETRY_TYPE",
+ "draco::POINT_CLOUD",
+ "draco::TRIANGULAR_MESH"
+};
+
+enum draco_DataType {
+ "draco::DT_INVALID",
+ "draco::DT_INT8",
+ "draco::DT_UINT8",
+ "draco::DT_INT16",
+ "draco::DT_UINT16",
+ "draco::DT_INT32",
+ "draco::DT_UINT32",
+ "draco::DT_INT64",
+ "draco::DT_UINT64",
+ "draco::DT_FLOAT32",
+ "draco::DT_FLOAT64",
+ "draco::DT_BOOL",
+ "draco::DT_TYPES_COUNT"
+};
+
+[Prefix="draco::"]
+interface PointAttribute {
+ void PointAttribute();
+ long size();
+ [Const] AttributeTransformData GetAttributeTransformData();
+
+ // From GeometryAttribute
+ long attribute_type();
+ long data_type();
+ byte num_components();
+ boolean normalized();
+ long byte_stride();
+ long byte_offset();
+ long unique_id();
+};
+
+[Prefix="draco::"]
+interface AttributeQuantizationTransform {
+ void AttributeQuantizationTransform();
+ boolean InitFromAttribute([Ref, Const] PointAttribute att);
+ long quantization_bits();
+ float min_value(long axis);
+ float range();
+};
+
+[Prefix="draco::"]
+interface AttributeOctahedronTransform {
+ void AttributeOctahedronTransform();
+ boolean InitFromAttribute([Ref, Const] PointAttribute att);
+ long quantization_bits();
+};
+
+
+[Prefix="draco::"]
+interface PointCloud {
+ void PointCloud();
+
+ long num_attributes();
+ long num_points();
+};
+
+[Prefix="draco::"]
+interface Mesh : PointCloud {
+ void Mesh();
+ long num_faces();
+
+ // From PointCloud
+ long num_attributes();
+ long num_points();
+};
+
+[Prefix="draco::"]
+interface Metadata {
+ void Metadata();
+};
+
+enum draco_StatusCode {
+ "draco_Status::OK",
+ "draco_Status::DRACO_ERROR",
+ "draco_Status::IO_ERROR",
+ "draco_Status::INVALID_PARAMETER",
+ "draco_Status::UNSUPPORTED_VERSION",
+ "draco_Status::UNKNOWN_VERSION",
+};
+
+[Prefix="draco::"]
+interface Status {
+ draco_StatusCode code();
+ boolean ok();
+ [Const] DOMString error_msg();
+};
+
+// Draco version of typed arrays. The memory of these arrays is allocated on the
+// emscripten heap.
+interface DracoFloat32Array {
+ void DracoFloat32Array();
+ float GetValue(long index);
+ long size();
+};
+
+interface DracoInt8Array {
+ void DracoInt8Array();
+ byte GetValue(long index);
+ long size();
+};
+
+interface DracoUInt8Array {
+ void DracoUInt8Array();
+ octet GetValue(long index);
+ long size();
+};
+
+interface DracoInt16Array {
+ void DracoInt16Array();
+ short GetValue(long index);
+ long size();
+};
+
+interface DracoUInt16Array {
+ void DracoUInt16Array();
+ unsigned short GetValue(long index);
+ long size();
+};
+
+interface DracoInt32Array {
+ void DracoInt32Array();
+ long GetValue(long index);
+ long size();
+};
+
+interface DracoUInt32Array {
+ void DracoUInt32Array();
+ unsigned long GetValue(long index);
+ long size();
+};
+
+interface MetadataQuerier {
+ void MetadataQuerier();
+
+ boolean HasEntry([Ref, Const] Metadata metadata,
+ [Const] DOMString entry_name);
+ long GetIntEntry([Ref, Const] Metadata metadata,
+ [Const] DOMString entry_name);
+ void GetIntEntryArray([Ref, Const] Metadata metadata,
+ [Const] DOMString entry_name,
+ DracoInt32Array out_values);
+ double GetDoubleEntry([Ref, Const] Metadata metadata,
+ [Const] DOMString entry_name);
+ [Const] DOMString GetStringEntry([Ref, Const] Metadata metadata,
+ [Const] DOMString entry_name);
+
+ long NumEntries([Ref, Const] Metadata metadata);
+ [Const] DOMString GetEntryName([Ref, Const] Metadata metadata, long entry_id);
+};
+
+interface Decoder {
+ void Decoder();
+
+ [Const] Status DecodeArrayToPointCloud([Const] byte[] data,
+ unsigned long data_size,
+ PointCloud out_point_cloud);
+
+ [Const] Status DecodeArrayToMesh([Const] byte[] data,
+ unsigned long data_size,
+ Mesh out_mesh);
+
+ long GetAttributeId([Ref, Const] PointCloud pc,
+ draco_GeometryAttribute_Type type);
+ long GetAttributeIdByName([Ref, Const] PointCloud pc, [Const] DOMString name);
+ long GetAttributeIdByMetadataEntry([Ref, Const] PointCloud pc,
+ [Const] DOMString name,
+ [Const] DOMString value);
+
+ [Const] PointAttribute GetAttribute([Ref, Const] PointCloud pc, long att_id);
+ [Const] PointAttribute GetAttributeByUniqueId([Ref, Const] PointCloud pc,
+ long unique_id);
+
+ [Const] Metadata GetMetadata([Ref, Const] PointCloud pc);
+ [Const] Metadata GetAttributeMetadata([Ref, Const] PointCloud pc,
+ long att_id);
+
+ boolean GetFaceFromMesh([Ref, Const] Mesh m, long face_id,
+ DracoInt32Array out_values);
+ long GetTriangleStripsFromMesh([Ref, Const] Mesh m,
+ DracoInt32Array strip_values);
+
+ boolean GetTrianglesUInt16Array([Ref, Const] Mesh m,
+ long out_size, VoidPtr out_values);
+ boolean GetTrianglesUInt32Array([Ref, Const] Mesh m,
+ long out_size, VoidPtr out_values);
+
+ boolean GetAttributeFloat([Ref, Const] PointAttribute pa,
+ long att_index,
+ DracoFloat32Array out_values);
+
+ boolean GetAttributeFloatForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoFloat32Array out_values);
+
+ // Deprecated, use GetAttributeInt32ForAllPoints instead.
+ boolean GetAttributeIntForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoInt32Array out_values);
+
+ boolean GetAttributeInt8ForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoInt8Array out_values);
+ boolean GetAttributeUInt8ForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoUInt8Array out_values);
+ boolean GetAttributeInt16ForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoInt16Array out_values);
+ boolean GetAttributeUInt16ForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoUInt16Array out_values);
+ boolean GetAttributeInt32ForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoInt32Array out_values);
+ boolean GetAttributeUInt32ForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ DracoUInt32Array out_values);
+
+ boolean GetAttributeDataArrayForAllPoints([Ref, Const] PointCloud pc,
+ [Ref, Const] PointAttribute pa,
+ draco_DataType data_type,
+ long out_size, VoidPtr out_values);
+
+ void SkipAttributeTransform(draco_GeometryAttribute_Type att_type);
+
+ // Deprecated: Use decoder.GetEncodedGeometryType(array) instead, where
+ // |array| is an Int8Array containing the encoded data.
+ draco_EncodedGeometryType GetEncodedGeometryType_Deprecated(
+ DecoderBuffer in_buffer);
+
+ // Deprecated: UseDecodeArrayToPointCloud instead.
+ [Const] Status DecodeBufferToPointCloud(DecoderBuffer in_buffer,
+ PointCloud out_point_cloud);
+ // Deprecated: UseDecodeArrayToMesh instead.
+ [Const] Status DecodeBufferToMesh(DecoderBuffer in_buffer, Mesh out_mesh);
+};
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_encoder.idl b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_encoder.idl
new file mode 100644
index 0000000..d3261b1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/draco_web_encoder.idl
@@ -0,0 +1,208 @@
+// Interface exposed to emscripten's WebIDL Binder.
+// http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
+enum draco_GeometryAttribute_Type {
+ "draco_GeometryAttribute::INVALID",
+ "draco_GeometryAttribute::POSITION",
+ "draco_GeometryAttribute::NORMAL",
+ "draco_GeometryAttribute::COLOR",
+ "draco_GeometryAttribute::TEX_COORD",
+ "draco_GeometryAttribute::GENERIC"
+};
+
+[Prefix="draco::"]
+interface GeometryAttribute {
+ void GeometryAttribute();
+};
+
+enum draco_EncodedGeometryType {
+ "draco::INVALID_GEOMETRY_TYPE",
+ "draco::POINT_CLOUD",
+ "draco::TRIANGULAR_MESH"
+};
+
+enum draco_MeshEncoderMethod {
+ "draco::MESH_SEQUENTIAL_ENCODING",
+ "draco::MESH_EDGEBREAKER_ENCODING"
+};
+
+[Prefix="draco::"]
+interface PointAttribute {
+ void PointAttribute();
+ long size();
+
+ // From GeometryAttribute
+ long attribute_type();
+ long data_type();
+ byte num_components();
+ boolean normalized();
+ long byte_stride();
+ long byte_offset();
+ long unique_id();
+};
+
+[Prefix="draco::"]
+interface PointCloud {
+ void PointCloud();
+
+ long num_attributes();
+ long num_points();
+};
+
+[Prefix="draco::"]
+interface Mesh : PointCloud {
+ void Mesh();
+ long num_faces();
+
+ // From PointCloud
+ long num_attributes();
+ long num_points();
+ void set_num_points(long num_points);
+};
+
+[Prefix="draco::"]
+interface Metadata {
+ void Metadata();
+};
+
+interface DracoInt8Array {
+ void DracoInt8Array();
+ long GetValue(long index);
+ long size();
+};
+
+interface MetadataBuilder {
+ void MetadataBuilder();
+
+ boolean AddStringEntry(Metadata metadata,
+ [Const] DOMString entry_name,
+ [Const] DOMString entry_value);
+ boolean AddIntEntry(Metadata metadata,
+ [Const] DOMString entry_name,
+ long entry_value);
+ boolean AddIntEntryArray(Metadata metadata,
+ [Const] DOMString entry_name,
+ [Const] long[] att_values,
+ long num_values);
+ boolean AddDoubleEntry(Metadata metadata,
+ [Const] DOMString entry_name,
+ double entry_value);
+};
+
+interface PointCloudBuilder {
+ void PointCloudBuilder();
+ long AddFloatAttribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] float[] att_values);
+ long AddInt8Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] byte[] att_values);
+ long AddUInt8Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] octet[] att_values);
+ long AddInt16Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] short[] att_values);
+ long AddUInt16Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] unsigned short[] att_values);
+ long AddInt32Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] long[] att_values);
+ long AddUInt32Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] unsigned long[] att_values);
+
+ boolean AddMetadata(PointCloud pc, [Const] Metadata metadata);
+ boolean SetMetadataForAttribute(PointCloud pc, long attribute_id,
+ [Const] Metadata metadata);
+};
+
+interface MeshBuilder : PointCloudBuilder {
+ void MeshBuilder();
+
+ boolean AddFacesToMesh(Mesh mesh, long num_faces, [Const] long[] faces);
+
+ // Deprecated.
+ long AddFloatAttributeToMesh(Mesh mesh, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] float[] att_values);
+ // Deprecated.
+ long AddInt32AttributeToMesh(Mesh mesh, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] long[] att_values);
+ // Deprecated.
+ boolean AddMetadataToMesh(Mesh mesh, [Const] Metadata metadata);
+
+ // From PointCloudBuilder
+ long AddFloatAttribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] float[] att_values);
+ long AddInt8Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] byte[] att_values);
+ long AddUInt8Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] octet[] att_values);
+ long AddInt16Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] short[] att_values);
+ long AddUInt16Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] unsigned short[] att_values);
+ long AddInt32Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] long[] att_values);
+ long AddUInt32Attribute(PointCloud pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ [Const] unsigned long[] att_values);
+
+ boolean AddMetadata(PointCloud pc, [Const] Metadata metadata);
+ boolean SetMetadataForAttribute(PointCloud pc, long attribute_id,
+ [Const] Metadata metadata);
+};
+
+interface Encoder {
+ void Encoder();
+ void SetEncodingMethod(long method);
+ void SetAttributeQuantization(draco_GeometryAttribute_Type type,
+ long quantization_bits);
+ void SetAttributeExplicitQuantization(draco_GeometryAttribute_Type type,
+ long quantization_bits,
+ long num_components,
+ [Const] float[] origin,
+ float range);
+ void SetSpeedOptions(long encoding_speed, long decoding_speed);
+ void SetTrackEncodedProperties(boolean flag);
+
+ long EncodeMeshToDracoBuffer(Mesh mesh,
+ DracoInt8Array encoded_data);
+ long EncodePointCloudToDracoBuffer(PointCloud pc, boolean deduplicate_values,
+ DracoInt8Array encoded_data);
+
+ // Returns the number of encoded points or faces from the last Encode
+ // operation. Returns 0 if SetTrackEncodedProperties was not set to true.
+ long GetNumberOfEncodedPoints();
+ long GetNumberOfEncodedFaces();
+};
+
+interface ExpertEncoder {
+ void ExpertEncoder(PointCloud pc);
+ void SetEncodingMethod(long method);
+ void SetAttributeQuantization(long att_id,
+ long quantization_bits);
+ void SetAttributeExplicitQuantization(long att_id,
+ long quantization_bits,
+ long num_components,
+ [Const] float[] origin,
+ float range);
+ void SetSpeedOptions(long encoding_speed, long decoding_speed);
+ void SetTrackEncodedProperties(boolean flag);
+
+ long EncodeToDracoBuffer(boolean deduplicate_values,
+ DracoInt8Array encoded_data);
+
+ // Returns the number of encoded points or faces from the last Encode
+ // operation. Returns 0 if SetTrackEncodedProperties was not set to true.
+ long GetNumberOfEncodedPoints();
+ long GetNumberOfEncodedFaces();
+}; \ No newline at end of file
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.cc b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.cc
new file mode 100644
index 0000000..0a1ff7c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.cc
@@ -0,0 +1,359 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/javascript/emscripten/encoder_webidl_wrapper.h"
+
+#include "draco/compression/encode.h"
+#include "draco/mesh/mesh.h"
+
+DracoInt8Array::DracoInt8Array() {}
+
+int8_t DracoInt8Array::GetValue(int index) const { return values_[index]; }
+
+bool DracoInt8Array::SetValues(const char *values, int count) {
+ values_.assign(values, values + count);
+ return true;
+}
+
+using draco::Mesh;
+using draco::Metadata;
+using draco::PointCloud;
+
+MetadataBuilder::MetadataBuilder() {}
+
+bool MetadataBuilder::AddStringEntry(Metadata *metadata, const char *entry_name,
+ const char *entry_value) {
+ if (!metadata) {
+ return false;
+ }
+ const std::string name{entry_name};
+ const std::string value{entry_value};
+ metadata->AddEntryString(entry_name, entry_value);
+ return true;
+}
+
+bool MetadataBuilder::AddIntEntry(Metadata *metadata, const char *entry_name,
+ long entry_value) {
+ if (!metadata) {
+ return false;
+ }
+ const std::string name{entry_name};
+ metadata->AddEntryInt(name, entry_value);
+ return true;
+}
+
+bool MetadataBuilder::AddIntEntryArray(draco::Metadata *metadata,
+ const char *entry_name,
+ const int32_t *entry_values,
+ int32_t num_values) {
+ if (!metadata) {
+ return false;
+ }
+ const std::string name{entry_name};
+ metadata->AddEntryIntArray(name, {entry_values, entry_values + num_values});
+ return true;
+}
+
+bool MetadataBuilder::AddDoubleEntry(Metadata *metadata, const char *entry_name,
+ double entry_value) {
+ if (!metadata) {
+ return false;
+ }
+ const std::string name{entry_name};
+ metadata->AddEntryDouble(name, entry_value);
+ return true;
+}
+
+int PointCloudBuilder::AddFloatAttribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const float *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_FLOAT32);
+}
+
+int PointCloudBuilder::AddInt8Attribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const char *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_INT8);
+}
+
+int PointCloudBuilder::AddUInt8Attribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const uint8_t *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_UINT8);
+}
+
+int PointCloudBuilder::AddInt16Attribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const int16_t *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_INT16);
+}
+
+int PointCloudBuilder::AddUInt16Attribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices,
+ long num_components,
+ const uint16_t *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_UINT16);
+}
+
+int PointCloudBuilder::AddInt32Attribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const int32_t *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_INT32);
+}
+
+int PointCloudBuilder::AddUInt32Attribute(PointCloud *pc,
+ draco_GeometryAttribute_Type type,
+ long num_vertices,
+ long num_components,
+ const uint32_t *att_values) {
+ return AddAttribute(pc, type, num_vertices, num_components, att_values,
+ draco::DT_UINT32);
+}
+
+bool PointCloudBuilder::AddMetadata(PointCloud *pc, const Metadata *metadata) {
+ if (!pc) {
+ return false;
+ }
+ // Not allow write over metadata.
+ if (pc->metadata()) {
+ return false;
+ }
+ std::unique_ptr<draco::GeometryMetadata> new_metadata =
+ std::unique_ptr<draco::GeometryMetadata>(
+ new draco::GeometryMetadata(*metadata));
+ pc->AddMetadata(std::move(new_metadata));
+ return true;
+}
+
+bool PointCloudBuilder::SetMetadataForAttribute(PointCloud *pc,
+ long attribute_id,
+ const Metadata *metadata) {
+ if (!pc) {
+ return false;
+ }
+ // If empty metadata, just ignore.
+ if (!metadata) {
+ return false;
+ }
+ if (attribute_id < 0 || attribute_id >= pc->num_attributes()) {
+ return false;
+ }
+
+ if (!pc->metadata()) {
+ std::unique_ptr<draco::GeometryMetadata> geometry_metadata =
+ std::unique_ptr<draco::GeometryMetadata>(new draco::GeometryMetadata());
+ pc->AddMetadata(std::move(geometry_metadata));
+ }
+
+ // Get unique attribute id for the attribute.
+ const long unique_id = pc->attribute(attribute_id)->unique_id();
+
+ std::unique_ptr<draco::AttributeMetadata> att_metadata =
+ std::unique_ptr<draco::AttributeMetadata>(
+ new draco::AttributeMetadata(*metadata));
+ att_metadata->set_att_unique_id(unique_id);
+ pc->metadata()->AddAttributeMetadata(std::move(att_metadata));
+ return true;
+}
+
+MeshBuilder::MeshBuilder() {}
+
+bool MeshBuilder::AddFacesToMesh(Mesh *mesh, long num_faces, const int *faces) {
+ if (!mesh) {
+ return false;
+ }
+ mesh->SetNumFaces(num_faces);
+ for (draco::FaceIndex i(0); i < num_faces; ++i) {
+ draco::Mesh::Face face;
+ face[0] = faces[i.value() * 3];
+ face[1] = faces[i.value() * 3 + 1];
+ face[2] = faces[i.value() * 3 + 2];
+ mesh->SetFace(i, face);
+ }
+ return true;
+}
+
+int MeshBuilder::AddFloatAttributeToMesh(Mesh *mesh,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const float *att_values) {
+ return AddFloatAttribute(mesh, type, num_vertices, num_components,
+ att_values);
+}
+
+int MeshBuilder::AddInt32AttributeToMesh(draco::Mesh *mesh,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const int32_t *att_values) {
+ return AddInt32Attribute(mesh, type, num_vertices, num_components,
+ att_values);
+}
+
+bool MeshBuilder::AddMetadataToMesh(Mesh *mesh, const Metadata *metadata) {
+ return AddMetadata(mesh, metadata);
+}
+
+Encoder::Encoder() {}
+
+void Encoder::SetEncodingMethod(long method) {
+ encoder_.SetEncodingMethod(method);
+}
+
+void Encoder::SetAttributeQuantization(draco_GeometryAttribute_Type type,
+ long quantization_bits) {
+ encoder_.SetAttributeQuantization(type, quantization_bits);
+}
+
+void Encoder::SetAttributeExplicitQuantization(
+ draco_GeometryAttribute_Type type, long quantization_bits,
+ long num_components, const float *origin, float range) {
+ encoder_.SetAttributeExplicitQuantization(type, quantization_bits,
+ num_components, origin, range);
+}
+
+void Encoder::SetSpeedOptions(long encoding_speed, long decoding_speed) {
+ encoder_.SetSpeedOptions(encoding_speed, decoding_speed);
+}
+
+void Encoder::SetTrackEncodedProperties(bool flag) {
+ encoder_.SetTrackEncodedProperties(flag);
+}
+
+int Encoder::EncodeMeshToDracoBuffer(Mesh *mesh, DracoInt8Array *draco_buffer) {
+ if (!mesh) {
+ return 0;
+ }
+ draco::EncoderBuffer buffer;
+ if (mesh->GetNamedAttributeId(draco::GeometryAttribute::POSITION) == -1) {
+ return 0;
+ }
+ if (!mesh->DeduplicateAttributeValues()) {
+ return 0;
+ }
+ mesh->DeduplicatePointIds();
+ if (!encoder_.EncodeMeshToBuffer(*mesh, &buffer).ok()) {
+ return 0;
+ }
+ draco_buffer->SetValues(buffer.data(), buffer.size());
+ return buffer.size();
+}
+
+int Encoder::EncodePointCloudToDracoBuffer(draco::PointCloud *pc,
+ bool deduplicate_values,
+ DracoInt8Array *draco_buffer) {
+ // TODO(ostava): Refactor common functionality with EncodeMeshToDracoBuffer().
+ if (!pc) {
+ return 0;
+ }
+ draco::EncoderBuffer buffer;
+ if (pc->GetNamedAttributeId(draco::GeometryAttribute::POSITION) == -1) {
+ return 0;
+ }
+ if (deduplicate_values) {
+ if (!pc->DeduplicateAttributeValues()) {
+ return 0;
+ }
+ pc->DeduplicatePointIds();
+ }
+ if (!encoder_.EncodePointCloudToBuffer(*pc, &buffer).ok()) {
+ return 0;
+ }
+ draco_buffer->SetValues(buffer.data(), buffer.size());
+ return buffer.size();
+}
+
+int Encoder::GetNumberOfEncodedPoints() {
+ return encoder_.num_encoded_points();
+}
+
+int Encoder::GetNumberOfEncodedFaces() { return encoder_.num_encoded_faces(); }
+
+ExpertEncoder::ExpertEncoder(PointCloud *pc) : pc_(pc) {
+ // Web-IDL interface does not support constructor overloading so instead we
+ // use RTTI to determine whether the input is a mesh or a point cloud.
+ Mesh *mesh = dynamic_cast<Mesh *>(pc);
+ if (mesh) {
+ encoder_ =
+ std::unique_ptr<draco::ExpertEncoder>(new draco::ExpertEncoder(*mesh));
+ } else {
+ encoder_ =
+ std::unique_ptr<draco::ExpertEncoder>(new draco::ExpertEncoder(*pc));
+ }
+}
+
+void ExpertEncoder::SetEncodingMethod(long method) {
+ encoder_->SetEncodingMethod(method);
+}
+
+void ExpertEncoder::SetAttributeQuantization(long att_id,
+ long quantization_bits) {
+ encoder_->SetAttributeQuantization(att_id, quantization_bits);
+}
+
+void ExpertEncoder::SetAttributeExplicitQuantization(long att_id,
+ long quantization_bits,
+ long num_components,
+ const float *origin,
+ float range) {
+ encoder_->SetAttributeExplicitQuantization(att_id, quantization_bits,
+ num_components, origin, range);
+}
+
+void ExpertEncoder::SetSpeedOptions(long encoding_speed, long decoding_speed) {
+ encoder_->SetSpeedOptions(encoding_speed, decoding_speed);
+}
+
+void ExpertEncoder::SetTrackEncodedProperties(bool flag) {
+ encoder_->SetTrackEncodedProperties(flag);
+}
+
+int ExpertEncoder::EncodeToDracoBuffer(bool deduplicate_values,
+ DracoInt8Array *draco_buffer) {
+ if (!pc_) {
+ return 0;
+ }
+ if (deduplicate_values) {
+ if (!pc_->DeduplicateAttributeValues()) {
+ return 0;
+ }
+ pc_->DeduplicatePointIds();
+ }
+
+ draco::EncoderBuffer buffer;
+ if (!encoder_->EncodeToBuffer(&buffer).ok()) {
+ return 0;
+ }
+ draco_buffer->SetValues(buffer.data(), buffer.size());
+ return buffer.size();
+}
+
+int ExpertEncoder::GetNumberOfEncodedPoints() {
+ return encoder_->num_encoded_points();
+}
+
+int ExpertEncoder::GetNumberOfEncodedFaces() {
+ return encoder_->num_encoded_faces();
+}
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.h b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.h
new file mode 100644
index 0000000..b1cce79
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/encoder_webidl_wrapper.h
@@ -0,0 +1,186 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_JAVASCRIPT_EMSCRIPTEN_ENCODER_WEBIDL_WRAPPER_H_
+#define DRACO_JAVASCRIPT_EMSCRIPTEN_ENCODER_WEBIDL_WRAPPER_H_
+
+#include <vector>
+
+#include "draco/attributes/point_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/config/encoder_options.h"
+#include "draco/compression/encode.h"
+#include "draco/compression/expert_encode.h"
+#include "draco/mesh/mesh.h"
+
+typedef draco::GeometryAttribute draco_GeometryAttribute;
+typedef draco::GeometryAttribute::Type draco_GeometryAttribute_Type;
+typedef draco::EncodedGeometryType draco_EncodedGeometryType;
+typedef draco::MeshEncoderMethod draco_MeshEncoderMethod;
+
+class DracoInt8Array {
+ public:
+ DracoInt8Array();
+ int8_t GetValue(int index) const;
+ bool SetValues(const char *values, int count);
+
+ size_t size() { return values_.size(); }
+
+ private:
+ std::vector<int8_t> values_;
+};
+
+class MetadataBuilder {
+ public:
+ MetadataBuilder();
+ bool AddStringEntry(draco::Metadata *metadata, const char *entry_name,
+ const char *entry_value);
+ bool AddIntEntry(draco::Metadata *metadata, const char *entry_name,
+ long entry_value);
+ bool AddIntEntryArray(draco::Metadata *metadata, const char *entry_name,
+ const int32_t *entry_values, int32_t num_values);
+ bool AddDoubleEntry(draco::Metadata *metadata, const char *entry_name,
+ double entry_value);
+};
+
+class PointCloudBuilder {
+ public:
+ PointCloudBuilder() {}
+ int AddFloatAttribute(draco::PointCloud *pc,
+ draco_GeometryAttribute_Type type, long num_vertices,
+ long num_components, const float *att_values);
+ int AddInt8Attribute(draco::PointCloud *pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const char *att_values);
+ int AddUInt8Attribute(draco::PointCloud *pc,
+ draco_GeometryAttribute_Type type, long num_vertices,
+ long num_components, const uint8_t *att_values);
+ int AddInt16Attribute(draco::PointCloud *pc,
+ draco_GeometryAttribute_Type type, long num_vertices,
+ long num_components, const int16_t *att_values);
+ int AddUInt16Attribute(draco::PointCloud *pc,
+ draco_GeometryAttribute_Type type, long num_vertices,
+ long num_components, const uint16_t *att_values);
+ int AddInt32Attribute(draco::PointCloud *pc,
+ draco_GeometryAttribute_Type type, long num_vertices,
+ long num_components, const int32_t *att_values);
+ int AddUInt32Attribute(draco::PointCloud *pc,
+ draco_GeometryAttribute_Type type, long num_vertices,
+ long num_components, const uint32_t *att_values);
+ bool SetMetadataForAttribute(draco::PointCloud *pc, long attribute_id,
+ const draco::Metadata *metadata);
+ bool AddMetadata(draco::PointCloud *pc, const draco::Metadata *metadata);
+
+ private:
+ template <typename DataTypeT>
+ int AddAttribute(draco::PointCloud *pc, draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const DataTypeT *att_values,
+ draco::DataType draco_data_type) {
+ if (!pc) {
+ return -1;
+ }
+ std::unique_ptr<draco::PointAttribute> att(new draco::PointAttribute());
+ att->Init(type, num_components, draco_data_type,
+ /* normalized */ false, num_vertices);
+ const int att_id = pc->AddAttribute(std::move(att));
+ draco::PointAttribute *const att_ptr = pc->attribute(att_id);
+
+ for (draco::PointIndex i(0); i < num_vertices; ++i) {
+ att_ptr->SetAttributeValue(att_ptr->mapped_index(i),
+ &att_values[i.value() * num_components]);
+ }
+ if (pc->num_points() == 0) {
+ pc->set_num_points(num_vertices);
+ } else if (pc->num_points() != num_vertices) {
+ return -1;
+ }
+ return att_id;
+ }
+};
+
+// TODO(draco-eng): Regenerate wasm decoder.
+// TODO(draco-eng): Add script to generate and test all Javascipt code.
+class MeshBuilder : public PointCloudBuilder {
+ public:
+ MeshBuilder();
+
+ bool AddFacesToMesh(draco::Mesh *mesh, long num_faces, const int *faces);
+
+ // Deprecated: Use AddFloatAttribute() instead.
+ int AddFloatAttributeToMesh(draco::Mesh *mesh,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const float *att_values);
+
+ // Deprecated: Use AddInt32Attribute() instead.
+ int AddInt32AttributeToMesh(draco::Mesh *mesh,
+ draco_GeometryAttribute_Type type,
+ long num_vertices, long num_components,
+ const int32_t *att_values);
+
+ // Deprecated: Use AddMetadata() instead.
+ bool AddMetadataToMesh(draco::Mesh *mesh, const draco::Metadata *metadata);
+};
+
+class Encoder {
+ public:
+ Encoder();
+
+ void SetEncodingMethod(long method);
+ void SetAttributeQuantization(draco_GeometryAttribute_Type type,
+ long quantization_bits);
+ void SetAttributeExplicitQuantization(draco_GeometryAttribute_Type type,
+ long quantization_bits,
+ long num_components,
+ const float *origin, float range);
+ void SetSpeedOptions(long encoding_speed, long decoding_speed);
+ void SetTrackEncodedProperties(bool flag);
+
+ int EncodeMeshToDracoBuffer(draco::Mesh *mesh, DracoInt8Array *buffer);
+
+ int EncodePointCloudToDracoBuffer(draco::PointCloud *pc,
+ bool deduplicate_values,
+ DracoInt8Array *buffer);
+ int GetNumberOfEncodedPoints();
+ int GetNumberOfEncodedFaces();
+
+ private:
+ draco::Encoder encoder_;
+};
+
+class ExpertEncoder {
+ public:
+ ExpertEncoder(draco::PointCloud *pc);
+
+ void SetEncodingMethod(long method);
+ void SetAttributeQuantization(long att_id, long quantization_bits);
+ void SetAttributeExplicitQuantization(long att_id, long quantization_bits,
+ long num_components,
+ const float *origin, float range);
+ void SetSpeedOptions(long encoding_speed, long decoding_speed);
+ void SetTrackEncodedProperties(bool flag);
+
+ int EncodeToDracoBuffer(bool deduplicate_values, DracoInt8Array *buffer);
+
+ int GetNumberOfEncodedPoints();
+ int GetNumberOfEncodedFaces();
+
+ private:
+ std::unique_ptr<draco::ExpertEncoder> encoder_;
+
+ draco::PointCloud *pc_;
+};
+
+#endif // DRACO_JAVASCRIPT_EMSCRIPTEN_ENCODER_WEBIDL_WRAPPER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/finalize.js b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/finalize.js
new file mode 100644
index 0000000..fe2828e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/finalize.js
@@ -0,0 +1,22 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Calls the 'onModuleParsed' callback if provided. This file is included as the
+// last one in the generated javascript and it gives the caller a way to check
+// that all previous content was successfully processed.
+// Note: emscripten's |onRuntimeInitialized| is called before any --post-js
+// files are included which is not equivalent to this callback.
+if (typeof Module['onModuleParsed'] === 'function') {
+ Module['onModuleParsed']();
+}
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/prepareCallbacks.js b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/prepareCallbacks.js
new file mode 100644
index 0000000..7e150bb
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/prepareCallbacks.js
@@ -0,0 +1,38 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Prepares callbacks that can be used to inform the caller that the module has
+// been fully loaded.
+var isRuntimeInitialized = false;
+var isModuleParsed = false;
+
+// These two callbacks can be called in arbitrary order. We call the final
+// function |onModuleLoaded| after both of these callbacks have been called.
+Module['onRuntimeInitialized'] = function() {
+ isRuntimeInitialized = true;
+ if (isModuleParsed) {
+ if (typeof Module['onModuleLoaded'] === 'function') {
+ Module['onModuleLoaded'](Module);
+ }
+ }
+};
+
+Module['onModuleParsed'] = function() {
+ isModuleParsed = true;
+ if (isRuntimeInitialized) {
+ if (typeof Module['onModuleLoaded'] === 'function') {
+ Module['onModuleLoaded'](Module);
+ }
+ }
+};
diff --git a/libs/assimp/contrib/draco/src/draco/javascript/emscripten/version.js b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/version.js
new file mode 100644
index 0000000..46fb252
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/javascript/emscripten/version.js
@@ -0,0 +1,29 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Returns true if the specified Draco version is supported by this decoder.
+//
+// |versionString| must be a "major.minor" or "major.minor.patch" string.
+// Accepted versions: 1.0 through 1.4, and 0.x with minor <= 10. Anything
+// else (non-string input, malformed string, newer/unknown version) returns
+// false. Note the comparisons rely on JS coercion of the string components
+// to numbers.
+function isVersionSupported(versionString) {
+  if (typeof versionString !== 'string')
+    return false;
+  const version = versionString.split('.');
+  if (version.length < 2 || version.length > 3)
+    return false; // Unexpected version string.
+  if (version[0] == 1 && version[1] >= 0 && version[1] <= 4)
+    return true;
+  if (version[0] != 0 || version[1] > 10)
+    return false;
+  return true;
+}
+
+Module['isVersionSupported'] = isVersionSupported;
diff --git a/libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.cc b/libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.cc
new file mode 100644
index 0000000..25b0240
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.cc
@@ -0,0 +1,265 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/maya/draco_maya_plugin.h"
+
+#ifdef DRACO_MAYA_PLUGIN
+
+namespace draco {
+namespace maya {
+
+// Copies the face connectivity of |drc_mesh| into |out_mesh|: a flat int
+// array with 3 point indices per face. The array is heap-allocated here and
+// is expected to be released via drc2py_free().
+static void decode_faces(std::unique_ptr<draco::Mesh> &drc_mesh,
+                         Drc2PyMesh *out_mesh) {
+  int num_faces = drc_mesh->num_faces();
+  out_mesh->faces = new int[num_faces * 3];
+  out_mesh->faces_num = num_faces;
+  for (int i = 0; i < num_faces; i++) {
+    const draco::Mesh::Face &face = drc_mesh->face(draco::FaceIndex(i));
+    out_mesh->faces[i * 3 + 0] = face[0].value();
+    out_mesh->faces[i * 3 + 1] = face[1].value();
+    out_mesh->faces[i * 3 + 2] = face[2].value();
+  }
+}
+// Extracts per-point positions from |drc_mesh| into a flat float array
+// (x, y, z per point) owned by |out_mesh|. If the mesh has no POSITION
+// attribute, an empty array with vertices_num == 0 is stored instead.
+static void decode_vertices(std::unique_ptr<draco::Mesh> &drc_mesh,
+                            Drc2PyMesh *out_mesh) {
+  const auto pos_att =
+      drc_mesh->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+  if (pos_att == nullptr) {
+    out_mesh->vertices = new float[0];
+    out_mesh->vertices_num = 0;
+    return;
+  }
+
+  int num_vertices = drc_mesh->num_points();
+  out_mesh->vertices = new float[num_vertices * 3];
+  out_mesh->vertices_num = num_vertices;
+  for (int i = 0; i < num_vertices; i++) {
+    draco::PointIndex pi(i);
+    const draco::AttributeValueIndex val_index = pos_att->mapped_index(pi);
+    float out_vertex[3];
+    bool is_ok = pos_att->ConvertValue<float, 3>(val_index, out_vertex);
+    // NOTE(review): on conversion failure this returns with the remaining
+    // array entries uninitialized while vertices_num still reports the full
+    // count — callers cannot detect the truncation.
+    if (!is_ok) return;
+    out_mesh->vertices[i * 3 + 0] = out_vertex[0];
+    out_mesh->vertices[i * 3 + 1] = out_vertex[1];
+    out_mesh->vertices[i * 3 + 2] = out_vertex[2];
+  }
+}
+// Extracts per-point normals from |drc_mesh| into a flat float array
+// (nx, ny, nz per point) owned by |out_mesh|. If the mesh has no NORMAL
+// attribute, an empty array with normals_num == 0 is stored instead.
+static void decode_normals(std::unique_ptr<draco::Mesh> &drc_mesh,
+                           Drc2PyMesh *out_mesh) {
+  const auto normal_att =
+      drc_mesh->GetNamedAttribute(draco::GeometryAttribute::NORMAL);
+  if (normal_att == nullptr) {
+    out_mesh->normals = new float[0];
+    out_mesh->normals_num = 0;
+    return;
+  }
+  int num_normals = drc_mesh->num_points();
+  out_mesh->normals = new float[num_normals * 3];
+  out_mesh->normals_num = num_normals;
+
+  for (int i = 0; i < num_normals; i++) {
+    draco::PointIndex pi(i);
+    const draco::AttributeValueIndex val_index = normal_att->mapped_index(pi);
+    float out_normal[3];
+    bool is_ok = normal_att->ConvertValue<float, 3>(val_index, out_normal);
+    // NOTE(review): early return on failure leaves the rest of the array
+    // uninitialized while normals_num reports the full count.
+    if (!is_ok) return;
+    out_mesh->normals[i * 3 + 0] = out_normal[0];
+    out_mesh->normals[i * 3 + 1] = out_normal[1];
+    out_mesh->normals[i * 3 + 2] = out_normal[2];
+  }
+}
+// Extracts per-point texture coordinates from |drc_mesh| into a flat float
+// array (u, v per point) owned by |out_mesh|. uvs_real_num records the
+// number of unique UV attribute values, which can be smaller than the
+// per-point count. Without a TEX_COORD attribute an empty array is stored.
+static void decode_uvs(std::unique_ptr<draco::Mesh> &drc_mesh,
+                       Drc2PyMesh *out_mesh) {
+  const auto uv_att =
+      drc_mesh->GetNamedAttribute(draco::GeometryAttribute::TEX_COORD);
+  if (uv_att == nullptr) {
+    out_mesh->uvs = new float[0];
+    out_mesh->uvs_num = 0;
+    out_mesh->uvs_real_num = 0;
+    return;
+  }
+  int num_uvs = drc_mesh->num_points();
+  out_mesh->uvs = new float[num_uvs * 2];
+  out_mesh->uvs_num = num_uvs;
+  out_mesh->uvs_real_num = uv_att->size();
+
+  for (int i = 0; i < num_uvs; i++) {
+    draco::PointIndex pi(i);
+    const draco::AttributeValueIndex val_index = uv_att->mapped_index(pi);
+    float out_uv[2];
+    bool is_ok = uv_att->ConvertValue<float, 2>(val_index, out_uv);
+    if (!is_ok) return;
+    out_mesh->uvs[i * 2 + 0] = out_uv[0];
+    out_mesh->uvs[i * 2 + 1] = out_uv[1];
+  }
+}
+
+// Releases all arrays owned by the mesh created by drc2py_decode(), then the
+// mesh struct itself, and finally nulls the caller's pointer so a double call
+// is harmless. A null *mesh_ptr is a no-op.
+void drc2py_free(Drc2PyMesh **mesh_ptr) {
+  Drc2PyMesh *mesh = *mesh_ptr;
+  if (!mesh) return;
+  if (mesh->faces) {
+    delete[] mesh->faces;
+    mesh->faces = nullptr;
+    mesh->faces_num = 0;
+  }
+  if (mesh->vertices) {
+    delete[] mesh->vertices;
+    mesh->vertices = nullptr;
+    mesh->vertices_num = 0;
+  }
+  if (mesh->normals) {
+    delete[] mesh->normals;
+    mesh->normals = nullptr;
+    mesh->normals_num = 0;
+  }
+  if (mesh->uvs) {
+    delete[] mesh->uvs;
+    mesh->uvs = nullptr;
+    mesh->uvs_num = 0;
+  }
+  delete mesh;
+  *mesh_ptr = nullptr;
+}
+
+// Decodes a Draco-compressed buffer (|data|, |length| bytes) into a newly
+// allocated Drc2PyMesh stored in |*res_mesh|. Only triangular meshes are
+// accepted. On success the caller owns the result and must release it with
+// drc2py_free(). On failure *res_mesh is left untouched and an error code
+// describing the failing stage is returned.
+DecodeResult drc2py_decode(char *data, unsigned int length,
+                           Drc2PyMesh **res_mesh) {
+  draco::DecoderBuffer buffer;
+  buffer.Init(data, length);
+  auto type_statusor = draco::Decoder::GetEncodedGeometryType(&buffer);
+  if (!type_statusor.ok()) {
+    return DecodeResult::KO_GEOMETRY_TYPE_INVALID;
+  }
+  const draco::EncodedGeometryType geom_type = type_statusor.value();
+  if (geom_type != draco::TRIANGULAR_MESH) {
+    return DecodeResult::KO_TRIANGULAR_MESH_NOT_FOUND;
+  }
+
+  draco::Decoder decoder;
+  auto statusor = decoder.DecodeMeshFromBuffer(&buffer);
+  if (!statusor.ok()) {
+    return DecodeResult::KO_MESH_DECODING;
+  }
+  std::unique_ptr<draco::Mesh> drc_mesh = std::move(statusor).value();
+
+  // Fill each attribute group of the output mesh from the decoded mesh.
+  *res_mesh = new Drc2PyMesh();
+  decode_faces(drc_mesh, *res_mesh);
+  decode_vertices(drc_mesh, *res_mesh);
+  decode_normals(drc_mesh, *res_mesh);
+  decode_uvs(drc_mesh, *res_mesh);
+  return DecodeResult::OK;
+}
+
+// For encoding references, see https://github.com/google/draco/issues/116
+//
+// Encodes |in_mesh| (triangular faces, positions, optional normals/uvs) with
+// default Draco encoder settings and writes the compressed result to
+// |file_path|. Returns KO_WRONG_INPUT when faces or vertices are missing,
+// KO_MESH_ENCODING when compression fails, and KO_FILE_CREATION when the
+// output file cannot be opened.
+EncodeResult drc2py_encode(Drc2PyMesh *in_mesh, char *file_path) {
+  if (in_mesh->faces_num == 0) return EncodeResult::KO_WRONG_INPUT;
+  if (in_mesh->vertices_num == 0) return EncodeResult::KO_WRONG_INPUT;
+  // TODO: Add check to protect against quad faces. At the moment only
+  // Triangular faces are supported
+
+  std::unique_ptr<draco::Mesh> drc_mesh(new draco::Mesh());
+
+  // Marshall Faces
+  int num_faces = in_mesh->faces_num;
+  drc_mesh->SetNumFaces(num_faces);
+  for (int i = 0; i < num_faces; ++i) {
+    Mesh::Face face;
+    face[0] = in_mesh->faces[i * 3 + 0];
+    face[1] = in_mesh->faces[i * 3 + 1];
+    face[2] = in_mesh->faces[i * 3 + 2];
+    drc_mesh->SetFace(FaceIndex(i), face);
+  }
+
+  // Marshall Vertices
+  int num_points = in_mesh->vertices_num;
+  drc_mesh->set_num_points(num_points);
+  GeometryAttribute va;
+  va.Init(GeometryAttribute::POSITION, nullptr, 3, DT_FLOAT32, false,
+          sizeof(float) * 3, 0);
+  int pos_att_id = drc_mesh->AddAttribute(va, true, num_points);
+  float point[3];
+  for (int i = 0; i < num_points; ++i) {
+    point[0] = in_mesh->vertices[i * 3 + 0];
+    point[1] = in_mesh->vertices[i * 3 + 1];
+    point[2] = in_mesh->vertices[i * 3 + 2];
+    drc_mesh->attribute(pos_att_id)
+        ->SetAttributeValue(AttributeValueIndex(i), point);
+  }
+
+  // Marshall Normals (optional; only added when the input provides them).
+  int num_normals = in_mesh->normals_num;
+  int norm_att_id;
+  if (num_normals > 0) {
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::NORMAL, nullptr, 3, DT_FLOAT32, false,
+            sizeof(float) * 3, 0);
+    norm_att_id = drc_mesh->AddAttribute(va, true, num_normals);
+
+    float norm[3];
+    for (int i = 0; i < num_normals; ++i) {
+      norm[0] = in_mesh->normals[i * 3 + 0];
+      norm[1] = in_mesh->normals[i * 3 + 1];
+      norm[2] = in_mesh->normals[i * 3 + 2];
+      drc_mesh->attribute(norm_att_id)
+          ->SetAttributeValue(AttributeValueIndex(i), norm);
+    }
+  }
+
+  // Marshall Uvs (optional; two components per point).
+  int num_uvs = in_mesh->uvs_num;
+  int uv_att_id;
+  if (num_uvs > 0) {
+    GeometryAttribute va;
+    va.Init(GeometryAttribute::TEX_COORD, nullptr, 2, DT_FLOAT32, false,
+            sizeof(float) * 2, 0);
+    uv_att_id = drc_mesh->AddAttribute(va, true, num_uvs);
+    // NOTE(review): uv[3] is declared but only uv[0] and uv[1] are ever set
+    // or read; uv[2] is unused.
+    float uv[3];
+    for (int i = 0; i < num_uvs; ++i) {
+      uv[0] = in_mesh->uvs[i * 2 + 0];
+      uv[1] = in_mesh->uvs[i * 2 + 1];
+      drc_mesh->attribute(uv_att_id)->SetAttributeValue(AttributeValueIndex(i),
+                                                        uv);
+    }
+  }
+
+// Deduplicate Attributes and Points
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+  drc_mesh->DeduplicateAttributeValues();
+#endif
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+  drc_mesh->DeduplicatePointIds();
+#endif
+
+  // Encode Mesh
+  draco::Encoder encoder;  // Use default encode settings (See draco_encoder.cc
+                           // Options struct)
+  draco::EncoderBuffer buffer;
+  const draco::Status status = encoder.EncodeMeshToBuffer(*drc_mesh, &buffer);
+  if (!status.ok()) {
+    // Use status.error_msg() to check the error
+    return EncodeResult::KO_MESH_ENCODING;
+  }
+
+  // Save to file
+  std::string file = file_path;
+  std::ofstream out_file(file, std::ios::binary);
+  if (!out_file) {
+    return EncodeResult::KO_FILE_CREATION;
+  }
+  out_file.write(buffer.data(), buffer.size());
+  return EncodeResult::OK;
+}
+
+} // namespace maya
+
+} // namespace draco
+
+#endif // DRACO_MAYA_PLUGIN
diff --git a/libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.h b/libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.h
new file mode 100644
index 0000000..372e25f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/maya/draco_maya_plugin.h
@@ -0,0 +1,81 @@
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MAYA_DRACO_MAYA_PLUGIN_H_
+#define DRACO_MAYA_DRACO_MAYA_PLUGIN_H_
+
+#include <fstream>
+
+#include "draco/compression/decode.h"
+#include "draco/compression/encode.h"
+
+#ifdef DRACO_MAYA_PLUGIN
+
+// If compiling with Visual Studio.
+#if defined(_MSC_VER)
+#define EXPORT_API __declspec(dllexport)
+#else
+// Other platforms don't need this.
+#define EXPORT_API
+#endif // defined(_MSC_VER)
+
+namespace draco {
+namespace maya {
+
+// Status codes returned by drc2py_encode(). OK on success; negative values
+// identify the failing stage.
+enum class EncodeResult {
+  OK = 0,
+  KO_WRONG_INPUT = -1,
+  KO_MESH_ENCODING = -2,
+  KO_FILE_CREATION = -3
+};
+// Status codes returned by drc2py_decode(). OK on success; negative values
+// identify the failing stage.
+enum class DecodeResult {
+  OK = 0,
+  KO_GEOMETRY_TYPE_INVALID = -1,
+  KO_TRIANGULAR_MESH_NOT_FOUND = -2,
+  KO_MESH_DECODING = -3
+};
+
+extern "C" {
+// C-compatible mesh exchanged across the plugin boundary. All arrays are
+// flat, heap-owned by this struct, and released via drc2py_free().
+struct EXPORT_API Drc2PyMesh {
+  Drc2PyMesh()
+      : faces_num(0),
+        faces(nullptr),
+        vertices_num(0),
+        vertices(nullptr),
+        normals_num(0),
+        normals(nullptr),
+        uvs_num(0),
+        uvs_real_num(0),
+        uvs(nullptr) {}
+  int faces_num;       // Number of triangles; |faces| holds faces_num * 3 ints.
+  int *faces;          // 3 point indices per face.
+  int vertices_num;    // Number of points; |vertices| holds 3 floats each.
+  float *vertices;     // x, y, z per point.
+  int normals_num;     // Number of per-point normals (3 floats each).
+  float *normals;      // nx, ny, nz per point.
+  int uvs_num;         // Number of per-point UVs (2 floats each).
+  int uvs_real_num;    // Count of unique UV attribute values.
+  float *uvs;          // u, v per point.
+};
+
+EXPORT_API DecodeResult drc2py_decode(char *data, unsigned int length,
+                                      Drc2PyMesh **res_mesh);
+EXPORT_API void drc2py_free(Drc2PyMesh **res_mesh);
+EXPORT_API EncodeResult drc2py_encode(Drc2PyMesh *in_mesh, char *file_path);
+}  // extern "C"
+
+} // namespace maya
+} // namespace draco
+
+#endif // DRACO_MAYA_PLUGIN
+
+#endif // DRACO_MAYA_DRACO_MAYA_PLUGIN_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/corner_table.cc b/libs/assimp/contrib/draco/src/draco/mesh/corner_table.cc
new file mode 100644
index 0000000..3f92f65
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/corner_table.cc
@@ -0,0 +1,441 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/corner_table.h"
+
+#include <limits>
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/mesh/corner_table_iterators.h"
+
+namespace draco {
+
+// Constructs an empty corner table; all bookkeeping counters start at zero
+// and the valence cache is bound to this instance.
+CornerTable::CornerTable()
+    : num_original_vertices_(0),
+      num_degenerated_faces_(0),
+      num_isolated_vertices_(0),
+      valence_cache_(*this) {}
+
+// Factory: builds a corner table from the given faces. Returns nullptr when
+// Init() fails, otherwise the fully initialized table.
+std::unique_ptr<CornerTable> CornerTable::Create(
+    const IndexTypeVector<FaceIndex, FaceType> &faces) {
+  std::unique_ptr<CornerTable> ct(new CornerTable());
+  if (!ct->Init(faces)) {
+    return nullptr;
+  }
+  return ct;
+}
+
+// Initializes the table from |faces|: fills the corner-to-vertex map (three
+// consecutive corners per face), then computes opposite corners, breaks
+// non-manifold edges, and assigns a representative corner to every vertex.
+// Returns false if any stage fails.
+bool CornerTable::Init(const IndexTypeVector<FaceIndex, FaceType> &faces) {
+  valence_cache_.ClearValenceCache();
+  valence_cache_.ClearValenceCacheInaccurate();
+  corner_to_vertex_map_.resize(faces.size() * 3);
+  for (FaceIndex fi(0); fi < static_cast<uint32_t>(faces.size()); ++fi) {
+    for (int i = 0; i < 3; ++i) {
+      corner_to_vertex_map_[FirstCorner(fi) + i] = faces[fi][i];
+    }
+  }
+  // |num_vertices| is produced by ComputeOppositeCorners() as a side effect.
+  int num_vertices = -1;
+  if (!ComputeOppositeCorners(&num_vertices)) {
+    return false;
+  }
+  if (!BreakNonManifoldEdges()) {
+    return false;
+  }
+  if (!ComputeVertexCorners(num_vertices)) {
+    return false;
+  }
+  return true;
+}
+
+// Convenience overload: resets assuming 3 vertices per face.
+bool CornerTable::Reset(int num_faces) {
+  return Reset(num_faces, num_faces * 3);
+}
+
+// Resets the table to |num_faces| faces with all corners/opposites marked
+// invalid and room reserved for |num_vertices| vertices. Rejects negative
+// sizes and face counts whose corner count would overflow the corner index.
+bool CornerTable::Reset(int num_faces, int num_vertices) {
+  if (num_faces < 0 || num_vertices < 0) {
+    return false;
+  }
+  const unsigned int num_faces_unsigned = num_faces;
+  // Guard against num_faces * 3 overflowing CornerIndex::ValueType.
+  if (num_faces_unsigned >
+      std::numeric_limits<CornerIndex::ValueType>::max() / 3) {
+    return false;
+  }
+  corner_to_vertex_map_.assign(num_faces_unsigned * 3, kInvalidVertexIndex);
+  opposite_corners_.assign(num_faces_unsigned * 3, kInvalidCornerIndex);
+  vertex_corners_.reserve(num_vertices);
+  valence_cache_.ClearValenceCache();
+  valence_cache_.ClearValenceCacheInaccurate();
+  return true;
+}
+
+// Pairs each half-edge with its sibling, filling |opposite_corners_|, and
+// reports the number of vertices through |num_vertices|. Degenerated faces
+// are skipped (and counted in |num_degenerated_faces_|); unmatched and
+// mirrored half-edges keep kInvalidCornerIndex as their opposite.
+bool CornerTable::ComputeOppositeCorners(int *num_vertices) {
+  DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+  if (num_vertices == nullptr) {
+    return false;
+  }
+  opposite_corners_.resize(num_corners(), kInvalidCornerIndex);
+
+  // Our implementation for finding opposite corners is based on keeping track
+  // of outgoing half-edges for each vertex of the mesh. Half-edges (defined by
+  // their opposite corners) are processed one by one and whenever a new
+  // half-edge (corner) is processed, we check whether the sink vertex of
+  // this half-edge contains its sibling half-edge. If yes, we connect them and
+  // remove the sibling half-edge from the sink vertex, otherwise we add the new
+  // half-edge to its source vertex.
+
+  // First compute the number of outgoing half-edges (corners) attached to each
+  // vertex.
+  std::vector<int> num_corners_on_vertices;
+  num_corners_on_vertices.reserve(num_corners());
+  for (CornerIndex c(0); c < num_corners(); ++c) {
+    const VertexIndex v1 = Vertex(c);
+    if (v1.value() >= static_cast<int>(num_corners_on_vertices.size())) {
+      num_corners_on_vertices.resize(v1.value() + 1, 0);
+    }
+    // For each corner there is always exactly one outgoing half-edge attached
+    // to its vertex.
+    num_corners_on_vertices[v1.value()]++;
+  }
+
+  // Create a storage for half-edges on each vertex. We store all half-edges in
+  // one array, where each entry is identified by the half-edge's sink vertex id
+  // and the associated half-edge corner id (corner opposite to the half-edge).
+  // Each vertex will be assigned storage for up to
+  // |num_corners_on_vertices[vert_id]| half-edges. Unused half-edges are marked
+  // with |sink_vert| == kInvalidVertexIndex.
+  struct VertexEdgePair {
+    VertexEdgePair()
+        : sink_vert(kInvalidVertexIndex), edge_corner(kInvalidCornerIndex) {}
+    VertexIndex sink_vert;
+    CornerIndex edge_corner;
+  };
+  std::vector<VertexEdgePair> vertex_edges(num_corners(), VertexEdgePair());
+
+  // For each vertex compute the offset (location where the first half-edge
+  // entry of a given vertex is going to be stored). This way each vertex is
+  // guaranteed to have a non-overlapping storage with respect to the other
+  // vertices.
+  std::vector<int> vertex_offset(num_corners_on_vertices.size());
+  int offset = 0;
+  for (size_t i = 0; i < num_corners_on_vertices.size(); ++i) {
+    vertex_offset[i] = offset;
+    offset += num_corners_on_vertices[i];
+  }
+
+  // Now go over the all half-edges (using their opposite corners) and either
+  // insert them to the |vertex_edge| array or connect them with existing
+  // half-edges.
+  for (CornerIndex c(0); c < num_corners(); ++c) {
+    const VertexIndex tip_v = Vertex(c);
+    const VertexIndex source_v = Vertex(Next(c));
+    const VertexIndex sink_v = Vertex(Previous(c));
+
+    const FaceIndex face_index = Face(c);
+    if (c == FirstCorner(face_index)) {
+      // Check whether the face is degenerated, if so ignore it.
+      const VertexIndex v0 = Vertex(c);
+      if (v0 == source_v || v0 == sink_v || source_v == sink_v) {
+        ++num_degenerated_faces_;
+        c += 2;  // Ignore the next two corners of the same face.
+        continue;
+      }
+    }
+
+    CornerIndex opposite_c(kInvalidCornerIndex);
+    // The maximum number of half-edges attached to the sink vertex.
+    const int num_corners_on_vert = num_corners_on_vertices[sink_v.value()];
+    // Where to look for the first half-edge on the sink vertex.
+    offset = vertex_offset[sink_v.value()];
+    for (int i = 0; i < num_corners_on_vert; ++i, ++offset) {
+      const VertexIndex other_v = vertex_edges[offset].sink_vert;
+      if (other_v == kInvalidVertexIndex) {
+        break;  // No matching half-edge found on the sink vertex.
+      }
+      if (other_v == source_v) {
+        if (tip_v == Vertex(vertex_edges[offset].edge_corner)) {
+          continue;  // Don't connect mirrored faces.
+        }
+        // A matching half-edge was found on the sink vertex. Mark the
+        // half-edge's opposite corner.
+        opposite_c = vertex_edges[offset].edge_corner;
+        // Remove the half-edge from the sink vertex. We remap all subsequent
+        // half-edges one slot down.
+        // TODO(ostava): This can be optimized a little bit, by remapping only
+        // the half-edge on the last valid slot into the deleted half-edge's
+        // slot.
+        for (int j = i + 1; j < num_corners_on_vert; ++j, ++offset) {
+          vertex_edges[offset] = vertex_edges[offset + 1];
+          if (vertex_edges[offset].sink_vert == kInvalidVertexIndex) {
+            break;  // Unused half-edge reached.
+          }
+        }
+        // Mark the last entry as unused.
+        vertex_edges[offset].sink_vert = kInvalidVertexIndex;
+        break;
+      }
+    }
+    if (opposite_c == kInvalidCornerIndex) {
+      // No opposite corner found. Insert the new edge
+      const int num_corners_on_source_vert =
+          num_corners_on_vertices[source_v.value()];
+      offset = vertex_offset[source_v.value()];
+      for (int i = 0; i < num_corners_on_source_vert; ++i, ++offset) {
+        // Find the first unused half-edge slot on the source vertex.
+        if (vertex_edges[offset].sink_vert == kInvalidVertexIndex) {
+          vertex_edges[offset].sink_vert = sink_v;
+          vertex_edges[offset].edge_corner = c;
+          break;
+        }
+      }
+    } else {
+      // Opposite corner found.
+      opposite_corners_[c] = opposite_c;
+      opposite_corners_[opposite_c] = c;
+    }
+  }
+  *num_vertices = static_cast<int>(num_corners_on_vertices.size());
+  return true;
+}
+
+// Detects edges traversed more than once while swinging around a vertex and
+// disconnects the involved faces (their opposite corners are reset to
+// kInvalidCornerIndex). Repeats until a full pass makes no change. Always
+// returns true.
+bool CornerTable::BreakNonManifoldEdges() {
+  // This function detects and breaks non-manifold edges that are caused by
+  // folds in 1-ring neighborhood around a vertex. Non-manifold edges can occur
+  // when the 1-ring surface around a vertex self-intersects in a common edge.
+  // For example imagine a surface around a pivot vertex 0, where the 1-ring
+  // is defined by vertices |1, 2, 3, 1, 4|. The surface passes edge <0, 1>
+  // twice which would result in a non-manifold edge that needs to be broken.
+  // For now all faces connected to these non-manifold edges are disconnected
+  // resulting in open boundaries on the mesh. New vertices will be created
+  // automatically for each new disjoint patch in the ComputeVertexCorners()
+  // method.
+  // Note that all other non-manifold edges are implicitly handled by the
+  // function ComputeVertexCorners() that automatically creates new vertices
+  // on disjoint 1-ring surface patches.
+
+  std::vector<bool> visited_corners(num_corners(), false);
+  std::vector<std::pair<VertexIndex, CornerIndex>> sink_vertices;
+  bool mesh_connectivity_updated = false;
+  do {
+    mesh_connectivity_updated = false;
+    for (CornerIndex c(0); c < num_corners(); ++c) {
+      if (visited_corners[c.value()]) {
+        continue;
+      }
+      sink_vertices.clear();
+
+      // First swing all the way to find the left-most corner connected to the
+      // corner's vertex.
+      CornerIndex first_c = c;
+      CornerIndex current_c = c;
+      CornerIndex next_c;
+      while (next_c = SwingLeft(current_c),
+             next_c != first_c && next_c != kInvalidCornerIndex &&
+                 !visited_corners[next_c.value()]) {
+        current_c = next_c;
+      }
+
+      first_c = current_c;
+
+      // Swing right from the first corner and check if all visited edges
+      // are unique.
+      do {
+        visited_corners[current_c.value()] = true;
+        // Each new edge is defined by the pivot vertex (that is the same for
+        // all faces) and by the sink vertex (that is the |next| vertex from the
+        // currently processed pivot corner. I.e., each edge is uniquely defined
+        // by the sink vertex index.
+        const CornerIndex sink_c = Next(current_c);
+        const VertexIndex sink_v = corner_to_vertex_map_[sink_c];
+
+        // Corner that defines the edge on the face.
+        const CornerIndex edge_corner = Previous(current_c);
+        bool vertex_connectivity_updated = false;
+        // Go over all processed edges (sink vertices). If the current sink
+        // vertex has been already encountered before it may indicate a
+        // non-manifold edge that needs to be broken.
+        for (auto &&attached_sink_vertex : sink_vertices) {
+          if (attached_sink_vertex.first == sink_v) {
+            // Sink vertex has been already processed.
+            const CornerIndex other_edge_corner = attached_sink_vertex.second;
+            const CornerIndex opp_edge_corner = Opposite(edge_corner);
+
+            if (opp_edge_corner == other_edge_corner) {
+              // We are closing the loop so no need to change the connectivity.
+              continue;
+            }
+
+            // Break the connectivity on the non-manifold edge.
+            // TODO(ostava): It may be possible to reconnect the faces in a way
+            // that the final surface would be manifold.
+            const CornerIndex opp_other_edge_corner =
+                Opposite(other_edge_corner);
+            if (opp_edge_corner != kInvalidCornerIndex) {
+              SetOppositeCorner(opp_edge_corner, kInvalidCornerIndex);
+            }
+            if (opp_other_edge_corner != kInvalidCornerIndex) {
+              SetOppositeCorner(opp_other_edge_corner, kInvalidCornerIndex);
+            }
+
+            SetOppositeCorner(edge_corner, kInvalidCornerIndex);
+            SetOppositeCorner(other_edge_corner, kInvalidCornerIndex);
+
+            vertex_connectivity_updated = true;
+            break;
+          }
+        }
+        if (vertex_connectivity_updated) {
+          // Because of the updated connectivity, not all corners connected to
+          // this vertex have been processed and we need to go over them again.
+          // TODO(ostava): This can be optimized as we don't really need to
+          // iterate over all corners.
+          mesh_connectivity_updated = true;
+          break;
+        }
+        // Insert new sink vertex information <sink vertex index, edge corner>.
+        std::pair<VertexIndex, CornerIndex> new_sink_vert;
+        new_sink_vert.first = corner_to_vertex_map_[Previous(current_c)];
+        new_sink_vert.second = sink_c;
+        sink_vertices.push_back(new_sink_vert);
+
+        current_c = SwingRight(current_c);
+      } while (current_c != first_c && current_c != kInvalidCornerIndex);
+    }
+  } while (mesh_connectivity_updated);
+  return true;
+}
+
+// Assigns one representative corner to each vertex (the left-most corner if
+// the vertex lies on a boundary), splitting non-manifold vertices into new
+// vertex ids and counting vertices that belong to no valid face as isolated.
+// Always returns true.
+bool CornerTable::ComputeVertexCorners(int num_vertices) {
+  DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+  num_original_vertices_ = num_vertices;
+  vertex_corners_.resize(num_vertices, kInvalidCornerIndex);
+  // Arrays for marking visited vertices and corners that allow us to detect
+  // non-manifold vertices.
+  std::vector<bool> visited_vertices(num_vertices, false);
+  std::vector<bool> visited_corners(num_corners(), false);
+
+  for (FaceIndex f(0); f < num_faces(); ++f) {
+    const CornerIndex first_face_corner = FirstCorner(f);
+    // Check whether the face is degenerated. If so ignore it.
+    if (IsDegenerated(f)) {
+      continue;
+    }
+
+    for (int k = 0; k < 3; ++k) {
+      const CornerIndex c = first_face_corner + k;
+      if (visited_corners[c.value()]) {
+        continue;
+      }
+      VertexIndex v = corner_to_vertex_map_[c];
+      // Note that one vertex maps to many corners, but we just keep track
+      // of the vertex which has a boundary on the left if the vertex lies on
+      // the boundary. This means that all the related corners can be accessed
+      // by iterating over the SwingRight() operator.
+      // In case of a vertex inside the mesh, the choice is arbitrary.
+      bool is_non_manifold_vertex = false;
+      if (visited_vertices[v.value()]) {
+        // A visited vertex of an unvisited corner found. Must be a non-manifold
+        // vertex.
+        // Create a new vertex for it.
+        vertex_corners_.push_back(kInvalidCornerIndex);
+        non_manifold_vertex_parents_.push_back(v);
+        visited_vertices.push_back(false);
+        v = VertexIndex(num_vertices++);
+        is_non_manifold_vertex = true;
+      }
+      // Mark the vertex as visited.
+      visited_vertices[v.value()] = true;
+
+      // First swing all the way to the left and mark all corners on the way.
+      CornerIndex act_c(c);
+      while (act_c != kInvalidCornerIndex) {
+        visited_corners[act_c.value()] = true;
+        // Vertex will eventually point to the left most corner.
+        vertex_corners_[v] = act_c;
+        if (is_non_manifold_vertex) {
+          // Update vertex index in the corresponding face.
+          corner_to_vertex_map_[act_c] = v;
+        }
+        act_c = SwingLeft(act_c);
+        if (act_c == c) {
+          break;  // Full circle reached.
+        }
+      }
+      if (act_c == kInvalidCornerIndex) {
+        // If we have reached an open boundary we need to swing right from the
+        // initial corner to mark all corners in the opposite direction.
+        act_c = SwingRight(c);
+        while (act_c != kInvalidCornerIndex) {
+          visited_corners[act_c.value()] = true;
+          if (is_non_manifold_vertex) {
+            // Update vertex index in the corresponding face.
+            corner_to_vertex_map_[act_c] = v;
+          }
+          act_c = SwingRight(act_c);
+        }
+      }
+    }
+  }
+
+  // Count the number of isolated (unprocessed) vertices.
+  num_isolated_vertices_ = 0;
+  for (bool visited : visited_vertices) {
+    if (!visited) {
+      ++num_isolated_vertices_;
+    }
+  }
+  return true;
+}
+
+// Returns true when |face| is invalid or has two or more corners mapped to
+// the same vertex (i.e., the triangle is collapsed).
+bool CornerTable::IsDegenerated(FaceIndex face) const {
+  if (face == kInvalidFaceIndex) {
+    return true;
+  }
+  const CornerIndex first_face_corner = FirstCorner(face);
+  const VertexIndex v0 = Vertex(first_face_corner);
+  const VertexIndex v1 = Vertex(Next(first_face_corner));
+  const VertexIndex v2 = Vertex(Previous(first_face_corner));
+  if (v0 == v1 || v0 == v2 || v1 == v2) {
+    return true;
+  }
+  return false;
+}
+
+// Returns the valence (number of ring neighbors) of |v|, or -1 for an
+// invalid vertex index. Valid indices are forwarded to ConfidentValence().
+int CornerTable::Valence(VertexIndex v) const {
+  if (v == kInvalidVertexIndex) {
+    return -1;
+  }
+  return ConfidentValence(v);
+}
+
+// Counts the valence of |v| by walking its vertex ring. |v| must already be
+// a valid in-range vertex index (checked only via DCHECK).
+int CornerTable::ConfidentValence(VertexIndex v) const {
+  DRACO_DCHECK_GE(v.value(), 0);
+  DRACO_DCHECK_LT(v.value(), num_vertices());
+  VertexRingIterator<CornerTable> vi(this, v);
+  int valence = 0;
+  for (; !vi.End(); vi.Next()) {
+    ++valence;
+  }
+  return valence;
+}
+
+// Rewrites the corner-to-vertex mapping for every corner attached to
+// |vertex| so they all reference |vertex|. Requires an empty valence cache.
+void CornerTable::UpdateFaceToVertexMap(const VertexIndex vertex) {
+  DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+  VertexCornersIterator<CornerTable> it(this, vertex);
+  for (; !it.End(); ++it) {
+    const CornerIndex corner = *it;
+    corner_to_vertex_map_[corner] = vertex;
+  }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/corner_table.h b/libs/assimp/contrib/draco/src/draco/mesh/corner_table.h
new file mode 100644
index 0000000..3aa720f
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/corner_table.h
@@ -0,0 +1,396 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_CORNER_TABLE_H_
+#define DRACO_MESH_CORNER_TABLE_H_
+
+#include <array>
+#include <memory>
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/core/draco_index_type_vector.h"
+#include "draco/core/macros.h"
+#include "draco/mesh/valence_cache.h"
+
+namespace draco {
+
+// CornerTable is used to represent connectivity of triangular meshes.
+// For every corner of all faces, the corner table stores the index of the
+// opposite corner in the neighboring face (if it exists) as illustrated in the
+// figure below (see corner |c| and it's opposite corner |o|).
+//
+// *
+// /c\
+// / \
+// /n p\
+// *-------*
+// \ /
+// \ /
+// \o/
+// *
+//
+// All corners are defined by unique CornerIndex and each triplet of corners
+// that define a single face is always ordered consecutively as:
+// { 3 * FaceIndex, 3 * FaceIndex + 1, 3 * FaceIndex + 2 }.
+// This representation of corners allows CornerTable to easily retrieve Next and
+// Previous corners on any face (see corners |n| and |p| in the figure above).
+// Using the Next, Previous, and Opposite corners then enables traversal of any
+// 2-manifold surface.
+// If the CornerTable is constructed from a non-manifold surface, the input
+// non-manifold edges and vertices are automatically split.
+class CornerTable {
+ public:
+ // Corner table face type.
+ typedef std::array<VertexIndex, 3> FaceType;
+
+ CornerTable();
+ static std::unique_ptr<CornerTable> Create(
+ const IndexTypeVector<FaceIndex, FaceType> &faces);
+
+  // Initializes the CornerTable from the provided set of indexed faces.
+ // The input faces can represent a non-manifold topology, in which case the
+ // non-manifold edges and vertices are going to be split.
+ bool Init(const IndexTypeVector<FaceIndex, FaceType> &faces);
+
+ // Resets the corner table to the given number of invalid faces.
+ bool Reset(int num_faces);
+
+ // Resets the corner table to the given number of invalid faces and vertices.
+ bool Reset(int num_faces, int num_vertices);
+
+ inline int num_vertices() const {
+ return static_cast<int>(vertex_corners_.size());
+ }
+ inline int num_corners() const {
+ return static_cast<int>(corner_to_vertex_map_.size());
+ }
+ inline int num_faces() const {
+ return static_cast<int>(corner_to_vertex_map_.size() / 3);
+ }
+
+ inline CornerIndex Opposite(CornerIndex corner) const {
+ if (corner == kInvalidCornerIndex) {
+ return corner;
+ }
+ return opposite_corners_[corner];
+ }
+ inline CornerIndex Next(CornerIndex corner) const {
+ if (corner == kInvalidCornerIndex) {
+ return corner;
+ }
+ return LocalIndex(++corner) ? corner : corner - 3;
+ }
+ inline CornerIndex Previous(CornerIndex corner) const {
+ if (corner == kInvalidCornerIndex) {
+ return corner;
+ }
+ return LocalIndex(corner) ? corner - 1 : corner + 2;
+ }
+ inline VertexIndex Vertex(CornerIndex corner) const {
+ if (corner == kInvalidCornerIndex) {
+ return kInvalidVertexIndex;
+ }
+ return ConfidentVertex(corner);
+ }
+ inline VertexIndex ConfidentVertex(CornerIndex corner) const {
+ DRACO_DCHECK_GE(corner.value(), 0);
+ DRACO_DCHECK_LT(corner.value(), num_corners());
+ return corner_to_vertex_map_[corner];
+ }
+ inline FaceIndex Face(CornerIndex corner) const {
+ if (corner == kInvalidCornerIndex) {
+ return kInvalidFaceIndex;
+ }
+ return FaceIndex(corner.value() / 3);
+ }
+ inline CornerIndex FirstCorner(FaceIndex face) const {
+ if (face == kInvalidFaceIndex) {
+ return kInvalidCornerIndex;
+ }
+ return CornerIndex(face.value() * 3);
+ }
+ inline std::array<CornerIndex, 3> AllCorners(FaceIndex face) const {
+ const CornerIndex ci = CornerIndex(face.value() * 3);
+ return {{ci, ci + 1, ci + 2}};
+ }
+ inline int LocalIndex(CornerIndex corner) const { return corner.value() % 3; }
+
+ inline FaceType FaceData(FaceIndex face) const {
+ const CornerIndex first_corner = FirstCorner(face);
+ FaceType face_data;
+ for (int i = 0; i < 3; ++i) {
+ face_data[i] = corner_to_vertex_map_[first_corner + i];
+ }
+ return face_data;
+ }
+
+ void SetFaceData(FaceIndex face, FaceType data) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ const CornerIndex first_corner = FirstCorner(face);
+ for (int i = 0; i < 3; ++i) {
+ corner_to_vertex_map_[first_corner + i] = data[i];
+ }
+ }
+
+ // Returns the left-most corner of a single vertex 1-ring. If a vertex is not
+ // on a boundary (in which case it has a full 1-ring), this function returns
+ // any of the corners mapped to the given vertex.
+ inline CornerIndex LeftMostCorner(VertexIndex v) const {
+ return vertex_corners_[v];
+ }
+
+ // Returns the parent vertex index of a given corner table vertex.
+ VertexIndex VertexParent(VertexIndex vertex) const {
+ if (vertex.value() < static_cast<uint32_t>(num_original_vertices_)) {
+ return vertex;
+ }
+ return non_manifold_vertex_parents_[vertex - num_original_vertices_];
+ }
+
+ // Returns true if the corner is valid.
+ inline bool IsValid(CornerIndex c) const {
+ return Vertex(c) != kInvalidVertexIndex;
+ }
+
+ // Returns the valence (or degree) of a vertex.
+ // Returns -1 if the given vertex index is not valid.
+ int Valence(VertexIndex v) const;
+ // Same as above but does not check for validity and does not return -1
+ int ConfidentValence(VertexIndex v) const;
+ // Returns the valence of the vertex at the given corner.
+ inline int Valence(CornerIndex c) const {
+ if (c == kInvalidCornerIndex) {
+ return -1;
+ }
+ return ConfidentValence(c);
+ }
+ inline int ConfidentValence(CornerIndex c) const {
+ DRACO_DCHECK_LT(c.value(), num_corners());
+ return ConfidentValence(ConfidentVertex(c));
+ }
+
+ // Returns true if the specified vertex is on a boundary.
+ inline bool IsOnBoundary(VertexIndex vert) const {
+ const CornerIndex corner = LeftMostCorner(vert);
+ if (SwingLeft(corner) == kInvalidCornerIndex) {
+ return true;
+ }
+ return false;
+ }
+
+ // *-------*
+ // / \ / \
+ // / \ / \
+ // / sl\c/sr \
+ // *-------v-------*
+ // Returns the corner on the adjacent face on the right that maps to
+ // the same vertex as the given corner (sr in the above diagram).
+ inline CornerIndex SwingRight(CornerIndex corner) const {
+ return Previous(Opposite(Previous(corner)));
+ }
+ // Returns the corner on the left face that maps to the same vertex as the
+ // given corner (sl in the above diagram).
+ inline CornerIndex SwingLeft(CornerIndex corner) const {
+ return Next(Opposite(Next(corner)));
+ }
+
+  // Get opposite corners on the left and right faces respectively (see image
+  // below, where L and R are the left and right corners of a corner X).
+ //
+ // *-------*-------*
+ // \L /X\ R/
+ // \ / \ /
+ // \ / \ /
+ // *-------*
+ inline CornerIndex GetLeftCorner(CornerIndex corner_id) const {
+ if (corner_id == kInvalidCornerIndex) {
+ return kInvalidCornerIndex;
+ }
+ return Opposite(Previous(corner_id));
+ }
+ inline CornerIndex GetRightCorner(CornerIndex corner_id) const {
+ if (corner_id == kInvalidCornerIndex) {
+ return kInvalidCornerIndex;
+ }
+ return Opposite(Next(corner_id));
+ }
+
+ // Returns the number of new vertices that were created as a result of
+ // splitting of non-manifold vertices of the input geometry.
+ int NumNewVertices() const { return num_vertices() - num_original_vertices_; }
+ int NumOriginalVertices() const { return num_original_vertices_; }
+
+ // Returns the number of faces with duplicated vertex indices.
+ int NumDegeneratedFaces() const { return num_degenerated_faces_; }
+
+  // Returns the number of isolated vertices (vertices that have
+  // vertex_corners_ mapping set to kInvalidCornerIndex).
+ int NumIsolatedVertices() const { return num_isolated_vertices_; }
+
+ bool IsDegenerated(FaceIndex face) const;
+
+ // Methods that modify an existing corner table.
+ // Sets the opposite corner mapping between two corners. Caller must ensure
+ // that the indices are valid.
+ inline void SetOppositeCorner(CornerIndex corner_id,
+ CornerIndex opp_corner_id) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ opposite_corners_[corner_id] = opp_corner_id;
+ }
+
+ // Sets opposite corners for both input corners.
+ inline void SetOppositeCorners(CornerIndex corner_0, CornerIndex corner_1) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ if (corner_0 != kInvalidCornerIndex) {
+ SetOppositeCorner(corner_0, corner_1);
+ }
+ if (corner_1 != kInvalidCornerIndex) {
+ SetOppositeCorner(corner_1, corner_0);
+ }
+ }
+
+ // Updates mapping between a corner and a vertex.
+ inline void MapCornerToVertex(CornerIndex corner_id, VertexIndex vert_id) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ corner_to_vertex_map_[corner_id] = vert_id;
+ }
+
+ VertexIndex AddNewVertex() {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ // Add a new invalid vertex.
+ vertex_corners_.push_back(kInvalidCornerIndex);
+ return VertexIndex(static_cast<uint32_t>(vertex_corners_.size() - 1));
+ }
+
+ // Adds a new face connected to three vertices. Note that connectivity is not
+ // automatically updated and all opposite corners need to be set explicitly.
+ FaceIndex AddNewFace(const std::array<VertexIndex, 3> &vertices) {
+ // Add a new invalid face.
+ const FaceIndex new_face_index(num_faces());
+ for (int i = 0; i < 3; ++i) {
+ corner_to_vertex_map_.push_back(vertices[i]);
+ SetLeftMostCorner(vertices[i],
+ CornerIndex(corner_to_vertex_map_.size() - 1));
+ }
+ opposite_corners_.resize(corner_to_vertex_map_.size(), kInvalidCornerIndex);
+ return new_face_index;
+ }
+
+ // Sets a new left most corner for a given vertex.
+ void SetLeftMostCorner(VertexIndex vert, CornerIndex corner) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ if (vert != kInvalidVertexIndex) {
+ vertex_corners_[vert] = corner;
+ }
+ }
+
+ // Updates the vertex to corner map on a specified vertex. This should be
+ // called in cases where the mapping may be invalid (e.g. when the corner
+ // table was constructed manually).
+ void UpdateVertexToCornerMap(VertexIndex vert) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ const CornerIndex first_c = vertex_corners_[vert];
+ if (first_c == kInvalidCornerIndex) {
+ return; // Isolated vertex.
+ }
+ CornerIndex act_c = SwingLeft(first_c);
+ CornerIndex c = first_c;
+ while (act_c != kInvalidCornerIndex && act_c != first_c) {
+ c = act_c;
+ act_c = SwingLeft(act_c);
+ }
+ if (act_c != first_c) {
+ vertex_corners_[vert] = c;
+ }
+ }
+
+ // Sets the new number of vertices. It's a responsibility of the caller to
+ // ensure that no corner is mapped beyond the range of the new number of
+ // vertices.
+ inline void SetNumVertices(int num_vertices) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ vertex_corners_.resize(num_vertices, kInvalidCornerIndex);
+ }
+
+ // Makes a vertex isolated (not attached to any corner).
+ void MakeVertexIsolated(VertexIndex vert) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ vertex_corners_[vert] = kInvalidCornerIndex;
+ }
+
+ // Returns true if a vertex is not attached to any face.
+ inline bool IsVertexIsolated(VertexIndex v) const {
+ return LeftMostCorner(v) == kInvalidCornerIndex;
+ }
+
+ // Makes a given face invalid (all corners are marked as invalid).
+ void MakeFaceInvalid(FaceIndex face) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ if (face != kInvalidFaceIndex) {
+ const CornerIndex first_corner = FirstCorner(face);
+ for (int i = 0; i < 3; ++i) {
+ corner_to_vertex_map_[first_corner + i] = kInvalidVertexIndex;
+ }
+ }
+ }
+
+ // Updates mapping between faces and a vertex using the corners mapped to
+ // the provided vertex.
+ void UpdateFaceToVertexMap(const VertexIndex vertex);
+
+ // Allows access to an internal object for caching valences. The object can
+ // be instructed to cache or uncache all valences and then its interfaces
+ // queried directly for valences with differing performance/confidence
+ // qualities. If the mesh or table is modified the cache should be discarded
+ // and not relied on as it does not automatically update or invalidate for
+ // performance reasons.
+ const draco::ValenceCache<CornerTable> &GetValenceCache() const {
+ return valence_cache_;
+ }
+
+ private:
+ // Computes opposite corners mapping from the data stored in
+ // |corner_to_vertex_map_|.
+ bool ComputeOppositeCorners(int *num_vertices);
+
+ // Finds and breaks non-manifold edges in the 1-ring neighborhood around
+ // vertices (vertices themselves will be split in the ComputeVertexCorners()
+ // function if necessary).
+ bool BreakNonManifoldEdges();
+
+ // Computes the lookup map for going from a vertex to a corner. This method
+ // can handle non-manifold vertices by splitting them into multiple manifold
+ // vertices.
+ bool ComputeVertexCorners(int num_vertices);
+
+ // Each three consecutive corners represent one face.
+ IndexTypeVector<CornerIndex, VertexIndex> corner_to_vertex_map_;
+ IndexTypeVector<CornerIndex, CornerIndex> opposite_corners_;
+ IndexTypeVector<VertexIndex, CornerIndex> vertex_corners_;
+
+ int num_original_vertices_;
+ int num_degenerated_faces_;
+ int num_isolated_vertices_;
+ IndexTypeVector<VertexIndex, VertexIndex> non_manifold_vertex_parents_;
+
+ draco::ValenceCache<CornerTable> valence_cache_;
+};
+
+// A special case to denote an invalid corner table triangle.
+static constexpr CornerTable::FaceType kInvalidFace(
+ {{kInvalidVertexIndex, kInvalidVertexIndex, kInvalidVertexIndex}});
+
+} // namespace draco
+
+#endif // DRACO_MESH_CORNER_TABLE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/corner_table_iterators.h b/libs/assimp/contrib/draco/src/draco/mesh/corner_table_iterators.h
new file mode 100644
index 0000000..7122aa1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/corner_table_iterators.h
@@ -0,0 +1,289 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_CORNER_TABLE_ITERATORS_H_
+#define DRACO_MESH_CORNER_TABLE_ITERATORS_H_
+
+#include "draco/mesh/corner_table.h"
+
+namespace draco {
+
+// Class for iterating over vertices in a 1-ring around the specified vertex.
+template <class CornerTableT>
+class VertexRingIterator
+ : public std::iterator<std::forward_iterator_tag, VertexIndex> {
+ public:
+ // std::iterator interface requires a default constructor.
+ VertexRingIterator()
+ : corner_table_(nullptr),
+ start_corner_(kInvalidCornerIndex),
+ corner_(start_corner_),
+ left_traversal_(true) {}
+
+ // Create the iterator from the provided corner table and the central vertex.
+ VertexRingIterator(const CornerTableT *table, VertexIndex vert_id)
+ : corner_table_(table),
+ start_corner_(table->LeftMostCorner(vert_id)),
+ corner_(start_corner_),
+ left_traversal_(true) {}
+
+ // Gets the last visited ring vertex.
+ VertexIndex Vertex() const {
+ CornerIndex ring_corner = left_traversal_ ? corner_table_->Previous(corner_)
+ : corner_table_->Next(corner_);
+ return corner_table_->Vertex(ring_corner);
+ }
+
+ // Returns one of the corners opposite to the edge connecting the currently
+ // iterated ring vertex with the central vertex.
+ CornerIndex EdgeCorner() const {
+ return left_traversal_ ? corner_table_->Next(corner_)
+ : corner_table_->Previous(corner_);
+ }
+
+ // Returns true when all ring vertices have been visited.
+ bool End() const { return corner_ == kInvalidCornerIndex; }
+
+ // Proceeds to the next ring vertex if possible.
+ void Next() {
+ if (left_traversal_) {
+ corner_ = corner_table_->SwingLeft(corner_);
+ if (corner_ == kInvalidCornerIndex) {
+ // Open boundary reached.
+ corner_ = start_corner_;
+ left_traversal_ = false;
+ } else if (corner_ == start_corner_) {
+ // End reached.
+ corner_ = kInvalidCornerIndex;
+ }
+ } else {
+ // Go to the right until we reach a boundary there (no explicit check
+ // is needed in this case).
+ corner_ = corner_table_->SwingRight(corner_);
+ }
+ }
+
+ // std::iterator interface.
+ value_type operator*() const { return Vertex(); }
+ VertexRingIterator &operator++() {
+ Next();
+ return *this;
+ }
+ VertexRingIterator operator++(int) {
+ const VertexRingIterator result = *this;
+ ++(*this);
+ return result;
+ }
+ bool operator!=(const VertexRingIterator &other) const {
+ return corner_ != other.corner_ || start_corner_ != other.start_corner_;
+ }
+ bool operator==(const VertexRingIterator &other) const {
+ return !this->operator!=(other);
+ }
+
+ // Helper function for getting a valid end iterator.
+ static VertexRingIterator EndIterator(VertexRingIterator other) {
+ VertexRingIterator ret = other;
+ ret.corner_ = kInvalidCornerIndex;
+ return ret;
+ }
+
+ private:
+ const CornerTableT *corner_table_;
+ // The first processed corner.
+ CornerIndex start_corner_;
+ // The last processed corner.
+ CornerIndex corner_;
+ // Traversal direction.
+ bool left_traversal_;
+};
+
+// Class for iterating over faces adjacent to the specified input face.
+template <class CornerTableT>
+class FaceAdjacencyIterator
+ : public std::iterator<std::forward_iterator_tag, FaceIndex> {
+ public:
+ // std::iterator interface requires a default constructor.
+ FaceAdjacencyIterator()
+ : corner_table_(nullptr),
+ start_corner_(kInvalidCornerIndex),
+ corner_(start_corner_) {}
+
+ // Create the iterator from the provided corner table and the central vertex.
+ FaceAdjacencyIterator(const CornerTableT *table, FaceIndex face_id)
+ : corner_table_(table),
+ start_corner_(table->FirstCorner(face_id)),
+ corner_(start_corner_) {
+ // We need to start with a corner that has a valid opposite face (if
+ // there is any such corner).
+ if (corner_table_->Opposite(corner_) == kInvalidCornerIndex) {
+ FindNextFaceNeighbor();
+ }
+ }
+
+ // Gets the last visited adjacent face.
+ FaceIndex Face() const {
+ return corner_table_->Face(corner_table_->Opposite(corner_));
+ }
+
+ // Returns true when all adjacent faces have been visited.
+ bool End() const { return corner_ == kInvalidCornerIndex; }
+
+ // Proceeds to the next adjacent face if possible.
+ void Next() { FindNextFaceNeighbor(); }
+
+ // std::iterator interface.
+ value_type operator*() const { return Face(); }
+ FaceAdjacencyIterator &operator++() {
+ Next();
+ return *this;
+ }
+ FaceAdjacencyIterator operator++(int) {
+ const FaceAdjacencyIterator result = *this;
+ ++(*this);
+ return result;
+ }
+ bool operator!=(const FaceAdjacencyIterator &other) const {
+ return corner_ != other.corner_ || start_corner_ != other.start_corner_;
+ }
+ bool operator==(const FaceAdjacencyIterator &other) const {
+ return !this->operator!=(other);
+ }
+
+ // Helper function for getting a valid end iterator.
+ static FaceAdjacencyIterator EndIterator(FaceAdjacencyIterator other) {
+ FaceAdjacencyIterator ret = other;
+ ret.corner_ = kInvalidCornerIndex;
+ return ret;
+ }
+
+ private:
+ // Finds the next corner with a valid opposite face.
+ void FindNextFaceNeighbor() {
+ while (corner_ != kInvalidCornerIndex) {
+ corner_ = corner_table_->Next(corner_);
+ if (corner_ == start_corner_) {
+ corner_ = kInvalidCornerIndex;
+ return;
+ }
+ if (corner_table_->Opposite(corner_) != kInvalidCornerIndex) {
+ // Valid opposite face.
+ return;
+ }
+ }
+ }
+
+ const CornerTableT *corner_table_;
+ // The first processed corner.
+ CornerIndex start_corner_;
+ // The last processed corner.
+ CornerIndex corner_;
+};
+
+// Class for iterating over corners attached to a specified vertex.
+template <class CornerTableT = CornerTable>
+class VertexCornersIterator
+ : public std::iterator<std::forward_iterator_tag, CornerIndex> {
+ public:
+ // std::iterator interface requires a default constructor.
+ VertexCornersIterator()
+ : corner_table_(nullptr),
+ start_corner_(-1),
+ corner_(start_corner_),
+ left_traversal_(true) {}
+
+ // Create the iterator from the provided corner table and the central vertex.
+ VertexCornersIterator(const CornerTableT *table, VertexIndex vert_id)
+ : corner_table_(table),
+ start_corner_(table->LeftMostCorner(vert_id)),
+ corner_(start_corner_),
+ left_traversal_(true) {}
+
+ // Create the iterator from the provided corner table and the first corner.
+ VertexCornersIterator(const CornerTableT *table, CornerIndex corner_id)
+ : corner_table_(table),
+ start_corner_(corner_id),
+ corner_(start_corner_),
+ left_traversal_(true) {}
+
+ // Gets the last visited corner.
+ CornerIndex Corner() const { return corner_; }
+
+ // Returns true when all ring vertices have been visited.
+ bool End() const { return corner_ == kInvalidCornerIndex; }
+
+ // Proceeds to the next corner if possible.
+ void Next() {
+ if (left_traversal_) {
+ corner_ = corner_table_->SwingLeft(corner_);
+ if (corner_ == kInvalidCornerIndex) {
+ // Open boundary reached.
+ corner_ = corner_table_->SwingRight(start_corner_);
+ left_traversal_ = false;
+ } else if (corner_ == start_corner_) {
+ // End reached.
+ corner_ = kInvalidCornerIndex;
+ }
+ } else {
+ // Go to the right until we reach a boundary there (no explicit check
+ // is needed in this case).
+ corner_ = corner_table_->SwingRight(corner_);
+ }
+ }
+
+ // std::iterator interface.
+ CornerIndex operator*() const { return Corner(); }
+ VertexCornersIterator &operator++() {
+ Next();
+ return *this;
+ }
+ VertexCornersIterator operator++(int) {
+ const VertexCornersIterator result = *this;
+ ++(*this);
+ return result;
+ }
+ bool operator!=(const VertexCornersIterator &other) const {
+ return corner_ != other.corner_ || start_corner_ != other.start_corner_;
+ }
+ bool operator==(const VertexCornersIterator &other) const {
+ return !this->operator!=(other);
+ }
+
+ // Helper function for getting a valid end iterator.
+ static VertexCornersIterator EndIterator(VertexCornersIterator other) {
+ VertexCornersIterator ret = other;
+ ret.corner_ = kInvalidCornerIndex;
+ return ret;
+ }
+
+ protected:
+ const CornerTableT *corner_table() const { return corner_table_; }
+ CornerIndex start_corner() const { return start_corner_; }
+ CornerIndex &corner() { return corner_; }
+ bool is_left_traversal() const { return left_traversal_; }
+ void swap_traversal() { left_traversal_ = !left_traversal_; }
+
+ private:
+ const CornerTableT *corner_table_;
+ // The first processed corner.
+ CornerIndex start_corner_;
+ // The last processed corner.
+ CornerIndex corner_;
+ // Traversal direction.
+ bool left_traversal_;
+};
+
+} // namespace draco
+
+#endif // DRACO_MESH_CORNER_TABLE_ITERATORS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh.cc
new file mode 100644
index 0000000..3be4b14
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh.h"
+
+#include <array>
+
+namespace draco {
+
+// Shortcut for typed conditionals.
+template <bool B, class T, class F>
+using conditional_t = typename std::conditional<B, T, F>::type;
+
+Mesh::Mesh() {}
+
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+void Mesh::ApplyPointIdDeduplication(
+ const IndexTypeVector<PointIndex, PointIndex> &id_map,
+ const std::vector<PointIndex> &unique_point_ids) {
+ PointCloud::ApplyPointIdDeduplication(id_map, unique_point_ids);
+ for (FaceIndex f(0); f < num_faces(); ++f) {
+ for (int32_t c = 0; c < 3; ++c) {
+ faces_[f][c] = id_map[faces_[f][c]];
+ }
+ }
+}
+#endif
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh.h b/libs/assimp/contrib/draco/src/draco/mesh/mesh.h
new file mode 100644
index 0000000..f4506da
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh.h
@@ -0,0 +1,152 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_MESH_H_
+#define DRACO_MESH_MESH_H_
+
+#include <memory>
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/core/hash_utils.h"
+#include "draco/core/macros.h"
+#include "draco/core/status.h"
+#include "draco/draco_features.h"
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+// List of different variants of mesh attributes.
+enum MeshAttributeElementType {
+ // All corners attached to a vertex share the same attribute value. A typical
+ // example are the vertex positions and often vertex colors.
+ MESH_VERTEX_ATTRIBUTE = 0,
+ // The most general attribute where every corner of the mesh can have a
+ // different attribute value. Often used for texture coordinates or normals.
+ MESH_CORNER_ATTRIBUTE,
+ // All corners of a single face share the same value.
+ MESH_FACE_ATTRIBUTE
+};
+
+// Mesh class can be used to represent general triangular meshes. Internally,
+// Mesh is just an extended PointCloud with extra connectivity data that defines
+// what points are connected together in triangles.
+class Mesh : public PointCloud {
+ public:
+ typedef std::array<PointIndex, 3> Face;
+
+ Mesh();
+
+ void AddFace(const Face &face) { faces_.push_back(face); }
+
+ void SetFace(FaceIndex face_id, const Face &face) {
+ if (face_id >= static_cast<uint32_t>(faces_.size())) {
+ faces_.resize(face_id.value() + 1, Face());
+ }
+ faces_[face_id] = face;
+ }
+
+ // Sets the total number of faces. Creates new empty faces or deletes
+ // existing ones if necessary.
+ void SetNumFaces(size_t num_faces) { faces_.resize(num_faces, Face()); }
+
+ FaceIndex::ValueType num_faces() const {
+ return static_cast<uint32_t>(faces_.size());
+ }
+ const Face &face(FaceIndex face_id) const {
+ DRACO_DCHECK_LE(0, face_id.value());
+ DRACO_DCHECK_LT(face_id.value(), static_cast<int>(faces_.size()));
+ return faces_[face_id];
+ }
+
+ void SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa) override {
+ PointCloud::SetAttribute(att_id, std::move(pa));
+ if (static_cast<int>(attribute_data_.size()) <= att_id) {
+ attribute_data_.resize(att_id + 1);
+ }
+ }
+
+ void DeleteAttribute(int att_id) override {
+ PointCloud::DeleteAttribute(att_id);
+ if (att_id >= 0 && att_id < static_cast<int>(attribute_data_.size())) {
+ attribute_data_.erase(attribute_data_.begin() + att_id);
+ }
+ }
+
+ MeshAttributeElementType GetAttributeElementType(int att_id) const {
+ return attribute_data_[att_id].element_type;
+ }
+
+ void SetAttributeElementType(int att_id, MeshAttributeElementType et) {
+ attribute_data_[att_id].element_type = et;
+ }
+
+ // Returns the point id of for a corner |ci|.
+ inline PointIndex CornerToPointId(int ci) const {
+ if (ci < 0 || static_cast<uint32_t>(ci) == kInvalidCornerIndex.value()) {
+ return kInvalidPointIndex;
+ }
+ return this->face(FaceIndex(ci / 3))[ci % 3];
+ }
+
+ // Returns the point id of a corner |ci|.
+ inline PointIndex CornerToPointId(CornerIndex ci) const {
+ return this->CornerToPointId(ci.value());
+ }
+
+ struct AttributeData {
+ AttributeData() : element_type(MESH_CORNER_ATTRIBUTE) {}
+ MeshAttributeElementType element_type;
+ };
+
+ protected:
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+ // Extends the point deduplication to face corners. This method is called from
+ // the PointCloud::DeduplicatePointIds() and it remaps all point ids stored in
+ // |faces_| to the new deduplicated point ids using the map |id_map|.
+ void ApplyPointIdDeduplication(
+ const IndexTypeVector<PointIndex, PointIndex> &id_map,
+ const std::vector<PointIndex> &unique_point_ids) override;
+#endif
+
+ private:
+ // Mesh specific per-attribute data.
+ std::vector<AttributeData> attribute_data_;
+
+ // Vertex indices valid for all attributes. Each attribute has its own map
+ // that converts vertex indices into attribute indices.
+ IndexTypeVector<FaceIndex, Face> faces_;
+
+ friend struct MeshHasher;
+};
+
+// Functor for computing a hash from data stored within a mesh.
+// Note that this can be quite slow. Two meshes will have the same hash only
+// when they have exactly the same connectivity and attribute values.
+struct MeshHasher {
+ size_t operator()(const Mesh &mesh) const {
+ PointCloudHasher pc_hasher;
+ size_t hash = pc_hasher(mesh);
+ // Hash faces.
+ for (FaceIndex i(0); i < static_cast<uint32_t>(mesh.faces_.size()); ++i) {
+ for (int j = 0; j < 3; ++j) {
+ hash = HashCombine(mesh.faces_[i][j].value(), hash);
+ }
+ }
+ return hash;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_MESH_MESH_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.cc
new file mode 100644
index 0000000..b832379
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.cc
@@ -0,0 +1,205 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_are_equivalent.h"
+
+#include <algorithm>
+
+namespace draco {
+
+void MeshAreEquivalent::PrintPosition(const Mesh &mesh, FaceIndex f,
+ int32_t c) {
+ fprintf(stderr, "Printing position for (%i,%i)\n", f.value(), c);
+ const auto pos_att = mesh.GetNamedAttribute(GeometryAttribute::POSITION);
+ const PointIndex ver_index = mesh.face(f)[c];
+ const AttributeValueIndex pos_index = pos_att->mapped_index(ver_index);
+ const auto pos = pos_att->GetValue<float, 3>(pos_index);
+ fprintf(stderr, "Position (%f,%f,%f)\n", pos[0], pos[1], pos[2]);
+}
+
+Vector3f MeshAreEquivalent::GetPosition(const Mesh &mesh, FaceIndex f,
+ int32_t c) {
+ const auto pos_att = mesh.GetNamedAttribute(GeometryAttribute::POSITION);
+ const PointIndex ver_index = mesh.face(f)[c];
+ const AttributeValueIndex pos_index = pos_att->mapped_index(ver_index);
+ const auto pos = pos_att->GetValue<float, 3>(pos_index);
+ return Vector3f(pos[0], pos[1], pos[2]);
+}
+
+void MeshAreEquivalent::InitCornerIndexOfSmallestPointXYZ() {
+ DRACO_DCHECK_EQ(mesh_infos_[0].corner_index_of_smallest_vertex.size(), 0);
+ DRACO_DCHECK_EQ(mesh_infos_[1].corner_index_of_smallest_vertex.size(), 0);
+ for (int i = 0; i < 2; ++i) {
+ mesh_infos_[i].corner_index_of_smallest_vertex.reserve(num_faces_);
+ for (FaceIndex f(0); f < num_faces_; ++f) {
+ mesh_infos_[i].corner_index_of_smallest_vertex.push_back(
+ ComputeCornerIndexOfSmallestPointXYZ(mesh_infos_[i].mesh, f));
+ }
+ }
+ DRACO_DCHECK_EQ(mesh_infos_[0].corner_index_of_smallest_vertex.size(),
+ num_faces_);
+ DRACO_DCHECK_EQ(mesh_infos_[1].corner_index_of_smallest_vertex.size(),
+ num_faces_);
+}
+
+void MeshAreEquivalent::InitOrderedFaceIndex() {
+ DRACO_DCHECK_EQ(mesh_infos_[0].ordered_index_of_face.size(), 0);
+ DRACO_DCHECK_EQ(mesh_infos_[1].ordered_index_of_face.size(), 0);
+ for (int32_t i = 0; i < 2; ++i) {
+ mesh_infos_[i].ordered_index_of_face.reserve(num_faces_);
+ for (FaceIndex j(0); j < num_faces_; ++j) {
+ mesh_infos_[i].ordered_index_of_face.push_back(j);
+ }
+ const FaceIndexLess less(mesh_infos_[i]);
+ std::sort(mesh_infos_[i].ordered_index_of_face.begin(),
+ mesh_infos_[i].ordered_index_of_face.end(), less);
+
+ DRACO_DCHECK_EQ(mesh_infos_[i].ordered_index_of_face.size(), num_faces_);
+ DRACO_DCHECK(std::is_sorted(mesh_infos_[i].ordered_index_of_face.begin(),
+ mesh_infos_[i].ordered_index_of_face.end(),
+ less));
+ }
+}
+
+int32_t MeshAreEquivalent::ComputeCornerIndexOfSmallestPointXYZ(
+ const Mesh &mesh, FaceIndex f) {
+ Vector3f pos[3]; // For the three corners.
+ for (int32_t i = 0; i < 3; ++i) {
+ pos[i] = GetPosition(mesh, f, i);
+ }
+ const auto min_it = std::min_element(pos, pos + 3);
+ return static_cast<int32_t>(min_it - pos);
+}
+
+void MeshAreEquivalent::Init(const Mesh &mesh0, const Mesh &mesh1) {
+ mesh_infos_.clear();
+ DRACO_DCHECK_EQ(mesh_infos_.size(), 0);
+
+ num_faces_ = mesh1.num_faces();
+ mesh_infos_.push_back(MeshInfo(mesh0));
+ mesh_infos_.push_back(MeshInfo(mesh1));
+
+ DRACO_DCHECK_EQ(mesh_infos_.size(), 2);
+ DRACO_DCHECK_EQ(mesh_infos_[0].corner_index_of_smallest_vertex.size(), 0);
+ DRACO_DCHECK_EQ(mesh_infos_[1].corner_index_of_smallest_vertex.size(), 0);
+ DRACO_DCHECK_EQ(mesh_infos_[0].ordered_index_of_face.size(), 0);
+ DRACO_DCHECK_EQ(mesh_infos_[1].ordered_index_of_face.size(), 0);
+
+ InitCornerIndexOfSmallestPointXYZ();
+ InitOrderedFaceIndex();
+}
+
+bool MeshAreEquivalent::operator()(const Mesh &mesh0, const Mesh &mesh1) {
+ if (mesh0.num_faces() != mesh1.num_faces()) {
+ return false;
+ }
+ if (mesh0.num_attributes() != mesh1.num_attributes()) {
+ return false;
+ }
+
+ // The following function inits mesh info, i.e., computes the order of
+ // faces with respect to the lex order. This way one can then compare the
+ // the two meshes face by face. It also determines the first corner of each
+ // face with respect to lex order.
+ Init(mesh0, mesh1);
+
+ // Check for every attribute that is valid that every corner is identical.
+ typedef GeometryAttribute::Type AttributeType;
+ const int att_max = AttributeType::NAMED_ATTRIBUTES_COUNT;
+ for (int att_id = 0; att_id < att_max; ++att_id) {
+ // First check for existence of the attribute in both meshes.
+ const PointAttribute *const att0 =
+ mesh0.GetNamedAttribute(AttributeType(att_id));
+ const PointAttribute *const att1 =
+ mesh1.GetNamedAttribute(AttributeType(att_id));
+ if (att0 == nullptr && att1 == nullptr) {
+ continue;
+ }
+ if (att0 == nullptr) {
+ return false;
+ }
+ if (att1 == nullptr) {
+ return false;
+ }
+ if (att0->data_type() != att1->data_type()) {
+ return false;
+ }
+ if (att0->num_components() != att1->num_components()) {
+ return false;
+ }
+ if (att0->normalized() != att1->normalized()) {
+ return false;
+ }
+ if (att0->byte_stride() != att1->byte_stride()) {
+ return false;
+ }
+
+ DRACO_DCHECK(att0->IsValid());
+ DRACO_DCHECK(att1->IsValid());
+
+ // Prepare blocks of memory to hold data of corners for this attribute.
+ std::unique_ptr<uint8_t[]> data0(new uint8_t[att0->byte_stride()]);
+ std::unique_ptr<uint8_t[]> data1(new uint8_t[att0->byte_stride()]);
+
+ // Check every corner of every face.
+ for (int i = 0; i < num_faces_; ++i) {
+ const FaceIndex f0 = mesh_infos_[0].ordered_index_of_face[i];
+ const FaceIndex f1 = mesh_infos_[1].ordered_index_of_face[i];
+ const int c0_off = mesh_infos_[0].corner_index_of_smallest_vertex[f0];
+ const int c1_off = mesh_infos_[1].corner_index_of_smallest_vertex[f1];
+
+ for (int c = 0; c < 3; ++c) {
+ // Get the index of each corner.
+ const PointIndex corner0 = mesh0.face(f0)[(c0_off + c) % 3];
+ const PointIndex corner1 = mesh1.face(f1)[(c1_off + c) % 3];
+ // Map it to the right index for that attribute.
+ const AttributeValueIndex index0 = att0->mapped_index(corner0);
+ const AttributeValueIndex index1 = att1->mapped_index(corner1);
+
+ // Obtaining the data.
+ att0->GetValue(index0, data0.get());
+ att1->GetValue(index1, data1.get());
+ // Compare the data as is in memory.
+ if (memcmp(data0.get(), data1.get(), att0->byte_stride()) != 0) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool MeshAreEquivalent::FaceIndexLess::operator()(FaceIndex f0,
+ FaceIndex f1) const {
+ if (f0 == f1) {
+ return false;
+ }
+ const int c0 = mesh_info.corner_index_of_smallest_vertex[f0];
+ const int c1 = mesh_info.corner_index_of_smallest_vertex[f1];
+
+ for (int i = 0; i < 3; ++i) {
+ const Vector3f vf0 = GetPosition(mesh_info.mesh, f0, (c0 + i) % 3);
+ const Vector3f vf1 = GetPosition(mesh_info.mesh, f1, (c1 + i) % 3);
+ if (vf0 < vf1) {
+ return true;
+ }
+ if (vf1 < vf0) {
+ return false;
+ }
+ }
+ // In case the two faces are equivalent.
+ return false;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.h b/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.h
new file mode 100644
index 0000000..71ef4a9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent.h
@@ -0,0 +1,71 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_MESH_ARE_EQUIVALENT_H_
+#define DRACO_MESH_MESH_ARE_EQUIVALENT_H_
+
+#include "draco/core/vector_d.h"
+#include "draco/mesh/mesh.h"
+
+// This file defines a functor to compare two meshes for equivalency up
+// to permutation of the vertices.
+namespace draco {
+
+// A functor to compare two meshes for equivalency up to permutation of the
+// vertices.
+class MeshAreEquivalent {
+ public:
+ // Returns true if both meshes are equivalent up to permutation of
+ // the internal order of vertices. This includes all attributes.
+ bool operator()(const Mesh &mesh0, const Mesh &mesh1);
+
+ private:
+ // Internal type to keep overview.
+ struct MeshInfo {
+ explicit MeshInfo(const Mesh &in_mesh) : mesh(in_mesh) {}
+ const Mesh &mesh;
+ std::vector<FaceIndex> ordered_index_of_face;
+ IndexTypeVector<FaceIndex, int> corner_index_of_smallest_vertex;
+ };
+
+ // Prepare functor for actual comparison.
+ void Init(const Mesh &mesh0, const Mesh &mesh1);
+
+ // Get position as Vector3f of corner c of face f.
+ static Vector3f GetPosition(const Mesh &mesh, FaceIndex f, int32_t c);
+ // Internal helper function mostly for debugging.
+ void PrintPosition(const Mesh &mesh, FaceIndex f, int32_t c);
+ // Get the corner index of the lex smallest vertex of face f.
+ static int32_t ComputeCornerIndexOfSmallestPointXYZ(const Mesh &mesh,
+ FaceIndex f);
+
+ // Less compare functor for two faces (represented by their indices)
+ // with respect to their lex order.
+ struct FaceIndexLess {
+ explicit FaceIndexLess(const MeshInfo &in_mesh_info)
+ : mesh_info(in_mesh_info) {}
+ bool operator()(FaceIndex f0, FaceIndex f1) const;
+ const MeshInfo &mesh_info;
+ };
+
+ void InitCornerIndexOfSmallestPointXYZ();
+ void InitOrderedFaceIndex();
+
+ std::vector<MeshInfo> mesh_infos_;
+ int32_t num_faces_;
+};
+
+} // namespace draco
+
+#endif // DRACO_MESH_MESH_ARE_EQUIVALENT_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent_test.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent_test.cc
new file mode 100644
index 0000000..74db3f7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_are_equivalent_test.cc
@@ -0,0 +1,98 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_are_equivalent.h"
+
+#include <sstream>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/io/mesh_io.h"
+#include "draco/io/obj_decoder.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+class MeshAreEquivalentTest : public ::testing::Test {};
+
+TEST_F(MeshAreEquivalentTest, TestOnIndenticalMesh) {
+ const std::string file_name = "test_nm.obj";
+ const std::unique_ptr<Mesh> mesh(ReadMeshFromTestFile(file_name));
+ ASSERT_NE(mesh, nullptr) << "Failed to load test model." << file_name;
+ MeshAreEquivalent equiv;
+ ASSERT_TRUE(equiv(*mesh, *mesh));
+}
+
+TEST_F(MeshAreEquivalentTest, TestPermutedOneFace) {
+ const std::string file_name_0 = "one_face_123.obj";
+ const std::string file_name_1 = "one_face_312.obj";
+ const std::string file_name_2 = "one_face_321.obj";
+ const std::unique_ptr<Mesh> mesh_0(ReadMeshFromTestFile(file_name_0));
+ const std::unique_ptr<Mesh> mesh_1(ReadMeshFromTestFile(file_name_1));
+ const std::unique_ptr<Mesh> mesh_2(ReadMeshFromTestFile(file_name_2));
+ ASSERT_NE(mesh_0, nullptr) << "Failed to load test model." << file_name_0;
+ ASSERT_NE(mesh_1, nullptr) << "Failed to load test model." << file_name_1;
+ ASSERT_NE(mesh_2, nullptr) << "Failed to load test model." << file_name_2;
+ MeshAreEquivalent equiv;
+ ASSERT_TRUE(equiv(*mesh_0, *mesh_0));
+ ASSERT_TRUE(equiv(*mesh_0, *mesh_1)); // Face rotated.
+ ASSERT_FALSE(equiv(*mesh_0, *mesh_2)); // Face inverted.
+}
+
+TEST_F(MeshAreEquivalentTest, TestPermutedTwoFaces) {
+ const std::string file_name_0 = "two_faces_123.obj";
+ const std::string file_name_1 = "two_faces_312.obj";
+ const std::unique_ptr<Mesh> mesh_0(ReadMeshFromTestFile(file_name_0));
+ const std::unique_ptr<Mesh> mesh_1(ReadMeshFromTestFile(file_name_1));
+ ASSERT_NE(mesh_0, nullptr) << "Failed to load test model." << file_name_0;
+ ASSERT_NE(mesh_1, nullptr) << "Failed to load test model." << file_name_1;
+ MeshAreEquivalent equiv;
+ ASSERT_TRUE(equiv(*mesh_0, *mesh_0));
+ ASSERT_TRUE(equiv(*mesh_1, *mesh_1));
+ ASSERT_TRUE(equiv(*mesh_0, *mesh_1));
+}
+
+TEST_F(MeshAreEquivalentTest, TestPermutedThreeFaces) {
+ const std::string file_name_0 = "three_faces_123.obj";
+ const std::string file_name_1 = "three_faces_312.obj";
+ const std::unique_ptr<Mesh> mesh_0(ReadMeshFromTestFile(file_name_0));
+ const std::unique_ptr<Mesh> mesh_1(ReadMeshFromTestFile(file_name_1));
+ ASSERT_NE(mesh_0, nullptr) << "Failed to load test model." << file_name_0;
+ ASSERT_NE(mesh_1, nullptr) << "Failed to load test model." << file_name_1;
+ MeshAreEquivalent equiv;
+ ASSERT_TRUE(equiv(*mesh_0, *mesh_0));
+ ASSERT_TRUE(equiv(*mesh_1, *mesh_1));
+ ASSERT_TRUE(equiv(*mesh_0, *mesh_1));
+}
+
+// This test checks that the edgebreaker algorithm does not change the mesh up
+// to the order of faces and vertices.
+TEST_F(MeshAreEquivalentTest, TestOnBigMesh) {
+ const std::string file_name = "test_nm.obj";
+ const std::unique_ptr<Mesh> mesh0(ReadMeshFromTestFile(file_name));
+ ASSERT_NE(mesh0, nullptr) << "Failed to load test model." << file_name;
+
+ std::unique_ptr<Mesh> mesh1;
+ std::stringstream ss;
+ WriteMeshIntoStream(mesh0.get(), ss, MESH_EDGEBREAKER_ENCODING);
+ ReadMeshFromStream(&mesh1, ss);
+ ASSERT_TRUE(ss.good()) << "Mesh IO failed.";
+
+ MeshAreEquivalent equiv;
+ ASSERT_TRUE(equiv(*mesh0, *mesh0));
+ ASSERT_TRUE(equiv(*mesh1, *mesh1));
+ ASSERT_TRUE(equiv(*mesh0, *mesh1));
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.cc
new file mode 100644
index 0000000..28b68d5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.cc
@@ -0,0 +1,211 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_attribute_corner_table.h"
+
+#include "draco/mesh/corner_table_iterators.h"
+#include "draco/mesh/mesh_misc_functions.h"
+
+namespace draco {
+
+MeshAttributeCornerTable::MeshAttributeCornerTable()
+ : no_interior_seams_(true), corner_table_(nullptr), valence_cache_(*this) {}
+
+bool MeshAttributeCornerTable::InitEmpty(const CornerTable *table) {
+ if (table == nullptr) {
+ return false;
+ }
+ valence_cache_.ClearValenceCache();
+ valence_cache_.ClearValenceCacheInaccurate();
+ is_edge_on_seam_.assign(table->num_corners(), false);
+ is_vertex_on_seam_.assign(table->num_vertices(), false);
+ corner_to_vertex_map_.assign(table->num_corners(), kInvalidVertexIndex);
+ vertex_to_attribute_entry_id_map_.reserve(table->num_vertices());
+ vertex_to_left_most_corner_map_.reserve(table->num_vertices());
+ corner_table_ = table;
+ no_interior_seams_ = true;
+ return true;
+}
+
+bool MeshAttributeCornerTable::InitFromAttribute(const Mesh *mesh,
+ const CornerTable *table,
+ const PointAttribute *att) {
+ if (!InitEmpty(table)) {
+ return false;
+ }
+ valence_cache_.ClearValenceCache();
+ valence_cache_.ClearValenceCacheInaccurate();
+
+ // Find all necessary data for encoding attributes. For now we check which of
+ // the mesh vertices is part of an attribute seam, because seams require
+ // special handling.
+ for (CornerIndex c(0); c < corner_table_->num_corners(); ++c) {
+ const FaceIndex f = corner_table_->Face(c);
+ if (corner_table_->IsDegenerated(f)) {
+ continue; // Ignore corners on degenerated faces.
+ }
+ const CornerIndex opp_corner = corner_table_->Opposite(c);
+ if (opp_corner == kInvalidCornerIndex) {
+ // Boundary. Mark it as seam edge.
+ is_edge_on_seam_[c.value()] = true;
+ // Mark seam vertices.
+ VertexIndex v;
+ v = corner_table_->Vertex(corner_table_->Next(c));
+ is_vertex_on_seam_[v.value()] = true;
+ v = corner_table_->Vertex(corner_table_->Previous(c));
+ is_vertex_on_seam_[v.value()] = true;
+ continue;
+ }
+ if (opp_corner < c) {
+ continue; // Opposite corner was already processed.
+ }
+
+ CornerIndex act_c(c), act_sibling_c(opp_corner);
+ for (int i = 0; i < 2; ++i) {
+ // Get the sibling corners. I.e., the two corners attached to the same
+ // vertex but divided by the seam edge.
+ act_c = corner_table_->Next(act_c);
+ act_sibling_c = corner_table_->Previous(act_sibling_c);
+ const PointIndex point_id = mesh->CornerToPointId(act_c.value());
+ const PointIndex sibling_point_id =
+ mesh->CornerToPointId(act_sibling_c.value());
+ if (att->mapped_index(point_id) != att->mapped_index(sibling_point_id)) {
+ no_interior_seams_ = false;
+ is_edge_on_seam_[c.value()] = true;
+ is_edge_on_seam_[opp_corner.value()] = true;
+ // Mark seam vertices.
+ is_vertex_on_seam_[corner_table_
+ ->Vertex(corner_table_->Next(CornerIndex(c)))
+ .value()] = true;
+ is_vertex_on_seam_[corner_table_
+ ->Vertex(corner_table_->Previous(CornerIndex(c)))
+ .value()] = true;
+ is_vertex_on_seam_
+ [corner_table_->Vertex(corner_table_->Next(opp_corner)).value()] =
+ true;
+ is_vertex_on_seam_[corner_table_
+ ->Vertex(corner_table_->Previous(opp_corner))
+ .value()] = true;
+ break;
+ }
+ }
+ }
+ RecomputeVertices(mesh, att);
+ return true;
+}
+
+void MeshAttributeCornerTable::AddSeamEdge(CornerIndex c) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ is_edge_on_seam_[c.value()] = true;
+ // Mark seam vertices.
+ is_vertex_on_seam_[corner_table_->Vertex(corner_table_->Next(c)).value()] =
+ true;
+ is_vertex_on_seam_[corner_table_->Vertex(corner_table_->Previous(c))
+ .value()] = true;
+
+ const CornerIndex opp_corner = corner_table_->Opposite(c);
+ if (opp_corner != kInvalidCornerIndex) {
+ no_interior_seams_ = false;
+ is_edge_on_seam_[opp_corner.value()] = true;
+ is_vertex_on_seam_[corner_table_->Vertex(corner_table_->Next(opp_corner))
+ .value()] = true;
+ is_vertex_on_seam_
+ [corner_table_->Vertex(corner_table_->Previous(opp_corner)).value()] =
+ true;
+ }
+}
+
+void MeshAttributeCornerTable::RecomputeVertices(const Mesh *mesh,
+ const PointAttribute *att) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ if (mesh != nullptr && att != nullptr) {
+ RecomputeVerticesInternal<true>(mesh, att);
+ } else {
+ RecomputeVerticesInternal<false>(nullptr, nullptr);
+ }
+}
+
+template <bool init_vertex_to_attribute_entry_map>
+void MeshAttributeCornerTable::RecomputeVerticesInternal(
+ const Mesh *mesh, const PointAttribute *att) {
+ DRACO_DCHECK(GetValenceCache().IsCacheEmpty());
+ vertex_to_attribute_entry_id_map_.clear();
+ vertex_to_left_most_corner_map_.clear();
+ int num_new_vertices = 0;
+ for (VertexIndex v(0); v < corner_table_->num_vertices(); ++v) {
+ const CornerIndex c = corner_table_->LeftMostCorner(v);
+ if (c == kInvalidCornerIndex) {
+ continue; // Isolated vertex?
+ }
+ AttributeValueIndex first_vert_id(num_new_vertices++);
+ if (init_vertex_to_attribute_entry_map) {
+ const PointIndex point_id = mesh->CornerToPointId(c.value());
+ vertex_to_attribute_entry_id_map_.push_back(att->mapped_index(point_id));
+ } else {
+ // Identity mapping
+ vertex_to_attribute_entry_id_map_.push_back(first_vert_id);
+ }
+ CornerIndex first_c = c;
+ CornerIndex act_c;
+ // Check if the vertex is on a seam edge, if it is we need to find the first
+ // attribute entry on the seam edge when traversing in the CCW direction.
+ if (is_vertex_on_seam_[v.value()]) {
+ // Try to swing left on the modified corner table. We need to get the
+ // first corner that defines an attribute seam.
+ act_c = SwingLeft(first_c);
+ while (act_c != kInvalidCornerIndex) {
+ first_c = act_c;
+ act_c = SwingLeft(act_c);
+ }
+ }
+ corner_to_vertex_map_[first_c.value()] = VertexIndex(first_vert_id.value());
+ vertex_to_left_most_corner_map_.push_back(first_c);
+ act_c = corner_table_->SwingRight(first_c);
+ while (act_c != kInvalidCornerIndex && act_c != first_c) {
+ if (IsCornerOppositeToSeamEdge(corner_table_->Next(act_c))) {
+ first_vert_id = AttributeValueIndex(num_new_vertices++);
+ if (init_vertex_to_attribute_entry_map) {
+ const PointIndex point_id = mesh->CornerToPointId(act_c.value());
+ vertex_to_attribute_entry_id_map_.push_back(
+ att->mapped_index(point_id));
+ } else {
+ // Identity mapping.
+ vertex_to_attribute_entry_id_map_.push_back(first_vert_id);
+ }
+ vertex_to_left_most_corner_map_.push_back(act_c);
+ }
+ corner_to_vertex_map_[act_c.value()] = VertexIndex(first_vert_id.value());
+ act_c = corner_table_->SwingRight(act_c);
+ }
+ }
+}
+
+int MeshAttributeCornerTable::Valence(VertexIndex v) const {
+ if (v == kInvalidVertexIndex) {
+ return -1;
+ }
+ return ConfidentValence(v);
+}
+
+int MeshAttributeCornerTable::ConfidentValence(VertexIndex v) const {
+ DRACO_DCHECK_LT(v.value(), num_vertices());
+ draco::VertexRingIterator<MeshAttributeCornerTable> vi(this, v);
+ int valence = 0;
+ for (; !vi.End(); vi.Next()) {
+ ++valence;
+ }
+ return valence;
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.h b/libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.h
new file mode 100644
index 0000000..7dad25c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_attribute_corner_table.h
@@ -0,0 +1,196 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_MESH_ATTRIBUTE_CORNER_TABLE_H_
+#define DRACO_MESH_MESH_ATTRIBUTE_CORNER_TABLE_H_
+
+#include "draco/core/macros.h"
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/mesh.h"
+#include "draco/mesh/valence_cache.h"
+
+namespace draco {
+
+// Class for storing connectivity of mesh attributes. The connectivity is stored
+// as a difference from the base mesh's corner table, where the differences are
+// represented by attribute seam edges. This class provides a basic
+// functionality for detecting the seam edges for a given attribute and for
+// traversing the constrained corner table with the seam edges.
+class MeshAttributeCornerTable {
+ public:
+ MeshAttributeCornerTable();
+ bool InitEmpty(const CornerTable *table);
+ bool InitFromAttribute(const Mesh *mesh, const CornerTable *table,
+ const PointAttribute *att);
+
+ void AddSeamEdge(CornerIndex opp_corner);
+
+ // Recomputes vertices using the newly added seam edges (needs to be called
+ // whenever the seam edges are updated).
+ // |mesh| and |att| can be null, in which case mapping between vertices and
+ // attribute value ids is set to identity.
+ void RecomputeVertices(const Mesh *mesh, const PointAttribute *att);
+
+ inline bool IsCornerOppositeToSeamEdge(CornerIndex corner) const {
+ return is_edge_on_seam_[corner.value()];
+ }
+
+ inline CornerIndex Opposite(CornerIndex corner) const {
+ if (corner == kInvalidCornerIndex || IsCornerOppositeToSeamEdge(corner)) {
+ return kInvalidCornerIndex;
+ }
+ return corner_table_->Opposite(corner);
+ }
+
+ inline CornerIndex Next(CornerIndex corner) const {
+ return corner_table_->Next(corner);
+ }
+
+ inline CornerIndex Previous(CornerIndex corner) const {
+ return corner_table_->Previous(corner);
+ }
+
+ // Returns true when a corner is attached to any attribute seam.
+ inline bool IsCornerOnSeam(CornerIndex corner) const {
+ return is_vertex_on_seam_[corner_table_->Vertex(corner).value()];
+ }
+
+ // Similar to CornerTable::GetLeftCorner and CornerTable::GetRightCorner, but
+ // does not go over seam edges.
+ inline CornerIndex GetLeftCorner(CornerIndex corner) const {
+ return Opposite(Previous(corner));
+ }
+ inline CornerIndex GetRightCorner(CornerIndex corner) const {
+ return Opposite(Next(corner));
+ }
+
+ // Similar to CornerTable::SwingRight, but it does not go over seam edges.
+ inline CornerIndex SwingRight(CornerIndex corner) const {
+ return Previous(Opposite(Previous(corner)));
+ }
+
+ // Similar to CornerTable::SwingLeft, but it does not go over seam edges.
+ inline CornerIndex SwingLeft(CornerIndex corner) const {
+ return Next(Opposite(Next(corner)));
+ }
+
+ int num_vertices() const {
+ return static_cast<int>(vertex_to_attribute_entry_id_map_.size());
+ }
+ int num_faces() const { return static_cast<int>(corner_table_->num_faces()); }
+ int num_corners() const { return corner_table_->num_corners(); }
+
+ VertexIndex Vertex(CornerIndex corner) const {
+ DRACO_DCHECK_LT(corner.value(), corner_to_vertex_map_.size());
+ return ConfidentVertex(corner);
+ }
+ VertexIndex ConfidentVertex(CornerIndex corner) const {
+ return corner_to_vertex_map_[corner.value()];
+ }
+ // Returns the attribute entry id associated to the given vertex.
+ VertexIndex VertexParent(VertexIndex vert) const {
+ return VertexIndex(vertex_to_attribute_entry_id_map_[vert.value()].value());
+ }
+
+ inline CornerIndex LeftMostCorner(VertexIndex v) const {
+ return vertex_to_left_most_corner_map_[v.value()];
+ }
+
+ inline FaceIndex Face(CornerIndex corner) const {
+ return corner_table_->Face(corner);
+ }
+
+ inline CornerIndex FirstCorner(FaceIndex face) const {
+ return corner_table_->FirstCorner(face);
+ }
+
+ inline std::array<CornerIndex, 3> AllCorners(FaceIndex face) const {
+ return corner_table_->AllCorners(face);
+ }
+
+ inline bool IsOnBoundary(VertexIndex vert) const {
+ const CornerIndex corner = LeftMostCorner(vert);
+ if (corner == kInvalidCornerIndex) {
+ return true;
+ }
+ if (SwingLeft(corner) == kInvalidCornerIndex) {
+ return true;
+ }
+ return false;
+ }
+
+ bool no_interior_seams() const { return no_interior_seams_; }
+ const CornerTable *corner_table() const { return corner_table_; }
+
+ // TODO(draco-eng): extract valence functions into a reusable class/object
+ // also from 'corner_table.*'
+
+ // Returns the valence (or degree) of a vertex.
+ // Returns -1 if the given vertex index is not valid.
+ int Valence(VertexIndex v) const;
+ // Same as above but does not check for validity and does not return -1
+ int ConfidentValence(VertexIndex v) const;
+ // Returns the valence of the vertex at the given corner.
+ inline int Valence(CornerIndex c) const {
+ DRACO_DCHECK_LT(c.value(), corner_table_->num_corners());
+ if (c == kInvalidCornerIndex) {
+ return -1;
+ }
+ return ConfidentValence(c);
+ }
+ inline int ConfidentValence(CornerIndex c) const {
+ DRACO_DCHECK_LT(c.value(), corner_table_->num_corners());
+ return ConfidentValence(Vertex(c));
+ }
+
+ // Allows access to an internal object for caching valences. The object can
+ // be instructed to cache or uncache all valences and then its interfaces
+ // queried directly for valences with differing performance/confidence
+ // qualities. If the mesh or table is modified the cache should be discarded
+ // and not relied on as it does not automatically update or invalidate for
+ // performance reasons.
+ const ValenceCache<MeshAttributeCornerTable> &GetValenceCache() const {
+ return valence_cache_;
+ }
+
+ private:
+ template <bool init_vertex_to_attribute_entry_map>
+ void RecomputeVerticesInternal(const Mesh *mesh, const PointAttribute *att);
+
+ std::vector<bool> is_edge_on_seam_;
+ std::vector<bool> is_vertex_on_seam_;
+
+ // If this is set to true, it means that there are no attribute seams between
+ // two faces. This can be used to speed up some algorithms.
+ bool no_interior_seams_;
+
+ std::vector<VertexIndex> corner_to_vertex_map_;
+
+ // Map between vertices and their associated left most corners. A left most
+ // corner is a corner that is adjacent to a boundary or an attribute seam from
+ // right (i.e., SwingLeft from that corner will return an invalid corner). If
+ // no such corner exists for a given vertex, then any corner attached to the
+ // vertex can be used.
+ std::vector<CornerIndex> vertex_to_left_most_corner_map_;
+
+ // Map between vertex ids and attribute entry ids (i.e. the values stored in
+ // the attribute buffer). The attribute entry id can be retrieved using the
+ // VertexParent() method.
+ std::vector<AttributeValueIndex> vertex_to_attribute_entry_id_map_;
+ const CornerTable *corner_table_;
+ ValenceCache<MeshAttributeCornerTable> valence_cache_;
+};
+
+} // namespace draco
+#endif // DRACO_MESH_MESH_ATTRIBUTE_CORNER_TABLE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.cc
new file mode 100644
index 0000000..75b55f0
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.cc
@@ -0,0 +1,251 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_cleanup.h"
+
+#include <unordered_set>
+
+#include "draco/core/hash_utils.h"
+
+namespace draco {
+
+// Performs in-place cleanup of |mesh| according to |options|. Returns false
+// when the mesh has no POSITION attribute (every cleanup step keys off
+// position indices); returns true otherwise, including the no-op case.
+bool MeshCleanup::operator()(Mesh *mesh, const MeshCleanupOptions &options) {
+  if (!options.remove_degenerated_faces && !options.remove_unused_attributes &&
+      !options.remove_duplicate_faces && !options.make_geometry_manifold) {
+    return true;  // Nothing to cleanup.
+  }
+  const PointAttribute *const pos_att =
+      mesh->GetNamedAttribute(GeometryAttribute::POSITION);
+  if (pos_att == nullptr) {
+    return false;
+  }
+
+  if (options.remove_degenerated_faces) {
+    RemoveDegeneratedFaces(mesh);
+  }
+
+  if (options.remove_duplicate_faces) {
+    RemoveDuplicateFaces(mesh);
+  }
+
+  if (options.remove_unused_attributes) {
+    RemoveUnusedAttributes(mesh);
+  }
+
+  // NOTE(review): |options.make_geometry_manifold| is consulted in the
+  // early-out above but never acted upon here (MakeGeometryManifold below is
+  // unimplemented) -- confirm this is intentional for this bundled version.
+  return true;
+}
+
+// Removes all faces where two or more corners map to the same position
+// attribute value (zero-area "degenerated" faces). Surviving faces are
+// compacted in place towards the front of the face array, preserving their
+// relative order. Assumes a POSITION attribute exists (checked by the caller,
+// operator()).
+void MeshCleanup::RemoveDegeneratedFaces(Mesh *mesh) {
+  const PointAttribute *const pos_att =
+      mesh->GetNamedAttribute(GeometryAttribute::POSITION);
+  FaceIndex::ValueType num_degenerated_faces = 0;
+  // Array for storing position indices on a face.
+  std::array<AttributeValueIndex, 3> pos_indices;
+  for (FaceIndex f(0); f < mesh->num_faces(); ++f) {
+    const Mesh::Face &face = mesh->face(f);
+    for (int p = 0; p < 3; ++p) {
+      pos_indices[p] = pos_att->mapped_index(face[p]);
+    }
+    // A face is degenerated when any two of its position indices coincide.
+    if (pos_indices[0] == pos_indices[1] || pos_indices[0] == pos_indices[2] ||
+        pos_indices[1] == pos_indices[2]) {
+      ++num_degenerated_faces;
+    } else if (num_degenerated_faces > 0) {
+      // Copy the face to its new location (shifted left past the removed
+      // faces).
+      mesh->SetFace(f - num_degenerated_faces, face);
+    }
+  }
+  if (num_degenerated_faces > 0) {
+    // Trim the face array down to the surviving faces.
+    mesh->SetNumFaces(mesh->num_faces() - num_degenerated_faces);
+  }
+}
+
+// Removes faces whose position-index triplet was already seen on an earlier
+// face. Triplets are canonicalized by cyclic rotation (smallest index first)
+// before being hashed, so rotated copies of a face count as duplicates while
+// reversed (opposite-winding) faces do not. Surviving faces are compacted to
+// the front of the face array, preserving their relative order.
+void MeshCleanup::RemoveDuplicateFaces(Mesh *mesh) {
+  const PointAttribute *const pos_att =
+      mesh->GetNamedAttribute(GeometryAttribute::POSITION);
+
+  typedef std::array<AttributeValueIndex::ValueType, 3> PosTriplet;
+  PosTriplet pos_indices;
+  // Set of canonicalized triplets seen so far.
+  std::unordered_set<PosTriplet, HashArray<PosTriplet>> is_face_used;
+
+  uint32_t num_duplicate_faces = 0;
+  for (FaceIndex fi(0); fi < mesh->num_faces(); ++fi) {
+    const auto f = mesh->face(fi);
+    for (int c = 0; c < 3; ++c) {
+      pos_indices[c] = pos_att->mapped_index(f[c]).value();
+    }
+    // Shift the position indices until the smallest index is the first one.
+    while (pos_indices[0] > pos_indices[1] || pos_indices[0] > pos_indices[2]) {
+      // Shift to the left (cyclic rotation, winding preserved).
+      std::swap(pos_indices[0], pos_indices[1]);
+      std::swap(pos_indices[1], pos_indices[2]);
+    }
+    // Check if we have encountered the same position triplet on a different
+    // face.
+    if (is_face_used.find(pos_indices) != is_face_used.end()) {
+      // Duplicate face. Ignore it.
+      num_duplicate_faces++;
+    } else {
+      // Insert new face to the set.
+      is_face_used.insert(pos_indices);
+      if (num_duplicate_faces > 0) {
+        // Copy the face to its new location.
+        mesh->SetFace(fi - num_duplicate_faces, f);
+      }
+    }
+  }
+  if (num_duplicate_faces > 0) {
+    // Trim the face array down to the unique faces.
+    mesh->SetNumFaces(mesh->num_faces() - num_duplicate_faces);
+  }
+}
+
+// Removes point ids that no face references, then prunes, for every
+// attribute, the attribute values that no surviving point maps to, and
+// finally rebuilds the point-to-attribute-value mappings so they stay
+// consistent with the compacted points and value buffers.
+void MeshCleanup::RemoveUnusedAttributes(Mesh *mesh) {
+  // Array that is going to store whether a corresponding point is used.
+  std::vector<bool> is_point_used;
+  PointIndex::ValueType num_new_points = 0;
+  is_point_used.resize(mesh->num_points(), false);
+  // A point is "used" when at least one face corner references it.
+  for (FaceIndex f(0); f < mesh->num_faces(); ++f) {
+    const Mesh::Face &face = mesh->face(f);
+    for (int p = 0; p < 3; ++p) {
+      if (!is_point_used[face[p].value()]) {
+        is_point_used[face[p].value()] = true;
+        ++num_new_points;
+      }
+    }
+  }
+
+  bool points_changed = false;
+  const PointIndex::ValueType num_original_points = mesh->num_points();
+  // Map from old points to the new ones.
+  IndexTypeVector<PointIndex, PointIndex> point_map(num_original_points);
+  if (num_new_points < static_cast<int>(mesh->num_points())) {
+    // Some of the points were removed. We need to remap the old points to the
+    // new ones.
+    num_new_points = 0;
+    for (PointIndex i(0); i < num_original_points; ++i) {
+      if (is_point_used[i.value()]) {
+        point_map[i] = num_new_points++;
+      } else {
+        point_map[i] = kInvalidPointIndex;
+      }
+    }
+    // Go over faces and update their points.
+    for (FaceIndex f(0); f < mesh->num_faces(); ++f) {
+      Mesh::Face face = mesh->face(f);
+      for (int p = 0; p < 3; ++p) {
+        face[p] = point_map[face[p]];
+      }
+      mesh->SetFace(f, face);
+    }
+    // Set the new number of points.
+    mesh->set_num_points(num_new_points);
+    points_changed = true;
+  } else {
+    // No points were removed. Initialize identity map between the old and new
+    // points.
+    for (PointIndex i(0); i < num_original_points; ++i) {
+      point_map[i] = i;
+    }
+  }
+
+  // Update index mapping for attributes.
+  IndexTypeVector<AttributeValueIndex, uint8_t> is_att_index_used;
+  IndexTypeVector<AttributeValueIndex, AttributeValueIndex> att_index_map;
+  for (int a = 0; a < mesh->num_attributes(); ++a) {
+    PointAttribute *const att = mesh->attribute(a);
+    // First detect which attribute entries are used (included in a point).
+    is_att_index_used.assign(att->size(), 0);
+    att_index_map.clear();
+    AttributeValueIndex::ValueType num_used_entries = 0;
+    for (PointIndex i(0); i < num_original_points; ++i) {
+      if (point_map[i] != kInvalidPointIndex) {
+        const AttributeValueIndex entry_id = att->mapped_index(i);
+        if (!is_att_index_used[entry_id]) {
+          is_att_index_used[entry_id] = 1;
+          ++num_used_entries;
+        }
+      }
+    }
+    bool att_indices_changed = false;
+    // If there are some unused attribute entries, remap the attribute values
+    // in the attribute buffer.
+    if (num_used_entries < static_cast<int>(att->size())) {
+      att_index_map.resize(att->size());
+      num_used_entries = 0;
+      for (AttributeValueIndex i(0); i < static_cast<uint32_t>(att->size());
+           ++i) {
+        if (is_att_index_used[i]) {
+          att_index_map[i] = num_used_entries;
+          if (i > num_used_entries) {
+            // Compact the used entry towards the front of the value buffer.
+            // The source entry is always ahead of the destination, so this
+            // in-place copy cannot overwrite data that is still needed.
+            const uint8_t *const src_add = att->GetAddress(i);
+            att->buffer()->Write(
+                att->GetBytePos(AttributeValueIndex(num_used_entries)), src_add,
+                att->byte_stride());
+          }
+          ++num_used_entries;
+        }
+      }
+      // Update the number of unique entries in the vertex buffer.
+      att->Resize(num_used_entries);
+      att_indices_changed = true;
+    }
+    // If either the points or attribute indices have changed, we need to
+    // update the attribute index mapping.
+    if (points_changed || att_indices_changed) {
+      if (att->is_mapping_identity()) {
+        // The mapping was identity. It'll remain identity only if the
+        // number of point and attribute indices is still the same.
+        if (num_used_entries != static_cast<int>(mesh->num_points())) {
+          // We need to create an explicit mapping.
+          // First we need to initialize the explicit map to the original
+          // number of points to recreate the original identity map.
+          att->SetExplicitMapping(num_original_points);
+          // Set the entries of the explicit map to identity.
+          for (PointIndex::ValueType i = 0; i < num_original_points; ++i) {
+            att->SetPointMapEntry(PointIndex(i), AttributeValueIndex(i));
+          }
+        }
+      }
+      if (!att->is_mapping_identity()) {
+        // Explicit mapping between points and local attribute indices.
+        for (PointIndex i(0); i < num_original_points; ++i) {
+          // The new point id that maps to the currently processed attribute
+          // entry.
+          const PointIndex new_point_id = point_map[i];
+          if (new_point_id == kInvalidPointIndex) {
+            continue;
+          }
+          // Index of the currently processed attribute entry in the original
+          // mesh.
+          const AttributeValueIndex original_entry_index = att->mapped_index(i);
+          // New index of the same entry after unused entries were removed.
+          const AttributeValueIndex new_entry_index =
+              att_indices_changed ? att_index_map[original_entry_index]
+                                  : original_entry_index;
+
+          // Update the mapping. Note that the new point index is always smaller
+          // than the processed index |i|, making this operation safe.
+          att->SetPointMapEntry(new_point_id, new_entry_index);
+        }
+        // If the number of points changed, we need to set a new explicit map
+        // size.
+        att->SetExplicitMapping(mesh->num_points());
+      }
+    }
+  }
+}
+
+// Vertex splitting along non-manifold edges/vertices is not implemented in
+// this version of the library; the call always fails with an error status and
+// |mesh| is intentionally left untouched.
+Status MeshCleanup::MakeGeometryManifold(Mesh *mesh) {
+  return Status(Status::DRACO_ERROR, "Unsupported function.");
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.h b/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.h
new file mode 100644
index 0000000..09aae2e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup.h
@@ -0,0 +1,65 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_MESH_CLEANUP_H_
+#define DRACO_MESH_MESH_CLEANUP_H_
+
+#include "draco/core/status.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Options used by the MeshCleanup class. All removal passes are enabled by
+// default; manifold splitting is opt-in (and unsupported in this version).
+struct MeshCleanupOptions {
+  MeshCleanupOptions()
+      : remove_degenerated_faces(true),
+        remove_duplicate_faces(true),
+        remove_unused_attributes(true),
+        make_geometry_manifold(false) {}
+  // If true, the cleanup tool removes any face where two or more vertices
+  // share the same position index.
+  bool remove_degenerated_faces;
+
+  // If true, the cleanup tool removes all duplicate faces. A pair of faces is
+  // duplicate if both faces share the same position indices on all vertices
+  // (that is, position values have to be deduplicated). Note that all
+  // non-position properties are currently ignored.
+  bool remove_duplicate_faces;
+
+  // If true, the cleanup tool removes any unused attribute value or unused
+  // point id. For example, it can be used to remove isolated vertices.
+  bool remove_unused_attributes;
+
+  // If true, the cleanup tool splits vertices along non-manifold edges and
+  // vertices. This ensures that the connectivity defined by position indices
+  // is manifold.
+  bool make_geometry_manifold;
+};
+
+// Tool that can be used for removing bad or unused data from draco::Meshes.
+class MeshCleanup {
+ public:
+  // Performs in-place cleanup of the input mesh according to the input options.
+  // Returns false when |mesh| has no POSITION attribute.
+  bool operator()(Mesh *mesh, const MeshCleanupOptions &options);
+
+ private:
+  // Removes faces with two or more identical position indices.
+  static void RemoveDegeneratedFaces(Mesh *mesh);
+  // Removes faces whose position-index triplet repeats an earlier face.
+  static void RemoveDuplicateFaces(Mesh *mesh);
+  // Removes attribute values and point ids that no face references.
+  static void RemoveUnusedAttributes(Mesh *mesh);
+  // Unimplemented in this version; always returns an error status.
+  static Status MakeGeometryManifold(Mesh *mesh);
+};
+
+} // namespace draco
+
+#endif // DRACO_MESH_MESH_CLEANUP_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup_test.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup_test.cc
new file mode 100644
index 0000000..89c350e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_cleanup_test.cc
@@ -0,0 +1,192 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_cleanup.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/vector_d.h"
+#include "draco/mesh/triangle_soup_mesh_builder.h"
+
+namespace draco {
+
+// Empty fixture; the tests share no state and build their meshes locally.
+class MeshCleanupTest : public ::testing::Test {};
+
+TEST_F(MeshCleanupTest, TestDegneratedFaces) {
+  // This test verifies that the mesh cleanup tools removes degenerated faces.
+  TriangleSoupMeshBuilder mb;
+  mb.Start(2);
+  const int pos_att_id =
+      mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+  // Face 1 is degenerated: position (1.f, 0.f, 0.f) appears on two of its
+  // vertices.
+  // clang-format off
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 0.f).data());
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
+                               Vector3f(0.f, 1.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data());
+  // clang-format on
+
+  std::unique_ptr<Mesh> mesh = mb.Finalize();
+  ASSERT_NE(mesh, nullptr) << "Failed to build the test mesh.";
+  ASSERT_EQ(mesh->num_faces(), 2) << "Wrong number of faces in the input mesh.";
+  MeshCleanupOptions cleanup_options;
+  MeshCleanup cleanup;
+  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options))
+      << "Failed to cleanup the mesh.";
+  ASSERT_EQ(mesh->num_faces(), 1) << "Failed to remove degenerated faces.";
+}
+
+TEST_F(MeshCleanupTest, TestDegneratedFacesAndIsolatedVertices) {
+  // This test verifies that the mesh cleanup tools removes degenerated faces
+  // and isolated vertices.
+  TriangleSoupMeshBuilder mb;
+  mb.Start(2);
+  const int pos_att_id =
+      mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+
+  // Dummy integer attribute for which we do not expect the number of entries
+  // to change after the degenerate face and isolated vertex are removed.
+  const int int_att_id =
+      mb.AddAttribute(GeometryAttribute::GENERIC, 2, DT_INT32);
+
+  // clang-format off
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 0.f).data());
+  mb.SetAttributeValuesForFace(int_att_id, FaceIndex(0),
+                               VectorD<int32_t, 2>(0, 0).data(),
+                               VectorD<int32_t, 2>(0, 1).data(),
+                               VectorD<int32_t, 2>(0, 2).data());
+
+  // Face 1 is degenerated (position (10.f, 1.f, 0.f) appears twice); removing
+  // it leaves that position value and its point unused.
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
+                               Vector3f(10.f, 1.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(10.f, 1.f, 0.f).data());
+  mb.SetAttributeValuesForFace(int_att_id, FaceIndex(1),
+                               VectorD<int32_t, 2>(0, 0).data(),
+                               VectorD<int32_t, 2>(0, 1).data(),
+                               VectorD<int32_t, 2>(0, 2).data());
+  // clang-format on
+
+  std::unique_ptr<Mesh> mesh = mb.Finalize();
+  ASSERT_NE(mesh, nullptr) << "Failed to build the test mesh.";
+  ASSERT_EQ(mesh->num_faces(), 2) << "Wrong number of faces in the input mesh.";
+  ASSERT_EQ(mesh->num_points(), 5)
+      << "Wrong number of point ids in the input mesh.";
+  ASSERT_EQ(mesh->attribute(int_att_id)->size(), 3);
+  const MeshCleanupOptions cleanup_options;
+  MeshCleanup cleanup;
+  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options))
+      << "Failed to cleanup the mesh.";
+  ASSERT_EQ(mesh->num_faces(), 1) << "Failed to remove degenerated faces.";
+  ASSERT_EQ(mesh->num_points(), 3)
+      << "Failed to remove isolated attribute indices.";
+  // The generic attribute values were all referenced by face 0, so none of
+  // them should have been pruned.
+  ASSERT_EQ(mesh->attribute(int_att_id)->size(), 3);
+}
+
+TEST_F(MeshCleanupTest, TestAttributes) {
+  // Verifies that the cleanup also prunes unused entries of a generic
+  // (non-position) attribute when its referencing face is removed.
+  TriangleSoupMeshBuilder mb;
+  mb.Start(2);
+  const int pos_att_id =
+      mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+  const int generic_att_id =
+      mb.AddAttribute(GeometryAttribute::GENERIC, 2, DT_FLOAT32);
+  // clang-format off
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 0.f).data());
+  mb.SetAttributeValuesForFace(generic_att_id, FaceIndex(0),
+                               Vector2f(0.f, 0.f).data(),
+                               Vector2f(0.f, 0.f).data(),
+                               Vector2f(0.f, 0.f).data());
+
+  // Face 1 is degenerated; its generic value (1.f, 0.f) becomes unused once
+  // the face is removed.
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
+                               Vector3f(10.f, 1.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(10.f, 1.f, 0.f).data());
+  mb.SetAttributeValuesForFace(generic_att_id, FaceIndex(1),
+                               Vector2f(1.f, 0.f).data(),
+                               Vector2f(1.f, 0.f).data(),
+                               Vector2f(1.f, 0.f).data());
+  // clang-format on
+
+  std::unique_ptr<Mesh> mesh = mb.Finalize();
+  ASSERT_NE(mesh, nullptr) << "Failed to build the test mesh.";
+  ASSERT_EQ(mesh->num_faces(), 2) << "Wrong number of faces in the input mesh.";
+  ASSERT_EQ(mesh->num_points(), 5)
+      << "Wrong number of point ids in the input mesh.";
+  ASSERT_EQ(mesh->attribute(1)->size(), 2u)
+      << "Wrong number of generic attribute entries.";
+  const MeshCleanupOptions cleanup_options;
+  MeshCleanup cleanup;
+  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options))
+      << "Failed to cleanup the mesh.";
+  ASSERT_EQ(mesh->num_faces(), 1) << "Failed to remove degenerated faces.";
+  ASSERT_EQ(mesh->num_points(), 3)
+      << "Failed to remove isolated attribute indices.";
+  ASSERT_EQ(mesh->attribute(0)->size(), 3u)
+      << "Wrong number of unique positions after cleanup.";
+  ASSERT_EQ(mesh->attribute(1)->size(), 1u)
+      << "Wrong number of generic attribute entries after cleanup.";
+}
+
+TEST_F(MeshCleanupTest, TestDuplicateFaces) {
+  TriangleSoupMeshBuilder mb;
+  mb.Start(5);
+  const int pos_att_id =
+      mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+
+  // Five faces where only two are unique.
+  // Face 1 repeats face 0 exactly, face 3 is a cyclic rotation of face 0
+  // (same winding, so it still counts as a duplicate), and face 4 repeats
+  // face 2 exactly.
+
+  // clang-format off
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 0.f).data());
+
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 0.f).data());
+
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(2),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 1.f).data());
+
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(3),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 0.f).data(),
+                               Vector3f(0.f, 0.f, 0.f).data());
+
+  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(4),
+                               Vector3f(0.f, 0.f, 0.f).data(),
+                               Vector3f(1.f, 0.f, 0.f).data(),
+                               Vector3f(0.f, 1.f, 1.f).data());
+  // clang-format on
+
+  std::unique_ptr<Mesh> mesh = mb.Finalize();
+  ASSERT_NE(mesh, nullptr);
+  ASSERT_EQ(mesh->num_faces(), 5);
+  const MeshCleanupOptions cleanup_options;
+  MeshCleanup cleanup;
+  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options));
+  ASSERT_EQ(mesh->num_faces(), 2);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.cc
new file mode 100644
index 0000000..4485b33
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.cc
@@ -0,0 +1,63 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_misc_functions.h"
+
+namespace draco {
+
+// Convenience wrapper that builds the corner table from the POSITION
+// attribute of |mesh|. Returns nullptr when the attribute is missing.
+std::unique_ptr<CornerTable> CreateCornerTableFromPositionAttribute(
+    const Mesh *mesh) {
+  return CreateCornerTableFromAttribute(mesh, GeometryAttribute::POSITION);
+}
+
+// Builds a CornerTable whose vertex indices are the attribute value indices
+// of the first |type| attribute of |mesh| (so vertices shared through the
+// attribute mapping are merged). Returns nullptr when the mesh has no such
+// attribute.
+std::unique_ptr<CornerTable> CreateCornerTableFromAttribute(
+    const Mesh *mesh, GeometryAttribute::Type type) {
+  typedef CornerTable::FaceType FaceType;
+
+  const PointAttribute *const att = mesh->GetNamedAttribute(type);
+  if (att == nullptr) {
+    return nullptr;
+  }
+  IndexTypeVector<FaceIndex, FaceType> faces(mesh->num_faces());
+  FaceType new_face;
+  for (FaceIndex i(0); i < mesh->num_faces(); ++i) {
+    const Mesh::Face &face = mesh->face(i);
+    for (int j = 0; j < 3; ++j) {
+      // Map general vertex indices to attribute indices.
+      new_face[j] = att->mapped_index(face[j]).value();
+    }
+    faces[FaceIndex(i)] = new_face;
+  }
+  // Build the corner table.
+  return CornerTable::Create(faces);
+}
+
+// Builds a CornerTable directly over point indices. Because points are unique
+// per attribute-value combination, this introduces a boundary on every
+// attribute seam.
+std::unique_ptr<CornerTable> CreateCornerTableFromAllAttributes(
+    const Mesh *mesh) {
+  typedef CornerTable::FaceType FaceType;
+  IndexTypeVector<FaceIndex, FaceType> faces(mesh->num_faces());
+  FaceType new_face;
+  for (FaceIndex i(0); i < mesh->num_faces(); ++i) {
+    const Mesh::Face &face = mesh->face(i);
+    // Each face is identified by point indices that automatically split the
+    // mesh along attribute seams.
+    for (int j = 0; j < 3; ++j) {
+      new_face[j] = face[j].value();
+    }
+    faces[i] = new_face;
+  }
+  // Build the corner table.
+  return CornerTable::Create(faces);
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.h b/libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.h
new file mode 100644
index 0000000..b450bc8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_misc_functions.h
@@ -0,0 +1,98 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file contains misc functions that are needed by several mesh related
+// algorithms.
+
+#ifndef DRACO_MESH_MESH_MISC_FUNCTIONS_H_
+#define DRACO_MESH_MESH_MISC_FUNCTIONS_H_
+
+#include "draco/mesh/corner_table.h"
+#include "draco/mesh/mesh.h"
+
+// The file contains functions that use both Mesh and CornerTable as inputs.
+namespace draco {
+
+// Creates a CornerTable from the position attribute of |mesh|. Returns nullptr
+// on error.
+std::unique_ptr<CornerTable> CreateCornerTableFromPositionAttribute(
+ const Mesh *mesh);
+
+// Creates a CornerTable from the first named attribute of |mesh| with a given
+// type. Returns nullptr on error.
+std::unique_ptr<CornerTable> CreateCornerTableFromAttribute(
+ const Mesh *mesh, GeometryAttribute::Type type);
+
+// Creates a CornerTable from all attributes of |mesh|. Boundaries are
+// automatically introduced on all attribute seams. Returns nullptr on error.
+std::unique_ptr<CornerTable> CreateCornerTableFromAllAttributes(
+ const Mesh *mesh);
+
+// Returns true when the given corner lies opposite to an attribute seam,
+// i.e., when the attribute value indices across the edge opposite to |ci|
+// differ between its two adjacent faces. Boundary edges (no opposite corner)
+// are not reported as seams.
+inline bool IsCornerOppositeToAttributeSeam(CornerIndex ci,
+                                            const PointAttribute &att,
+                                            const Mesh &mesh,
+                                            const CornerTable &ct) {
+  const CornerIndex opp_ci = ct.Opposite(ci);
+  if (opp_ci == kInvalidCornerIndex) {
+    return false;  // No opposite corner == no attribute seam.
+  }
+  // Compare attribute value indices on both ends of the opposite edge.
+  CornerIndex c0 = ct.Next(ci);
+  CornerIndex c1 = ct.Previous(opp_ci);
+  if (att.mapped_index(mesh.CornerToPointId(c0)) !=
+      att.mapped_index(mesh.CornerToPointId(c1))) {
+    return true;
+  }
+  // Check the second endpoint of the shared edge.
+  c0 = ct.Previous(ci);
+  c1 = ct.Next(opp_ci);
+  if (att.mapped_index(mesh.CornerToPointId(c0)) !=
+      att.mapped_index(mesh.CornerToPointId(c1))) {
+    return true;
+  }
+  return false;
+}
+
+// Interpolates an attribute value on a face using given barycentric
+// coordinates. InterpolatedVectorT should be a VectorD that corresponds to the
+// values stored in the attribute. For integral scalar types the interpolated
+// component is rounded to nearest via floor(x + 0.5f).
+// NOTE(review): assumes the attribute stores at least
+// InterpolatedVectorT::dimension components per value -- confirm at call
+// sites.
+// TODO(ostava): Find a better place for this.
+template <typename InterpolatedVectorT>
+InterpolatedVectorT ComputeInterpolatedAttributeValueOnMeshFace(
+    const Mesh &mesh, const PointAttribute &attribute, FaceIndex fi,
+    const std::array<float, 3> &barycentric_coord) {
+  const Mesh::Face &face = mesh.face(fi);
+  // Get values for all three corners of the face.
+  InterpolatedVectorT val[3];
+  for (int c = 0; c < 3; ++c) {
+    attribute.GetMappedValue(face[c], &(val[c][0]));
+  }
+  // Return an interpolated value.
+  InterpolatedVectorT res;
+  for (int d = 0; d < InterpolatedVectorT::dimension; ++d) {
+    const float interpolated_component = barycentric_coord[0] * val[0][d] +
+                                         barycentric_coord[1] * val[1][d] +
+                                         barycentric_coord[2] * val[2][d];
+    // The condition is a compile-time type trait, so only one branch is ever
+    // taken for a given instantiation.
+    if (std::is_integral<typename InterpolatedVectorT::Scalar>::value) {
+      res[d] = std::floor(interpolated_component + 0.5f);
+    } else {
+      res[d] = interpolated_component;
+    }
+  }
+  return res;
+}
+
+} // namespace draco
+
+#endif // DRACO_MESH_MESH_MISC_FUNCTIONS_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.cc b/libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.cc
new file mode 100644
index 0000000..f68062e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/mesh_stripifier.h"
+
+namespace draco {
+
+// Grows the candidate triangle strip that covers the face of corner |ci| and
+// stores its faces in strip_faces_[local_strip_id] and its (possibly
+// relocated) start corner in strip_start_corners_[local_strip_id]. The strip
+// is extended forward from |ci| and then, when no boundary or attribute seam
+// blocks it, backward from the start corner. All visited flags are reset
+// before returning so other candidate strips can be measured from the same
+// face.
+void MeshStripifier::GenerateStripsFromCorner(int local_strip_id,
+                                              CornerIndex ci) {
+  // Clear the storage for strip faces.
+  strip_faces_[local_strip_id].clear();
+  // Start corner of the strip (where the strip starts).
+  CornerIndex start_ci = ci;
+  FaceIndex fi = corner_table_->Face(ci);
+  // We need to grow the strip both forward and backward (2 passes).
+  // Note that the backward pass can change the start corner of the strip (the
+  // start corner is going to be moved to the end of the backward strip).
+  for (int pass = 0; pass < 2; ++pass) {
+    if (pass == 1) {
+      // Backward pass.
+      // Start the traversal from the B that is the left sibling of the next
+      // corner to the start corner C = |start_ci|.
+      //
+      //     *-------*-------*-------*
+      //    / \     / \C    / \     /
+      //   /   \   /   \   /   \   /
+      //  /     \ /    B\ /     \ /
+      // *-------*-------*-------*
+      //
+      // Perform the backward pass only when there is no attribute seam between
+      // the initial face and the first face of the backward traversal.
+      if (GetOppositeCorner(corner_table_->Previous(start_ci)) ==
+          kInvalidCornerIndex) {
+        break;  // Attribute seam or a boundary.
+      }
+
+      ci = corner_table_->Next(start_ci);
+      ci = corner_table_->SwingLeft(ci);
+      if (ci == kInvalidCornerIndex) {
+        break;
+      }
+
+      fi = corner_table_->Face(ci);
+    }
+    int num_added_faces = 0;
+    // Keep adding unvisited faces reachable across opposite corners.
+    while (!is_face_visited_[fi]) {
+      is_face_visited_[fi] = true;
+      strip_faces_[local_strip_id].push_back(fi);
+      ++num_added_faces;
+      if (num_added_faces > 1) {
+        // Move to the correct source corner to traverse to the next face.
+        if (num_added_faces & 1) {
+          // Odd number of faces added.
+          ci = corner_table_->Next(ci);
+        } else {
+          // Even number of faces added.
+          if (pass == 1) {
+            // If we are processing the backward pass, update the start corner
+            // of the strip on every even face reached (we cannot use odd faces
+            // for start of the strip as the strips would start in a wrong
+            // direction).
+            start_ci = ci;
+          }
+          ci = corner_table_->Previous(ci);
+        }
+      }
+      ci = GetOppositeCorner(ci);
+      if (ci == kInvalidCornerIndex) {
+        break;
+      }
+      fi = corner_table_->Face(ci);
+    }
+    // Strip end reached.
+    if (pass == 1 && (num_added_faces & 1)) {
+      // If we processed the backward strip and we add an odd number of faces to
+      // the strip, we need to remove the last one as it cannot be used to start
+      // the strip (the strip would start in a wrong direction from that face).
+      is_face_visited_[strip_faces_[local_strip_id].back()] = false;
+      strip_faces_[local_strip_id].pop_back();
+    }
+  }
+  strip_start_corners_[local_strip_id] = start_ci;
+
+  // Reset all visited flags for all faces (we need to process other strips from
+  // the given face before we choose the final strip that we are going to use).
+  // NOTE(review): |i| is a signed int compared against the unsigned size()
+  // below (-Wsign-compare warning); harmless for realistic strip lengths.
+  for (int i = 0; i < strip_faces_[local_strip_id].size(); ++i) {
+    is_face_visited_[strip_faces_[local_strip_id][i]] = false;
+  }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.h b/libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.h
new file mode 100644
index 0000000..262e3c7
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/mesh_stripifier.h
@@ -0,0 +1,260 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_MESH_STRIPIFIER_H_
+#define DRACO_MESH_MESH_STRIPIFIER_H_
+
+#include "draco/mesh/mesh_misc_functions.h"
+
+namespace draco {
+
+// Class that generates triangle strips from a provided draco::Mesh data
+// structure. The strips represent a more memory efficient storage of triangle
+// connectivity that can be used directly on the GPU (see
+// https://en.wikipedia.org/wiki/Triangle_strip ). In general, a mesh needs to
+// be represented by several triangle strips and it has been proven that finding
+// the optimal set of triangle strips is an NP-complete problem. The algorithm
+// implemented by this class finds this set of triangle strips based on a greedy
+// heuristic that always selects the longest available strip that covers the
+// next unprocessed face. The longest strip is found by analyzing all strips
+// that can cover the given face (three strips corresponding to three
+// directions).
+class MeshStripifier {
+ public:
+ MeshStripifier()
+ : mesh_(nullptr),
+ num_strips_(0),
+ num_encoded_faces_(0),
+ last_encoded_point_(kInvalidPointIndex) {}
+
+ // Generate triangle strips for a given mesh and output them to the output
+ // iterator |out_it|. In most cases |out_it| stores the values in a buffer
+ // that can be used directly on the GPU. Note that the algorithm can generate
+ // multiple strips to represent the whole mesh. In such cases multiple strips
+ // are separated using a so called primitive restart index that is specified
+ // by the |primitive_restart_index| (usually defined as the maximum allowed
+ // value for the given type).
+ // https://www.khronos.org/opengl/wiki/Vertex_Rendering#Primitive_Restart
+ template <typename OutputIteratorT, typename IndexTypeT>
+ bool GenerateTriangleStripsWithPrimitiveRestart(
+ const Mesh &mesh, IndexTypeT primitive_restart_index,
+ OutputIteratorT out_it);
+
+ // The same as above but disjoint triangle strips are separated by degenerate
+ // triangles instead of the primitive restart index. Degenerate triangles are
+ // zero area triangles that are automatically discarded by the GPU. Using
+ // degenerate triangles usually results in a slightly longer output indices
+ // array compared to the similar triangle strips that use primitive restart
+ // index. The advantage of this method is that it is supported by all hardware
+ // and all relevant APIs (including WebGL 1.0).
+ template <typename OutputIteratorT>
+ bool GenerateTriangleStripsWithDegenerateTriangles(const Mesh &mesh,
+ OutputIteratorT out_it);
+
+ // Returns the number of strips generated by the last call of the
+ // GenerateTriangleStrips() method.
+ int num_strips() const { return num_strips_; }
+
+ private:
+ bool Prepare(const Mesh &mesh) {
+ mesh_ = &mesh;
+ num_strips_ = 0;
+ num_encoded_faces_ = 0;
+ // TODO(ostava): We may be able to avoid computing the corner table if we
+ // already have it stored somewhere.
+ corner_table_ = CreateCornerTableFromPositionAttribute(mesh_);
+ if (corner_table_ == nullptr) {
+ return false;
+ }
+
+ // Mark all faces as unvisited.
+ is_face_visited_.assign(mesh.num_faces(), false);
+ return true;
+ }
+
+ // Returns local id of the longest strip that can be created from the given
+ // face |fi|.
+ int FindLongestStripFromFace(FaceIndex fi) {
+ // There are three possible strip directions that can contain the provided
+ // input face. We try all of them and select the direction that result in
+ // the longest strip.
+ const CornerIndex first_ci = corner_table_->FirstCorner(fi);
+ int longest_strip_id = -1;
+ int longest_strip_length = 0;
+ for (int i = 0; i < 3; ++i) {
+ GenerateStripsFromCorner(i, first_ci + i);
+ if (strip_faces_[i].size() > longest_strip_length) {
+ longest_strip_length = static_cast<int>(strip_faces_[i].size());
+ longest_strip_id = i;
+ }
+ }
+ return longest_strip_id;
+ }
+
+ // Generates strip from the data stored in |strip_faces_| and
+ // |strip_start_start_corners_| and stores it to |out_it|.
+ template <typename OutputIteratorT>
+ void StoreStrip(int local_strip_id, OutputIteratorT out_it) {
+ ++num_strips_;
+
+ const int num_strip_faces = strip_faces_[local_strip_id].size();
+ CornerIndex ci = strip_start_corners_[local_strip_id];
+ for (int i = 0; i < num_strip_faces; ++i) {
+ const FaceIndex fi = corner_table_->Face(ci);
+ is_face_visited_[fi] = true;
+ ++num_encoded_faces_;
+
+ if (i == 0) {
+ // Add the start face (three indices).
+ *out_it++ = CornerToPointIndex(ci).value();
+ *out_it++ = CornerToPointIndex(corner_table_->Next(ci)).value();
+ last_encoded_point_ = CornerToPointIndex(corner_table_->Previous(ci));
+ *out_it++ = last_encoded_point_.value();
+ } else {
+ // Store the point on the newly reached corner.
+ last_encoded_point_ = CornerToPointIndex(ci);
+ *out_it++ = last_encoded_point_.value();
+
+ // Go to the correct source corner to proceed to the next face.
+ if (i & 1) {
+ ci = corner_table_->Previous(ci);
+ } else {
+ ci = corner_table_->Next(ci);
+ }
+ }
+ ci = corner_table_->Opposite(ci);
+ }
+ }
+
+ PointIndex CornerToPointIndex(CornerIndex ci) const {
+ return mesh_->CornerToPointId(ci);
+ }
+
+ // Returns the opposite corner in case the opposite triangle does not lie
+ // across an attribute seam. Otherwise return kInvalidCornerIndex.
+ CornerIndex GetOppositeCorner(CornerIndex ci) const {
+ const CornerIndex oci = corner_table_->Opposite(ci);
+ if (oci < 0) {
+ return kInvalidCornerIndex;
+ }
+ // Ensure the point ids are same on both sides of the shared edge between
+ // the triangles.
+ if (CornerToPointIndex(corner_table_->Next(ci)) !=
+ CornerToPointIndex(corner_table_->Previous(oci))) {
+ return kInvalidCornerIndex;
+ }
+ if (CornerToPointIndex(corner_table_->Previous(ci)) !=
+ CornerToPointIndex(corner_table_->Next(oci))) {
+ return kInvalidCornerIndex;
+ }
+ return oci;
+ }
+
+ void GenerateStripsFromCorner(int local_strip_id, CornerIndex ci);
+
+ const Mesh *mesh_;
+ std::unique_ptr<CornerTable> corner_table_;
+
+ // Store strip faces for each of three possible directions from a given face.
+ std::vector<FaceIndex> strip_faces_[3];
+ // Start corner for each direction of the strip containing the processed face.
+ CornerIndex strip_start_corners_[3];
+ IndexTypeVector<FaceIndex, bool> is_face_visited_;
+ // The number of strips generated by this method.
+ int num_strips_;
+ // The number of encoded triangles.
+ int num_encoded_faces_;
+ // Last encoded point.
+ PointIndex last_encoded_point_;
+};
+
+template <typename OutputIteratorT, typename IndexTypeT>
+bool MeshStripifier::GenerateTriangleStripsWithPrimitiveRestart(
+ const Mesh &mesh, IndexTypeT primitive_restart_index,
+ OutputIteratorT out_it) {
+ if (!Prepare(mesh)) {
+ return false;
+ }
+
+ // Go over all faces and generate strips from the first unvisited one.
+ for (FaceIndex fi(0); fi < mesh.num_faces(); ++fi) {
+ if (is_face_visited_[fi]) {
+ continue;
+ }
+
+ const int longest_strip_id = FindLongestStripFromFace(fi);
+
+ // Separate triangle strips with the primitive restart index.
+ if (num_strips_ > 0) {
+ *out_it++ = primitive_restart_index;
+ }
+
+ StoreStrip(longest_strip_id, out_it);
+ }
+
+ return true;
+}
+
template <typename OutputIteratorT>
bool MeshStripifier::GenerateTriangleStripsWithDegenerateTriangles(
    const Mesh &mesh, OutputIteratorT out_it) {
  // Build the corner table and reset all bookkeeping state.
  if (!Prepare(mesh)) {
    return false;
  }

  // Go over all faces and generate strips from the first unvisited one.
  for (FaceIndex fi(0); fi < mesh.num_faces(); ++fi) {
    if (is_face_visited_[fi]) {
      continue;
    }

    const int longest_strip_id = FindLongestStripFromFace(fi);

    // Separate triangle strips by degenerate triangles. There will be either
    // three or four degenerate triangles inserted based on the number of
    // triangles that are already encoded in the output strip (three degenerate
    // triangles for even number of existing triangles, four degenerate
    // triangles for odd number of triangles).
    if (num_strips_ > 0) {
      // Duplicate last encoded index (first degenerate face).
      *out_it++ = last_encoded_point_.value();

      // Connect it to the start point of the new triangle strip (second
      // degenerate face).
      const CornerIndex new_start_corner =
          strip_start_corners_[longest_strip_id];
      const PointIndex new_start_point = CornerToPointIndex(new_start_corner);
      *out_it++ = new_start_point.value();
      num_encoded_faces_ += 2;
      // If the number of faces encoded so far is odd, we need to duplicate
      // the point one more time to preserve the correct orientation of the
      // next strip.
      if (num_encoded_faces_ & 1) {
        *out_it++ = new_start_point.value();
        num_encoded_faces_ += 1;
      }
      // The last degenerate face will be added implicitly in the StoreStrip()
      // function below as the first point index is going to be encoded there
      // again.
    }

    StoreStrip(longest_strip_id, out_it);
  }

  return true;
}
+
+} // namespace draco
+
+#endif // DRACO_MESH_MESH_STRIPIFIER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.cc b/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.cc
new file mode 100644
index 0000000..60b0c50
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.cc
@@ -0,0 +1,89 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/triangle_soup_mesh_builder.h"
+
+namespace draco {
+
+void TriangleSoupMeshBuilder::Start(int num_faces) {
+ mesh_ = std::unique_ptr<Mesh>(new Mesh());
+ mesh_->SetNumFaces(num_faces);
+ mesh_->set_num_points(num_faces * 3);
+ attribute_element_types_.clear();
+}
+
+int TriangleSoupMeshBuilder::AddAttribute(
+ GeometryAttribute::Type attribute_type, int8_t num_components,
+ DataType data_type) {
+ GeometryAttribute va;
+ va.Init(attribute_type, nullptr, num_components, data_type, false,
+ DataTypeLength(data_type) * num_components, 0);
+ attribute_element_types_.push_back(-1);
+ return mesh_->AddAttribute(va, true, mesh_->num_points());
+}
+
+void TriangleSoupMeshBuilder::SetAttributeValuesForFace(
+ int att_id, FaceIndex face_id, const void *corner_value_0,
+ const void *corner_value_1, const void *corner_value_2) {
+ const int start_index = 3 * face_id.value();
+ PointAttribute *const att = mesh_->attribute(att_id);
+ att->SetAttributeValue(AttributeValueIndex(start_index), corner_value_0);
+ att->SetAttributeValue(AttributeValueIndex(start_index + 1), corner_value_1);
+ att->SetAttributeValue(AttributeValueIndex(start_index + 2), corner_value_2);
+ // TODO(ostava): The below code should be called only for one attribute.
+ // It will work OK even for multiple attributes, but it's redundant.
+ mesh_->SetFace(face_id,
+ {{PointIndex(start_index), PointIndex(start_index + 1),
+ PointIndex(start_index + 2)}});
+ attribute_element_types_[att_id] = MESH_CORNER_ATTRIBUTE;
+}
+
+void TriangleSoupMeshBuilder::SetPerFaceAttributeValueForFace(
+ int att_id, FaceIndex face_id, const void *value) {
+ const int start_index = 3 * face_id.value();
+ PointAttribute *const att = mesh_->attribute(att_id);
+ att->SetAttributeValue(AttributeValueIndex(start_index), value);
+ att->SetAttributeValue(AttributeValueIndex(start_index + 1), value);
+ att->SetAttributeValue(AttributeValueIndex(start_index + 2), value);
+ mesh_->SetFace(face_id,
+ {{PointIndex(start_index), PointIndex(start_index + 1),
+ PointIndex(start_index + 2)}});
+ int8_t &element_type = attribute_element_types_[att_id];
+ if (element_type < 0) {
+ element_type = MESH_FACE_ATTRIBUTE;
+ }
+}
+
+std::unique_ptr<Mesh> TriangleSoupMeshBuilder::Finalize() {
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+ // First deduplicate attribute values.
+ if (!mesh_->DeduplicateAttributeValues()) {
+ return nullptr;
+ }
+#endif
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+ // Also deduplicate vertex indices.
+ mesh_->DeduplicatePointIds();
+#endif
+ for (size_t i = 0; i < attribute_element_types_.size(); ++i) {
+ if (attribute_element_types_[i] >= 0) {
+ mesh_->SetAttributeElementType(
+ static_cast<int>(i),
+ static_cast<MeshAttributeElementType>(attribute_element_types_[i]));
+ }
+ }
+ return std::move(mesh_);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.h b/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.h
new file mode 100644
index 0000000..89466e1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder.h
@@ -0,0 +1,63 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_TRIANGLE_SOUP_MESH_BUILDER_H_
+#define DRACO_MESH_TRIANGLE_SOUP_MESH_BUILDER_H_
+
+#include "draco/draco_features.h"
+#include "draco/mesh/mesh.h"
+
+namespace draco {
+
+// Class for building meshes directly from attribute values that can be
+// specified for each face corner. All attributes are automatically
+// deduplicated.
class TriangleSoupMeshBuilder {
 public:
  // Starts mesh building for a given number of faces.
  // TODO(ostava): Currently it's necessary to select the correct number of
  // faces upfront. This should be generalized, but it will require us to
  // rewrite our attribute resizing functions.
  void Start(int num_faces);

  // Adds an empty attribute to the mesh. Returns the new attribute's id.
  int AddAttribute(GeometryAttribute::Type attribute_type,
                   int8_t num_components, DataType data_type);

  // Sets values for a given attribute on all corners of a given face.
  void SetAttributeValuesForFace(int att_id, FaceIndex face_id,
                                 const void *corner_value_0,
                                 const void *corner_value_1,
                                 const void *corner_value_2);

  // Sets value for a per-face attribute. If all faces of a given attribute are
  // set with this method, the attribute will be marked as per-face, otherwise
  // it will be marked as per-corner attribute.
  void SetPerFaceAttributeValueForFace(int att_id, FaceIndex face_id,
                                       const void *value);

  // Finalizes the mesh or returns nullptr on error.
  // Once this function is called, the builder becomes invalid and cannot be
  // used until the method Start() is called again.
  std::unique_ptr<Mesh> Finalize();

 private:
  // Per-attribute element type; holds MeshAttributeElementType values, or -1
  // when the type has not been decided yet for that attribute.
  std::vector<int8_t> attribute_element_types_;

  // The mesh under construction; ownership is released to the caller in
  // Finalize().
  std::unique_ptr<Mesh> mesh_;
};
+
+} // namespace draco
+
+#endif // DRACO_MESH_TRIANGLE_SOUP_MESH_BUILDER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder_test.cc b/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder_test.cc
new file mode 100644
index 0000000..171f8fe
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/triangle_soup_mesh_builder_test.cc
@@ -0,0 +1,197 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/mesh/triangle_soup_mesh_builder.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/vector_d.h"
+
+namespace draco {
+
// Test fixture; the tests below need no shared setup or state.
class TriangleSoupMeshBuilderTest : public ::testing::Test {};
+
TEST_F(TriangleSoupMeshBuilderTest, CubeTest) {
  // This test verifies that the mesh builder constructs a valid cube out
  // of the provided triangle soup data.
  TriangleSoupMeshBuilder mb;
  mb.Start(12);
  const int pos_att_id =
      mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
  // clang-format off
  // Front face.
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
                               Vector3f(0.f, 0.f, 0.f).data(),
                               Vector3f(1.f, 0.f, 0.f).data(),
                               Vector3f(0.f, 1.f, 0.f).data());
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
                               Vector3f(0.f, 1.f, 0.f).data(),
                               Vector3f(1.f, 0.f, 0.f).data(),
                               Vector3f(1.f, 1.f, 0.f).data());

  // Back face.
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(2),
                               Vector3f(0.f, 1.f, 1.f).data(),
                               Vector3f(1.f, 0.f, 1.f).data(),
                               Vector3f(0.f, 0.f, 1.f).data());
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(3),
                               Vector3f(1.f, 1.f, 1.f).data(),
                               Vector3f(1.f, 0.f, 1.f).data(),
                               Vector3f(0.f, 1.f, 1.f).data());

  // Top face.
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(4),
                               Vector3f(0.f, 1.f, 0.f).data(),
                               Vector3f(1.f, 1.f, 0.f).data(),
                               Vector3f(0.f, 1.f, 1.f).data());
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(5),
                               Vector3f(0.f, 1.f, 1.f).data(),
                               Vector3f(1.f, 1.f, 0.f).data(),
                               Vector3f(1.f, 1.f, 1.f).data());

  // Bottom face.
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(6),
                               Vector3f(0.f, 0.f, 1.f).data(),
                               Vector3f(1.f, 0.f, 0.f).data(),
                               Vector3f(0.f, 0.f, 0.f).data());
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(7),
                               Vector3f(1.f, 0.f, 1.f).data(),
                               Vector3f(1.f, 0.f, 0.f).data(),
                               Vector3f(0.f, 0.f, 1.f).data());

  // Right face.
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(8),
                               Vector3f(1.f, 0.f, 0.f).data(),
                               Vector3f(1.f, 0.f, 1.f).data(),
                               Vector3f(1.f, 1.f, 0.f).data());
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(9),
                               Vector3f(1.f, 1.f, 0.f).data(),
                               Vector3f(1.f, 0.f, 1.f).data(),
                               Vector3f(1.f, 1.f, 1.f).data());

  // Left face.
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(10),
                               Vector3f(0.f, 1.f, 0.f).data(),
                               Vector3f(0.f, 0.f, 1.f).data(),
                               Vector3f(0.f, 0.f, 0.f).data());
  mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(11),
                               Vector3f(0.f, 1.f, 1.f).data(),
                               Vector3f(0.f, 0.f, 1.f).data(),
                               Vector3f(0.f, 1.f, 0.f).data());
  // clang-format on

  // Duplicate corner points must collapse to the cube's 8 unique vertices.
  std::unique_ptr<Mesh> mesh = mb.Finalize();
  ASSERT_NE(mesh, nullptr) << "Failed to build the cube mesh.";
  EXPECT_EQ(mesh->num_points(), 8) << "Unexpected number of vertices.";
  EXPECT_EQ(mesh->num_faces(), 12) << "Unexpected number of faces.";
}
+
+TEST_F(TriangleSoupMeshBuilderTest, TestPerFaceAttribs) {
+ // This tests, verifies that the mesh builder constructs a valid cube with
+ // per face Boolean attributes.
+ TriangleSoupMeshBuilder mb;
+ mb.Start(12);
+ const int pos_att_id =
+ mb.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+ const int gen_att_id =
+ mb.AddAttribute(GeometryAttribute::GENERIC, 1, DT_BOOL);
+ uint8_t bool_true = 1;
+ uint8_t bool_false = 0;
+ // clang-format off
+ // Front face.
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
+ Vector3f(0.f, 0.f, 0.f).data(),
+ Vector3f(1.f, 0.f, 0.f).data(),
+ Vector3f(0.f, 1.f, 0.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(0), &bool_false);
+
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
+ Vector3f(0.f, 1.f, 0.f).data(),
+ Vector3f(1.f, 0.f, 0.f).data(),
+ Vector3f(1.f, 1.f, 0.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(1), &bool_true);
+
+ // Back face.
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(2),
+ Vector3f(0.f, 1.f, 1.f).data(),
+ Vector3f(1.f, 0.f, 1.f).data(),
+ Vector3f(0.f, 0.f, 1.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(2), &bool_true);
+
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(3),
+ Vector3f(1.f, 1.f, 1.f).data(),
+ Vector3f(1.f, 0.f, 1.f).data(),
+ Vector3f(0.f, 1.f, 1.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(3), &bool_true);
+
+ // Top face.
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(4),
+ Vector3f(0.f, 1.f, 0.f).data(),
+ Vector3f(1.f, 1.f, 0.f).data(),
+ Vector3f(0.f, 1.f, 1.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(4), &bool_false);;
+
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(5),
+ Vector3f(0.f, 1.f, 1.f).data(),
+ Vector3f(1.f, 1.f, 0.f).data(),
+ Vector3f(1.f, 1.f, 1.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(5), &bool_false);
+
+ // Bottom face.
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(6),
+ Vector3f(0.f, 0.f, 1.f).data(),
+ Vector3f(1.f, 0.f, 0.f).data(),
+ Vector3f(0.f, 0.f, 0.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(6), &bool_true);
+
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(7),
+ Vector3f(1.f, 0.f, 1.f).data(),
+ Vector3f(1.f, 0.f, 0.f).data(),
+ Vector3f(0.f, 0.f, 1.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(7), &bool_true);
+
+ // Right face.
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(8),
+ Vector3f(1.f, 0.f, 0.f).data(),
+ Vector3f(1.f, 0.f, 1.f).data(),
+ Vector3f(1.f, 1.f, 0.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(8), &bool_false);
+
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(9),
+ Vector3f(1.f, 1.f, 0.f).data(),
+ Vector3f(1.f, 0.f, 1.f).data(),
+ Vector3f(1.f, 1.f, 1.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(9), &bool_true);
+
+ // Left face.
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(10),
+ Vector3f(0.f, 1.f, 0.f).data(),
+ Vector3f(0.f, 0.f, 1.f).data(),
+ Vector3f(0.f, 0.f, 0.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(10), &bool_true);
+
+ mb.SetAttributeValuesForFace(pos_att_id, FaceIndex(11),
+ Vector3f(0.f, 1.f, 1.f).data(),
+ Vector3f(0.f, 0.f, 1.f).data(),
+ Vector3f(0.f, 1.f, 0.f).data());
+ mb.SetPerFaceAttributeValueForFace(gen_att_id, FaceIndex(11), &bool_false);
+ // clang-format on
+
+ std::unique_ptr<Mesh> mesh = mb.Finalize();
+ ASSERT_NE(mesh, nullptr) << "Failed to build the cube mesh.";
+ EXPECT_EQ(mesh->num_faces(), 12) << "Unexpected number of faces.";
+ EXPECT_EQ(mesh->GetAttributeElementType(gen_att_id), MESH_FACE_ATTRIBUTE)
+ << "Unexpected attribute element type.";
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/mesh/valence_cache.h b/libs/assimp/contrib/draco/src/draco/mesh/valence_cache.h
new file mode 100644
index 0000000..3540377
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/mesh/valence_cache.h
@@ -0,0 +1,142 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_MESH_VALENCE_CACHE_H_
+#define DRACO_MESH_VALENCE_CACHE_H_
+
+#include "draco/attributes/geometry_indices.h"
+#include "draco/core/draco_index_type_vector.h"
+#include "draco/core/macros.h"
+
+namespace draco {
+
// ValenceCache caches vertex valences computed from a CornerTable-like class.
// Valences must not be queried before one of the Cache*() methods has been
// called, and the cached values must be cleared and rebuilt whenever the
// underlying mesh changes.
+
template <class CornerTableT>
class ValenceCache {
  // Non-owning reference to the corner table whose valences are cached.
  const CornerTableT &table_;

 public:
  explicit ValenceCache(const CornerTableT &table) : table_(table) {}

  // Do not call before CacheValences() / CacheValencesInaccurate().
  // Returns -1 for an invalid corner index.
  inline int8_t ValenceFromCacheInaccurate(CornerIndex c) const {
    if (c == kInvalidCornerIndex) {
      return -1;
    }
    return ValenceFromCacheInaccurate(table_.Vertex(c));
  }
  inline int32_t ValenceFromCache(CornerIndex c) const {
    if (c == kInvalidCornerIndex) {
      return -1;
    }
    return ValenceFromCache(table_.Vertex(c));
  }

  // "Confident" variants skip the validity checks; the caller guarantees the
  // index is valid (checked only via DCHECK in debug builds).
  inline int32_t ConfidentValenceFromCache(VertexIndex v) const {
    DRACO_DCHECK_LT(v.value(), table_.num_vertices());
    DRACO_DCHECK_EQ(vertex_valence_cache_32_bit_.size(), table_.num_vertices());
    return vertex_valence_cache_32_bit_[v];
  }

  // Collect the valence for all vertices so they can be reused later. The
  // 'inaccurate' versions of this family of functions clips the true valence
  // of the vertices to 8 signed bits as a space optimization. This clipping
  // will lead to occasionally wrong results. If accurate results are required
  // under all circumstances, do not use the 'inaccurate' version or else
  // use it and fetch the correct result in the event the value appears clipped.
  // The topology of the mesh should be a constant when Valence Cache functions
  // are being used. Modification of the mesh while cache(s) are filled will
  // not guarantee proper results on subsequent calls unless they are rebuilt.
  // Both Cache*() methods are no-ops when their cache is already populated.
  void CacheValencesInaccurate() const {
    if (vertex_valence_cache_8_bit_.size() == 0) {
      const VertexIndex vertex_count = VertexIndex(table_.num_vertices());
      vertex_valence_cache_8_bit_.resize(vertex_count.value());
      for (VertexIndex v = VertexIndex(0); v < vertex_count; v += 1) {
        // Clamp to int8_t max so large valences do not wrap around.
        vertex_valence_cache_8_bit_[v] = static_cast<int8_t>(
            (std::min)(static_cast<int32_t>(std::numeric_limits<int8_t>::max()),
                       table_.Valence(v)));
      }
    }
  }
  void CacheValences() const {
    if (vertex_valence_cache_32_bit_.size() == 0) {
      const VertexIndex vertex_count = VertexIndex(table_.num_vertices());
      vertex_valence_cache_32_bit_.resize(vertex_count.value());
      for (VertexIndex v = VertexIndex(0); v < vertex_count; v += 1) {
        vertex_valence_cache_32_bit_[v] = table_.Valence(v);
      }
    }
  }

  inline int8_t ConfidentValenceFromCacheInaccurate(CornerIndex c) const {
    DRACO_DCHECK_GE(c.value(), 0);
    return ConfidentValenceFromCacheInaccurate(table_.ConfidentVertex(c));
  }
  inline int32_t ConfidentValenceFromCache(CornerIndex c) const {
    DRACO_DCHECK_GE(c.value(), 0);
    return ConfidentValenceFromCache(table_.ConfidentVertex(c));
  }
  // Returns -1 for an invalid or out-of-range vertex index.
  inline int8_t ValenceFromCacheInaccurate(VertexIndex v) const {
    DRACO_DCHECK_EQ(vertex_valence_cache_8_bit_.size(), table_.num_vertices());
    if (v == kInvalidVertexIndex || v.value() >= table_.num_vertices()) {
      return -1;
    }
    return ConfidentValenceFromCacheInaccurate(v);
  }
  inline int8_t ConfidentValenceFromCacheInaccurate(VertexIndex v) const {
    DRACO_DCHECK_LT(v.value(), table_.num_vertices());
    DRACO_DCHECK_EQ(vertex_valence_cache_8_bit_.size(), table_.num_vertices());
    return vertex_valence_cache_8_bit_[v];
  }

  // TODO(draco-eng) Add unit tests for ValenceCache functions.
  inline int32_t ValenceFromCache(VertexIndex v) const {
    DRACO_DCHECK_EQ(vertex_valence_cache_32_bit_.size(), table_.num_vertices());
    if (v == kInvalidVertexIndex || v.value() >= table_.num_vertices()) {
      return -1;
    }
    return ConfidentValenceFromCache(v);
  }

  // Clear the cache of valences and deallocate the memory.
  void ClearValenceCacheInaccurate() const {
    vertex_valence_cache_8_bit_.clear();
    // Force erasure: swap with an empty vector so capacity is released too.
    IndexTypeVector<VertexIndex, int8_t>().swap(vertex_valence_cache_8_bit_);
  }
  void ClearValenceCache() const {
    vertex_valence_cache_32_bit_.clear();
    // Force erasure: swap with an empty vector so capacity is released too.
    IndexTypeVector<VertexIndex, int32_t>().swap(vertex_valence_cache_32_bit_);
  }

  bool IsCacheEmpty() const {
    return vertex_valence_cache_8_bit_.size() == 0 &&
           vertex_valence_cache_32_bit_.size() == 0;
  }

 private:
  // Retain valences and clip them to char size.
  // mutable so the caches can be (re)built through a const ValenceCache.
  mutable IndexTypeVector<VertexIndex, int8_t> vertex_valence_cache_8_bit_;
  mutable IndexTypeVector<VertexIndex, int32_t> vertex_valence_cache_32_bit_;
};
+
+} // namespace draco
+
+#endif // DRACO_MESH_VALENCE_CACHE_H_
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.cc b/libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.cc
new file mode 100644
index 0000000..b838981
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.cc
@@ -0,0 +1,44 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/metadata/geometry_metadata.h"
+
+#include <utility>
+
+namespace draco {
+
+const AttributeMetadata *GeometryMetadata::GetAttributeMetadataByStringEntry(
+ const std::string &entry_name, const std::string &entry_value) const {
+ for (auto &&att_metadata : att_metadatas_) {
+ std::string value;
+ if (!att_metadata->GetEntryString(entry_name, &value)) {
+ continue;
+ }
+ if (value == entry_value) {
+ return att_metadata.get();
+ }
+ }
+ // No attribute has the requested entry.
+ return nullptr;
+}
+
+bool GeometryMetadata::AddAttributeMetadata(
+ std::unique_ptr<AttributeMetadata> att_metadata) {
+ if (!att_metadata.get()) {
+ return false;
+ }
+ att_metadatas_.push_back(std::move(att_metadata));
+ return true;
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.h b/libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.h
new file mode 100644
index 0000000..ec7ecb9
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/geometry_metadata.h
@@ -0,0 +1,140 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_METADATA_GEOMETRY_METADATA_H_
+#define DRACO_METADATA_GEOMETRY_METADATA_H_
+
+#include "draco/metadata/metadata.h"
+
+namespace draco {
+
+// Class for representing metadata of a single attribute. Its attribute id
+// must be identical to the id of its counterpart attribute in the point
+// cloud it belongs to.
+class AttributeMetadata : public Metadata {
+ public:
+ AttributeMetadata() : att_unique_id_(0) {}
+ explicit AttributeMetadata(const Metadata &metadata)
+ : Metadata(metadata), att_unique_id_(0) {}
+
+ void set_att_unique_id(uint32_t att_unique_id) {
+ att_unique_id_ = att_unique_id;
+ }
+ // The unique id of the attribute that this metadata belongs to.
+ uint32_t att_unique_id() const { return att_unique_id_; }
+
+ private:
+ uint32_t att_unique_id_;
+
+ friend struct AttributeMetadataHasher;
+ friend class PointCloud;
+};
+
+// Functor for computing a hash from data stored in an AttributeMetadata class.
+struct AttributeMetadataHasher {
+ size_t operator()(const AttributeMetadata &metadata) const {
+ size_t hash = metadata.att_unique_id_;
+ MetadataHasher metadata_hasher;
+ hash = HashCombine(metadata_hasher(static_cast<const Metadata &>(metadata)),
+ hash);
+ return hash;
+ }
+};
+
+// Class for representing the metadata for a point cloud. It could have a list
+// of attribute metadata.
+class GeometryMetadata : public Metadata {
+ public:
+ GeometryMetadata() {}
+ explicit GeometryMetadata(const Metadata &metadata) : Metadata(metadata) {}
+
+ const AttributeMetadata *GetAttributeMetadataByStringEntry(
+ const std::string &entry_name, const std::string &entry_value) const;
+ bool AddAttributeMetadata(std::unique_ptr<AttributeMetadata> att_metadata);
+
+ void DeleteAttributeMetadataByUniqueId(int32_t att_unique_id) {
+ if (att_unique_id < 0) {
+ return;
+ }
+ for (auto itr = att_metadatas_.begin(); itr != att_metadatas_.end();
+ ++itr) {
+ if (itr->get()->att_unique_id() == static_cast<uint32_t>(att_unique_id)) {
+ att_metadatas_.erase(itr);
+ return;
+ }
+ }
+ }
+
+ const AttributeMetadata *GetAttributeMetadataByUniqueId(
+ int32_t att_unique_id) const {
+ if (att_unique_id < 0) {
+ return nullptr;
+ }
+
+ // TODO(draco-eng): Consider using unordered_map instead of vector to store
+ // attribute metadata.
+ for (auto &&att_metadata : att_metadatas_) {
+ if (att_metadata->att_unique_id() ==
+ static_cast<uint32_t>(att_unique_id)) {
+ return att_metadata.get();
+ }
+ }
+ return nullptr;
+ }
+
+ AttributeMetadata *attribute_metadata(int32_t att_unique_id) {
+ if (att_unique_id < 0) {
+ return nullptr;
+ }
+
+ // TODO(draco-eng): Consider use unordered_map instead of vector to store
+ // attribute metadata.
+ for (auto &&att_metadata : att_metadatas_) {
+ if (att_metadata->att_unique_id() ==
+ static_cast<uint32_t>(att_unique_id)) {
+ return att_metadata.get();
+ }
+ }
+ return nullptr;
+ }
+
+ const std::vector<std::unique_ptr<AttributeMetadata>> &attribute_metadatas()
+ const {
+ return att_metadatas_;
+ }
+
+ private:
+ std::vector<std::unique_ptr<AttributeMetadata>> att_metadatas_;
+
+ friend struct GeometryMetadataHasher;
+};
+
+// Functor for computing a hash from data stored in a GeometryMetadata class.
+struct GeometryMetadataHasher {
+ size_t operator()(const GeometryMetadata &metadata) const {
+ size_t hash = metadata.att_metadatas_.size();
+ AttributeMetadataHasher att_metadata_hasher;
+ for (auto &&att_metadata : metadata.att_metadatas_) {
+ hash = HashCombine(att_metadata_hasher(*att_metadata), hash);
+ }
+ MetadataHasher metadata_hasher;
+ hash = HashCombine(metadata_hasher(static_cast<const Metadata &>(metadata)),
+ hash);
+ return hash;
+ }
+};
+
+} // namespace draco
+
+#endif  // DRACO_METADATA_GEOMETRY_METADATA_H_
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata.cc b/libs/assimp/contrib/draco/src/draco/metadata/metadata.cc
new file mode 100644
index 0000000..9141907
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata.cc
@@ -0,0 +1,132 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/metadata/metadata.h"
+
+#include <utility>
+
+namespace draco {
+
+EntryValue::EntryValue(const EntryValue &value) {
+ data_.resize(value.data_.size());
+ memcpy(&data_[0], &value.data_[0], value.data_.size());
+}
+
+EntryValue::EntryValue(const std::string &value) {
+ data_.resize(value.size());
+ memcpy(&data_[0], &value[0], value.size());
+}
+
+template <>
+bool EntryValue::GetValue(std::string *value) const {
+ if (data_.empty()) {
+ return false;
+ }
+ value->resize(data_.size());
+ memcpy(&value->at(0), &data_[0], data_.size());
+ return true;
+}
+
+Metadata::Metadata(const Metadata &metadata) {
+ entries_.insert(metadata.entries_.begin(), metadata.entries_.end());
+ for (const auto &sub_metadata_entry : metadata.sub_metadatas_) {
+ std::unique_ptr<Metadata> sub_metadata =
+ std::unique_ptr<Metadata>(new Metadata(*sub_metadata_entry.second));
+ sub_metadatas_[sub_metadata_entry.first] = std::move(sub_metadata);
+ }
+}
+
// Typed convenience wrappers around the private AddEntry()/GetEntry()
// templates. Each Add* call replaces any existing entry with the same name;
// each Get* call returns false when no entry named |name| exists or the
// stored bytes cannot be read back as the requested type.

void Metadata::AddEntryInt(const std::string &name, int32_t value) {
  AddEntry(name, value);
}

bool Metadata::GetEntryInt(const std::string &name, int32_t *value) const {
  return GetEntry(name, value);
}

void Metadata::AddEntryIntArray(const std::string &name,
                                const std::vector<int32_t> &value) {
  AddEntry(name, value);
}

bool Metadata::GetEntryIntArray(const std::string &name,
                                std::vector<int32_t> *value) const {
  return GetEntry(name, value);
}

void Metadata::AddEntryDouble(const std::string &name, double value) {
  AddEntry(name, value);
}

bool Metadata::GetEntryDouble(const std::string &name, double *value) const {
  return GetEntry(name, value);
}

void Metadata::AddEntryDoubleArray(const std::string &name,
                                   const std::vector<double> &value) {
  AddEntry(name, value);
}

bool Metadata::GetEntryDoubleArray(const std::string &name,
                                   std::vector<double> *value) const {
  return GetEntry(name, value);
}

void Metadata::AddEntryString(const std::string &name,
                              const std::string &value) {
  AddEntry(name, value);
}

bool Metadata::GetEntryString(const std::string &name,
                              std::string *value) const {
  return GetEntry(name, value);
}

// Add a blob of data as an entry.
void Metadata::AddEntryBinary(const std::string &name,
                              const std::vector<uint8_t> &value) {
  AddEntry(name, value);
}

bool Metadata::GetEntryBinary(const std::string &name,
                              std::vector<uint8_t> *value) const {
  return GetEntry(name, value);
}
+
+bool Metadata::AddSubMetadata(const std::string &name,
+ std::unique_ptr<Metadata> sub_metadata) {
+ auto sub_ptr = sub_metadatas_.find(name);
+ // Avoid accidentally writing over a sub-metadata with the same name.
+ if (sub_ptr != sub_metadatas_.end()) {
+ return false;
+ }
+ sub_metadatas_[name] = std::move(sub_metadata);
+ return true;
+}
+
+const Metadata *Metadata::GetSubMetadata(const std::string &name) const {
+ auto sub_ptr = sub_metadatas_.find(name);
+ if (sub_ptr == sub_metadatas_.end()) {
+ return nullptr;
+ }
+ return sub_ptr->second.get();
+}
+
+void Metadata::RemoveEntry(const std::string &name) {
+ // Actually just remove "name", no need to check if it exists.
+ auto entry_ptr = entries_.find(name);
+ if (entry_ptr != entries_.end()) {
+ entries_.erase(entry_ptr);
+ }
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata.h b/libs/assimp/contrib/draco/src/draco/metadata/metadata.h
new file mode 100644
index 0000000..56d05e4
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata.h
@@ -0,0 +1,208 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_METADATA_METADATA_H_
+#define DRACO_METADATA_METADATA_H_
+
+#include <cstring>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "draco/core/hash_utils.h"
+
+namespace draco {
+
+// Class for storing a value of an entry in Metadata. Internally it is
+// represented by a buffer of data. It can be accessed by various data types,
+// e.g. int, float, binary data or string.
+class EntryValue {
+ public:
+ template <typename DataTypeT>
+ explicit EntryValue(const DataTypeT &data) {
+ const size_t data_type_size = sizeof(DataTypeT);
+ data_.resize(data_type_size);
+ memcpy(&data_[0], &data, data_type_size);
+ }
+
+ template <typename DataTypeT>
+ explicit EntryValue(const std::vector<DataTypeT> &data) {
+ const size_t total_size = sizeof(DataTypeT) * data.size();
+ data_.resize(total_size);
+ memcpy(&data_[0], &data[0], total_size);
+ }
+
+ EntryValue(const EntryValue &value);
+ explicit EntryValue(const std::string &value);
+
+ template <typename DataTypeT>
+ bool GetValue(DataTypeT *value) const {
+ const size_t data_type_size = sizeof(DataTypeT);
+ if (data_type_size != data_.size()) {
+ return false;
+ }
+ memcpy(value, &data_[0], data_type_size);
+ return true;
+ }
+
+ template <typename DataTypeT>
+ bool GetValue(std::vector<DataTypeT> *value) const {
+ if (data_.empty()) {
+ return false;
+ }
+ const size_t data_type_size = sizeof(DataTypeT);
+ if (data_.size() % data_type_size != 0) {
+ return false;
+ }
+ value->resize(data_.size() / data_type_size);
+ memcpy(&value->at(0), &data_[0], data_.size());
+ return true;
+ }
+
+ const std::vector<uint8_t> &data() const { return data_; }
+
+ private:
+ std::vector<uint8_t> data_;
+
+ friend struct EntryValueHasher;
+};
+
+// Functor for computing a hash from data stored within an EntryValue.
+struct EntryValueHasher {
+ size_t operator()(const EntryValue &ev) const {
+ size_t hash = ev.data_.size();
+ for (size_t i = 0; i < ev.data_.size(); ++i) {
+ hash = HashCombine(ev.data_[i], hash);
+ }
+ return hash;
+ }
+};
+
// Class for holding generic metadata. It has a list of entries which consist
// of an entry name and an entry value. Each Metadata could also have nested
// metadata.
class Metadata {
 public:
  Metadata() {}
  // Deep copy: nested sub-metadata is cloned recursively (see metadata.cc).
  Metadata(const Metadata &metadata);
  // In theory, we support all types of data as long as it could be serialized
  // to binary data. We provide the following functions for inserting and
  // accessing entries of common data types. For now, developers need to know
  // the type of entries they are requesting.
  void AddEntryInt(const std::string &name, int32_t value);

  // Returns false if Metadata does not contain an entry with a key of |name|.
  // This function does not guarantee that entry's type is int32_t.
  bool GetEntryInt(const std::string &name, int32_t *value) const;

  void AddEntryIntArray(const std::string &name,
                        const std::vector<int32_t> &value);

  // Returns false if Metadata does not contain an entry with a key of |name|.
  // This function does not guarantee that entry's type is a vector of int32_t.
  bool GetEntryIntArray(const std::string &name,
                        std::vector<int32_t> *value) const;

  void AddEntryDouble(const std::string &name, double value);

  // Returns false if Metadata does not contain an entry with a key of |name|.
  // This function does not guarantee that entry's type is double.
  bool GetEntryDouble(const std::string &name, double *value) const;

  void AddEntryDoubleArray(const std::string &name,
                           const std::vector<double> &value);

  // Returns false if Metadata does not contain an entry with a key of |name|.
  // This function does not guarantee that entry's type is a vector of double.
  bool GetEntryDoubleArray(const std::string &name,
                           std::vector<double> *value) const;

  void AddEntryString(const std::string &name, const std::string &value);

  // Returns false if Metadata does not contain an entry with a key of |name|.
  // This function does not guarantee that entry's type is std::string.
  bool GetEntryString(const std::string &name, std::string *value) const;

  // Add a blob of data as an entry.
  void AddEntryBinary(const std::string &name,
                      const std::vector<uint8_t> &value);

  // Returns false if Metadata does not contain an entry with a key of |name|.
  // This function does not guarantee that entry's type is a vector of uint8_t.
  bool GetEntryBinary(const std::string &name,
                      std::vector<uint8_t> *value) const;

  // Takes ownership of |sub_metadata|. Returns false if a sub-metadata with
  // the same |name| already exists (the existing one is kept).
  bool AddSubMetadata(const std::string &name,
                      std::unique_ptr<Metadata> sub_metadata);
  // Returns the sub-metadata registered under |name|, or nullptr.
  const Metadata *GetSubMetadata(const std::string &name) const;

  // Removes the entry named |name| if it exists; no-op otherwise.
  void RemoveEntry(const std::string &name);

  int num_entries() const { return static_cast<int>(entries_.size()); }
  const std::map<std::string, EntryValue> &entries() const { return entries_; }
  const std::map<std::string, std::unique_ptr<Metadata>> &sub_metadatas()
      const {
    return sub_metadatas_;
  }

 private:
  // Make this function private to avoid adding undefined data types.
  // Replaces any existing entry with the same name.
  template <typename DataTypeT>
  void AddEntry(const std::string &entry_name, const DataTypeT &entry_value) {
    const auto itr = entries_.find(entry_name);
    if (itr != entries_.end()) {
      entries_.erase(itr);
    }
    entries_.insert(std::make_pair(entry_name, EntryValue(entry_value)));
  }

  // Make this function private to avoid adding undefined data types.
  template <typename DataTypeT>
  bool GetEntry(const std::string &entry_name, DataTypeT *entry_value) const {
    const auto itr = entries_.find(entry_name);
    if (itr == entries_.end()) {
      return false;
    }
    return itr->second.GetValue(entry_value);
  }

  std::map<std::string, EntryValue> entries_;
  std::map<std::string, std::unique_ptr<Metadata>> sub_metadatas_;

  friend struct MetadataHasher;
};
+
+// Functor for computing a hash from data stored within a metadata class.
+struct MetadataHasher {
+ size_t operator()(const Metadata &metadata) const {
+ size_t hash =
+ HashCombine(metadata.entries_.size(), metadata.sub_metadatas_.size());
+ EntryValueHasher entry_value_hasher;
+ for (const auto &entry : metadata.entries_) {
+ hash = HashCombine(entry.first, hash);
+ hash = HashCombine(entry_value_hasher(entry.second), hash);
+ }
+ MetadataHasher metadata_hasher;
+ for (auto &&sub_metadata : metadata.sub_metadatas_) {
+ hash = HashCombine(sub_metadata.first, hash);
+ hash = HashCombine(metadata_hasher(*sub_metadata.second), hash);
+ }
+ return hash;
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_METADATA_METADATA_H_
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.cc b/libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.cc
new file mode 100644
index 0000000..a8e66f8
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.cc
@@ -0,0 +1,148 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/metadata/metadata_decoder.h"
+
+#include <string>
+
+#include "draco/core/varint_decoding.h"
+
+namespace draco {
+
// Creates a decoder with no attached buffer; the public Decode* entry points
// attach one per call.
MetadataDecoder::MetadataDecoder() : buffer_(nullptr) {}
+
+bool MetadataDecoder::DecodeMetadata(DecoderBuffer *in_buffer,
+ Metadata *metadata) {
+ if (!metadata) {
+ return false;
+ }
+ buffer_ = in_buffer;
+ return DecodeMetadata(metadata);
+}
+
// Decodes geometry metadata from |in_buffer|: a varint count of attribute
// metadatas, then for each one its unique attribute id (varint) followed by
// a regular metadata block, and finally the geometry's own metadata block.
// The read order matches MetadataEncoder::EncodeGeometryMetadata().
bool MetadataDecoder::DecodeGeometryMetadata(DecoderBuffer *in_buffer,
                                             GeometryMetadata *metadata) {
  if (!metadata) {
    return false;
  }
  buffer_ = in_buffer;
  uint32_t num_att_metadata = 0;
  if (!DecodeVarint(&num_att_metadata, buffer_)) {
    return false;
  }
  // Decode attribute metadata.
  for (uint32_t i = 0; i < num_att_metadata; ++i) {
    uint32_t att_unique_id;
    if (!DecodeVarint(&att_unique_id, buffer_)) {
      return false;
    }
    std::unique_ptr<AttributeMetadata> att_metadata =
        std::unique_ptr<AttributeMetadata>(new AttributeMetadata());
    att_metadata->set_att_unique_id(att_unique_id);
    if (!DecodeMetadata(static_cast<Metadata *>(att_metadata.get()))) {
      return false;
    }
    metadata->AddAttributeMetadata(std::move(att_metadata));
  }
  return DecodeMetadata(static_cast<Metadata *>(metadata));
}
+
// Decodes one metadata block (its entries followed by its nested
// sub-metadata) into |metadata|. The recursion implied by nesting is
// flattened into an explicit stack so that deeply nested (possibly hostile)
// input cannot overflow the call stack.
bool MetadataDecoder::DecodeMetadata(Metadata *metadata) {
  // parent_metadata == nullptr marks the root block; for nested blocks the
  // decoded sub-metadata object is created here and attached to its parent.
  struct MetadataPair {
    Metadata *parent_metadata;
    Metadata *decoded_metadata;
  };
  std::vector<MetadataPair> metadata_stack;
  metadata_stack.push_back({nullptr, metadata});
  while (!metadata_stack.empty()) {
    const MetadataPair mp = metadata_stack.back();
    metadata_stack.pop_back();
    metadata = mp.decoded_metadata;

    if (mp.parent_metadata != nullptr) {
      // Nested block: read its name, create the object, attach to parent.
      std::string sub_metadata_name;
      if (!DecodeName(&sub_metadata_name)) {
        return false;
      }
      std::unique_ptr<Metadata> sub_metadata =
          std::unique_ptr<Metadata>(new Metadata());
      metadata = sub_metadata.get();
      // AddSubMetadata() returns false on a duplicate name, which rejects
      // malformed input here.
      if (!mp.parent_metadata->AddSubMetadata(sub_metadata_name,
                                              std::move(sub_metadata))) {
        return false;
      }
    }
    if (metadata == nullptr) {
      return false;
    }

    uint32_t num_entries = 0;
    if (!DecodeVarint(&num_entries, buffer_)) {
      return false;
    }
    for (uint32_t i = 0; i < num_entries; ++i) {
      if (!DecodeEntry(metadata)) {
        return false;
      }
    }
    uint32_t num_sub_metadata = 0;
    if (!DecodeVarint(&num_sub_metadata, buffer_)) {
      return false;
    }
    if (num_sub_metadata > buffer_->remaining_size()) {
      // The decoded number of metadata items is unreasonably high.
      return false;
    }
    // Each pushed pair causes one nested block to be decoded from the buffer
    // on a later iteration of the outer loop.
    for (uint32_t i = 0; i < num_sub_metadata; ++i) {
      metadata_stack.push_back({metadata, nullptr});
    }
  }
  return true;
}
+
// Decodes one entry: a name, a varint payload size, and the raw payload
// bytes. A zero payload size is rejected as malformed. The value is always
// stored as a binary blob; the typed Metadata::Get* accessors reinterpret
// the bytes later.
bool MetadataDecoder::DecodeEntry(Metadata *metadata) {
  std::string entry_name;
  if (!DecodeName(&entry_name)) {
    return false;
  }
  uint32_t data_size = 0;
  if (!DecodeVarint(&data_size, buffer_)) {
    return false;
  }
  if (data_size == 0) {
    return false;
  }
  std::vector<uint8_t> entry_value(data_size);
  if (!buffer_->Decode(&entry_value[0], data_size)) {
    return false;
  }
  metadata->AddEntryBinary(entry_name, entry_value);
  return true;
}
+
+bool MetadataDecoder::DecodeName(std::string *name) {
+ uint8_t name_len = 0;
+ if (!buffer_->Decode(&name_len)) {
+ return false;
+ }
+ name->resize(name_len);
+ if (name_len == 0) {
+ return true;
+ }
+ if (!buffer_->Decode(&name->at(0), name_len)) {
+ return false;
+ }
+ return true;
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.h b/libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.h
new file mode 100644
index 0000000..b4c4943
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata_decoder.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_METADATA_METADATA_DECODER_H_
+#define DRACO_METADATA_METADATA_DECODER_H_
+
+#include "draco/core/decoder_buffer.h"
+#include "draco/metadata/geometry_metadata.h"
+#include "draco/metadata/metadata.h"
+
+namespace draco {
+
+// Class for decoding the metadata.
+class MetadataDecoder {
+ public:
+ MetadataDecoder();
+ bool DecodeMetadata(DecoderBuffer *in_buffer, Metadata *metadata);
+ bool DecodeGeometryMetadata(DecoderBuffer *in_buffer,
+ GeometryMetadata *metadata);
+
+ private:
+ bool DecodeMetadata(Metadata *metadata);
+ bool DecodeEntries(Metadata *metadata);
+ bool DecodeEntry(Metadata *metadata);
+ bool DecodeName(std::string *name);
+
+ DecoderBuffer *buffer_;
+};
+} // namespace draco
+
+#endif // DRACO_METADATA_METADATA_DECODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.cc b/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.cc
new file mode 100644
index 0000000..168be83
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.cc
@@ -0,0 +1,97 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/metadata/metadata_encoder.h"
+
+#include "draco/core/varint_encoding.h"
+
+namespace draco {
+
+bool MetadataEncoder::EncodeMetadata(EncoderBuffer *out_buffer,
+ const Metadata *metadata) {
+ const std::map<std::string, EntryValue> &entries = metadata->entries();
+ // Encode number of entries.
+ EncodeVarint(static_cast<uint32_t>(metadata->num_entries()), out_buffer);
+ // Encode all entries.
+ for (const auto &entry : entries) {
+ if (!EncodeString(out_buffer, entry.first)) {
+ return false;
+ }
+ const std::vector<uint8_t> &entry_value = entry.second.data();
+ const uint32_t data_size = static_cast<uint32_t>(entry_value.size());
+ EncodeVarint(data_size, out_buffer);
+ out_buffer->Encode(entry_value.data(), data_size);
+ }
+ const std::map<std::string, std::unique_ptr<Metadata>> &sub_metadatas =
+ metadata->sub_metadatas();
+ // Encode number of sub-metadata
+ EncodeVarint(static_cast<uint32_t>(sub_metadatas.size()), out_buffer);
+ // Encode each sub-metadata
+ for (auto &&sub_metadata_entry : sub_metadatas) {
+ if (!EncodeString(out_buffer, sub_metadata_entry.first)) {
+ return false;
+ }
+ EncodeMetadata(out_buffer, sub_metadata_entry.second.get());
+ }
+
+ return true;
+}
+
+bool MetadataEncoder::EncodeAttributeMetadata(
+ EncoderBuffer *out_buffer, const AttributeMetadata *metadata) {
+ if (!metadata) {
+ return false;
+ }
+ // Encode attribute id.
+ EncodeVarint(metadata->att_unique_id(), out_buffer);
+ EncodeMetadata(out_buffer, static_cast<const Metadata *>(metadata));
+ return true;
+}
+
+bool MetadataEncoder::EncodeGeometryMetadata(EncoderBuffer *out_buffer,
+ const GeometryMetadata *metadata) {
+ if (!metadata) {
+ return false;
+ }
+ // Encode number of attribute metadata.
+ const std::vector<std::unique_ptr<AttributeMetadata>> &att_metadatas =
+ metadata->attribute_metadatas();
+ // TODO(draco-eng): Limit the number of attributes.
+ EncodeVarint(static_cast<uint32_t>(att_metadatas.size()), out_buffer);
+ // Encode each attribute metadata
+ for (auto &&att_metadata : att_metadatas) {
+ EncodeAttributeMetadata(out_buffer, att_metadata.get());
+ }
+ // Encode normal metadata part.
+ EncodeMetadata(out_buffer, static_cast<const Metadata *>(metadata));
+
+ return true;
+}
+
+bool MetadataEncoder::EncodeString(EncoderBuffer *out_buffer,
+ const std::string &str) {
+ // We only support string of maximum length 255 which is using one byte to
+ // encode the length.
+ if (str.size() > 255) {
+ return false;
+ }
+ if (str.empty()) {
+ out_buffer->Encode(static_cast<uint8_t>(0));
+ } else {
+ out_buffer->Encode(static_cast<uint8_t>(str.size()));
+ out_buffer->Encode(str.c_str(), str.size());
+ }
+ return true;
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.h b/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.h
new file mode 100644
index 0000000..5bce5d5
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder.h
@@ -0,0 +1,41 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_METADATA_METADATA_ENCODER_H_
+#define DRACO_METADATA_METADATA_ENCODER_H_
+
+#include "draco/core/encoder_buffer.h"
+#include "draco/metadata/geometry_metadata.h"
+#include "draco/metadata/metadata.h"
+
+namespace draco {
+
// Class for encoding metadata. It could encode either base Metadata class or
// a metadata of a geometry, e.g. a point cloud.
class MetadataEncoder {
 public:
  MetadataEncoder() {}

  // Encodes per-attribute metadata entries followed by the geometry's own
  // metadata into |out_buffer|.
  bool EncodeGeometryMetadata(EncoderBuffer *out_buffer,
                              const GeometryMetadata *metadata);
  // Encodes a single metadata block (entries, then nested sub-metadata).
  bool EncodeMetadata(EncoderBuffer *out_buffer, const Metadata *metadata);

 private:
  // Encodes the attribute's unique id followed by its metadata block.
  bool EncodeAttributeMetadata(EncoderBuffer *out_buffer,
                               const AttributeMetadata *metadata);
  // Encodes |str| as one length byte plus its characters; fails for strings
  // longer than 255 characters.
  bool EncodeString(EncoderBuffer *out_buffer, const std::string &str);
};
+} // namespace draco
+
+#endif // DRACO_METADATA_METADATA_ENCODER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder_test.cc b/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder_test.cc
new file mode 100644
index 0000000..e5f14bf
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata_encoder_test.cc
@@ -0,0 +1,167 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/metadata/metadata_encoder.h"
+
+#include "draco/core/decoder_buffer.h"
+#include "draco/core/draco_test_base.h"
+#include "draco/core/encoder_buffer.h"
+#include "draco/metadata/metadata.h"
+#include "draco/metadata/metadata_decoder.h"
+
+namespace {
+
+class MetadataEncoderTest : public ::testing::Test {
+ protected:
+ MetadataEncoderTest() {}
+
+ void TestEncodingMetadata() {
+ ASSERT_TRUE(encoder.EncodeMetadata(&encoder_buffer, &metadata));
+
+ draco::Metadata decoded_metadata;
+ decoder_buffer.Init(encoder_buffer.data(), encoder_buffer.size());
+ ASSERT_TRUE(decoder.DecodeMetadata(&decoder_buffer, &decoded_metadata));
+ CheckMetadatasAreEqual(metadata, decoded_metadata);
+ }
+
+ void TestEncodingGeometryMetadata() {
+ ASSERT_TRUE(
+ encoder.EncodeGeometryMetadata(&encoder_buffer, &geometry_metadata));
+
+ draco::GeometryMetadata decoded_metadata;
+ decoder_buffer.Init(encoder_buffer.data(), encoder_buffer.size());
+ ASSERT_TRUE(
+ decoder.DecodeGeometryMetadata(&decoder_buffer, &decoded_metadata));
+ CheckGeometryMetadatasAreEqual(geometry_metadata, decoded_metadata);
+ }
+
+ void CheckBlobOfDataAreEqual(const std::vector<uint8_t> &data0,
+ const std::vector<uint8_t> &data1) {
+ ASSERT_EQ(data0.size(), data1.size());
+ for (int i = 0; i < data0.size(); ++i) {
+ ASSERT_EQ(data0[i], data1[i]);
+ }
+ }
+
+ void CheckGeometryMetadatasAreEqual(
+ const draco::GeometryMetadata &metadata0,
+ const draco::GeometryMetadata &metadata1) {
+ ASSERT_EQ(metadata0.attribute_metadatas().size(),
+ metadata1.attribute_metadatas().size());
+ const std::vector<std::unique_ptr<draco::AttributeMetadata>>
+ &att_metadatas0 = metadata0.attribute_metadatas();
+ const std::vector<std::unique_ptr<draco::AttributeMetadata>>
+ &att_metadatas1 = metadata1.attribute_metadatas();
+ // Compare each attribute metadata.
+ for (int i = 0; i < metadata0.attribute_metadatas().size(); ++i) {
+ CheckMetadatasAreEqual(
+ static_cast<const draco::Metadata &>(*att_metadatas0[i]),
+ static_cast<const draco::Metadata &>(*att_metadatas1[i]));
+ }
+ // Compare entries and sub metadata.
+ CheckMetadatasAreEqual(static_cast<const draco::Metadata &>(metadata0),
+ static_cast<const draco::Metadata &>(metadata1));
+ }
+
+ void CheckMetadatasAreEqual(const draco::Metadata &metadata0,
+ const draco::Metadata &metadata1) {
+ ASSERT_EQ(metadata0.num_entries(), metadata1.num_entries());
+ const std::map<std::string, draco::EntryValue> &entries0 =
+ metadata0.entries();
+ const std::map<std::string, draco::EntryValue> &entries1 =
+ metadata1.entries();
+ for (const auto &entry : entries0) {
+ const std::string &entry_name = entry.first;
+ const std::vector<uint8_t> &data0 = entry.second.data();
+ const auto entry1_ptr = entries1.find(entry_name);
+ ASSERT_NE(entry1_ptr, entries1.end());
+ const std::vector<uint8_t> &data1 = entry1_ptr->second.data();
+ CheckBlobOfDataAreEqual(data0, data1);
+ }
+ // Check nested metadata.
+ ASSERT_EQ(metadata0.sub_metadatas().size(),
+ metadata1.sub_metadatas().size());
+ const std::map<std::string, std::unique_ptr<draco::Metadata>>
+ &sub_metadatas0 = metadata0.sub_metadatas();
+ // Encode each sub-metadata
+ for (auto &&sub_metadata_entry0 : sub_metadatas0) {
+ const auto sub_metadata_ptr1 =
+ metadata1.GetSubMetadata(sub_metadata_entry0.first);
+ ASSERT_NE(sub_metadata_ptr1, nullptr);
+ CheckMetadatasAreEqual(*sub_metadata_entry0.second, *sub_metadata_ptr1);
+ }
+ }
+
+ draco::MetadataEncoder encoder;
+ draco::MetadataDecoder decoder;
+ draco::EncoderBuffer encoder_buffer;
+ draco::DecoderBuffer decoder_buffer;
+ draco::Metadata metadata;
+ draco::GeometryMetadata geometry_metadata;
+};
+
// Round-trips a metadata object holding a single int entry.
TEST_F(MetadataEncoderTest, TestSingleEntry) {
  metadata.AddEntryInt("int", 100);
  ASSERT_EQ(metadata.num_entries(), 1);

  TestEncodingMetadata();
}

// Round-trips int, double and string entries together.
TEST_F(MetadataEncoderTest, TestMultipleEntries) {
  metadata.AddEntryInt("int", 100);
  metadata.AddEntryDouble("double", 1.234);
  const std::string entry_value = "test string entry";
  metadata.AddEntryString("string", entry_value);
  ASSERT_EQ(metadata.num_entries(), 3);

  TestEncodingMetadata();
}

// Round-trips array-valued (int and double) entries.
TEST_F(MetadataEncoderTest, TestEncodingArrayEntries) {
  std::vector<int32_t> int_array({1, 2, 3});
  metadata.AddEntryIntArray("int_array", int_array);
  std::vector<double> double_array({0.1, 0.2, 0.3});
  metadata.AddEntryDoubleArray("double_array", double_array);
  ASSERT_EQ(metadata.num_entries(), 2);

  TestEncodingMetadata();
}

// Round-trips a raw binary blob entry.
TEST_F(MetadataEncoderTest, TestEncodingBinaryEntry) {
  const std::vector<uint8_t> binarydata({0x1, 0x2, 0x3, 0x4});
  metadata.AddEntryBinary("binary_data", binarydata);

  TestEncodingMetadata();
}

// Round-trips metadata that contains a nested sub-metadata.
TEST_F(MetadataEncoderTest, TestEncodingNestedMetadata) {
  metadata.AddEntryDouble("double", 1.234);
  std::unique_ptr<draco::Metadata> sub_metadata =
      std::unique_ptr<draco::Metadata>(new draco::Metadata());
  sub_metadata->AddEntryInt("int", 100);
  metadata.AddSubMetadata("sub0", std::move(sub_metadata));

  TestEncodingMetadata();
}

// Round-trips geometry metadata containing one attribute metadata.
TEST_F(MetadataEncoderTest, TestEncodingGeometryMetadata) {
  std::unique_ptr<draco::AttributeMetadata> att_metadata =
      std::unique_ptr<draco::AttributeMetadata>(new draco::AttributeMetadata);
  att_metadata->AddEntryInt("int", 100);
  att_metadata->AddEntryString("name", "pos");
  ASSERT_TRUE(geometry_metadata.AddAttributeMetadata(std::move(att_metadata)));

  TestEncodingGeometryMetadata();
}
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/metadata/metadata_test.cc b/libs/assimp/contrib/draco/src/draco/metadata/metadata_test.cc
new file mode 100644
index 0000000..cf7ae6e
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/metadata/metadata_test.cc
@@ -0,0 +1,157 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/metadata/metadata.h"
+
+#include <memory>
+#include <string>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/metadata/geometry_metadata.h"
+
+namespace {
+
+class MetadataTest : public ::testing::Test {
+ protected:
+  MetadataTest() {}
+
+  // Fresh (empty) instances constructed per test case by the gtest fixture.
+  draco::Metadata metadata;
+  draco::GeometryMetadata geometry_metadata;
+};
+
+TEST_F(MetadataTest, TestRemoveEntry) {
+  // After RemoveEntry(), lookup of the entry must fail.
+  metadata.AddEntryInt("int", 100);
+  metadata.RemoveEntry("int");
+  int32_t int_value = 0;
+  ASSERT_FALSE(metadata.GetEntryInt("int", &int_value));
+}
+
+TEST_F(MetadataTest, TestSingleEntry) {
+  // Add/get round-trip for scalar int and double entries.
+  metadata.AddEntryInt("int", 100);
+  int32_t int_value = 0;
+  ASSERT_TRUE(metadata.GetEntryInt("int", &int_value));
+  ASSERT_EQ(int_value, 100);
+
+  metadata.AddEntryDouble("double", 1.234);
+  double double_value = 0.0;
+  ASSERT_TRUE(metadata.GetEntryDouble("double", &double_value));
+  // Exact equality is valid here: 1.234 round-trips through the same double.
+  ASSERT_EQ(double_value, 1.234);
+}
+
+TEST_F(MetadataTest, TestWriteOverEntry) {
+  // Adding an entry under an existing name overwrites the previous value.
+  metadata.AddEntryInt("int", 100);
+  metadata.AddEntryInt("int", 200);
+  int32_t int_value = 0;
+  ASSERT_TRUE(metadata.GetEntryInt("int", &int_value));
+  ASSERT_EQ(int_value, 200);
+}
+
+TEST_F(MetadataTest, TestArrayEntry) {
+  // Add/get round-trip for int and double array entries, element by element.
+  std::vector<int32_t> int_array({1, 2, 3});
+  metadata.AddEntryIntArray("int_array", int_array);
+  std::vector<int32_t> return_int_array;
+  ASSERT_TRUE(metadata.GetEntryIntArray("int_array", &return_int_array));
+  ASSERT_EQ(return_int_array.size(), 3);
+  ASSERT_EQ(return_int_array[0], 1);
+  ASSERT_EQ(return_int_array[1], 2);
+  ASSERT_EQ(return_int_array[2], 3);
+
+  std::vector<double> double_array({0.1, 0.2, 0.3});
+  metadata.AddEntryDoubleArray("double_array", double_array);
+  std::vector<double> return_double_array;
+  ASSERT_TRUE(
+      metadata.GetEntryDoubleArray("double_array", &return_double_array));
+  ASSERT_EQ(return_double_array.size(), 3);
+  // Exact double comparison is fine: values round-trip unmodified.
+  ASSERT_EQ(return_double_array[0], 0.1);
+  ASSERT_EQ(return_double_array[1], 0.2);
+  ASSERT_EQ(return_double_array[2], 0.3);
+}
+
+TEST_F(MetadataTest, TestStringEntry) {
+  // Add/get round-trip for a string entry.
+  const std::string entry_value = "test string entry";
+  metadata.AddEntryString("string", entry_value);
+  std::string return_value;
+  ASSERT_TRUE(metadata.GetEntryString("string", &return_value));
+  ASSERT_EQ(entry_value.size(), return_value.size());
+  ASSERT_EQ(entry_value, return_value);
+}
+
+TEST_F(MetadataTest, TestBinaryEntry) {
+  // Add/get round-trip for a raw byte-buffer entry.
+  const std::vector<uint8_t> binarydata({0x1, 0x2, 0x3, 0x4});
+  metadata.AddEntryBinary("binary_data", binarydata);
+  std::vector<uint8_t> return_binarydata;
+  ASSERT_TRUE(metadata.GetEntryBinary("binary_data", &return_binarydata));
+  ASSERT_EQ(binarydata.size(), return_binarydata.size());
+  // NOTE(review): signed `int i` vs unsigned size() — harmless for 4 elements
+  // but `size_t` would avoid the sign-compare warning.
+  for (int i = 0; i < binarydata.size(); ++i) {
+    ASSERT_EQ(binarydata[i], return_binarydata[i]);
+  }
+}
+
+TEST_F(MetadataTest, TestNestedMetadata) {
+  // A sub-metadata object added under "sub0" is retrievable and keeps its
+  // entries.
+  std::unique_ptr<draco::Metadata> sub_metadata =
+      std::unique_ptr<draco::Metadata>(new draco::Metadata());
+  sub_metadata->AddEntryInt("int", 100);
+
+  metadata.AddSubMetadata("sub0", std::move(sub_metadata));
+  const auto sub_metadata_ptr = metadata.GetSubMetadata("sub0");
+  ASSERT_NE(sub_metadata_ptr, nullptr);
+
+  int32_t int_value = 0;
+  ASSERT_TRUE(sub_metadata_ptr->GetEntryInt("int", &int_value));
+  ASSERT_EQ(int_value, 100);
+}
+
+TEST_F(MetadataTest, TestHardCopyMetadata) {
+  // Copy-constructing Metadata must deep-copy entries and sub-metadata.
+  metadata.AddEntryInt("int", 100);
+  std::unique_ptr<draco::Metadata> sub_metadata =
+      std::unique_ptr<draco::Metadata>(new draco::Metadata());
+  sub_metadata->AddEntryInt("int", 200);
+  metadata.AddSubMetadata("sub0", std::move(sub_metadata));
+
+  draco::Metadata copied_metadata(metadata);
+
+  int32_t int_value = 0;
+  ASSERT_TRUE(copied_metadata.GetEntryInt("int", &int_value));
+  ASSERT_EQ(int_value, 100);
+
+  const auto sub_metadata_ptr = copied_metadata.GetSubMetadata("sub0");
+  ASSERT_NE(sub_metadata_ptr, nullptr);
+
+  int32_t sub_int_value = 0;
+  ASSERT_TRUE(sub_metadata_ptr->GetEntryInt("int", &sub_int_value));
+  ASSERT_EQ(sub_int_value, 200);
+}
+
+TEST_F(MetadataTest, TestGeometryMetadata) {
+  // GeometryMetadata: rejects nullptr, and supports lookup of attribute
+  // metadata by unique id and by string entry ("name" == "pos").
+  std::unique_ptr<draco::AttributeMetadata> att_metadata =
+      std::unique_ptr<draco::AttributeMetadata>(new draco::AttributeMetadata());
+  att_metadata->set_att_unique_id(10);
+  att_metadata->AddEntryInt("int", 100);
+  att_metadata->AddEntryString("name", "pos");
+
+  ASSERT_FALSE(geometry_metadata.AddAttributeMetadata(nullptr));
+  ASSERT_TRUE(geometry_metadata.AddAttributeMetadata(std::move(att_metadata)));
+
+  ASSERT_NE(geometry_metadata.GetAttributeMetadataByUniqueId(10), nullptr);
+  ASSERT_EQ(geometry_metadata.GetAttributeMetadataByUniqueId(1), nullptr);
+
+  const draco::AttributeMetadata *requested_att_metadata =
+      geometry_metadata.GetAttributeMetadataByStringEntry("name", "pos");
+  ASSERT_NE(requested_att_metadata, nullptr);
+  ASSERT_EQ(
+      geometry_metadata.GetAttributeMetadataByStringEntry("name", "not_exists"),
+      nullptr);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.cc b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.cc
new file mode 100644
index 0000000..a9f9ea2
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.cc
@@ -0,0 +1,275 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/point_cloud/point_cloud.h"
+
+#include <algorithm>
+#include <unordered_map>
+
+namespace draco {
+
+// Constructs an empty point cloud with zero points and no attributes.
+PointCloud::PointCloud() : num_points_(0) {}
+
+int32_t PointCloud::NumNamedAttributes(GeometryAttribute::Type type) const {
+  // Guard against out-of-range types before indexing named_attribute_index_.
+  if (type == GeometryAttribute::INVALID ||
+      type >= GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {
+    return 0;
+  }
+  return static_cast<int32_t>(named_attribute_index_[type].size());
+}
+
+// Convenience overload: id of the first named attribute of |type|.
+int32_t PointCloud::GetNamedAttributeId(GeometryAttribute::Type type) const {
+  return GetNamedAttributeId(type, 0);
+}
+
+// Returns the id of the i-th named attribute of |type|, or -1 when there are
+// fewer than i+1 such attributes (this also covers invalid |type| because
+// NumNamedAttributes() returns 0 for it).
+int32_t PointCloud::GetNamedAttributeId(GeometryAttribute::Type type,
+                                        int i) const {
+  if (NumNamedAttributes(type) <= i) {
+    return -1;
+  }
+  return named_attribute_index_[type][i];
+}
+
+// Convenience overload: first named attribute of |type|, or nullptr.
+const PointAttribute *PointCloud::GetNamedAttribute(
+    GeometryAttribute::Type type) const {
+  return GetNamedAttribute(type, 0);
+}
+
+// Returns the i-th named attribute of |type|, or nullptr when it does not
+// exist (invalid type / index out of range).
+const PointAttribute *PointCloud::GetNamedAttribute(
+    GeometryAttribute::Type type, int i) const {
+  const int32_t att_id = GetNamedAttributeId(type, i);
+  if (att_id == -1) {
+    return nullptr;
+  }
+  return attributes_[att_id].get();
+}
+
+// Linear scan over the named attributes of |type| for one whose unique id
+// matches; returns nullptr when not found.
+// NOTE(review): unlike NumNamedAttributes(), |type| is indexed here without a
+// range check — callers presumably pass a valid named type; confirm upstream.
+const PointAttribute *PointCloud::GetNamedAttributeByUniqueId(
+    GeometryAttribute::Type type, uint32_t unique_id) const {
+  for (size_t att_id = 0; att_id < named_attribute_index_[type].size();
+       ++att_id) {
+    if (attributes_[named_attribute_index_[type][att_id]]->unique_id() ==
+        unique_id) {
+      return attributes_[named_attribute_index_[type][att_id]].get();
+    }
+  }
+  return nullptr;
+}
+
+// Returns the attribute with the given unique id, or nullptr when absent.
+const PointAttribute *PointCloud::GetAttributeByUniqueId(
+    uint32_t unique_id) const {
+  const int32_t att_id = GetAttributeIdByUniqueId(unique_id);
+  if (att_id == -1) {
+    return nullptr;
+  }
+  return attributes_[att_id].get();
+}
+
+// Linear scan over all attributes; returns the index of the attribute whose
+// unique id matches, or -1 when none does.
+int32_t PointCloud::GetAttributeIdByUniqueId(uint32_t unique_id) const {
+  for (size_t att_id = 0; att_id < attributes_.size(); ++att_id) {
+    if (attributes_[att_id]->unique_id() == unique_id) {
+      return static_cast<int32_t>(att_id);
+    }
+  }
+  return -1;
+}
+
+// Appends |pa| at the next free id via SetAttribute() and returns that id.
+int PointCloud::AddAttribute(std::unique_ptr<PointAttribute> pa) {
+  SetAttribute(static_cast<int>(attributes_.size()), std::move(pa));
+  return static_cast<int>(attributes_.size() - 1);
+}
+
+// Creates a PointAttribute from |att| (see CreateAttribute) and adds it.
+// Returns the new attribute id, or -1 when |att| is invalid.
+int PointCloud::AddAttribute(
+    const GeometryAttribute &att, bool identity_mapping,
+    AttributeValueIndex::ValueType num_attribute_values) {
+  auto pa = CreateAttribute(att, identity_mapping, num_attribute_values);
+  if (!pa) {
+    return -1;
+  }
+  const int32_t att_id = AddAttribute(std::move(pa));
+  return att_id;
+}
+
+// Builds a PointAttribute from |att| without adding it to the cloud.
+// Returns nullptr for an INVALID attribute type. With identity mapping the
+// value count is grown to at least num_points_ so every point has a value.
+std::unique_ptr<PointAttribute> PointCloud::CreateAttribute(
+    const GeometryAttribute &att, bool identity_mapping,
+    AttributeValueIndex::ValueType num_attribute_values) const {
+  if (att.attribute_type() == GeometryAttribute::INVALID) {
+    return nullptr;
+  }
+  std::unique_ptr<PointAttribute> pa =
+      std::unique_ptr<PointAttribute>(new PointAttribute(att));
+  // Initialize point cloud specific attribute data.
+  if (!identity_mapping) {
+    // First create mapping between indices.
+    pa->SetExplicitMapping(num_points_);
+  } else {
+    pa->SetIdentityMapping();
+    num_attribute_values = std::max(num_points_, num_attribute_values);
+  }
+  if (num_attribute_values > 0) {
+    pa->Reset(num_attribute_values);
+  }
+  return pa;
+}
+
+// Installs |pa| at slot |att_id|, growing the attribute array if needed.
+// The attribute's unique id is overwritten with |att_id|, and named
+// attributes are registered in the per-type index.
+void PointCloud::SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa) {
+  DRACO_DCHECK(att_id >= 0);
+  if (static_cast<int>(attributes_.size()) <= att_id) {
+    attributes_.resize(att_id + 1);
+  }
+  if (pa->attribute_type() < GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {
+    named_attribute_index_[pa->attribute_type()].push_back(att_id);
+  }
+  pa->set_unique_id(att_id);
+  attributes_[att_id] = std::move(pa);
+}
+
+// Removes the attribute at |att_id| along with its metadata and named-index
+// entry, then shifts the named-index ids of all subsequent attributes down
+// by one (attribute ids are positional).
+void PointCloud::DeleteAttribute(int att_id) {
+  // NOTE(review): signed |att_id| compared against unsigned size() — correct
+  // here because att_id >= 0 is checked first, but triggers -Wsign-compare.
+  if (att_id < 0 || att_id >= attributes_.size()) {
+    return;  // Attribute does not exist.
+  }
+  const GeometryAttribute::Type att_type =
+      attributes_[att_id]->attribute_type();
+  const uint32_t unique_id = attribute(att_id)->unique_id();
+  attributes_.erase(attributes_.begin() + att_id);
+  // Remove metadata if applicable.
+  if (metadata_) {
+    metadata_->DeleteAttributeMetadataByUniqueId(unique_id);
+  }
+
+  // Remove the attribute from the named attribute list if applicable.
+  if (att_type < GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {
+    const auto it = std::find(named_attribute_index_[att_type].begin(),
+                              named_attribute_index_[att_type].end(), att_id);
+    if (it != named_attribute_index_[att_type].end()) {
+      named_attribute_index_[att_type].erase(it);
+    }
+  }
+
+  // Update ids of all subsequent named attributes (decrease them by one).
+  for (int i = 0; i < GeometryAttribute::NAMED_ATTRIBUTES_COUNT; ++i) {
+    for (int j = 0; j < named_attribute_index_[i].size(); ++j) {
+      if (named_attribute_index_[i][j] > att_id) {
+        named_attribute_index_[i][j]--;
+      }
+    }
+  }
+}
+
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+// Merges points whose attribute-value mappings are identical across all
+// attributes. Builds an old->new point-id map, then rewrites the mappings via
+// ApplyPointIdDeduplication() and shrinks the point count.
+void PointCloud::DeduplicatePointIds() {
+  // Hashing function for a single vertex.
+  auto point_hash = [this](PointIndex p) {
+    PointIndex::ValueType hash = 0;
+    for (int32_t i = 0; i < this->num_attributes(); ++i) {
+      const AttributeValueIndex att_id = attribute(i)->mapped_index(p);
+      hash = static_cast<uint32_t>(HashCombine(att_id.value(), hash));
+    }
+    return hash;
+  };
+  // Comparison function between two vertices.
+  auto point_compare = [this](PointIndex p0, PointIndex p1) {
+    for (int32_t i = 0; i < this->num_attributes(); ++i) {
+      const AttributeValueIndex att_id0 = attribute(i)->mapped_index(p0);
+      const AttributeValueIndex att_id1 = attribute(i)->mapped_index(p1);
+      if (att_id0 != att_id1) {
+        return false;
+      }
+    }
+    return true;
+  };
+
+  // Map from a point to the new id of its first-seen duplicate; uses the
+  // custom hash/equality above so two points compare equal iff all their
+  // attribute mappings agree.
+  std::unordered_map<PointIndex, PointIndex, decltype(point_hash),
+                     decltype(point_compare)>
+      unique_point_map(num_points_, point_hash, point_compare);
+  int32_t num_unique_points = 0;
+  IndexTypeVector<PointIndex, PointIndex> index_map(num_points_);
+  std::vector<PointIndex> unique_points;
+  // Go through all vertices and find their duplicates.
+  for (PointIndex i(0); i < num_points_; ++i) {
+    const auto it = unique_point_map.find(i);
+    if (it != unique_point_map.end()) {
+      index_map[i] = it->second;
+    } else {
+      unique_point_map.insert(std::make_pair(i, PointIndex(num_unique_points)));
+      index_map[i] = num_unique_points++;
+      unique_points.push_back(i);
+    }
+  }
+  if (num_unique_points == num_points_) {
+    return;  // All vertices are already unique.
+  }
+
+  ApplyPointIdDeduplication(index_map, unique_points);
+  set_num_points(num_unique_points);
+}
+
+// Compacts per-attribute point-to-value mappings according to |id_map|
+// (old point id -> new point id). |unique_point_ids| lists the first
+// occurrence of each surviving point, in increasing new-id order, so each
+// new slot is written exactly once before the mappings are resized.
+void PointCloud::ApplyPointIdDeduplication(
+    const IndexTypeVector<PointIndex, PointIndex> &id_map,
+    const std::vector<PointIndex> &unique_point_ids) {
+  int32_t num_unique_points = 0;
+  for (PointIndex i : unique_point_ids) {
+    const PointIndex new_point_id = id_map[i];
+    if (new_point_id >= num_unique_points) {
+      // New unique vertex reached. Copy attribute indices to the proper
+      // position.
+      for (int32_t a = 0; a < num_attributes(); ++a) {
+        attribute(a)->SetPointMapEntry(new_point_id,
+                                       attribute(a)->mapped_index(i));
+      }
+      num_unique_points = new_point_id.value() + 1;
+    }
+  }
+  for (int32_t a = 0; a < num_attributes(); ++a) {
+    attribute(a)->SetExplicitMapping(num_unique_points);
+  }
+}
+#endif
+
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+// Deduplicates the stored values of every attribute in place. Returns false
+// as soon as any attribute fails to deduplicate, true otherwise (including
+// the trivial empty-cloud case).
+bool PointCloud::DeduplicateAttributeValues() {
+  // Go over all attributes and create mapping between duplicate entries.
+  if (num_points() == 0) {
+    return true;  // Nothing to deduplicate.
+  }
+  // Deduplicate all attributes.
+  for (int32_t att_id = 0; att_id < num_attributes(); ++att_id) {
+    if (!attribute(att_id)->DeduplicateValues(*attribute(att_id))) {
+      return false;
+    }
+  }
+  return true;
+}
+#endif
+
+// TODO(xiaoxumeng): Consider to cash the BBox.
+BoundingBox PointCloud::ComputeBoundingBox() const {
+ BoundingBox bounding_box;
+ auto pc_att = GetNamedAttribute(GeometryAttribute::POSITION);
+ // TODO(xiaoxumeng): Make the BoundingBox a template type, it may not be easy
+ // because PointCloud is not a template.
+ // Or simply add some preconditioning here to make sure the position attribute
+ // is valid, because the current code works only if the position attribute is
+ // defined with 3 components of DT_FLOAT32.
+ // Consider using pc_att->ConvertValue<float, 3>(i, &p[0]) (Enforced
+ // transformation from Vector with any dimension to Vector3f)
+ Vector3f p;
+ for (AttributeValueIndex i(0); i < static_cast<uint32_t>(pc_att->size());
+ ++i) {
+ pc_att->GetValue(i, &p[0]);
+ bounding_box.Update(p);
+ }
+ return bounding_box;
+}
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.h b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.h
new file mode 100644
index 0000000..d11bd47
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud.h
@@ -0,0 +1,244 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_POINT_CLOUD_POINT_CLOUD_H_
+#define DRACO_POINT_CLOUD_POINT_CLOUD_H_
+
+#include "draco/attributes/point_attribute.h"
+#include "draco/core/bounding_box.h"
+#include "draco/core/vector_d.h"
+#include "draco/draco_features.h"
+#include "draco/metadata/geometry_metadata.h"
+
+namespace draco {
+
+// PointCloud is a collection of n-dimensional points that are described by a
+// set of PointAttributes that can represent data such as positions or colors
+// of individual points (see point_attribute.h).
+class PointCloud {
+ public:
+  PointCloud();
+  virtual ~PointCloud() = default;
+
+  // Returns the number of named attributes of a given type.
+  int32_t NumNamedAttributes(GeometryAttribute::Type type) const;
+
+  // Returns attribute id of the first named attribute with a given type or -1
+  // when the attribute is not used by the point cloud.
+  int32_t GetNamedAttributeId(GeometryAttribute::Type type) const;
+
+  // Returns the id of the i-th named attribute of a given type.
+  int32_t GetNamedAttributeId(GeometryAttribute::Type type, int i) const;
+
+  // Returns the first named attribute of a given type or nullptr if the
+  // attribute is not used by the point cloud.
+  const PointAttribute *GetNamedAttribute(GeometryAttribute::Type type) const;
+
+  // Returns the i-th named attribute of a given type.
+  const PointAttribute *GetNamedAttribute(GeometryAttribute::Type type,
+                                          int i) const;
+
+  // Returns the named attribute of a given unique id.
+  const PointAttribute *GetNamedAttributeByUniqueId(
+      GeometryAttribute::Type type, uint32_t id) const;
+
+  // Returns the attribute of a given unique id.
+  const PointAttribute *GetAttributeByUniqueId(uint32_t id) const;
+  // Returns the attribute id for a given unique id, or -1 when not found.
+  int32_t GetAttributeIdByUniqueId(uint32_t unique_id) const;
+
+  // Returns the total number of attributes (named and generic).
+  int32_t num_attributes() const {
+    return static_cast<int32_t>(attributes_.size());
+  }
+  // Read-only access to the attribute stored at |att_id|.
+  const PointAttribute *attribute(int32_t att_id) const {
+    DRACO_DCHECK_LE(0, att_id);
+    DRACO_DCHECK_LT(att_id, static_cast<int32_t>(attributes_.size()));
+    return attributes_[att_id].get();
+  }
+
+  // Returned attribute can be modified, but it's caller's responsibility to
+  // maintain the attribute's consistency with draco::PointCloud.
+  PointAttribute *attribute(int32_t att_id) {
+    DRACO_DCHECK_LE(0, att_id);
+    DRACO_DCHECK_LT(att_id, static_cast<int32_t>(attributes_.size()));
+    return attributes_[att_id].get();
+  }
+
+  // Adds a new attribute to the point cloud.
+  // Returns the attribute id.
+  int AddAttribute(std::unique_ptr<PointAttribute> pa);
+
+  // Creates and adds a new attribute to the point cloud. The attribute has
+  // properties derived from the provided GeometryAttribute |att|.
+  // If |identity_mapping| is set to true, the attribute will use identity
+  // mapping between point indices and attribute value indices (i.e., each
+  // point has a unique attribute value). If |identity_mapping| is false, the
+  // mapping between point indices and attribute value indices is set to
+  // explicit, and it needs to be initialized manually using the
+  // PointAttribute::SetPointMapEntry() method. |num_attribute_values| can be
+  // used to specify the number of attribute values that are going to be
+  // stored in the newly created attribute. Returns attribute id of the newly
+  // created attribute or -1 in case of failure.
+  int AddAttribute(const GeometryAttribute &att, bool identity_mapping,
+                   AttributeValueIndex::ValueType num_attribute_values);
+
+  // Creates and returns a new attribute or nullptr in case of failure. This
+  // method is similar to AddAttribute(), except that it returns the new
+  // attribute instead of adding it to the point cloud.
+  std::unique_ptr<PointAttribute> CreateAttribute(
+      const GeometryAttribute &att, bool identity_mapping,
+      AttributeValueIndex::ValueType num_attribute_values) const;
+
+  // Assigns an attribute id to a given PointAttribute. If an attribute with
+  // the same attribute id already exists, it is deleted.
+  virtual void SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa);
+
+  // Deletes an attribute with specified attribute id. Note that this changes
+  // attribute ids of all subsequent attributes.
+  virtual void DeleteAttribute(int att_id);
+
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+  // Deduplicates all attribute values (all attribute entries with the same
+  // value are merged into a single entry).
+  virtual bool DeduplicateAttributeValues();
+#endif
+
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+  // Removes duplicate point ids (two point ids are duplicate when all of their
+  // attributes are mapped to the same entry ids).
+  virtual void DeduplicatePointIds();
+#endif
+
+  // Get bounding box.
+  BoundingBox ComputeBoundingBox() const;
+
+  // Add metadata. Replaces any previously stored geometry metadata.
+  void AddMetadata(std::unique_ptr<GeometryMetadata> metadata) {
+    metadata_ = std::move(metadata);
+  }
+
+  // Add metadata for an attribute. Lazily creates the geometry metadata
+  // container and tags |metadata| with the attribute's unique id.
+  void AddAttributeMetadata(int32_t att_id,
+                            std::unique_ptr<AttributeMetadata> metadata) {
+    if (!metadata_) {
+      metadata_ = std::unique_ptr<GeometryMetadata>(new GeometryMetadata());
+    }
+    const int32_t att_unique_id = attribute(att_id)->unique_id();
+    metadata->set_att_unique_id(att_unique_id);
+    metadata_->AddAttributeMetadata(std::move(metadata));
+  }
+
+  // Returns metadata for the attribute at |att_id|, or nullptr when the cloud
+  // has no metadata (or none for that attribute).
+  const AttributeMetadata *GetAttributeMetadataByAttributeId(
+      int32_t att_id) const {
+    if (metadata_ == nullptr) {
+      return nullptr;
+    }
+    const uint32_t unique_id = attribute(att_id)->unique_id();
+    return metadata_->GetAttributeMetadataByUniqueId(unique_id);
+  }
+
+  // Returns the attribute metadata that has the requested metadata entry.
+  const AttributeMetadata *GetAttributeMetadataByStringEntry(
+      const std::string &name, const std::string &value) const {
+    if (metadata_ == nullptr) {
+      return nullptr;
+    }
+    return metadata_->GetAttributeMetadataByStringEntry(name, value);
+  }
+
+  // Returns the first attribute that has the requested metadata entry.
+  int GetAttributeIdByMetadataEntry(const std::string &name,
+                                    const std::string &value) const {
+    if (metadata_ == nullptr) {
+      return -1;
+    }
+    const AttributeMetadata *att_metadata =
+        metadata_->GetAttributeMetadataByStringEntry(name, value);
+    if (!att_metadata) {
+      return -1;
+    }
+    return GetAttributeIdByUniqueId(att_metadata->att_unique_id());
+  }
+
+  // Get a const pointer of the metadata of the point cloud.
+  const GeometryMetadata *GetMetadata() const { return metadata_.get(); }
+
+  // Get a pointer to the metadata of the point cloud.
+  GeometryMetadata *metadata() { return metadata_.get(); }
+
+  // Returns the number of n-dimensional points stored within the point cloud.
+  PointIndex::ValueType num_points() const { return num_points_; }
+
+  // Sets the number of points. It's the caller's responsibility to ensure the
+  // new number is valid with respect to the PointAttributes stored in the point
+  // cloud.
+  void set_num_points(PointIndex::ValueType num) { num_points_ = num; }
+
+ protected:
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+  // Applies id mapping of deduplicated points (called by DeduplicatePointIds).
+  virtual void ApplyPointIdDeduplication(
+      const IndexTypeVector<PointIndex, PointIndex> &id_map,
+      const std::vector<PointIndex> &unique_point_ids);
+#endif
+
+ private:
+  // Metadata for the point cloud.
+  std::unique_ptr<GeometryMetadata> metadata_;
+
+  // Attributes describing the point cloud.
+  std::vector<std::unique_ptr<PointAttribute>> attributes_;
+
+  // Ids of named attributes of the given type.
+  std::vector<int32_t>
+      named_attribute_index_[GeometryAttribute::NAMED_ATTRIBUTES_COUNT];
+
+  // The number of n-dimensional points. All point attribute values are stored
+  // in corresponding PointAttribute instances in the |attributes_| array.
+  PointIndex::ValueType num_points_;
+
+  // Grants PointCloudHasher access to the private state it hashes.
+  friend struct PointCloudHasher;
+};
+
+// Functor for computing a hash from data stored within a point cloud.
+// Note that this can be quite slow. Two point clouds will have the same hash
+// only when all points have the same order and when all attribute values are
+// exactly the same.
+struct PointCloudHasher {
+  // Folds point count, named-attribute index tables, every attribute's hash
+  // and (if present) the metadata hash into one value; the result therefore
+  // depends on attribute order, not just content.
+  size_t operator()(const PointCloud &pc) const {
+    size_t hash = pc.num_points_;
+    hash = HashCombine(pc.attributes_.size(), hash);
+    for (int i = 0; i < GeometryAttribute::NAMED_ATTRIBUTES_COUNT; ++i) {
+      hash = HashCombine(pc.named_attribute_index_[i].size(), hash);
+      for (int j = 0; j < static_cast<int>(pc.named_attribute_index_[i].size());
+           ++j) {
+        hash = HashCombine(pc.named_attribute_index_[i][j], hash);
+      }
+    }
+    // Hash attributes.
+    for (int i = 0; i < static_cast<int>(pc.attributes_.size()); ++i) {
+      PointAttributeHasher att_hasher;
+      hash = HashCombine(att_hasher(*pc.attributes_[i]), hash);
+    }
+    // Hash metadata.
+    GeometryMetadataHasher metadata_hasher;
+    if (pc.metadata_) {
+      hash = HashCombine(metadata_hasher(*pc.metadata_), hash);
+    }
+    return hash;
+  }
+};
+
+} // namespace draco
+
+#endif // DRACO_POINT_CLOUD_POINT_CLOUD_H_
diff --git a/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.cc b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.cc
new file mode 100644
index 0000000..431ae50
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/point_cloud/point_cloud_builder.h"
+
+namespace draco {
+
+// Default constructor; the builder is unusable until Start() is called.
+PointCloudBuilder::PointCloudBuilder() {}
+
+// Begins a new build: discards any previous point cloud and allocates a
+// fresh one with |num_points| points.
+void PointCloudBuilder::Start(PointIndex::ValueType num_points) {
+  point_cloud_ = std::unique_ptr<PointCloud>(new PointCloud());
+  point_cloud_->set_num_points(num_points);
+}
+
+// Adds a tightly-packed attribute with identity point-to-value mapping and
+// one value slot per point. Returns the new attribute id (-1 on failure).
+int PointCloudBuilder::AddAttribute(GeometryAttribute::Type attribute_type,
+                                    int8_t num_components, DataType data_type) {
+  GeometryAttribute ga;
+  ga.Init(attribute_type, nullptr, num_components, data_type, false,
+          DataTypeLength(data_type) * num_components, 0);
+  return point_cloud_->AddAttribute(ga, true, point_cloud_->num_points());
+}
+
+// Writes one attribute value for |point_index|; |attribute_value| must match
+// the format declared in AddAttribute().
+void PointCloudBuilder::SetAttributeValueForPoint(int att_id,
+                                                  PointIndex point_index,
+                                                  const void *attribute_value) {
+  PointAttribute *const att = point_cloud_->attribute(att_id);
+  att->SetAttributeValue(att->mapped_index(point_index), attribute_value);
+}
+
+// Bulk-writes values for every point. When |stride| is 0 or equals the
+// attribute's natural stride the whole buffer is copied in one Write();
+// otherwise entries are copied one by one honoring the caller's stride.
+void PointCloudBuilder::SetAttributeValuesForAllPoints(
+    int att_id, const void *attribute_values, int stride) {
+  PointAttribute *const att = point_cloud_->attribute(att_id);
+  const int data_stride =
+      DataTypeLength(att->data_type()) * att->num_components();
+  if (stride == 0) {
+    stride = data_stride;
+  }
+  if (stride == data_stride) {
+    // Fast copy path.
+    att->buffer()->Write(0, attribute_values,
+                         point_cloud_->num_points() * data_stride);
+  } else {
+    // Copy attribute entries one by one.
+    for (PointIndex i(0); i < point_cloud_->num_points(); ++i) {
+      att->SetAttributeValue(
+          att->mapped_index(i),
+          static_cast<const uint8_t *>(attribute_values) + stride * i.value());
+    }
+  }
+}
+
+// Hands ownership of the built cloud to the caller, optionally deduplicating
+// attribute values and point ids first (each step only when compiled in).
+// The builder is left empty; Start() must be called before reuse.
+std::unique_ptr<PointCloud> PointCloudBuilder::Finalize(
+    bool deduplicate_points) {
+  if (deduplicate_points) {
+#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+    point_cloud_->DeduplicateAttributeValues();
+#endif
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+    point_cloud_->DeduplicatePointIds();
+#endif
+  }
+  return std::move(point_cloud_);
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.h b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.h
new file mode 100644
index 0000000..cf55a72
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder.h
@@ -0,0 +1,80 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_POINT_CLOUD_POINT_CLOUD_BUILDER_H_
+#define DRACO_POINT_CLOUD_POINT_CLOUD_BUILDER_H_
+
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+// A helper class for constructing PointCloud instances from other data sources.
+// Usage:
+// PointCloudBuilder builder;
+// // Initialize the builder for a given number of points (required).
+// builder.Start(num_points);
+// // Specify desired attributes.
+// int pos_att_id =
+// builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+// // Add attribute values.
+// for (PointIndex i(0); i < num_points; ++i) {
+// builder.SetAttributeValueForPoint(pos_att_id, i, input_pos[i.value()]);
+// }
+// // Get the final PointCloud.
+// constexpr bool deduplicate_points = false;
+// std::unique_ptr<PointCloud> pc = builder.Finalize(deduplicate_points);
+
+class PointCloudBuilder {
+ public:
+  PointCloudBuilder();
+
+  // Starts collecting point cloud data.
+  // The behavior of other functions is undefined before this method is called.
+  void Start(PointIndex::ValueType num_points);
+
+  // Registers an attribute of the given type/components/data type and returns
+  // its attribute id.
+  int AddAttribute(GeometryAttribute::Type attribute_type,
+                   int8_t num_components, DataType data_type);
+
+  // Sets attribute value for a specific point.
+  // |attribute_value| must contain data in the format specified by the
+  // AddAttribute method.
+  void SetAttributeValueForPoint(int att_id, PointIndex point_index,
+                                 const void *attribute_value);
+
+  // Sets attribute values for all points. All the values must be stored in the
+  // input |attribute_values| buffer. |stride| can be used to define the byte
+  // offset between two consecutive attribute values. If |stride| is set to 0,
+  // the stride is automatically computed based on the format of the given
+  // attribute.
+  void SetAttributeValuesForAllPoints(int att_id, const void *attribute_values,
+                                      int stride);
+
+  // Finalizes the PointCloud or returns nullptr on error.
+  // If |deduplicate_points| is set to true, the following happens:
+  //   1. Attribute values with duplicate entries are deduplicated.
+  //   2. Point ids that are mapped to the same attribute values are
+  //      deduplicated.
+  // Therefore, if |deduplicate_points| is true the final PointCloud can have
+  // a different number of point from the value specified in the Start method.
+  // Once this function is called, the builder becomes invalid and cannot be
+  // used until the method Start() is called again.
+  std::unique_ptr<PointCloud> Finalize(bool deduplicate_points);
+
+ private:
+  // The cloud under construction; released to the caller by Finalize().
+  std::unique_ptr<PointCloud> point_cloud_;
+};
+
+} // namespace draco
+
+#endif // DRACO_POINT_CLOUD_POINT_CLOUD_BUILDER_H_
diff --git a/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder_test.cc b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder_test.cc
new file mode 100644
index 0000000..3222a4c
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_builder_test.cc
@@ -0,0 +1,171 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/point_cloud/point_cloud_builder.h"
+
+#include "draco/core/draco_test_base.h"
+
+namespace draco {
+
+class PointCloudBuilderTest : public ::testing::Test {
+ protected:
+ // Test data.
+ // clang-format off
+ std::vector<float> pos_data_ = {10.f, 0.f, 1.f,
+ 11.f, 1.f, 2.f,
+ 12.f, 2.f, 8.f,
+ 13.f, 4.f, 7.f,
+ 14.f, 5.f, 6.f,
+ 15.f, 6.f, 5.f,
+ 16.f, 1.f, 3.f,
+ 17.f, 1.f, 2.f,
+ 11.f, 1.f, 2.f,
+ 10.f, 0.f, 1.f};
+ std::vector<int16_t> intensity_data_ = {100,
+ 200,
+ 500,
+ 700,
+ 400,
+ 400,
+ 400,
+ 100,
+ 100,
+ 100};
+ // clang-format on
+};
+
+TEST_F(PointCloudBuilderTest, IndividualTest_NoDedup) {
+ // This test verifies that PointCloudBuilder can construct point cloud using
+ // SetAttributeValueForPoint API without deduplication.
+ PointCloudBuilder builder;
+ builder.Start(10);
+ const int pos_att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+ const int intensity_att_id =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_INT16);
+ for (PointIndex i(0); i < 10; ++i) {
+ builder.SetAttributeValueForPoint(pos_att_id, i,
+ pos_data_.data() + 3 * i.value());
+ builder.SetAttributeValueForPoint(intensity_att_id, i,
+ intensity_data_.data() + i.value());
+ }
+ std::unique_ptr<PointCloud> res = builder.Finalize(false);
+ ASSERT_TRUE(res != nullptr);
+ ASSERT_EQ(res->num_points(), 10);
+}
+
+TEST_F(PointCloudBuilderTest, IndividualTest_Dedup) {
+ // This test verifies that PointCloudBuilder can construct point cloud using
+ // SetAttributeValueForPoint API with deduplication.
+ PointCloudBuilder builder;
+ builder.Start(10);
+ const int pos_att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+ const int intensity_att_id =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_INT16);
+ for (PointIndex i(0); i < 10; ++i) {
+ builder.SetAttributeValueForPoint(pos_att_id, i,
+ pos_data_.data() + 3 * i.value());
+ builder.SetAttributeValueForPoint(intensity_att_id, i,
+ intensity_data_.data() + i.value());
+ }
+ std::unique_ptr<PointCloud> res = builder.Finalize(true);
+ ASSERT_TRUE(res != nullptr);
+ ASSERT_EQ(res->num_points(), 9);
+}
+
+TEST_F(PointCloudBuilderTest, BatchTest) {
+ // This test verifies that PointCloudBuilder can construct point cloud using
+ // SetAttributeValuesForAllPoints API.
+ PointCloudBuilder builder;
+ builder.Start(10);
+ const int pos_att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+ const int intensity_att_id =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_INT16);
+ builder.SetAttributeValuesForAllPoints(pos_att_id, pos_data_.data(), 0);
+ builder.SetAttributeValuesForAllPoints(intensity_att_id,
+ intensity_data_.data(), 0);
+ std::unique_ptr<PointCloud> res = builder.Finalize(false);
+ ASSERT_TRUE(res != nullptr);
+ ASSERT_EQ(res->num_points(), 10);
+ for (PointIndex i(0); i < 10; ++i) {
+ float pos_val[3];
+ res->attribute(pos_att_id)->GetMappedValue(i, pos_val);
+ for (int c = 0; c < 3; ++c) {
+ ASSERT_EQ(pos_val[c], pos_data_[3 * i.value() + c]);
+ }
+ int16_t int_val;
+ res->attribute(intensity_att_id)->GetMappedValue(i, &int_val);
+ ASSERT_EQ(intensity_data_[i.value()], int_val);
+ }
+}
+
+TEST_F(PointCloudBuilderTest, MultiUse) {
+ // This test verifies that PointCloudBuilder can be used multiple times
+ PointCloudBuilder builder;
+ {
+ builder.Start(10);
+ const int pos_att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+ const int intensity_att_id =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_INT16);
+ builder.SetAttributeValuesForAllPoints(pos_att_id, pos_data_.data(), 0);
+ builder.SetAttributeValuesForAllPoints(intensity_att_id,
+ intensity_data_.data(), 0);
+ std::unique_ptr<PointCloud> res = builder.Finalize(false);
+ ASSERT_TRUE(res != nullptr);
+ ASSERT_EQ(res->num_points(), 10);
+ for (PointIndex i(0); i < 10; ++i) {
+ float pos_val[3];
+ res->attribute(pos_att_id)->GetMappedValue(i, pos_val);
+ for (int c = 0; c < 3; ++c) {
+ ASSERT_EQ(pos_val[c], pos_data_[3 * i.value() + c]);
+ }
+ int16_t int_val;
+ res->attribute(intensity_att_id)->GetMappedValue(i, &int_val);
+ ASSERT_EQ(intensity_data_[i.value()], int_val);
+ }
+ }
+
+ {
+ // Use only a sub-set of data (offsetted to avoid possible reuse of old
+ // data).
+ builder.Start(4);
+ const int pos_att_id =
+ builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
+ const int intensity_att_id =
+ builder.AddAttribute(GeometryAttribute::GENERIC, 1, DT_INT16);
+ constexpr int offset = 5;
+ builder.SetAttributeValuesForAllPoints(pos_att_id,
+ pos_data_.data() + 3 * offset, 0);
+ builder.SetAttributeValuesForAllPoints(intensity_att_id,
+ intensity_data_.data() + offset, 0);
+ std::unique_ptr<PointCloud> res = builder.Finalize(false);
+ ASSERT_TRUE(res != nullptr);
+ ASSERT_EQ(res->num_points(), 4);
+ for (PointIndex i(0); i < 4; ++i) {
+ float pos_val[3];
+ res->attribute(pos_att_id)->GetMappedValue(i, pos_val);
+ for (int c = 0; c < 3; ++c) {
+ ASSERT_EQ(pos_val[c], pos_data_[3 * (i.value() + offset) + c]);
+ }
+ int16_t int_val;
+ res->attribute(intensity_att_id)->GetMappedValue(i, &int_val);
+ ASSERT_EQ(intensity_data_[i.value() + offset], int_val);
+ }
+ }
+}
+
+} // namespace draco
diff --git a/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_test.cc b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_test.cc
new file mode 100644
index 0000000..4e94603
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/point_cloud/point_cloud_test.cc
@@ -0,0 +1,132 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/point_cloud/point_cloud.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+#include "draco/metadata/geometry_metadata.h"
+
+namespace {
+
+class PointCloudTest : public ::testing::Test {
+ protected:
+ PointCloudTest() {}
+};
+
+TEST_F(PointCloudTest, TestAttributeDeletion) {
+ draco::PointCloud pc;
+ // Test whether we can correctly delete an attribute from a point cloud.
+ // Create some attributes for the point cloud.
+ draco::GeometryAttribute pos_att;
+ pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
+ draco::DT_FLOAT32, false, 12, 0);
+ draco::GeometryAttribute norm_att;
+ norm_att.Init(draco::GeometryAttribute::NORMAL, nullptr, 3, draco::DT_FLOAT32,
+ false, 12, 0);
+ draco::GeometryAttribute gen_att;
+ gen_att.Init(draco::GeometryAttribute::GENERIC, nullptr, 3, draco::DT_FLOAT32,
+ false, 12, 0);
+
+ // Add one position, two normal and two generic attributes.
+ pc.AddAttribute(pos_att, false, 0);
+ pc.AddAttribute(gen_att, false, 0);
+ pc.AddAttribute(norm_att, false, 0);
+ pc.AddAttribute(gen_att, false, 0);
+ pc.AddAttribute(norm_att, false, 0);
+
+ ASSERT_EQ(pc.num_attributes(), 5);
+ ASSERT_EQ(pc.attribute(0)->attribute_type(),
+ draco::GeometryAttribute::POSITION);
+ ASSERT_EQ(pc.attribute(3)->attribute_type(),
+ draco::GeometryAttribute::GENERIC);
+
+ // Delete generic attribute.
+ pc.DeleteAttribute(1);
+ ASSERT_EQ(pc.num_attributes(), 4);
+ ASSERT_EQ(pc.attribute(1)->attribute_type(),
+ draco::GeometryAttribute::NORMAL);
+ ASSERT_EQ(pc.NumNamedAttributes(draco::GeometryAttribute::NORMAL), 2);
+ ASSERT_EQ(pc.GetNamedAttributeId(draco::GeometryAttribute::NORMAL, 1), 3);
+
+ // Delete the first normal attribute.
+ pc.DeleteAttribute(1);
+ ASSERT_EQ(pc.num_attributes(), 3);
+ ASSERT_EQ(pc.attribute(1)->attribute_type(),
+ draco::GeometryAttribute::GENERIC);
+ ASSERT_EQ(pc.NumNamedAttributes(draco::GeometryAttribute::NORMAL), 1);
+ ASSERT_EQ(pc.GetNamedAttributeId(draco::GeometryAttribute::NORMAL, 0), 2);
+}
+
+TEST_F(PointCloudTest, TestPointCloudWithMetadata) {
+ draco::PointCloud pc;
+ std::unique_ptr<draco::GeometryMetadata> metadata =
+ std::unique_ptr<draco::GeometryMetadata>(new draco::GeometryMetadata());
+
+ // Add a position attribute metadata.
+ draco::GeometryAttribute pos_att;
+ pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
+ draco::DT_FLOAT32, false, 12, 0);
+ const uint32_t pos_att_id = pc.AddAttribute(pos_att, false, 0);
+ ASSERT_EQ(pos_att_id, 0);
+ std::unique_ptr<draco::AttributeMetadata> pos_metadata =
+ std::unique_ptr<draco::AttributeMetadata>(new draco::AttributeMetadata());
+ pos_metadata->AddEntryString("name", "position");
+ pc.AddAttributeMetadata(pos_att_id, std::move(pos_metadata));
+ const draco::GeometryMetadata *pc_metadata = pc.GetMetadata();
+ ASSERT_NE(pc_metadata, nullptr);
+ // Add a generic material attribute metadata.
+ draco::GeometryAttribute material_att;
+ material_att.Init(draco::GeometryAttribute::GENERIC, nullptr, 3,
+ draco::DT_FLOAT32, false, 12, 0);
+ const uint32_t material_att_id = pc.AddAttribute(material_att, false, 0);
+ ASSERT_EQ(material_att_id, 1);
+ std::unique_ptr<draco::AttributeMetadata> material_metadata =
+ std::unique_ptr<draco::AttributeMetadata>(new draco::AttributeMetadata());
+ material_metadata->AddEntryString("name", "material");
+ // The material attribute has id of 1 now.
+ pc.AddAttributeMetadata(material_att_id, std::move(material_metadata));
+
+ // Test if the attribute metadata is correctly added.
+ const draco::AttributeMetadata *requested_pos_metadata =
+ pc.GetAttributeMetadataByStringEntry("name", "position");
+ ASSERT_NE(requested_pos_metadata, nullptr);
+ const draco::AttributeMetadata *requested_mat_metadata =
+ pc.GetAttributeMetadataByStringEntry("name", "material");
+ ASSERT_NE(requested_mat_metadata, nullptr);
+
+ // Attribute id should be preserved.
+ ASSERT_EQ(
+ pc.GetAttributeIdByUniqueId(requested_pos_metadata->att_unique_id()), 0);
+ ASSERT_EQ(
+ pc.GetAttributeIdByUniqueId(requested_mat_metadata->att_unique_id()), 1);
+
+ // Test deleting attribute with metadata.
+ pc.DeleteAttribute(pos_att_id);
+ ASSERT_EQ(pc.GetAttributeMetadataByStringEntry("name", "position"), nullptr);
+
+ requested_mat_metadata =
+ pc.GetAttributeMetadataByStringEntry("name", "material");
+ // The unique id should not be changed.
+ ASSERT_EQ(requested_mat_metadata->att_unique_id(), 1);
+ // Now position attribute is removed, material attribute should have the
+ // attribute id of 0.
+ ASSERT_EQ(
+ pc.GetAttributeIdByUniqueId(requested_mat_metadata->att_unique_id()), 0);
+ // Should be able to get metadata using the current attribute id.
+ // Attribute id of material attribute is changed from 1 to 0.
+ ASSERT_NE(pc.GetAttributeMetadataByAttributeId(0), nullptr);
+}
+
+} // namespace
diff --git a/libs/assimp/contrib/draco/src/draco/tools/draco_decoder.cc b/libs/assimp/contrib/draco/src/draco/tools/draco_decoder.cc
new file mode 100644
index 0000000..610709d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/draco_decoder.cc
@@ -0,0 +1,168 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <cinttypes>
+
+#include "draco/compression/decode.h"
+#include "draco/core/cycle_timer.h"
+#include "draco/io/file_utils.h"
+#include "draco/io/obj_encoder.h"
+#include "draco/io/parser_utils.h"
+#include "draco/io/ply_encoder.h"
+
+namespace {
+
+struct Options {
+ Options();
+
+ std::string input;
+ std::string output;
+};
+
+Options::Options() {}
+
+void Usage() {
+ printf("Usage: draco_decoder [options] -i input\n");
+ printf("\n");
+ printf("Main options:\n");
+ printf(" -h | -? show help.\n");
+ printf(" -o <output> output file name.\n");
+}
+
+int ReturnError(const draco::Status &status) {
+ printf("Failed to decode the input file %s\n", status.error_msg());
+ return -1;
+}
+
+} // namespace
+
+int main(int argc, char **argv) {
+ Options options;
+ const int argc_check = argc - 1;
+
+ for (int i = 1; i < argc; ++i) {
+ if (!strcmp("-h", argv[i]) || !strcmp("-?", argv[i])) {
+ Usage();
+ return 0;
+ } else if (!strcmp("-i", argv[i]) && i < argc_check) {
+ options.input = argv[++i];
+ } else if (!strcmp("-o", argv[i]) && i < argc_check) {
+ options.output = argv[++i];
+ }
+ }
+ if (argc < 3 || options.input.empty()) {
+ Usage();
+ return -1;
+ }
+
+ std::vector<char> data;
+ if (!draco::ReadFileToBuffer(options.input, &data)) {
+ printf("Failed opening the input file.\n");
+ return -1;
+ }
+
+ if (data.empty()) {
+ printf("Empty input file.\n");
+ return -1;
+ }
+
+ // Create a draco decoding buffer. Note that no data is copied in this step.
+ draco::DecoderBuffer buffer;
+ buffer.Init(data.data(), data.size());
+
+ draco::CycleTimer timer;
+ // Decode the input data into a geometry.
+ std::unique_ptr<draco::PointCloud> pc;
+ draco::Mesh *mesh = nullptr;
+ auto type_statusor = draco::Decoder::GetEncodedGeometryType(&buffer);
+ if (!type_statusor.ok()) {
+ return ReturnError(type_statusor.status());
+ }
+ const draco::EncodedGeometryType geom_type = type_statusor.value();
+ if (geom_type == draco::TRIANGULAR_MESH) {
+ timer.Start();
+ draco::Decoder decoder;
+ auto statusor = decoder.DecodeMeshFromBuffer(&buffer);
+ if (!statusor.ok()) {
+ return ReturnError(statusor.status());
+ }
+ std::unique_ptr<draco::Mesh> in_mesh = std::move(statusor).value();
+ timer.Stop();
+ if (in_mesh) {
+ mesh = in_mesh.get();
+ pc = std::move(in_mesh);
+ }
+ } else if (geom_type == draco::POINT_CLOUD) {
+ // Failed to decode it as mesh, so let's try to decode it as a point cloud.
+ timer.Start();
+ draco::Decoder decoder;
+ auto statusor = decoder.DecodePointCloudFromBuffer(&buffer);
+ if (!statusor.ok()) {
+ return ReturnError(statusor.status());
+ }
+ pc = std::move(statusor).value();
+ timer.Stop();
+ }
+
+ if (pc == nullptr) {
+ printf("Failed to decode the input file.\n");
+ return -1;
+ }
+
+ if (options.output.empty()) {
+ // Save the output model into a ply file.
+ options.output = options.input + ".ply";
+ }
+
+ // Save the decoded geometry into a file.
+ // TODO(fgalligan): Change extension code to look for '.'.
+ const std::string extension = draco::parser::ToLower(
+ options.output.size() >= 4
+ ? options.output.substr(options.output.size() - 4)
+ : options.output);
+
+ if (extension == ".obj") {
+ draco::ObjEncoder obj_encoder;
+ if (mesh) {
+ if (!obj_encoder.EncodeToFile(*mesh, options.output)) {
+ printf("Failed to store the decoded mesh as OBJ.\n");
+ return -1;
+ }
+ } else {
+ if (!obj_encoder.EncodeToFile(*pc.get(), options.output)) {
+ printf("Failed to store the decoded point cloud as OBJ.\n");
+ return -1;
+ }
+ }
+ } else if (extension == ".ply") {
+ draco::PlyEncoder ply_encoder;
+ if (mesh) {
+ if (!ply_encoder.EncodeToFile(*mesh, options.output)) {
+ printf("Failed to store the decoded mesh as PLY.\n");
+ return -1;
+ }
+ } else {
+ if (!ply_encoder.EncodeToFile(*pc.get(), options.output)) {
+ printf("Failed to store the decoded point cloud as PLY.\n");
+ return -1;
+ }
+ }
+ } else {
+ printf("Invalid extension of the output file. Use either .ply or .obj.\n");
+ return -1;
+ }
+ printf("Decoded geometry saved to %s (%" PRId64 " ms to decode)\n",
+ options.output.c_str(), timer.GetInMs());
+ return 0;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/tools/draco_encoder.cc b/libs/assimp/contrib/draco/src/draco/tools/draco_encoder.cc
new file mode 100644
index 0000000..7e3632d
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/draco_encoder.cc
@@ -0,0 +1,369 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <cinttypes>
+#include <cstdlib>
+
+#include "draco/compression/encode.h"
+#include "draco/core/cycle_timer.h"
+#include "draco/io/file_utils.h"
+#include "draco/io/mesh_io.h"
+#include "draco/io/point_cloud_io.h"
+
+namespace {
+
+struct Options {
+ Options();
+
+ bool is_point_cloud;
+ int pos_quantization_bits;
+ int tex_coords_quantization_bits;
+ bool tex_coords_deleted;
+ int normals_quantization_bits;
+ bool normals_deleted;
+ int generic_quantization_bits;
+ bool generic_deleted;
+ int compression_level;
+ bool use_metadata;
+ std::string input;
+ std::string output;
+};
+
+Options::Options()
+ : is_point_cloud(false),
+ pos_quantization_bits(11),
+ tex_coords_quantization_bits(10),
+ tex_coords_deleted(false),
+ normals_quantization_bits(8),
+ normals_deleted(false),
+ generic_quantization_bits(8),
+ generic_deleted(false),
+ compression_level(7),
+ use_metadata(false) {}
+
+void Usage() {
+ printf("Usage: draco_encoder [options] -i input\n");
+ printf("\n");
+ printf("Main options:\n");
+ printf(" -h | -? show help.\n");
+ printf(" -i <input> input file name.\n");
+ printf(" -o <output> output file name.\n");
+ printf(
+ " -point_cloud forces the input to be encoded as a point "
+ "cloud.\n");
+ printf(
+ " -qp <value> quantization bits for the position "
+ "attribute, default=11.\n");
+ printf(
+ " -qt <value> quantization bits for the texture coordinate "
+ "attribute, default=10.\n");
+ printf(
+ " -qn <value> quantization bits for the normal vector "
+ "attribute, default=8.\n");
+ printf(
+ " -qg <value> quantization bits for any generic attribute, "
+ "default=8.\n");
+ printf(
+ " -cl <value> compression level [0-10], most=10, least=0, "
+ "default=7.\n");
+ printf(
+ " --skip ATTRIBUTE_NAME skip a given attribute (NORMAL, TEX_COORD, "
+ "GENERIC)\n");
+ printf(
+ " --metadata use metadata to encode extra information in "
+ "mesh files.\n");
+ printf(
+ "\nUse negative quantization values to skip the specified attribute\n");
+}
+
+int StringToInt(const std::string &s) {
+ char *end;
+ return strtol(s.c_str(), &end, 10); // NOLINT
+}
+
+void PrintOptions(const draco::PointCloud &pc, const Options &options) {
+ printf("Encoder options:\n");
+ printf(" Compression level = %d\n", options.compression_level);
+ if (options.pos_quantization_bits == 0) {
+ printf(" Positions: No quantization\n");
+ } else {
+ printf(" Positions: Quantization = %d bits\n",
+ options.pos_quantization_bits);
+ }
+
+ if (pc.GetNamedAttributeId(draco::GeometryAttribute::TEX_COORD) >= 0) {
+ if (options.tex_coords_quantization_bits == 0) {
+ printf(" Texture coordinates: No quantization\n");
+ } else {
+ printf(" Texture coordinates: Quantization = %d bits\n",
+ options.tex_coords_quantization_bits);
+ }
+ } else if (options.tex_coords_deleted) {
+ printf(" Texture coordinates: Skipped\n");
+ }
+
+ if (pc.GetNamedAttributeId(draco::GeometryAttribute::NORMAL) >= 0) {
+ if (options.normals_quantization_bits == 0) {
+ printf(" Normals: No quantization\n");
+ } else {
+ printf(" Normals: Quantization = %d bits\n",
+ options.normals_quantization_bits);
+ }
+ } else if (options.normals_deleted) {
+ printf(" Normals: Skipped\n");
+ }
+
+ if (pc.GetNamedAttributeId(draco::GeometryAttribute::GENERIC) >= 0) {
+ if (options.generic_quantization_bits == 0) {
+ printf(" Generic: No quantization\n");
+ } else {
+ printf(" Generic: Quantization = %d bits\n",
+ options.generic_quantization_bits);
+ }
+ } else if (options.generic_deleted) {
+ printf(" Generic: Skipped\n");
+ }
+ printf("\n");
+}
+
+int EncodePointCloudToFile(const draco::PointCloud &pc, const std::string &file,
+ draco::Encoder *encoder) {
+ draco::CycleTimer timer;
+ // Encode the geometry.
+ draco::EncoderBuffer buffer;
+ timer.Start();
+ const draco::Status status = encoder->EncodePointCloudToBuffer(pc, &buffer);
+ if (!status.ok()) {
+ printf("Failed to encode the point cloud.\n");
+ printf("%s\n", status.error_msg());
+ return -1;
+ }
+ timer.Stop();
+ // Save the encoded geometry into a file.
+ if (!draco::WriteBufferToFile(buffer.data(), buffer.size(), file)) {
+ printf("Failed to write the output file.\n");
+ return -1;
+ }
+ printf("Encoded point cloud saved to %s (%" PRId64 " ms to encode).\n",
+ file.c_str(), timer.GetInMs());
+ printf("\nEncoded size = %zu bytes\n\n", buffer.size());
+ return 0;
+}
+
+int EncodeMeshToFile(const draco::Mesh &mesh, const std::string &file,
+ draco::Encoder *encoder) {
+ draco::CycleTimer timer;
+ // Encode the geometry.
+ draco::EncoderBuffer buffer;
+ timer.Start();
+ const draco::Status status = encoder->EncodeMeshToBuffer(mesh, &buffer);
+ if (!status.ok()) {
+ printf("Failed to encode the mesh.\n");
+ printf("%s\n", status.error_msg());
+ return -1;
+ }
+ timer.Stop();
+ // Save the encoded geometry into a file.
+ if (!draco::WriteBufferToFile(buffer.data(), buffer.size(), file)) {
+ printf("Failed to create the output file.\n");
+ return -1;
+ }
+ printf("Encoded mesh saved to %s (%" PRId64 " ms to encode).\n", file.c_str(),
+ timer.GetInMs());
+ printf("\nEncoded size = %zu bytes\n\n", buffer.size());
+ return 0;
+}
+
+} // anonymous namespace
+
+int main(int argc, char **argv) {
+ Options options;
+ const int argc_check = argc - 1;
+
+ for (int i = 1; i < argc; ++i) {
+ if (!strcmp("-h", argv[i]) || !strcmp("-?", argv[i])) {
+ Usage();
+ return 0;
+ } else if (!strcmp("-i", argv[i]) && i < argc_check) {
+ options.input = argv[++i];
+ } else if (!strcmp("-o", argv[i]) && i < argc_check) {
+ options.output = argv[++i];
+ } else if (!strcmp("-point_cloud", argv[i])) {
+ options.is_point_cloud = true;
+ } else if (!strcmp("-qp", argv[i]) && i < argc_check) {
+ options.pos_quantization_bits = StringToInt(argv[++i]);
+ if (options.pos_quantization_bits > 30) {
+ printf(
+ "Error: The maximum number of quantization bits for the position "
+ "attribute is 30.\n");
+ return -1;
+ }
+ } else if (!strcmp("-qt", argv[i]) && i < argc_check) {
+ options.tex_coords_quantization_bits = StringToInt(argv[++i]);
+ if (options.tex_coords_quantization_bits > 30) {
+ printf(
+ "Error: The maximum number of quantization bits for the texture "
+ "coordinate attribute is 30.\n");
+ return -1;
+ }
+ } else if (!strcmp("-qn", argv[i]) && i < argc_check) {
+ options.normals_quantization_bits = StringToInt(argv[++i]);
+ if (options.normals_quantization_bits > 30) {
+ printf(
+ "Error: The maximum number of quantization bits for the normal "
+ "attribute is 30.\n");
+ return -1;
+ }
+ } else if (!strcmp("-qg", argv[i]) && i < argc_check) {
+ options.generic_quantization_bits = StringToInt(argv[++i]);
+ if (options.generic_quantization_bits > 30) {
+ printf(
+ "Error: The maximum number of quantization bits for generic "
+ "attributes is 30.\n");
+ return -1;
+ }
+ } else if (!strcmp("-cl", argv[i]) && i < argc_check) {
+ options.compression_level = StringToInt(argv[++i]);
+ } else if (!strcmp("--skip", argv[i]) && i < argc_check) {
+ if (!strcmp("NORMAL", argv[i + 1])) {
+ options.normals_quantization_bits = -1;
+ } else if (!strcmp("TEX_COORD", argv[i + 1])) {
+ options.tex_coords_quantization_bits = -1;
+ } else if (!strcmp("GENERIC", argv[i + 1])) {
+ options.generic_quantization_bits = -1;
+ } else {
+ printf("Error: Invalid attribute name after --skip\n");
+ return -1;
+ }
+ ++i;
+ } else if (!strcmp("--metadata", argv[i])) {
+ options.use_metadata = true;
+ }
+ }
+ if (argc < 3 || options.input.empty()) {
+ Usage();
+ return -1;
+ }
+
+ std::unique_ptr<draco::PointCloud> pc;
+ draco::Mesh *mesh = nullptr;
+ if (!options.is_point_cloud) {
+ auto maybe_mesh =
+ draco::ReadMeshFromFile(options.input, options.use_metadata);
+ if (!maybe_mesh.ok()) {
+ printf("Failed loading the input mesh: %s.\n",
+ maybe_mesh.status().error_msg());
+ return -1;
+ }
+ mesh = maybe_mesh.value().get();
+ pc = std::move(maybe_mesh).value();
+ } else {
+ auto maybe_pc = draco::ReadPointCloudFromFile(options.input);
+ if (!maybe_pc.ok()) {
+ printf("Failed loading the input point cloud: %s.\n",
+ maybe_pc.status().error_msg());
+ return -1;
+ }
+ pc = std::move(maybe_pc).value();
+ }
+
+ if (options.pos_quantization_bits < 0) {
+ printf("Error: Position attribute cannot be skipped.\n");
+ return -1;
+ }
+
+ // Delete attributes if needed. This needs to happen before we set any
+ // quantization settings.
+ if (options.tex_coords_quantization_bits < 0) {
+ if (pc->NumNamedAttributes(draco::GeometryAttribute::TEX_COORD) > 0) {
+ options.tex_coords_deleted = true;
+ }
+ while (pc->NumNamedAttributes(draco::GeometryAttribute::TEX_COORD) > 0) {
+ pc->DeleteAttribute(
+ pc->GetNamedAttributeId(draco::GeometryAttribute::TEX_COORD, 0));
+ }
+ }
+ if (options.normals_quantization_bits < 0) {
+ if (pc->NumNamedAttributes(draco::GeometryAttribute::NORMAL) > 0) {
+ options.normals_deleted = true;
+ }
+ while (pc->NumNamedAttributes(draco::GeometryAttribute::NORMAL) > 0) {
+ pc->DeleteAttribute(
+ pc->GetNamedAttributeId(draco::GeometryAttribute::NORMAL, 0));
+ }
+ }
+ if (options.generic_quantization_bits < 0) {
+ if (pc->NumNamedAttributes(draco::GeometryAttribute::GENERIC) > 0) {
+ options.generic_deleted = true;
+ }
+ while (pc->NumNamedAttributes(draco::GeometryAttribute::GENERIC) > 0) {
+ pc->DeleteAttribute(
+ pc->GetNamedAttributeId(draco::GeometryAttribute::GENERIC, 0));
+ }
+ }
+#ifdef DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+ // If any attribute has been deleted, run deduplication of point indices again
+ // as some points can be possibly combined.
+ if (options.tex_coords_deleted || options.normals_deleted ||
+ options.generic_deleted) {
+ pc->DeduplicatePointIds();
+ }
+#endif
+
+ // Convert compression level to speed (that 0 = slowest, 10 = fastest).
+ const int speed = 10 - options.compression_level;
+
+ draco::Encoder encoder;
+
+ // Setup encoder options.
+ if (options.pos_quantization_bits > 0) {
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION,
+ options.pos_quantization_bits);
+ }
+ if (options.tex_coords_quantization_bits > 0) {
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD,
+ options.tex_coords_quantization_bits);
+ }
+ if (options.normals_quantization_bits > 0) {
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL,
+ options.normals_quantization_bits);
+ }
+ if (options.generic_quantization_bits > 0) {
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::GENERIC,
+ options.generic_quantization_bits);
+ }
+ encoder.SetSpeedOptions(speed, speed);
+
+ if (options.output.empty()) {
+ // Create a default output file by attaching .drc to the input file name.
+ options.output = options.input + ".drc";
+ }
+
+ PrintOptions(*pc.get(), options);
+
+ int ret = -1;
+ const bool input_is_mesh = mesh && mesh->num_faces() > 0;
+ if (input_is_mesh)
+ ret = EncodeMeshToFile(*mesh, options.output, &encoder);
+ else
+ ret = EncodePointCloudToFile(*pc.get(), options.output, &encoder);
+
+ if (ret != -1 && options.compression_level < 10) {
+ printf(
+ "For better compression, increase the compression level up to '-cl 10' "
+ ".\n\n");
+ }
+
+ return ret;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/tools/fuzz/build.sh b/libs/assimp/contrib/draco/src/draco/tools/fuzz/build.sh
new file mode 100644
index 0000000..bbeb105
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/fuzz/build.sh
@@ -0,0 +1,35 @@
+#!/bin/bash -eu
+# Copyright 2020 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+# build project
+cmake $SRC/draco
+# The draco_decoder and draco_encoder binaries don't build nicely with OSS-Fuzz
+# options, so just build the Draco shared libraries.
+make -j$(nproc) draco
+
+# build fuzzers
+for fuzzer in $(find $SRC/draco/src/draco/tools/fuzz -name '*.cc'); do
+ fuzzer_basename=$(basename -s .cc $fuzzer)
+ $CXX $CXXFLAGS \
+ -I $SRC/ \
+ -I $SRC/draco/src \
+ -I $WORK/ \
+ $LIB_FUZZING_ENGINE \
+ $fuzzer \
+ $WORK/libdraco.a \
+ -o $OUT/$fuzzer_basename
+done
diff --git a/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_fuzzer.cc b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_fuzzer.cc
new file mode 100644
index 0000000..9a50836
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_fuzzer.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "draco/src/draco/compression/decode.h"
+#include "draco/src/draco/core/decoder_buffer.h"
+#include "draco/src/draco/mesh/mesh.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ draco::DecoderBuffer buffer;
+ buffer.Init(reinterpret_cast<const char *>(data), size);
+
+ draco::Decoder decoder;
+ decoder.DecodeMeshFromBuffer(&buffer);
+
+ return 0;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_without_dequantization_fuzzer.cc b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_without_dequantization_fuzzer.cc
new file mode 100644
index 0000000..4c612cc
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_mesh_decoder_without_dequantization_fuzzer.cc
@@ -0,0 +1,30 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "draco/src/draco/compression/decode.h"
+#include "draco/src/draco/core/decoder_buffer.h"
+#include "draco/src/draco/mesh/mesh.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  // Wrap the raw fuzz input without copying; the buffer only references
+  // |data| for the duration of this call.
+  draco::DecoderBuffer buffer;
+  buffer.Init(reinterpret_cast<const char *>(data), size);
+
+  draco::Decoder decoder;
+  // Request raw (non-dequantized) positions so the skip-attribute-transform
+  // decode path is exercised as well (see file name: without_dequantization).
+  decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+  // Decode status intentionally ignored; the fuzzer only hunts crashes/UB.
+  decoder.DecodeMeshFromBuffer(&buffer);
+
+  return 0;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_fuzzer.cc b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_fuzzer.cc
new file mode 100644
index 0000000..3a764f1
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_fuzzer.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "draco/src/draco/compression/decode.h"
+#include "draco/src/draco/core/decoder_buffer.h"
+#include "draco/src/draco/point_cloud/point_cloud.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  // Wrap the raw fuzz input without copying; the buffer only references
+  // |data| for the duration of this call.
+  draco::DecoderBuffer buffer;
+  buffer.Init(reinterpret_cast<const char *>(data), size);
+
+  draco::Decoder decoder;
+  // Decode status intentionally ignored; the fuzzer only hunts crashes/UB.
+  decoder.DecodePointCloudFromBuffer(&buffer);
+
+  return 0;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_without_dequantization_fuzzer.cc b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_without_dequantization_fuzzer.cc
new file mode 100644
index 0000000..1d0c539
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/tools/fuzz/draco_pc_decoder_without_dequantization_fuzzer.cc
@@ -0,0 +1,30 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "draco/src/draco/compression/decode.h"
+#include "draco/src/draco/core/decoder_buffer.h"
+#include "draco/src/draco/point_cloud/point_cloud.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  // Wrap the raw fuzz input without copying; the buffer only references
+  // |data| for the duration of this call.
+  draco::DecoderBuffer buffer;
+  buffer.Init(reinterpret_cast<const char *>(data), size);
+
+  draco::Decoder decoder;
+  // Request raw (non-dequantized) positions so the skip-attribute-transform
+  // decode path is exercised as well (see file name: without_dequantization).
+  decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+  // Decode status intentionally ignored; the fuzzer only hunts crashes/UB.
+  decoder.DecodePointCloudFromBuffer(&buffer);
+
+  return 0;
+}
diff --git a/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.cc b/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.cc
new file mode 100644
index 0000000..e80279b
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.cc
@@ -0,0 +1,407 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/unity/draco_unity_plugin.h"
+
+#ifdef DRACO_UNITY_PLUGIN
+
+namespace {
+// Returns a DracoAttribute from a PointAttribute.
+draco::DracoAttribute *CreateDracoAttribute(const draco::PointAttribute *attr) {
+  // Caller owns the returned wrapper (released via ReleaseDracoAttribute).
+  draco::DracoAttribute *const attribute = new draco::DracoAttribute();
+  attribute->attribute_type =
+      static_cast<draco::GeometryAttribute::Type>(attr->attribute_type());
+  attribute->data_type = static_cast<draco::DataType>(attr->data_type());
+  attribute->num_components = attr->num_components();
+  attribute->unique_id = attr->unique_id();
+  // Non-owning back-pointer to the mesh's attribute; the wrapper must not
+  // outlive the DracoMesh this attribute came from.
+  attribute->private_attribute = static_cast<const void *>(attr);
+  return attribute;
+}
+
+// Returns the attribute data in |attr| as an array of type T.
+template <typename T>
+T *CopyAttributeData(int num_points, const draco::PointAttribute *attr) {
+  const int num_components = attr->num_components();
+  // Caller owns the returned array and must free it with delete[].
+  T *const data = new T[num_points * num_components];
+
+  for (draco::PointIndex i(0); i < num_points; ++i) {
+    const draco::AttributeValueIndex val_index = attr->mapped_index(i);
+    bool got_data = false;
+    // ConvertValue takes the component count as a template argument, so we
+    // dispatch on the runtime count; counts other than 1-4 fall through to
+    // the failure path below.
+    switch (num_components) {
+      case 1:
+        got_data = attr->ConvertValue<T, 1>(val_index,
+                                            data + i.value() * num_components);
+        break;
+      case 2:
+        got_data = attr->ConvertValue<T, 2>(val_index,
+                                            data + i.value() * num_components);
+        break;
+      case 3:
+        got_data = attr->ConvertValue<T, 3>(val_index,
+                                            data + i.value() * num_components);
+        break;
+      case 4:
+        got_data = attr->ConvertValue<T, 4>(val_index,
+                                            data + i.value() * num_components);
+        break;
+      default:
+        break;
+    }
+    // Any failed conversion aborts the whole copy and frees the buffer.
+    if (!got_data) {
+      delete[] data;
+      return nullptr;
+    }
+  }
+
+  return data;
+}
+
+// Returns the attribute data in |attr| as an array of void*.
+// Returns the attribute data as a newly allocated array whose element type
+// matches attr->data_type(); ownership passes to the caller (freed via the
+// type-dispatched delete[] in ReleaseDracoData). Returns nullptr for data
+// types not listed below or if any per-point conversion fails.
+void *ConvertAttributeData(int num_points, const draco::PointAttribute *attr) {
+  switch (attr->data_type()) {
+    case draco::DataType::DT_INT8:
+      return static_cast<void *>(CopyAttributeData<int8_t>(num_points, attr));
+    case draco::DataType::DT_UINT8:
+      return static_cast<void *>(CopyAttributeData<uint8_t>(num_points, attr));
+    case draco::DataType::DT_INT16:
+      return static_cast<void *>(CopyAttributeData<int16_t>(num_points, attr));
+    case draco::DataType::DT_UINT16:
+      return static_cast<void *>(CopyAttributeData<uint16_t>(num_points, attr));
+    case draco::DataType::DT_INT32:
+      return static_cast<void *>(CopyAttributeData<int32_t>(num_points, attr));
+    case draco::DataType::DT_UINT32:
+      return static_cast<void *>(CopyAttributeData<uint32_t>(num_points, attr));
+    case draco::DataType::DT_FLOAT32:
+      return static_cast<void *>(CopyAttributeData<float>(num_points, attr));
+    default:
+      return nullptr;
+  }
+}
+} // namespace
+
+namespace draco {
+
+// Frees |*mesh_ptr| and the draco::Mesh it owns, then nulls the caller's
+// pointer. Safe to call with a null handle or a null inner pointer.
+void EXPORT_API ReleaseDracoMesh(DracoMesh **mesh_ptr) {
+  if (!mesh_ptr) {
+    return;
+  }
+  const DracoMesh *const mesh = *mesh_ptr;
+  if (!mesh) {
+    return;
+  }
+  // private_mesh owns the decoded mesh (set in DecodeDracoMesh).
+  const Mesh *const m = static_cast<const Mesh *>(mesh->private_mesh);
+  delete m;
+  delete mesh;
+  *mesh_ptr = nullptr;
+}
+
+// Frees the DracoAttribute wrapper only; private_attribute is non-owning
+// (the underlying PointAttribute belongs to the mesh) and is not freed.
+void EXPORT_API ReleaseDracoAttribute(DracoAttribute **attr_ptr) {
+  if (!attr_ptr) {
+    return;
+  }
+  const DracoAttribute *const attr = *attr_ptr;
+  if (!attr) {
+    return;
+  }
+  delete attr;
+  *attr_ptr = nullptr;
+}
+
+// Frees the typed data array and the DracoData wrapper, then nulls the
+// caller's pointer. delete[] must match the element type used at
+// allocation, hence the switch on data_type. Safe to call with nulls.
+void EXPORT_API ReleaseDracoData(DracoData **data_ptr) {
+  if (!data_ptr) {
+    return;
+  }
+  const DracoData *const data = *data_ptr;
+  // Guard the inner pointer too, consistent with ReleaseDracoMesh and
+  // ReleaseDracoAttribute (previously a null *data_ptr was dereferenced).
+  if (!data) {
+    return;
+  }
+  switch (data->data_type) {
+    case draco::DataType::DT_INT8:
+      delete[] static_cast<int8_t *>(data->data);
+      break;
+    case draco::DataType::DT_UINT8:
+      delete[] static_cast<uint8_t *>(data->data);
+      break;
+    case draco::DataType::DT_INT16:
+      delete[] static_cast<int16_t *>(data->data);
+      break;
+    case draco::DataType::DT_UINT16:
+      delete[] static_cast<uint16_t *>(data->data);
+      break;
+    case draco::DataType::DT_INT32:
+      delete[] static_cast<int32_t *>(data->data);
+      break;
+    case draco::DataType::DT_UINT32:
+      delete[] static_cast<uint32_t *>(data->data);
+      break;
+    case draco::DataType::DT_FLOAT32:
+      delete[] static_cast<float *>(data->data);
+      break;
+    default:
+      break;
+  }
+  delete data;
+  *data_ptr = nullptr;
+}
+
+// Decodes the Draco-compressed buffer |data| of |length| bytes into a new
+// DracoMesh returned through |mesh| (|*mesh| must be null on input).
+// Returns the number of faces on success, or a negative error code:
+//   -1 bad arguments, -2 unrecognized header, -3 not a triangular mesh,
+//   -4 decode failure.
+int EXPORT_API DecodeDracoMesh(char *data, unsigned int length,
+                               DracoMesh **mesh) {
+  if (mesh == nullptr || *mesh != nullptr) {
+    return -1;
+  }
+  draco::DecoderBuffer buffer;
+  buffer.Init(data, length);
+  auto type_statusor = draco::Decoder::GetEncodedGeometryType(&buffer);
+  if (!type_statusor.ok()) {
+    // TODO(draco-eng): Use enum instead.
+    return -2;
+  }
+  const draco::EncodedGeometryType geom_type = type_statusor.value();
+  if (geom_type != draco::TRIANGULAR_MESH) {
+    return -3;
+  }
+
+  draco::Decoder decoder;
+  auto statusor = decoder.DecodeMeshFromBuffer(&buffer);
+  if (!statusor.ok()) {
+    return -4;
+  }
+  std::unique_ptr<draco::Mesh> in_mesh = std::move(statusor).value();
+
+  *mesh = new DracoMesh();
+  DracoMesh *const unity_mesh = *mesh;
+  unity_mesh->num_faces = in_mesh->num_faces();
+  unity_mesh->num_vertices = in_mesh->num_points();
+  unity_mesh->num_attributes = in_mesh->num_attributes();
+  // Transfer mesh ownership into the opaque handle; reclaimed by
+  // ReleaseDracoMesh.
+  unity_mesh->private_mesh = static_cast<void *>(in_mesh.release());
+
+  return unity_mesh->num_faces;
+}
+
+// Returns the attribute at |index| of |mesh| via |attribute| (must be null
+// on input; release with ReleaseDracoAttribute). |index| is not range
+// checked here -- NOTE(review): confirm Mesh::attribute() bounds behavior.
+bool EXPORT_API GetAttribute(const DracoMesh *mesh, int index,
+                             DracoAttribute **attribute) {
+  if (mesh == nullptr || attribute == nullptr || *attribute != nullptr) {
+    return false;
+  }
+  const Mesh *const m = static_cast<const Mesh *>(mesh->private_mesh);
+  const PointAttribute *const attr = m->attribute(index);
+  if (attr == nullptr) {
+    return false;
+  }
+
+  *attribute = CreateDracoAttribute(attr);
+  return true;
+}
+
+// Returns the |index|-th attribute of the given |type| in |mesh| via
+// |attribute| (must be null on input; release with ReleaseDracoAttribute).
+// Returns false on bad arguments or when no such attribute exists.
+bool EXPORT_API GetAttributeByType(const DracoMesh *mesh,
+                                   GeometryAttribute::Type type, int index,
+                                   DracoAttribute **attribute) {
+  if (mesh == nullptr || attribute == nullptr || *attribute != nullptr) {
+    return false;
+  }
+  const Mesh *const m = static_cast<const Mesh *>(mesh->private_mesh);
+  // |type| is already a GeometryAttribute::Type; the previous self-cast
+  // through a local was a no-op and has been removed.
+  const PointAttribute *const attr = m->GetNamedAttribute(type, index);
+  if (attr == nullptr) {
+    return false;
+  }
+  *attribute = CreateDracoAttribute(attr);
+  return true;
+}
+
+// Returns the attribute with |unique_id| in |mesh| via |attribute| (must be
+// null on input; release with ReleaseDracoAttribute). Returns false on bad
+// arguments or when no attribute has that id.
+bool EXPORT_API GetAttributeByUniqueId(const DracoMesh *mesh, int unique_id,
+                                       DracoAttribute **attribute) {
+  if (mesh == nullptr || attribute == nullptr || *attribute != nullptr) {
+    return false;
+  }
+  const Mesh *const m = static_cast<const Mesh *>(mesh->private_mesh);
+  const PointAttribute *const attr = m->GetAttributeByUniqueId(unique_id);
+  if (attr == nullptr) {
+    return false;
+  }
+  *attribute = CreateDracoAttribute(attr);
+  return true;
+}
+
+// Copies the mesh's triangle indices into a new DracoData (DT_INT32, three
+// ints per face). |*indices| must be null on input; the result must be
+// released with ReleaseDracoData.
+bool EXPORT_API GetMeshIndices(const DracoMesh *mesh, DracoData **indices) {
+  if (mesh == nullptr || indices == nullptr || *indices != nullptr) {
+    return false;
+  }
+  const Mesh *const m = static_cast<const Mesh *>(mesh->private_mesh);
+  int *const temp_indices = new int[m->num_faces() * 3];
+  for (draco::FaceIndex face_id(0); face_id < m->num_faces(); ++face_id) {
+    const Mesh::Face &face = m->face(draco::FaceIndex(face_id));
+    // Bulk copy of one face; assumes Face stores three contiguous 32-bit
+    // indices -- NOTE(review): confirm against draco::Mesh::Face's layout.
+    memcpy(temp_indices + face_id.value() * 3,
+           reinterpret_cast<const int *>(face.data()), sizeof(int) * 3);
+  }
+  DracoData *const draco_data = new DracoData();
+  draco_data->data = temp_indices;
+  draco_data->data_type = DT_INT32;
+  *indices = draco_data;
+  return true;
+}
+
+// Copies the data of |attribute| into a new DracoData returned via |data|
+// (|*data| must be null on input; release with ReleaseDracoData). Returns
+// false on bad arguments or if the attribute data cannot be converted.
+bool EXPORT_API GetAttributeData(const DracoMesh *mesh,
+                                 const DracoAttribute *attribute,
+                                 DracoData **data) {
+  // Also reject a null |attribute|, matching the argument validation of the
+  // other accessors (previously it was dereferenced unchecked).
+  if (mesh == nullptr || attribute == nullptr || data == nullptr ||
+      *data != nullptr) {
+    return false;
+  }
+  const Mesh *const m = static_cast<const Mesh *>(mesh->private_mesh);
+  const PointAttribute *const attr =
+      static_cast<const PointAttribute *>(attribute->private_attribute);
+
+  void *temp_data = ConvertAttributeData(m->num_points(), attr);
+  if (temp_data == nullptr) {
+    return false;
+  }
+  DracoData *const draco_data = new DracoData();
+  draco_data->data = temp_data;
+  draco_data->data_type = static_cast<DataType>(attr->data_type());
+  *data = draco_data;
+  return true;
+}
+
+// Frees every array owned by |*mesh_ptr|, then the struct itself, and nulls
+// the caller's pointer. Safe to call with nulls. Deprecated alongside
+// DracoToUnityMesh.
+void ReleaseUnityMesh(DracoToUnityMesh **mesh_ptr) {
+  // Guard the double pointer itself, consistent with the other Release*
+  // entry points of this exported C API (previously dereferenced unchecked).
+  if (!mesh_ptr) {
+    return;
+  }
+  DracoToUnityMesh *mesh = *mesh_ptr;
+  if (!mesh) {
+    return;
+  }
+  if (mesh->indices) {
+    delete[] mesh->indices;
+    mesh->indices = nullptr;
+  }
+  if (mesh->position) {
+    delete[] mesh->position;
+    mesh->position = nullptr;
+  }
+  if (mesh->has_normal && mesh->normal) {
+    delete[] mesh->normal;
+    mesh->has_normal = false;
+    mesh->normal = nullptr;
+  }
+  if (mesh->has_texcoord && mesh->texcoord) {
+    delete[] mesh->texcoord;
+    mesh->has_texcoord = false;
+    mesh->texcoord = nullptr;
+  }
+  if (mesh->has_color && mesh->color) {
+    delete[] mesh->color;
+    mesh->has_color = false;
+    mesh->color = nullptr;
+  }
+  delete mesh;
+  *mesh_ptr = nullptr;
+}
+
+// Decodes the Draco mesh in |data| into a newly allocated DracoToUnityMesh
+// returned through |tmp_mesh| (|*tmp_mesh| should be null on input; the
+// caller owns the result and frees it with ReleaseUnityMesh). Returns the
+// number of faces on success or a negative error code. Deprecated; use
+// DecodeDracoMesh instead.
+int DecodeMeshForUnity(char *data, unsigned int length,
+                       DracoToUnityMesh **tmp_mesh) {
+  draco::DecoderBuffer buffer;
+  buffer.Init(data, length);
+  auto type_statusor = draco::Decoder::GetEncodedGeometryType(&buffer);
+  if (!type_statusor.ok()) {
+    // TODO(draco-eng): Use enum instead.
+    return -1;
+  }
+  const draco::EncodedGeometryType geom_type = type_statusor.value();
+  if (geom_type != draco::TRIANGULAR_MESH) {
+    return -2;
+  }
+
+  draco::Decoder decoder;
+  auto statusor = decoder.DecodeMeshFromBuffer(&buffer);
+  if (!statusor.ok()) {
+    return -3;
+  }
+  std::unique_ptr<draco::Mesh> in_mesh = std::move(statusor).value();
+
+  *tmp_mesh = new DracoToUnityMesh();
+  DracoToUnityMesh *unity_mesh = *tmp_mesh;
+  unity_mesh->num_faces = in_mesh->num_faces();
+  unity_mesh->num_vertices = in_mesh->num_points();
+
+  unity_mesh->indices = new int[in_mesh->num_faces() * 3];
+  for (draco::FaceIndex face_id(0); face_id < in_mesh->num_faces(); ++face_id) {
+    const Mesh::Face &face = in_mesh->face(draco::FaceIndex(face_id));
+    // Bulk copy of one face; assumes three contiguous 32-bit indices.
+    memcpy(unity_mesh->indices + face_id.value() * 3,
+           reinterpret_cast<const int *>(face.data()), sizeof(int) * 3);
+  }
+
+  // TODO(draco-eng): Add other attributes.
+  const auto pos_att =
+      in_mesh->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+  // A mesh without a position attribute cannot be converted; previously
+  // |pos_att| was dereferenced without this check.
+  if (pos_att == nullptr) {
+    ReleaseUnityMesh(tmp_mesh);
+    return -8;
+  }
+  unity_mesh->position = new float[in_mesh->num_points() * 3];
+  for (draco::PointIndex i(0); i < in_mesh->num_points(); ++i) {
+    const draco::AttributeValueIndex val_index = pos_att->mapped_index(i);
+    if (!pos_att->ConvertValue<float, 3>(
+            val_index, unity_mesh->position + i.value() * 3)) {
+      // Release through |tmp_mesh| so the caller's pointer is reset to null
+      // instead of dangling (releasing via the local copy previously left
+      // *tmp_mesh pointing at freed memory).
+      ReleaseUnityMesh(tmp_mesh);
+      return -8;
+    }
+  }
+  // Get normal attributes.
+  const auto normal_att =
+      in_mesh->GetNamedAttribute(draco::GeometryAttribute::NORMAL);
+  if (normal_att != nullptr) {
+    unity_mesh->normal = new float[in_mesh->num_points() * 3];
+    unity_mesh->has_normal = true;
+    for (draco::PointIndex i(0); i < in_mesh->num_points(); ++i) {
+      const draco::AttributeValueIndex val_index = normal_att->mapped_index(i);
+      if (!normal_att->ConvertValue<float, 3>(
+              val_index, unity_mesh->normal + i.value() * 3)) {
+        ReleaseUnityMesh(tmp_mesh);
+        return -8;
+      }
+    }
+  }
+  // Get color attributes.
+  const auto color_att =
+      in_mesh->GetNamedAttribute(draco::GeometryAttribute::COLOR);
+  if (color_att != nullptr) {
+    unity_mesh->color = new float[in_mesh->num_points() * 4];
+    unity_mesh->has_color = true;
+    for (draco::PointIndex i(0); i < in_mesh->num_points(); ++i) {
+      const draco::AttributeValueIndex val_index = color_att->mapped_index(i);
+      if (!color_att->ConvertValue<float, 4>(
+              val_index, unity_mesh->color + i.value() * 4)) {
+        ReleaseUnityMesh(tmp_mesh);
+        return -8;
+      }
+      if (color_att->num_components() < 4) {
+        // If the alpha component wasn't set in the input data we should set
+        // it to an opaque value.
+        unity_mesh->color[i.value() * 4 + 3] = 1.f;
+      }
+    }
+  }
+  // Get texture coordinates attributes.
+  const auto texcoord_att =
+      in_mesh->GetNamedAttribute(draco::GeometryAttribute::TEX_COORD);
+  if (texcoord_att != nullptr) {
+    unity_mesh->texcoord = new float[in_mesh->num_points() * 2];
+    unity_mesh->has_texcoord = true;
+    for (draco::PointIndex i(0); i < in_mesh->num_points(); ++i) {
+      const draco::AttributeValueIndex val_index =
+          texcoord_att->mapped_index(i);
+      if (!texcoord_att->ConvertValue<float, 2>(
+              val_index, unity_mesh->texcoord + i.value() * 2)) {
+        ReleaseUnityMesh(tmp_mesh);
+        return -8;
+      }
+    }
+  }
+
+  return in_mesh->num_faces();
+}
+
+} // namespace draco
+
+#endif // DRACO_UNITY_PLUGIN
diff --git a/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.h b/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.h
new file mode 100644
index 0000000..2f87888
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin.h
@@ -0,0 +1,154 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_UNITY_DRACO_UNITY_PLUGIN_H_
+#define DRACO_UNITY_DRACO_UNITY_PLUGIN_H_
+
+#include "draco/attributes/geometry_attribute.h"
+#include "draco/compression/config/compression_shared.h"
+#include "draco/compression/decode.h"
+#include "draco/core/draco_types.h"
+
+#ifdef DRACO_UNITY_PLUGIN
+
+// If compiling with Visual Studio.
+#if defined(_MSC_VER)
+#define EXPORT_API __declspec(dllexport)
+#else
+// Other platforms don't need this.
+#define EXPORT_API
+#endif // defined(_MSC_VER)
+
+namespace draco {
+
+extern "C" {
+
+// Struct representing Draco attribute data within Unity.
+struct EXPORT_API DracoData {
+  DracoData() : data_type(DT_INVALID), data(nullptr) {}
+
+  // Element type of the |data| array.
+  DataType data_type;
+  // Heap-allocated (new[]) array owned by this struct; freed with
+  // ReleaseDracoData.
+  void *data;
+};
+
+// Struct representing a Draco attribute within Unity.
+struct EXPORT_API DracoAttribute {
+  DracoAttribute()
+      : attribute_type(GeometryAttribute::INVALID),
+        data_type(DT_INVALID),
+        num_components(0),
+        unique_id(0),
+        private_attribute(nullptr) {}
+
+  GeometryAttribute::Type attribute_type;
+  DataType data_type;
+  int num_components;
+  int unique_id;
+  // Non-owning pointer to the underlying draco attribute; its lifetime is
+  // tied to the DracoMesh it was obtained from.
+  const void *private_attribute;
+};
+
+// Struct representing a Draco mesh within Unity.
+struct EXPORT_API DracoMesh {
+  DracoMesh()
+      : num_faces(0),
+        num_vertices(0),
+        num_attributes(0),
+        private_mesh(nullptr) {}
+
+  int num_faces;
+  int num_vertices;
+  int num_attributes;
+  // Owning pointer to the decoded draco mesh; freed by ReleaseDracoMesh.
+  void *private_mesh;
+};
+
+// Release data associated with DracoMesh.
+void EXPORT_API ReleaseDracoMesh(DracoMesh **mesh_ptr);
+// Release data associated with DracoAttribute.
+void EXPORT_API ReleaseDracoAttribute(DracoAttribute **attr_ptr);
+// Release attribute data.
+void EXPORT_API ReleaseDracoData(DracoData **data_ptr);
+
+// Decodes compressed Draco mesh in |data| and returns |mesh|. On input, |mesh|
+// must be null. The returned |mesh| must be released with ReleaseDracoMesh.
+int EXPORT_API DecodeDracoMesh(char *data, unsigned int length,
+ DracoMesh **mesh);
+
+// Returns |attribute| at |index| in |mesh|. On input, |attribute| must be
+// null. The returned |attribute| must be released with ReleaseDracoAttribute.
+bool EXPORT_API GetAttribute(const DracoMesh *mesh, int index,
+ DracoAttribute **attribute);
+// Returns |attribute| of |type| at |index| in |mesh|. E.g. If the mesh has
+// two texture coordinates then GetAttributeByType(mesh,
+// AttributeType.TEX_COORD, 1, &attr); will return the second TEX_COORD
+// attribute. On input, |attribute| must be null. The returned |attribute| must
+// be released with ReleaseDracoAttribute.
+bool EXPORT_API GetAttributeByType(const DracoMesh *mesh,
+ GeometryAttribute::Type type, int index,
+ DracoAttribute **attribute);
+// Returns |attribute| with |unique_id| in |mesh|. On input, |attribute| must be
+// null. The returned |attribute| must be released with ReleaseDracoAttribute.
+bool EXPORT_API GetAttributeByUniqueId(const DracoMesh *mesh, int unique_id,
+ DracoAttribute **attribute);
+// Returns the indices as well as the type of data in |indices|. On input,
+// |indices| must be null. The returned |indices| must be released with
+// ReleaseDracoData.
+bool EXPORT_API GetMeshIndices(const DracoMesh *mesh, DracoData **indices);
+// Returns the attribute data from attribute as well as the type of data in
+// |data|. On input, |data| must be null. The returned |data| must be released
+// with ReleaseDracoData.
+bool EXPORT_API GetAttributeData(const DracoMesh *mesh,
+ const DracoAttribute *attribute,
+ DracoData **data);
+
+// DracoToUnityMesh is deprecated.
+struct EXPORT_API DracoToUnityMesh {
+  DracoToUnityMesh()
+      : num_faces(0),
+        indices(nullptr),
+        num_vertices(0),
+        position(nullptr),
+        has_normal(false),
+        normal(nullptr),
+        has_texcoord(false),
+        texcoord(nullptr),
+        has_color(false),
+        color(nullptr) {}
+
+  int num_faces;
+  // 3 indices per face; all arrays below are new[] allocations owned by
+  // this struct and freed by ReleaseUnityMesh.
+  int *indices;
+  int num_vertices;
+  // 3 floats per vertex.
+  float *position;
+  bool has_normal;
+  // 3 floats per vertex; valid only when has_normal is true.
+  float *normal;
+  bool has_texcoord;
+  // 2 floats per vertex; valid only when has_texcoord is true.
+  float *texcoord;
+  bool has_color;
+  // 4 floats (RGBA) per vertex; valid only when has_color is true.
+  float *color;
+};
+
+// ReleaseUnityMesh is deprecated.
+void EXPORT_API ReleaseUnityMesh(DracoToUnityMesh **mesh_ptr);
+
+// To use this function, you do not allocate memory for |tmp_mesh|, just
+// define and pass a null pointer. Otherwise there will be memory leak.
+// DecodeMeshForUnity is deprecated.
+int EXPORT_API DecodeMeshForUnity(char *data, unsigned int length,
+ DracoToUnityMesh **tmp_mesh);
+} // extern "C"
+
+} // namespace draco
+
+#endif // DRACO_UNITY_PLUGIN
+
+#endif // DRACO_UNITY_DRACO_UNITY_PLUGIN_H_
diff --git a/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin_test.cc b/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin_test.cc
new file mode 100644
index 0000000..81be7ee
--- /dev/null
+++ b/libs/assimp/contrib/draco/src/draco/unity/draco_unity_plugin_test.cc
@@ -0,0 +1,243 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/unity/draco_unity_plugin.h"
+
+#include <fstream>
+#include <memory>
+#include <sstream>
+#include <string>
+
+#include "draco/core/draco_test_base.h"
+#include "draco/core/draco_test_utils.h"
+
+namespace {
+
+// Reads |file_name| from the test-data directory and decodes it into a
+// DracoMesh. Returns nullptr on any read failure; the caller owns the
+// returned mesh and frees it with ReleaseDracoMesh.
+draco::DracoMesh *DecodeToDracoMesh(const std::string &file_name) {
+  std::ifstream input_file(draco::GetTestFileFullPath(file_name),
+                           std::ios::binary);
+  if (!input_file) {
+    return nullptr;
+  }
+  // Read the file stream into a buffer.
+  std::streampos file_size = 0;
+  input_file.seekg(0, std::ios::end);
+  file_size = input_file.tellg() - file_size;
+  input_file.seekg(0, std::ios::beg);
+  std::vector<char> data(file_size);
+  input_file.read(data.data(), file_size);
+  if (data.empty()) {
+    return nullptr;
+  }
+
+  draco::DracoMesh *draco_mesh = nullptr;
+  // The decode return code is ignored on purpose: failure leaves
+  // |draco_mesh| null, which the callers assert on.
+  draco::DecodeDracoMesh(data.data(), data.size(), &draco_mesh);
+  return draco_mesh;
+}
+
+// Decodes a known test asset and sanity-checks the plugin's mesh, index and
+// per-attribute accessors, releasing every handle it obtains.
+TEST(DracoUnityPluginTest, TestDecode) {
+  draco::DracoMesh *draco_mesh =
+      DecodeToDracoMesh("test_nm.obj.edgebreaker.cl4.2.2.drc");
+  ASSERT_NE(draco_mesh, nullptr);
+  ASSERT_EQ(draco_mesh->num_faces, 170);
+  ASSERT_EQ(draco_mesh->num_vertices, 99);
+  ASSERT_NE(draco_mesh->private_mesh, nullptr);
+
+  draco::DracoData *indices = nullptr;
+  ASSERT_TRUE(GetMeshIndices(draco_mesh, &indices));
+  ASSERT_EQ(indices->data_type, draco::DT_INT32);
+  draco::ReleaseDracoData(&indices);
+
+  // Every attribute must be retrievable and have a valid type, a positive
+  // component count and a backing draco attribute.
+  for (int i = 0; i < draco_mesh->num_attributes; ++i) {
+    draco::DracoAttribute *draco_attribute = nullptr;
+    ASSERT_TRUE(draco::GetAttribute(draco_mesh, i, &draco_attribute));
+    ASSERT_NE(draco_attribute->data_type, draco::DT_INVALID);
+    ASSERT_GT(draco_attribute->num_components, 0);
+    ASSERT_NE(draco_attribute->private_attribute, nullptr);
+
+    draco::DracoData *attribute_data = nullptr;
+    ASSERT_TRUE(
+        draco::GetAttributeData(draco_mesh, draco_attribute, &attribute_data));
+    draco::ReleaseDracoData(&attribute_data);
+    draco::ReleaseDracoAttribute(&draco_attribute);
+  }
+  draco::ReleaseDracoMesh(&draco_mesh);
+}
+
+// Exercises GetAttributeByType against two assets: one with position+color
+// only (lookups for other types must fail), and one carrying normal,
+// texcoord and generic attributes (lookups must succeed with the expected
+// type/component/id metadata).
+TEST(DracoUnityPluginTest, TestAttributeTypes) {
+  draco::DracoMesh *draco_mesh = DecodeToDracoMesh("color_attr.drc");
+  ASSERT_NE(draco_mesh, nullptr);
+
+  draco::DracoAttribute *pos_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::POSITION, 0, &pos_attribute));
+  ASSERT_EQ(pos_attribute->attribute_type, draco::GeometryAttribute::POSITION);
+  ASSERT_EQ(pos_attribute->data_type, draco::DT_FLOAT32);
+  ASSERT_EQ(pos_attribute->num_components, 3);
+  ASSERT_EQ(pos_attribute->unique_id, 0);
+  ASSERT_NE(pos_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&pos_attribute);
+
+  draco::DracoAttribute *color_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::COLOR, 0, &color_attribute));
+  ASSERT_EQ(color_attribute->attribute_type, draco::GeometryAttribute::COLOR);
+  ASSERT_EQ(color_attribute->data_type, draco::DT_UINT8);
+  ASSERT_EQ(color_attribute->num_components, 4);
+  ASSERT_EQ(color_attribute->unique_id, 1);
+  ASSERT_NE(color_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&color_attribute);
+
+  // Types absent from this asset must not be found.
+  draco::DracoAttribute *bad_attribute = nullptr;
+  ASSERT_FALSE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::NORMAL, 0, &bad_attribute));
+  ASSERT_FALSE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::TEX_COORD, 0, &bad_attribute));
+  ASSERT_FALSE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::GENERIC, 0, &bad_attribute));
+
+  draco::ReleaseDracoMesh(&draco_mesh);
+
+  draco_mesh = DecodeToDracoMesh("cube_att_sub_o_2.drc");
+  ASSERT_NE(draco_mesh, nullptr);
+
+  draco::DracoAttribute *norm_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::NORMAL, 0, &norm_attribute));
+  ASSERT_EQ(norm_attribute->attribute_type, draco::GeometryAttribute::NORMAL);
+  ASSERT_EQ(norm_attribute->data_type, draco::DT_FLOAT32);
+  ASSERT_EQ(norm_attribute->num_components, 3);
+  ASSERT_EQ(norm_attribute->unique_id, 2);
+  ASSERT_NE(norm_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&norm_attribute);
+
+  draco::DracoAttribute *texcoord_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::TEX_COORD, 0, &texcoord_attribute));
+  ASSERT_EQ(texcoord_attribute->attribute_type,
+            draco::GeometryAttribute::TEX_COORD);
+  ASSERT_EQ(texcoord_attribute->data_type, draco::DT_FLOAT32);
+  ASSERT_EQ(texcoord_attribute->num_components, 2);
+  ASSERT_EQ(texcoord_attribute->unique_id, 1);
+  ASSERT_NE(texcoord_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&texcoord_attribute);
+
+  draco::DracoAttribute *generic_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::GENERIC, 0, &generic_attribute));
+  ASSERT_EQ(generic_attribute->attribute_type,
+            draco::GeometryAttribute::GENERIC);
+  ASSERT_EQ(generic_attribute->data_type, draco::DT_UINT8);
+  ASSERT_EQ(generic_attribute->num_components, 1);
+  ASSERT_EQ(generic_attribute->unique_id, 3);
+  ASSERT_NE(generic_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&generic_attribute);
+
+  // Out-of-range sub-index for an existing type must fail.
+  ASSERT_FALSE(draco::GetAttributeByType(
+      draco_mesh, draco::GeometryAttribute::TEX_COORD, 1, &bad_attribute));
+
+  draco::ReleaseDracoMesh(&draco_mesh);
+}
+
+// Exercises GetAttributeByUniqueId: ids 0-3 map to position, texcoord,
+// normal and generic respectively in this asset; id 4 must fail.
+TEST(DracoUnityPluginTest, TestAttributeUniqueId) {
+  draco::DracoMesh *draco_mesh = DecodeToDracoMesh("cube_att_sub_o_2.drc");
+  ASSERT_NE(draco_mesh, nullptr);
+
+  draco::DracoAttribute *pos_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByUniqueId(draco_mesh, 0, &pos_attribute));
+  ASSERT_EQ(pos_attribute->attribute_type, draco::GeometryAttribute::POSITION);
+  ASSERT_EQ(pos_attribute->data_type, draco::DT_FLOAT32);
+  ASSERT_EQ(pos_attribute->num_components, 3);
+  ASSERT_EQ(pos_attribute->unique_id, 0);
+  ASSERT_NE(pos_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&pos_attribute);
+
+  draco::DracoAttribute *norm_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByUniqueId(draco_mesh, 2, &norm_attribute));
+  ASSERT_EQ(norm_attribute->attribute_type, draco::GeometryAttribute::NORMAL);
+  ASSERT_EQ(norm_attribute->data_type, draco::DT_FLOAT32);
+  ASSERT_EQ(norm_attribute->num_components, 3);
+  ASSERT_EQ(norm_attribute->unique_id, 2);
+  ASSERT_NE(norm_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&norm_attribute);
+
+  draco::DracoAttribute *texcoord_attribute = nullptr;
+  ASSERT_TRUE(
+      draco::GetAttributeByUniqueId(draco_mesh, 1, &texcoord_attribute));
+  ASSERT_EQ(texcoord_attribute->attribute_type,
+            draco::GeometryAttribute::TEX_COORD);
+  ASSERT_EQ(texcoord_attribute->data_type, draco::DT_FLOAT32);
+  ASSERT_EQ(texcoord_attribute->num_components, 2);
+  ASSERT_EQ(texcoord_attribute->unique_id, 1);
+  ASSERT_NE(texcoord_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&texcoord_attribute);
+
+  draco::DracoAttribute *generic_attribute = nullptr;
+  ASSERT_TRUE(draco::GetAttributeByUniqueId(draco_mesh, 3, &generic_attribute));
+  ASSERT_EQ(generic_attribute->attribute_type,
+            draco::GeometryAttribute::GENERIC);
+  ASSERT_EQ(generic_attribute->data_type, draco::DT_UINT8);
+  ASSERT_EQ(generic_attribute->num_components, 1);
+  ASSERT_EQ(generic_attribute->unique_id, 3);
+  ASSERT_NE(generic_attribute->private_attribute, nullptr);
+  draco::ReleaseDracoAttribute(&generic_attribute);
+
+  draco::DracoAttribute *bad_attribute = nullptr;
+  ASSERT_FALSE(draco::GetAttributeByUniqueId(draco_mesh, 4, &bad_attribute));
+
+  draco::ReleaseDracoMesh(&draco_mesh);
+}
+
+// Fixture for the deprecated DracoToUnityMesh decoding API.
+class DeprecatedDracoUnityPluginTest : public ::testing::Test {
+ protected:
+  DeprecatedDracoUnityPluginTest() : unity_mesh_(nullptr) {}
+
+  // Reads |file_name| from the test-data directory, decodes it through the
+  // deprecated DecodeMeshForUnity() and verifies the face/vertex counts and
+  // the presence of a normal attribute.
+  void TestDecodingToDracoUnityMesh(const std::string &file_name,
+                                    int expected_num_faces,
+                                    int expected_num_vertices) {
+    // Tests that decoders can successfully skip attribute transform.
+    std::ifstream input_file(draco::GetTestFileFullPath(file_name),
+                             std::ios::binary);
+    ASSERT_TRUE(input_file);
+
+    // Read the file stream into a buffer.
+    std::streampos file_size = 0;
+    input_file.seekg(0, std::ios::end);
+    file_size = input_file.tellg() - file_size;
+    input_file.seekg(0, std::ios::beg);
+    std::vector<char> data(file_size);
+    input_file.read(data.data(), file_size);
+
+    ASSERT_FALSE(data.empty());
+
+    const int num_faces =
+        draco::DecodeMeshForUnity(data.data(), data.size(), &unity_mesh_);
+
+    ASSERT_EQ(num_faces, expected_num_faces);
+    ASSERT_EQ(unity_mesh_->num_faces, expected_num_faces);
+    ASSERT_EQ(unity_mesh_->num_vertices, expected_num_vertices);
+    ASSERT_TRUE(unity_mesh_->has_normal);
+    ASSERT_NE(unity_mesh_->normal, nullptr);
+    // TODO(fgalligan): Also test color and tex_coord attributes.
+
+    draco::ReleaseUnityMesh(&unity_mesh_);
+  }
+
+  // Owned between DecodeMeshForUnity and ReleaseUnityMesh within a test.
+  draco::DracoToUnityMesh *unity_mesh_;
+};
+
+TEST_F(DeprecatedDracoUnityPluginTest, DeprecatedDecodingToDracoUnityMesh) {
+  // 170 faces / 99 vertices are the known counts for this test asset.
+  TestDecodingToDracoUnityMesh("test_nm.obj.edgebreaker.1.0.0.drc", 170, 99);
+}
+} // namespace