path: root/libs/assimp/code/AssetLib/Assjson
Diffstat (limited to 'libs/assimp/code/AssetLib/Assjson')
-rw-r--r--  libs/assimp/code/AssetLib/Assjson/cencode.c          117
-rw-r--r--  libs/assimp/code/AssetLib/Assjson/cencode.h           35
-rw-r--r--  libs/assimp/code/AssetLib/Assjson/json_exporter.cpp  810
-rw-r--r--  libs/assimp/code/AssetLib/Assjson/mesh_splitter.cpp  319
-rw-r--r--  libs/assimp/code/AssetLib/Assjson/mesh_splitter.h     52
5 files changed, 0 insertions, 1333 deletions
diff --git a/libs/assimp/code/AssetLib/Assjson/cencode.c b/libs/assimp/code/AssetLib/Assjson/cencode.c
deleted file mode 100644
index 614a267..0000000
--- a/libs/assimp/code/AssetLib/Assjson/cencode.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
-cencode.c - C source for a base64 encoding algorithm implementation
-
-This is part of the libb64 project, and has been placed in the public domain.
-For details, see http://sourceforge.net/projects/libb64
-*/
-
-#include "cencode.h" // changed from <B64/cencode.h>
-
-const int CHARS_PER_LINE = 72;
-
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4244)
-#endif // _MSC_VER
-
-void base64_init_encodestate(base64_encodestate* state_in)
-{
- state_in->step = step_A;
- state_in->result = 0;
- state_in->stepcount = 0;
-}
-
-char base64_encode_value(char value_in)
-{
- static const char* encoding = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
- if (value_in > 63) return '=';
- return encoding[(int)value_in];
-}
-
-int base64_encode_block(const char* plaintext_in, int length_in, char* code_out, base64_encodestate* state_in)
-{
- const char* plainchar = plaintext_in;
- const char* const plaintextend = plaintext_in + length_in;
- char* codechar = code_out;
- char result;
- char fragment;
-
- result = state_in->result;
-
- switch (state_in->step)
- {
- while (1)
- {
- case step_A:
- if (plainchar == plaintextend)
- {
- state_in->result = result;
- state_in->step = step_A;
- return (int)(codechar - code_out);
- }
- fragment = *plainchar++;
- result = (fragment & 0x0fc) >> 2;
- *codechar++ = base64_encode_value(result);
- result = (fragment & 0x003) << 4;
- case step_B:
- if (plainchar == plaintextend)
- {
- state_in->result = result;
- state_in->step = step_B;
- return (int)(codechar - code_out);
- }
- fragment = *plainchar++;
- result |= (fragment & 0x0f0) >> 4;
- *codechar++ = base64_encode_value(result);
- result = (fragment & 0x00f) << 2;
- case step_C:
- if (plainchar == plaintextend)
- {
- state_in->result = result;
- state_in->step = step_C;
- return (int)(codechar - code_out);
- }
- fragment = *plainchar++;
- result |= (fragment & 0x0c0) >> 6;
- *codechar++ = base64_encode_value(result);
- result = (fragment & 0x03f) >> 0;
- *codechar++ = base64_encode_value(result);
-
- ++(state_in->stepcount);
- if (state_in->stepcount == CHARS_PER_LINE/4)
- {
- *codechar++ = '\n';
- state_in->stepcount = 0;
- }
- }
- }
- /* control should not reach here */
- return (int)(codechar - code_out);
-}
-
-int base64_encode_blockend(char* code_out, base64_encodestate* state_in)
-{
- char* codechar = code_out;
-
- switch (state_in->step)
- {
- case step_B:
- *codechar++ = base64_encode_value(state_in->result);
- *codechar++ = '=';
- *codechar++ = '=';
- break;
- case step_C:
- *codechar++ = base64_encode_value(state_in->result);
- *codechar++ = '=';
- break;
- case step_A:
- break;
- }
- *codechar++ = '\n';
-
- return (int)(codechar - code_out);
-}
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif // _MSC_VER
diff --git a/libs/assimp/code/AssetLib/Assjson/cencode.h b/libs/assimp/code/AssetLib/Assjson/cencode.h
deleted file mode 100644
index a7893c4..0000000
--- a/libs/assimp/code/AssetLib/Assjson/cencode.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-cencode.h - C header for a base64 encoding algorithm
-
-This is part of the libb64 project, and has been placed in the public domain.
-For details, see http://sourceforge.net/projects/libb64
-*/
-
-#ifndef BASE64_CENCODE_H
-#define BASE64_CENCODE_H
-
-#ifdef _MSC_VER
-#pragma warning(disable : 4127 )
-#endif // _MSC_VER
-
-typedef enum
-{
- step_A, step_B, step_C
-} base64_encodestep;
-
-typedef struct
-{
- base64_encodestep step;
- char result;
- int stepcount;
-} base64_encodestate;
-
-void base64_init_encodestate(base64_encodestate* state_in);
-
-char base64_encode_value(char value_in);
-
-int base64_encode_block(const char* plaintext_in, int length_in, char* code_out, base64_encodestate* state_in);
-
-int base64_encode_blockend(char* code_out, base64_encodestate* state_in);
-
-#endif /* BASE64_CENCODE_H */
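Note: the two libb64 files removed above implement a streaming base64 encoder. base64_init_encodestate() resets the three-step state machine, base64_encode_block() may be called repeatedly on consecutive input chunks, and base64_encode_blockend() flushes the pending bits, the '=' padding and a trailing newline. A minimal usage sketch against the declarations in cencode.h (the helper name encode_to_base64 is illustrative and not part of the deleted sources; the buffer bound mirrors the one json_exporter.cpp below allocates):

#include <algorithm>
#include <string>

extern "C" {
#include "cencode.h"
}

// Illustrative helper (not part of the deleted files): encode one buffer in a
// single pass using the streaming API declared in cencode.h.
std::string encode_to_base64(const char *data, int len) {
    base64_encodestate state;
    base64_init_encodestate(&state);

    // 4/3 expansion plus line breaks and padding; 2 * len (minimum 16) is the
    // same generous bound json_exporter.cpp uses for its temporary buffer.
    std::string out(static_cast<size_t>(std::max(len * 2, 16)), '\0');
    int written = base64_encode_block(data, len, &out[0], &state);
    written += base64_encode_blockend(&out[0] + written, &state);
    out.resize(static_cast<size_t>(written));
    return out; // still contains '\n' breaks every CHARS_PER_LINE (72) output characters
}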
diff --git a/libs/assimp/code/AssetLib/Assjson/json_exporter.cpp b/libs/assimp/code/AssetLib/Assjson/json_exporter.cpp
deleted file mode 100644
index 7b2c8ec..0000000
--- a/libs/assimp/code/AssetLib/Assjson/json_exporter.cpp
+++ /dev/null
@@ -1,810 +0,0 @@
-/*
-Assimp2Json
-Copyright (c) 2011, Alexander C. Gessler
-
-Licensed under a 3-clause BSD license. See the LICENSE file for more information.
-
-*/
-
-#ifndef ASSIMP_BUILD_NO_EXPORT
-#ifndef ASSIMP_BUILD_NO_ASSJSON_EXPORTER
-
-#include <assimp/scene.h>
-#include <assimp/Exporter.hpp>
-#include <assimp/IOStream.hpp>
-#include <assimp/IOSystem.hpp>
-#include <assimp/Importer.hpp>
-#include <assimp/Exceptional.h>
-
-#include <cassert>
-#include <limits>
-#include <memory>
-#include <sstream>
-
-#define CURRENT_FORMAT_VERSION 100
-
-// grab scoped_ptr from assimp to avoid a dependency on boost.
-//#include <assimp/../../code/BoostWorkaround/boost/scoped_ptr.hpp>
-
-#include "mesh_splitter.h"
-
-extern "C" {
-#include "cencode.h"
-}
-namespace Assimp {
-
-void ExportAssimp2Json(const char *, Assimp::IOSystem *, const aiScene *, const Assimp::ExportProperties *);
-
-// small utility class to simplify serializing the aiScene to Json
-class JSONWriter {
-public:
- enum {
- Flag_DoNotIndent = 0x1,
- Flag_WriteSpecialFloats = 0x2,
- Flag_SkipWhitespaces = 0x4
- };
-
- JSONWriter(Assimp::IOStream &out, unsigned int flags = 0u) :
- out(out), indent (""), newline("\n"), space(" "), buff (), first(false), flags(flags) {
- // make sure that all formatting happens using the standard, C locale and not the user's current locale
- buff.imbue(std::locale("C"));
- if (flags & Flag_SkipWhitespaces) {
- newline = "";
- space = "";
- }
- }
-
- ~JSONWriter() {
- Flush();
- }
-
- void Flush() {
- const std::string s = buff.str();
- out.Write(s.c_str(), s.length(), 1);
- buff.clear();
- }
-
- void PushIndent() {
- indent += '\t';
- }
-
- void PopIndent() {
- indent.erase(indent.end() - 1);
- }
-
- void Key(const std::string &name) {
- AddIndentation();
- Delimit();
- buff << '\"' + name + "\":" << space;
- }
-
- template <typename Literal>
- void Element(const Literal &name) {
- AddIndentation();
- Delimit();
-
- LiteralToString(buff, name) << newline;
- }
-
- template <typename Literal>
- void SimpleValue(const Literal &s) {
- LiteralToString(buff, s) << newline;
- }
-
- void SimpleValue(const void *buffer, size_t len) {
- base64_encodestate s;
- base64_init_encodestate(&s);
-
- char *const cur_out = new char[std::max(len * 2, static_cast<size_t>(16u))];
- const int n = base64_encode_block(reinterpret_cast<const char *>(buffer), static_cast<int>(len), cur_out, &s);
- cur_out[n + base64_encode_blockend(cur_out + n, &s)] = '\0';
-
- // base64 encoding may add newlines, but JSON strings may not contain 'real' newlines
- // (only escaped ones). Remove any newlines from the output.
- for (char *cur = cur_out; *cur; ++cur) {
- if (*cur == '\n') {
- *cur = ' ';
- }
- }
-
- buff << '\"' << cur_out << "\"" << newline;
- delete[] cur_out;
- }
-
- void StartObj(bool is_element = false) {
- // if this appears as a plain array element, we need to insert a delimiter and we should also indent it
- if (is_element) {
- AddIndentation();
- if (!first) {
- buff << ',';
- }
- }
- first = true;
- buff << "{" << newline;
- PushIndent();
- }
-
- void EndObj() {
- PopIndent();
- AddIndentation();
- first = false;
- buff << "}" << newline;
- }
-
- void StartArray(bool is_element = false) {
- // if this appears as a plain array element, we need to insert a delimiter and we should also indent it
- if (is_element) {
- AddIndentation();
- if (!first) {
- buff << ',';
- }
- }
- first = true;
- buff << "[" << newline;
- PushIndent();
- }
-
- void EndArray() {
- PopIndent();
- AddIndentation();
- buff << "]" << newline;
- first = false;
- }
-
- void AddIndentation() {
- if (!(flags & Flag_DoNotIndent) && !(flags & Flag_SkipWhitespaces)) {
- buff << indent;
- }
- }
-
- void Delimit() {
- if (!first) {
- buff << ',';
- } else {
- buff << space;
- first = false;
- }
- }
-
-private:
- template <typename Literal>
- std::stringstream &LiteralToString(std::stringstream &stream, const Literal &s) {
- stream << s;
- return stream;
- }
-
- std::stringstream &LiteralToString(std::stringstream &stream, const aiString &s) {
- std::string t;
-
- // escape backslashes and quotes, which would otherwise render the JSON invalid if left as is
- t.reserve(s.length);
- for (size_t i = 0; i < s.length; ++i) {
-
- if (s.data[i] == '\\' || s.data[i] == '\'' || s.data[i] == '\"') {
- t.push_back('\\');
- }
-
- t.push_back(s.data[i]);
- }
- stream << "\"";
- stream << t;
- stream << "\"";
- return stream;
- }
-
- std::stringstream &LiteralToString(std::stringstream &stream, float f) {
- if (!std::numeric_limits<float>::is_iec559) {
- // on a non IEEE-754 platform, we make no assumptions about the representation or existence
- // of special floating-point numbers.
- stream << f;
- return stream;
- }
-
- // JSON does not support writing Inf/Nan
- // [RFC 4627: "Numeric values that cannot be represented as sequences of digits
- // (such as Infinity and NaN) are not permitted."]
- // Nevertheless, many parsers will accept the special keywords Infinity, -Infinity and NaN
- if (std::numeric_limits<float>::infinity() == fabs(f)) {
- if (flags & Flag_WriteSpecialFloats) {
- stream << (f < 0 ? "\"-" : "\"") + std::string("Infinity\"");
- return stream;
- }
- // we should print this warning, but we can't - this is called from within a generic assimp exporter, we cannot use cerr
- // std::cerr << "warning: cannot represent infinite number literal, substituting 0 instead (use -i flag to enforce Infinity/NaN)" << std::endl;
- stream << "0.0";
- return stream;
- }
- // f!=f is the most reliable test for NaNs that I know of
- else if (f != f) {
- if (flags & Flag_WriteSpecialFloats) {
- stream << "\"NaN\"";
- return stream;
- }
- // we should print this warning, but we can't - this is called from within a generic assimp exporter, we cannot use cerr
- // std::cerr << "warning: cannot represent NaN literal, substituting 0 instead (use -i flag to enforce Infinity/NaN)" << std::endl;
- stream << "0.0";
- return stream;
- }
-
- stream << f;
- return stream;
- }
-
-private:
- Assimp::IOStream &out;
- std::string indent;
- std::string newline;
- std::string space;
- std::stringstream buff;
- bool first;
-
- unsigned int flags;
-};
-
-void Write(JSONWriter &out, const aiVector3D &ai, bool is_elem = true) {
- out.StartArray(is_elem);
- out.Element(ai.x);
- out.Element(ai.y);
- out.Element(ai.z);
- out.EndArray();
-}
-
-void Write(JSONWriter &out, const aiQuaternion &ai, bool is_elem = true) {
- out.StartArray(is_elem);
- out.Element(ai.w);
- out.Element(ai.x);
- out.Element(ai.y);
- out.Element(ai.z);
- out.EndArray();
-}
-
-void Write(JSONWriter &out, const aiColor3D &ai, bool is_elem = true) {
- out.StartArray(is_elem);
- out.Element(ai.r);
- out.Element(ai.g);
- out.Element(ai.b);
- out.EndArray();
-}
-
-void Write(JSONWriter &out, const aiMatrix4x4 &ai, bool is_elem = true) {
- out.StartArray(is_elem);
- for (unsigned int x = 0; x < 4; ++x) {
- for (unsigned int y = 0; y < 4; ++y) {
- out.Element(ai[x][y]);
- }
- }
- out.EndArray();
-}
-
-void Write(JSONWriter &out, const aiBone &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mName);
-
- out.Key("offsetmatrix");
- Write(out, ai.mOffsetMatrix, false);
-
- out.Key("weights");
- out.StartArray();
- for (unsigned int i = 0; i < ai.mNumWeights; ++i) {
- out.StartArray(true);
- out.Element(ai.mWeights[i].mVertexId);
- out.Element(ai.mWeights[i].mWeight);
- out.EndArray();
- }
- out.EndArray();
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiFace &ai, bool is_elem = true) {
- out.StartArray(is_elem);
- for (unsigned int i = 0; i < ai.mNumIndices; ++i) {
- out.Element(ai.mIndices[i]);
- }
- out.EndArray();
-}
-
-void Write(JSONWriter &out, const aiMesh &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mName);
-
- out.Key("materialindex");
- out.SimpleValue(ai.mMaterialIndex);
-
- out.Key("primitivetypes");
- out.SimpleValue(ai.mPrimitiveTypes);
-
- out.Key("vertices");
- out.StartArray();
- for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
- out.Element(ai.mVertices[i].x);
- out.Element(ai.mVertices[i].y);
- out.Element(ai.mVertices[i].z);
- }
- out.EndArray();
-
- if (ai.HasNormals()) {
- out.Key("normals");
- out.StartArray();
- for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
- out.Element(ai.mNormals[i].x);
- out.Element(ai.mNormals[i].y);
- out.Element(ai.mNormals[i].z);
- }
- out.EndArray();
- }
-
- if (ai.HasTangentsAndBitangents()) {
- out.Key("tangents");
- out.StartArray();
- for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
- out.Element(ai.mTangents[i].x);
- out.Element(ai.mTangents[i].y);
- out.Element(ai.mTangents[i].z);
- }
- out.EndArray();
-
- out.Key("bitangents");
- out.StartArray();
- for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
- out.Element(ai.mBitangents[i].x);
- out.Element(ai.mBitangents[i].y);
- out.Element(ai.mBitangents[i].z);
- }
- out.EndArray();
- }
-
- if (ai.GetNumUVChannels()) {
- out.Key("numuvcomponents");
- out.StartArray();
- for (unsigned int n = 0; n < ai.GetNumUVChannels(); ++n) {
- out.Element(ai.mNumUVComponents[n]);
- }
- out.EndArray();
-
- out.Key("texturecoords");
- out.StartArray();
- for (unsigned int n = 0; n < ai.GetNumUVChannels(); ++n) {
- const unsigned int numc = ai.mNumUVComponents[n] ? ai.mNumUVComponents[n] : 2;
-
- out.StartArray(true);
- for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
- for (unsigned int c = 0; c < numc; ++c) {
- out.Element(ai.mTextureCoords[n][i][c]);
- }
- }
- out.EndArray();
- }
- out.EndArray();
- }
-
- if (ai.GetNumColorChannels()) {
- out.Key("colors");
- out.StartArray();
- for (unsigned int n = 0; n < ai.GetNumColorChannels(); ++n) {
- out.StartArray(true);
- for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
- out.Element(ai.mColors[n][i].r);
- out.Element(ai.mColors[n][i].g);
- out.Element(ai.mColors[n][i].b);
- out.Element(ai.mColors[n][i].a);
- }
- out.EndArray();
- }
- out.EndArray();
- }
-
- if (ai.mNumBones) {
- out.Key("bones");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumBones; ++n) {
- Write(out, *ai.mBones[n]);
- }
- out.EndArray();
- }
-
- out.Key("faces");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumFaces; ++n) {
- Write(out, ai.mFaces[n]);
- }
- out.EndArray();
-
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiNode &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mName);
-
- out.Key("transformation");
- Write(out, ai.mTransformation, false);
-
- if (ai.mNumMeshes) {
- out.Key("meshes");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumMeshes; ++n) {
- out.Element(ai.mMeshes[n]);
- }
- out.EndArray();
- }
-
- if (ai.mNumChildren) {
- out.Key("children");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumChildren; ++n) {
- Write(out, *ai.mChildren[n]);
- }
- out.EndArray();
- }
-
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiMaterial &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("properties");
- out.StartArray();
- for (unsigned int i = 0; i < ai.mNumProperties; ++i) {
- const aiMaterialProperty *const prop = ai.mProperties[i];
- out.StartObj(true);
- out.Key("key");
- out.SimpleValue(prop->mKey);
- out.Key("semantic");
- out.SimpleValue(prop->mSemantic);
- out.Key("index");
- out.SimpleValue(prop->mIndex);
-
- out.Key("type");
- out.SimpleValue(prop->mType);
-
- out.Key("value");
- switch (prop->mType) {
- case aiPTI_Float:
- if (prop->mDataLength / sizeof(float) > 1) {
- out.StartArray();
- for (unsigned int ii = 0; ii < prop->mDataLength / sizeof(float); ++ii) {
- out.Element(reinterpret_cast<float *>(prop->mData)[ii]);
- }
- out.EndArray();
- } else {
- out.SimpleValue(*reinterpret_cast<float *>(prop->mData));
- }
- break;
-
- case aiPTI_Integer:
- if (prop->mDataLength / sizeof(int) > 1) {
- out.StartArray();
- for (unsigned int ii = 0; ii < prop->mDataLength / sizeof(int); ++ii) {
- out.Element(reinterpret_cast<int *>(prop->mData)[ii]);
- }
- out.EndArray();
- } else {
- out.SimpleValue(*reinterpret_cast<int *>(prop->mData));
- }
- break;
-
- case aiPTI_String: {
- aiString s;
- aiGetMaterialString(&ai, prop->mKey.data, prop->mSemantic, prop->mIndex, &s);
- out.SimpleValue(s);
- } break;
- case aiPTI_Buffer: {
- // binary data is written as a base64-encoded string
- out.SimpleValue(prop->mData, prop->mDataLength);
- } break;
- default:
- assert(false);
- }
-
- out.EndObj();
- }
-
- out.EndArray();
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiTexture &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("width");
- out.SimpleValue(ai.mWidth);
-
- out.Key("height");
- out.SimpleValue(ai.mHeight);
-
- out.Key("formathint");
- out.SimpleValue(aiString(ai.achFormatHint));
-
- out.Key("data");
- if (!ai.mHeight) {
- out.SimpleValue(ai.pcData, ai.mWidth);
- } else {
- out.StartArray();
- for (unsigned int y = 0; y < ai.mHeight; ++y) {
- out.StartArray(true);
- for (unsigned int x = 0; x < ai.mWidth; ++x) {
- const aiTexel &tx = ai.pcData[y * ai.mWidth + x];
- out.StartArray(true);
- out.Element(static_cast<unsigned int>(tx.r));
- out.Element(static_cast<unsigned int>(tx.g));
- out.Element(static_cast<unsigned int>(tx.b));
- out.Element(static_cast<unsigned int>(tx.a));
- out.EndArray();
- }
- out.EndArray();
- }
- out.EndArray();
- }
-
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiLight &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mName);
-
- out.Key("type");
- out.SimpleValue(ai.mType);
-
- if (ai.mType == aiLightSource_SPOT || ai.mType == aiLightSource_UNDEFINED) {
- out.Key("angleinnercone");
- out.SimpleValue(ai.mAngleInnerCone);
-
- out.Key("angleoutercone");
- out.SimpleValue(ai.mAngleOuterCone);
- }
-
- out.Key("attenuationconstant");
- out.SimpleValue(ai.mAttenuationConstant);
-
- out.Key("attenuationlinear");
- out.SimpleValue(ai.mAttenuationLinear);
-
- out.Key("attenuationquadratic");
- out.SimpleValue(ai.mAttenuationQuadratic);
-
- out.Key("diffusecolor");
- Write(out, ai.mColorDiffuse, false);
-
- out.Key("specularcolor");
- Write(out, ai.mColorSpecular, false);
-
- out.Key("ambientcolor");
- Write(out, ai.mColorAmbient, false);
-
- if (ai.mType != aiLightSource_POINT) {
- out.Key("direction");
- Write(out, ai.mDirection, false);
- }
-
- if (ai.mType != aiLightSource_DIRECTIONAL) {
- out.Key("position");
- Write(out, ai.mPosition, false);
- }
-
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiNodeAnim &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mNodeName);
-
- out.Key("prestate");
- out.SimpleValue(ai.mPreState);
-
- out.Key("poststate");
- out.SimpleValue(ai.mPostState);
-
- if (ai.mNumPositionKeys) {
- out.Key("positionkeys");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumPositionKeys; ++n) {
- const aiVectorKey &pos = ai.mPositionKeys[n];
- out.StartArray(true);
- out.Element(pos.mTime);
- Write(out, pos.mValue);
- out.EndArray();
- }
- out.EndArray();
- }
-
- if (ai.mNumRotationKeys) {
- out.Key("rotationkeys");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumRotationKeys; ++n) {
- const aiQuatKey &rot = ai.mRotationKeys[n];
- out.StartArray(true);
- out.Element(rot.mTime);
- Write(out, rot.mValue);
- out.EndArray();
- }
- out.EndArray();
- }
-
- if (ai.mNumScalingKeys) {
- out.Key("scalingkeys");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumScalingKeys; ++n) {
- const aiVectorKey &scl = ai.mScalingKeys[n];
- out.StartArray(true);
- out.Element(scl.mTime);
- Write(out, scl.mValue);
- out.EndArray();
- }
- out.EndArray();
- }
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiAnimation &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mName);
-
- out.Key("tickspersecond");
- out.SimpleValue(ai.mTicksPerSecond);
-
- out.Key("duration");
- out.SimpleValue(ai.mDuration);
-
- out.Key("channels");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumChannels; ++n) {
- Write(out, *ai.mChannels[n]);
- }
- out.EndArray();
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiCamera &ai, bool is_elem = true) {
- out.StartObj(is_elem);
-
- out.Key("name");
- out.SimpleValue(ai.mName);
-
- out.Key("aspect");
- out.SimpleValue(ai.mAspect);
-
- out.Key("clipplanefar");
- out.SimpleValue(ai.mClipPlaneFar);
-
- out.Key("clipplanenear");
- out.SimpleValue(ai.mClipPlaneNear);
-
- out.Key("horizontalfov");
- out.SimpleValue(ai.mHorizontalFOV);
-
- out.Key("up");
- Write(out, ai.mUp, false);
-
- out.Key("lookat");
- Write(out, ai.mLookAt, false);
-
- out.EndObj();
-}
-
-void WriteFormatInfo(JSONWriter &out) {
- out.StartObj();
- out.Key("format");
- out.SimpleValue("\"assimp2json\"");
- out.Key("version");
- out.SimpleValue(CURRENT_FORMAT_VERSION);
- out.EndObj();
-}
-
-void Write(JSONWriter &out, const aiScene &ai) {
- out.StartObj();
-
- out.Key("__metadata__");
- WriteFormatInfo(out);
-
- out.Key("rootnode");
- Write(out, *ai.mRootNode, false);
-
- out.Key("flags");
- out.SimpleValue(ai.mFlags);
-
- if (ai.HasMeshes()) {
- out.Key("meshes");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumMeshes; ++n) {
- Write(out, *ai.mMeshes[n]);
- }
- out.EndArray();
- }
-
- if (ai.HasMaterials()) {
- out.Key("materials");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumMaterials; ++n) {
- Write(out, *ai.mMaterials[n]);
- }
- out.EndArray();
- }
-
- if (ai.HasAnimations()) {
- out.Key("animations");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumAnimations; ++n) {
- Write(out, *ai.mAnimations[n]);
- }
- out.EndArray();
- }
-
- if (ai.HasLights()) {
- out.Key("lights");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumLights; ++n) {
- Write(out, *ai.mLights[n]);
- }
- out.EndArray();
- }
-
- if (ai.HasCameras()) {
- out.Key("cameras");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumCameras; ++n) {
- Write(out, *ai.mCameras[n]);
- }
- out.EndArray();
- }
-
- if (ai.HasTextures()) {
- out.Key("textures");
- out.StartArray();
- for (unsigned int n = 0; n < ai.mNumTextures; ++n) {
- Write(out, *ai.mTextures[n]);
- }
- out.EndArray();
- }
- out.EndObj();
-}
-
-void ExportAssimp2Json(const char *file, Assimp::IOSystem *io, const aiScene *scene, const Assimp::ExportProperties *pProperties) {
- std::unique_ptr<Assimp::IOStream> str(io->Open(file, "wt"));
- if (!str) {
- throw DeadlyExportError("could not open output file");
- }
-
- // get a copy of the scene so we can modify it
- aiScene *scenecopy_tmp;
- aiCopyScene(scene, &scenecopy_tmp);
-
- try {
- // split meshes so they fit into a 16 bit index buffer
- MeshSplitter splitter;
- splitter.SetLimit(1 << 16);
- splitter.Execute(scenecopy_tmp);
-
- // XXX Flag_WriteSpecialFloats is turned on by default, right now we don't have a configuration interface for exporters
-
- unsigned int flags = JSONWriter::Flag_WriteSpecialFloats;
- if (pProperties->GetPropertyBool("JSON_SKIP_WHITESPACES", false)) {
- flags |= JSONWriter::Flag_SkipWhitespaces;
- }
- JSONWriter s(*str, flags);
- Write(s, *scenecopy_tmp);
-
- } catch (...) {
- aiFreeScene(scenecopy_tmp);
- throw;
- }
- aiFreeScene(scenecopy_tmp);
-}
-
-} // namespace Assimp
-
-#endif // ASSIMP_BUILD_NO_ASSJSON_EXPORTER
-#endif // ASSIMP_BUILD_NO_EXPORT
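Note: ExportAssimp2Json() above is not called directly by client code; it is reached through Assimp's generic Assimp::Exporter interface, which forwards the ExportProperties the function reads ("JSON_SKIP_WHITESPACES"). A hedged sketch of how a caller would have driven this back end; the format id "assjson" is an assumption here, since the exporter registry entry lives outside this diff:

#include <assimp/Exporter.hpp>
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

// Sketch only: export a model through the (now removed) Assjson back end.
bool export_scene_as_json(const char *in_path, const char *out_path) {
    Assimp::Importer importer;
    const aiScene *scene = importer.ReadFile(in_path, aiProcess_Triangulate);
    if (!scene) {
        return false;
    }

    Assimp::Exporter exporter;
    Assimp::ExportProperties props;
    // Read by ExportAssimp2Json(); enables JSONWriter::Flag_SkipWhitespaces.
    props.SetPropertyBool("JSON_SKIP_WHITESPACES", true);

    // "assjson" is assumed to be the registered format id for this exporter.
    return exporter.Export(scene, "assjson", out_path, 0u, &props) == AI_SUCCESS;
}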
diff --git a/libs/assimp/code/AssetLib/Assjson/mesh_splitter.cpp b/libs/assimp/code/AssetLib/Assjson/mesh_splitter.cpp
deleted file mode 100644
index 978437c..0000000
--- a/libs/assimp/code/AssetLib/Assjson/mesh_splitter.cpp
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
-Assimp2Json
-Copyright (c) 2011, Alexander C. Gessler
-
-Licensed under a 3-clause BSD license. See the LICENSE file for more information.
-
-*/
-
-#include "mesh_splitter.h"
-
-#include <assimp/scene.h>
-
-// ----------------------------------------------------------------------------
-// Note: this is largely based on assimp's SplitLargeMeshes_Vertex process.
-// It is refactored and the coding style is slightly improved, though.
-// ----------------------------------------------------------------------------
-
-// ------------------------------------------------------------------------------------------------
-// Executes the post processing step on the given imported data.
-void MeshSplitter::Execute( aiScene* pScene) {
- std::vector<std::pair<aiMesh*, unsigned int> > source_mesh_map;
-
- for( unsigned int a = 0; a < pScene->mNumMeshes; a++) {
- SplitMesh(a, pScene->mMeshes[a],source_mesh_map);
- }
-
- const unsigned int size = static_cast<unsigned int>(source_mesh_map.size());
- if (size != pScene->mNumMeshes) {
- // it seems something has been split. rebuild the mesh list
- delete[] pScene->mMeshes;
- pScene->mNumMeshes = size;
- pScene->mMeshes = new aiMesh*[size]();
-
- for (unsigned int i = 0; i < size;++i) {
- pScene->mMeshes[i] = source_mesh_map[i].first;
- }
-
- // now we need to update all nodes
- UpdateNode(pScene->mRootNode,source_mesh_map);
- }
-}
-
-
-// ------------------------------------------------------------------------------------------------
-void MeshSplitter::UpdateNode(aiNode* pcNode, const std::vector<std::pair<aiMesh*, unsigned int> >& source_mesh_map) {
- // TODO: should better use std::(multi)set for source_mesh_map.
-
- // for every index in our list, build a new entry
- std::vector<unsigned int> aiEntries;
- aiEntries.reserve(pcNode->mNumMeshes + 1);
- for (unsigned int i = 0; i < pcNode->mNumMeshes;++i) {
- for (unsigned int a = 0, end = static_cast<unsigned int>(source_mesh_map.size()); a < end;++a) {
- if (source_mesh_map[a].second == pcNode->mMeshes[i]) {
- aiEntries.push_back(a);
- }
- }
- }
-
- // now build the new list
- delete[] pcNode->mMeshes;
- pcNode->mNumMeshes = static_cast<unsigned int>(aiEntries.size());
- pcNode->mMeshes = new unsigned int[pcNode->mNumMeshes];
-
- for (unsigned int b = 0; b < pcNode->mNumMeshes;++b) {
- pcNode->mMeshes[b] = aiEntries[b];
- }
-
- // recursively update children
- for (unsigned int i = 0, end = pcNode->mNumChildren; i < end;++i) {
- UpdateNode ( pcNode->mChildren[i], source_mesh_map );
- }
-}
-
-static const unsigned int WAS_NOT_COPIED = 0xffffffff;
-
-using PerVertexWeight = std::pair <unsigned int,float>;
-using VertexWeightTable = std::vector <PerVertexWeight>;
-
-// ------------------------------------------------------------------------------------------------
-VertexWeightTable* ComputeVertexBoneWeightTable(const aiMesh* pMesh) {
- if (!pMesh || !pMesh->mNumVertices || !pMesh->mNumBones) {
- return nullptr;
- }
-
- VertexWeightTable* const avPerVertexWeights = new VertexWeightTable[pMesh->mNumVertices];
- for (unsigned int i = 0; i < pMesh->mNumBones;++i) {
-
- aiBone* bone = pMesh->mBones[i];
- for (unsigned int a = 0; a < bone->mNumWeights;++a) {
- const aiVertexWeight& weight = bone->mWeights[a];
- avPerVertexWeights[weight.mVertexId].emplace_back(i,weight.mWeight);
- }
- }
- return avPerVertexWeights;
-}
-
-// ------------------------------------------------------------------------------------------------
-void MeshSplitter :: SplitMesh(unsigned int a, aiMesh* in_mesh, std::vector<std::pair<aiMesh*, unsigned int> >& source_mesh_map) {
- // TODO: should better use std::(multi)set for source_mesh_map.
-
- if (in_mesh->mNumVertices <= LIMIT) {
- source_mesh_map.emplace_back(in_mesh,a);
- return;
- }
-
- // build a per-vertex weight list if necessary
- VertexWeightTable* avPerVertexWeights = ComputeVertexBoneWeightTable(in_mesh);
-
- // we need to split this mesh into sub meshes. Estimate submesh size
- const unsigned int sub_meshes = (in_mesh->mNumVertices / LIMIT) + 1;
-
- // create a std::vector<unsigned int> to remember which vertices have already
- // been copied and to which position (i.e. output index)
- std::vector<unsigned int> was_copied_to;
- was_copied_to.resize(in_mesh->mNumVertices,WAS_NOT_COPIED);
-
- // Try to find a good estimate for the number of output faces
- // per mesh. Add 12.5% as buffer
- unsigned int size_estimated = in_mesh->mNumFaces / sub_meshes;
- size_estimated += size_estimated / 8;
-
- // now generate all submeshes
- unsigned int base = 0;
- while (true) {
- const unsigned int out_vertex_index = LIMIT;
-
- aiMesh* out_mesh = new aiMesh();
- out_mesh->mNumVertices = 0;
- out_mesh->mMaterialIndex = in_mesh->mMaterialIndex;
-
- // the name carries the adjacency information between the meshes
- out_mesh->mName = in_mesh->mName;
-
- typedef std::vector<aiVertexWeight> BoneWeightList;
- if (in_mesh->HasBones()) {
- out_mesh->mBones = new aiBone*[in_mesh->mNumBones]();
- }
-
- // clear the temporary helper array
- if (base) {
- std::fill(was_copied_to.begin(), was_copied_to.end(), WAS_NOT_COPIED);
- }
-
- std::vector<aiFace> vFaces;
-
- // reserve enough storage for most cases
- if (in_mesh->HasPositions()) {
- out_mesh->mVertices = new aiVector3D[out_vertex_index];
- }
-
- if (in_mesh->HasNormals()) {
- out_mesh->mNormals = new aiVector3D[out_vertex_index];
- }
-
- if (in_mesh->HasTangentsAndBitangents()) {
- out_mesh->mTangents = new aiVector3D[out_vertex_index];
- out_mesh->mBitangents = new aiVector3D[out_vertex_index];
- }
-
- for (unsigned int c = 0; in_mesh->HasVertexColors(c);++c) {
- out_mesh->mColors[c] = new aiColor4D[out_vertex_index];
- }
-
- for (unsigned int c = 0; in_mesh->HasTextureCoords(c);++c) {
- out_mesh->mNumUVComponents[c] = in_mesh->mNumUVComponents[c];
- out_mesh->mTextureCoords[c] = new aiVector3D[out_vertex_index];
- }
- vFaces.reserve(size_estimated);
-
- // (we will also need to copy the array of indices)
- while (base < in_mesh->mNumFaces) {
- const unsigned int iNumIndices = in_mesh->mFaces[base].mNumIndices;
-
- // doesn't catch degenerates but is quite fast
- unsigned int iNeed = 0;
- for (unsigned int v = 0; v < iNumIndices;++v) {
- unsigned int index = in_mesh->mFaces[base].mIndices[v];
-
- // check whether we do already have this vertex
- if (WAS_NOT_COPIED == was_copied_to[index]) {
- iNeed++;
- }
- }
- if (out_mesh->mNumVertices + iNeed > out_vertex_index) {
- // don't use this face
- break;
- }
-
- vFaces.emplace_back();
- aiFace& rFace = vFaces.back();
-
- // setup face type and number of indices
- rFace.mNumIndices = iNumIndices;
- rFace.mIndices = new unsigned int[iNumIndices];
-
- // need to update the output primitive types
- switch (rFace.mNumIndices)
- {
- case 1:
- out_mesh->mPrimitiveTypes |= aiPrimitiveType_POINT;
- break;
- case 2:
- out_mesh->mPrimitiveTypes |= aiPrimitiveType_LINE;
- break;
- case 3:
- out_mesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE;
- break;
- default:
- out_mesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON;
- }
-
- // and copy the contents of the old array, offset them by current base
- for (unsigned int v = 0; v < iNumIndices;++v) {
- const unsigned int index = in_mesh->mFaces[base].mIndices[v];
-
- // check whether we do already have this vertex
- if (WAS_NOT_COPIED != was_copied_to[index]) {
- rFace.mIndices[v] = was_copied_to[index];
- continue;
- }
-
- // copy positions
- out_mesh->mVertices[out_mesh->mNumVertices] = (in_mesh->mVertices[index]);
-
- // copy normals
- if (in_mesh->HasNormals()) {
- out_mesh->mNormals[out_mesh->mNumVertices] = (in_mesh->mNormals[index]);
- }
-
- // copy tangents/bi-tangents
- if (in_mesh->HasTangentsAndBitangents()) {
- out_mesh->mTangents[out_mesh->mNumVertices] = (in_mesh->mTangents[index]);
- out_mesh->mBitangents[out_mesh->mNumVertices] = (in_mesh->mBitangents[index]);
- }
-
- // texture coordinates
- for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS;++c) {
- if (in_mesh->HasTextureCoords( c)) {
- out_mesh->mTextureCoords[c][out_mesh->mNumVertices] = in_mesh->mTextureCoords[c][index];
- }
- }
- // vertex colors
- for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS;++c) {
- if (in_mesh->HasVertexColors( c)) {
- out_mesh->mColors[c][out_mesh->mNumVertices] = in_mesh->mColors[c][index];
- }
- }
- // check whether we have bone weights assigned to this vertex
- rFace.mIndices[v] = out_mesh->mNumVertices;
- if (avPerVertexWeights) {
- VertexWeightTable& table = avPerVertexWeights[ out_mesh->mNumVertices ];
- for (VertexWeightTable::const_iterator iter = table.begin(), end = table.end(); iter != end;++iter) {
- // allocate the bone weight array if necessary and store it in the mBones field (HACK!)
- BoneWeightList* weight_list = reinterpret_cast<BoneWeightList*>(out_mesh->mBones[(*iter).first]);
- if (!weight_list) {
- weight_list = new BoneWeightList();
- out_mesh->mBones[(*iter).first] = reinterpret_cast<aiBone*>(weight_list);
- }
- weight_list->push_back(aiVertexWeight(out_mesh->mNumVertices,(*iter).second));
- }
- }
-
- was_copied_to[index] = out_mesh->mNumVertices;
- out_mesh->mNumVertices++;
- }
- base++;
- if(out_mesh->mNumVertices == out_vertex_index) {
- // break here. The face is only added if it was complete
- break;
- }
- }
-
- // check which bones we'll need to create for this submesh
- if (in_mesh->HasBones()) {
- aiBone** ppCurrent = out_mesh->mBones;
- for (unsigned int k = 0; k < in_mesh->mNumBones;++k) {
- // check whether the bone exists
- BoneWeightList* const weight_list = reinterpret_cast<BoneWeightList*>(out_mesh->mBones[k]);
-
- if (weight_list) {
- const aiBone* const bone_in = in_mesh->mBones[k];
- aiBone* const bone_out = new aiBone();
- *ppCurrent++ = bone_out;
- bone_out->mName = aiString(bone_in->mName);
- bone_out->mOffsetMatrix =bone_in->mOffsetMatrix;
- bone_out->mNumWeights = (unsigned int)weight_list->size();
- bone_out->mWeights = new aiVertexWeight[bone_out->mNumWeights];
-
- // copy the vertex weights
- ::memcpy(bone_out->mWeights, &(*weight_list)[0],bone_out->mNumWeights * sizeof(aiVertexWeight));
-
- delete weight_list;
- out_mesh->mNumBones++;
- }
- }
- }
-
- // copy the face list to the mesh
- out_mesh->mFaces = new aiFace[vFaces.size()];
- out_mesh->mNumFaces = (unsigned int)vFaces.size();
-
- for (unsigned int p = 0; p < out_mesh->mNumFaces;++p) {
- out_mesh->mFaces[p] = vFaces[p];
- }
-
- // add the newly created mesh to the list
- source_mesh_map.push_back(std::make_pair(out_mesh,a));
-
- if (base == in_mesh->mNumFaces) {
- break;
- }
- }
-
- // delete the per-vertex weight list again
- delete[] avPerVertexWeights;
-
- // now delete the old mesh data
- delete in_mesh;
-}
diff --git a/libs/assimp/code/AssetLib/Assjson/mesh_splitter.h b/libs/assimp/code/AssetLib/Assjson/mesh_splitter.h
deleted file mode 100644
index f7f9a93..0000000
--- a/libs/assimp/code/AssetLib/Assjson/mesh_splitter.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Assimp2Json
-Copyright (c) 2011, Alexander C. Gessler
-
-Licensed under a 3-clause BSD license. See the LICENSE file for more information.
-
-*/
-
-#ifndef INCLUDED_MESH_SPLITTER
-#define INCLUDED_MESH_SPLITTER
-
-// ----------------------------------------------------------------------------
-// Note: this is largely based on assimp's SplitLargeMeshes_Vertex process.
-// It is refactored and the coding style is slightly improved, though.
-// ----------------------------------------------------------------------------
-
-#include <vector>
-
-struct aiScene;
-struct aiMesh;
-struct aiNode;
-
-// ---------------------------------------------------------------------------
-/** Splits meshes of unique vertices into meshes with no more vertices than
- * a given, configurable threshold value.
- */
-class MeshSplitter {
-public:
- unsigned int LIMIT;
-
- void SetLimit(unsigned int l) {
- LIMIT = l;
- }
-
- unsigned int GetLimit() const {
- return LIMIT;
- }
-
- // -------------------------------------------------------------------
- /** Executes the post processing step on the given imported data.
- * At the moment a process is not supposed to fail.
- * @param pScene The imported data to work at.
- */
- void Execute(aiScene *pScene);
-
-private:
- void UpdateNode(aiNode *pcNode, const std::vector<std::pair<aiMesh *, unsigned int>> &source_mesh_map);
- void SplitMesh(unsigned int index, aiMesh *mesh, std::vector<std::pair<aiMesh *, unsigned int>> &source_mesh_map);
-
-};
-
-#endif // INCLUDED_MESH_SPLITTER
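Note: within this directory the only user of MeshSplitter was ExportAssimp2Json() in json_exporter.cpp, which ran it on a scene copy so that every submesh stays within a 16-bit index range before serialization. The same pattern in isolation (aiCopyScene/aiFreeScene come from assimp's C export API in <assimp/cexport.h>; the function name is illustrative):

#include <assimp/cexport.h> // aiCopyScene, aiFreeScene
#include <assimp/scene.h>

#include "mesh_splitter.h"

// Sketch: split oversized meshes on a private copy, leaving the caller's scene intact.
void split_for_16bit_indices(const aiScene *scene) {
    aiScene *copy = nullptr;
    aiCopyScene(scene, &copy); // deep copy, same call the exporter used

    MeshSplitter splitter;
    splitter.SetLimit(1u << 16); // at most 65536 vertices per submesh
    splitter.Execute(copy);      // rebuilds copy->mMeshes and updates node mesh indices

    // ... serialize or inspect `copy` here ...

    aiFreeScene(copy);
}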