| author | sanine <sanine.not@pm.me> | 2022-03-04 10:47:15 -0600 |
|---|---|---|
| committer | sanine <sanine.not@pm.me> | 2022-03-04 10:47:15 -0600 |
| commit | 058f98a63658dc1a2579826ba167fd61bed1e21f (patch) | |
| tree | bcba07a1615a14d943f3af3f815a42f3be86b2f3 /src/mesh/assimp-master/port/PyAssimp | |
| parent | 2f8028ac9e0812cb6f3cbb08f0f419e4e717bd22 (diff) | |
add assimp submodule
Diffstat (limited to 'src/mesh/assimp-master/port/PyAssimp')
21 files changed, 8103 insertions, 0 deletions
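
For readers skimming this diff, here is a minimal, hypothetical usage sketch of the `pyassimp` package the commit adds. The path `'model.obj'` is a placeholder, and the assimp shared library must be discoverable as described in the bundled README; the names used (`load`, `postprocess`, `helper.get_bounding_box`) come from the files added below.

```python
# Hypothetical smoke test for the PyAssimp port added by this commit.
# 'model.obj' is a placeholder; any format listed in pyassimp/formats.py works.
from pyassimp import load, postprocess
from pyassimp.helper import get_bounding_box

flags = (postprocess.aiProcess_Triangulate
         | postprocess.aiProcess_JoinIdenticalVertices)

with load('model.obj', processing=flags) as scene:
    print('meshes:', len(scene.meshes))
    print('vertices in first mesh:', len(scene.meshes[0].vertices))
    # get_bounding_box() walks the node hierarchy and accumulates min/max corners
    print('bounding box:', get_bounding_box(scene))
```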
diff --git a/src/mesh/assimp-master/port/PyAssimp/3d_viewer_screenshot.png b/src/mesh/assimp-master/port/PyAssimp/3d_viewer_screenshot.png
Binary files differ
new file mode 100644
index 0000000..2031faf
--- /dev/null
+++ b/src/mesh/assimp-master/port/PyAssimp/3d_viewer_screenshot.png
diff --git a/src/mesh/assimp-master/port/PyAssimp/README.md b/src/mesh/assimp-master/port/PyAssimp/README.md
new file mode 100644
index 0000000..c9944f7
--- /dev/null
+++ b/src/mesh/assimp-master/port/PyAssimp/README.md
@@ -0,0 +1,86 @@
+PyAssimp Readme
+===============
+
+A simple Python wrapper for Assimp using `ctypes` to access the library.
+Requires Python >= 2.6.
+
+Python 3 support is mostly here, but not well tested.
+
+Note that pyassimp is not complete. Many ASSIMP features are missing.
+
+USAGE
+-----
+
+### Complete example: 3D viewer
+
+`pyassimp` comes with a simple 3D viewer that shows how to load and display a 3D
+model using a shader-based OpenGL pipeline.
+
+![Screenshot](3d_viewer_screenshot.png)
+
+To use it, from within `/port/PyAssimp`:
+
+```console
+$ cd scripts
+$ python ./3D-viewer <path to your model>
+```
+
+You can use this code as a starting point in your applications.
+
+### Writing your own code
+
+To get started with `pyassimp`, examine the simpler `sample.py` script in `scripts/`,
+which illustrates the basic usage. All Assimp data structures are wrapped using
+`ctypes`. All the data+length fields in Assimp's data structures (such as
+`aiMesh::mNumVertices`, `aiMesh::mVertices`) are replaced by simple Python
+lists, so you can call `len()` on them to get their respective size and access
+members using `[]`.
+
+For example, to load a file named `hello.3ds` and print the first
+vertex of the first mesh, you would do (proper error handling
+substituted by assertions ...):
+
+```python
+from pyassimp import load
+with load('hello.3ds') as scene:
+
+    assert len(scene.meshes)
+    mesh = scene.meshes[0]
+
+    assert len(mesh.vertices)
+    print(mesh.vertices[0])
+```
+
+Another example to list the 'top nodes' in a scene:
+
+```python
+from pyassimp import load
+with load('hello.3ds') as scene:
+
+    for c in scene.rootnode.children:
+        print(str(c))
+```
+
+INSTALL
+-------
+
+Install `pyassimp` by running:
+
+```console
+$ python setup.py install
+```
+
+PyAssimp requires an assimp dynamic library (`DLL` on Windows,
+`.so` on Linux, `.dylib` on macOS) in order to work. The default search directories are:
+
+ - the current directory
+ - on Linux additionally: `/usr/lib`, `/usr/local/lib`,
+   `/usr/lib/x86_64-linux-gnu`
+
+To build that library, refer to the Assimp master `INSTALL`
+instructions. To look in more places, edit `./pyassimp/helper.py`.
+There's an `additional_dirs` list waiting for your entries.
diff --git a/src/mesh/assimp-master/port/PyAssimp/README.rst b/src/mesh/assimp-master/port/PyAssimp/README.rst
new file mode 100644
index 0000000..03b7968
--- /dev/null
+++ b/src/mesh/assimp-master/port/PyAssimp/README.rst
@@ -0,0 +1,93 @@
+PyAssimp: Python bindings for libassimp
+=======================================
+
+A simple Python wrapper for Assimp using ``ctypes`` to access the
+library. Requires Python >= 2.6.
+
+Python 3 support is mostly here, but not well tested.
+
+Note that pyassimp is not complete. Many ASSIMP features are missing.
+
+USAGE
+-----
+
+Complete example: 3D viewer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``pyassimp`` comes with a simple 3D viewer that shows how to load and
+display a 3D model using a shader-based OpenGL pipeline.
+
+.. figure:: 3d_viewer_screenshot.png
+   :alt: Screenshot
+
+   Screenshot
+
+To use it, from within ``/port/PyAssimp``:
+
+::
+
+    $ cd scripts
+    $ python ./3D-viewer <path to your model>
+
+You can use this code as a starting point in your applications.
+
+Writing your own code
+~~~~~~~~~~~~~~~~~~~~~
+
+To get started with ``pyassimp``, examine the simpler ``sample.py``
+script in ``scripts/``, which illustrates the basic usage. All Assimp
+data structures are wrapped using ``ctypes``. All the data+length fields
+in Assimp's data structures (such as ``aiMesh::mNumVertices``,
+``aiMesh::mVertices``) are replaced by simple Python lists, so you can
+call ``len()`` on them to get their respective size and access members
+using ``[]``.
+
+For example, to load a file named ``hello.3ds`` and print the first
+vertex of the first mesh, you would do (proper error handling
+substituted by assertions ...):
+
+.. code:: python
+
+    from pyassimp import load
+    with load('hello.3ds') as scene:
+
+        assert len(scene.meshes)
+        mesh = scene.meshes[0]
+
+        assert len(mesh.vertices)
+        print(mesh.vertices[0])
+
+Another example to list the 'top nodes' in a scene:
+
+.. code:: python
+
+    from pyassimp import load
+    with load('hello.3ds') as scene:
+
+        for c in scene.rootnode.children:
+            print(str(c))
+
+INSTALL
+-------
+
+Install ``pyassimp`` by running:
+
+::
+
+    $ python setup.py install
+
+PyAssimp requires an assimp dynamic library (``DLL`` on Windows, ``.so``
+on Linux, ``.dylib`` on macOS) in order to work. The default search
+directories are:
+
+- the current directory
+- on Linux additionally: ``/usr/lib``, ``/usr/local/lib``,
+  ``/usr/lib/x86_64-linux-gnu``
+
+To build that library, refer to the Assimp master ``INSTALL``
+instructions. To look in more places, edit ``./pyassimp/helper.py``.
+There's an ``additional_dirs`` list waiting for your entries.
diff --git a/src/mesh/assimp-master/port/PyAssimp/gen/materialgen.py b/src/mesh/assimp-master/port/PyAssimp/gen/materialgen.py
new file mode 100644
index 0000000..ef32d8e
--- /dev/null
+++ b/src/mesh/assimp-master/port/PyAssimp/gen/materialgen.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- Coding: UTF-8 -*-
+
+# ---------------------------------------------------------------------------
+# Open Asset Import Library (ASSIMP)
+# ---------------------------------------------------------------------------
+#
+# Copyright (c) 2006-2020, ASSIMP Development Team
+#
+# All rights reserved.
+#
+# Redistribution and use of this software in source and binary forms,
+# with or without modification, are permitted provided that the following
+# conditions are met:
+#
+# * Redistributions of source code must retain the above
+#   copyright notice, this list of conditions and the
+#   following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+#   copyright notice, this list of conditions and the
+#   following disclaimer in the documentation and/or other
+#   materials provided with the distribution.
+#
+# * Neither the name of the ASSIMP team, nor the names of its
+#   contributors may be used to endorse or promote products
+#   derived from this software without specific prior
+#   written permission of the ASSIMP Development Team.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# --------------------------------------------------------------------------- + +"""Update PyAssimp's texture type constants C/C++ headers. + +This script is meant to be executed in the source tree, directly from +port/PyAssimp/gen +""" + +import os +import re + +REenumTextureType = re.compile(r'' + r'enum\saiTextureType' # enum aiTextureType + r'[^{]*?\{' # { + r'(?P<code>.*?)' # code + r'\};' # }; + , re.IGNORECASE + re.DOTALL + re.MULTILINE) + +# Replace comments +RErpcom = re.compile(r'' + r'\s*(/\*+\s|\*+/|\B\*\s?|///?!?)' # /** + r'(?P<line>.*?)' # * line + , re.IGNORECASE + re.DOTALL) + +# Remove trailing commas +RErmtrailcom = re.compile(r',$', re.IGNORECASE + re.DOTALL) + +# Remove #ifdef __cplusplus +RErmifdef = re.compile(r'' + r'#ifndef SWIG' # #ifndef SWIG + r'(?P<code>.*)' # code + r'#endif(\s*//\s*!?\s*SWIG)*' # #endif + , re.IGNORECASE + re.DOTALL) + +path = '../../../include/assimp' + +files = os.listdir (path) +enumText = '' +for fileName in files: + if fileName.endswith('.h'): + text = open(os.path.join(path, fileName)).read() + for enum in REenumTextureType.findall(text): + enumText = enum + +text = '' +for line in enumText.split('\n'): + line = line.lstrip().rstrip() + line = RErmtrailcom.sub('', line) + text += RErpcom.sub('# \g<line>', line) + '\n' +text = RErmifdef.sub('', text) + +file = open('material.py', 'w') +file.write(text) +file.close() + +print("Generation done. You can now review the file 'material.py' and merge it.") diff --git a/src/mesh/assimp-master/port/PyAssimp/gen/structsgen.py b/src/mesh/assimp-master/port/PyAssimp/gen/structsgen.py new file mode 100644 index 0000000..f34ec19 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/gen/structsgen.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python +# -*- Coding: UTF-8 -*- + +# --------------------------------------------------------------------------- +# Open Asset Import Library (ASSIMP) +# --------------------------------------------------------------------------- +# +# Copyright (c) 2006-2020, ASSIMP Development Team +# +# All rights reserved. +# +# Redistribution and use of this software in source and binary forms, +# with or without modification, are permitted provided that the following +# conditions are met: +# +# * Redistributions of source code must retain the above +# copyright notice, this list of conditions and the +# following disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the +# following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# * Neither the name of the ASSIMP team, nor the names of its +# contributors may be used to endorse or promote products +# derived from this software without specific prior +# written permission of the ASSIMP Development Team. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# --------------------------------------------------------------------------- + +"""Update PyAssimp's data structures to keep up with the +C/C++ headers. + +This script is meant to be executed in the source tree, directly from +port/PyAssimp/gen +""" + +import os +import re + +#==[regexps]================================================= + +# Clean desc +REdefine = re.compile(r'' + r'(?P<desc>)' # /** *desc */ + r'#\s*define\s(?P<name>[^(\n]+?)\s(?P<code>.+)$' # #define name value + , re.MULTILINE) + +# Get structs +REstructs = re.compile(r'' + #r'//\s?[\-]*\s(?P<desc>.*?)\*/\s' # /** *desc */ + #r'//\s?[\-]*(?P<desc>.*?)\*/(?:.*?)' # garbage + r'//\s?[\-]*\s(?P<desc>.*?)\*/\W*?' # /** *desc */ + r'struct\s(?:ASSIMP_API\s)?(?P<name>[a-z][a-z0-9_]\w+\b)' # struct name + r'[^{]*?\{' # { + r'(?P<code>.*?)' # code + r'\}\s*(PACK_STRUCT)?;' # }; + , re.IGNORECASE + re.DOTALL + re.MULTILINE) + +# Clean desc +REdesc = re.compile(r'' + r'^\s*?([*]|/\*\*)(?P<line>.*?)' # * line + , re.IGNORECASE + re.DOTALL + re.MULTILINE) + +# Remove #ifdef __cplusplus +RErmifdef = re.compile(r'' + r'#ifdef __cplusplus' # #ifdef __cplusplus + r'(?P<code>.*)' # code + r'#endif(\s*//\s*!?\s*__cplusplus)*' # #endif + , re.IGNORECASE + re.DOTALL) + +# Replace comments +RErpcom = re.compile(r'' + r'\s*(/\*+\s|\*+/|\B\*\s|///?!?)' # /** + r'(?P<line>.*?)' # * line + , re.IGNORECASE + re.DOTALL) + +# Restructure +def GetType(type, prefix='c_'): + t = type + while t.endswith('*'): + t = t[:-1] + if t[:5] == 'const': + t = t[5:] + + # skip some types + if t in skiplist: + return None + + t = t.strip() + types = {'unsigned int':'uint', 'unsigned char':'ubyte',} + if t in types: + t = types[t] + t = prefix + t + while type.endswith('*'): + t = "POINTER(" + t + ")" + type = type[:-1] + return t + +def restructure( match ): + type = match.group("type") + if match.group("struct") == "": + type = GetType(type) + elif match.group("struct") == "C_ENUM ": + type = "c_uint" + else: + type = GetType(type[2:], '') + if type is None: + return '' + if match.group("index"): + type = type + "*" + match.group("index") + + result = "" + for name in match.group("name").split(','): + result += "(\"" + name.strip() + "\", "+ type + ")," + + return result + +RErestruc = re.compile(r'' + r'(?P<struct>C_STRUCT\s|C_ENUM\s|)' # [C_STRUCT] + r'(?P<type>\w+\s?\w+?[*]*)\s' # type + #r'(?P<name>\w+)' # name + r'(?P<name>\w+|[a-z0-9_, ]+)' # name + r'(:?\[(?P<index>\w+)\])?;' # []; (optional) + , re.DOTALL) +#==[template]================================================ +template = """ +class $NAME$(Structure): + \"\"\" +$DESCRIPTION$ + \"\"\" +$DEFINES$ + _fields_ = [ + $FIELDS$ + ] +""" + +templateSR = """ +class $NAME$(Structure): + \"\"\" +$DESCRIPTION$ + \"\"\" +$DEFINES$ 
+ +$NAME$._fields_ = [ + $FIELDS$ + ] +""" + +skiplist = ("FileIO", "File", "locateFromAssimpHeap",'LogStream','MeshAnim','AnimMesh') + +#============================================================ +def Structify(fileName): + file = open(fileName, 'r') + text = file.read() + result = [] + + # Get defines. + defs = REdefine.findall(text) + # Create defines + defines = "\n" + for define in defs: + # Clean desc + desc = REdesc.sub('', define[0]) + # Replace comments + desc = RErpcom.sub('#\g<line>', desc) + defines += desc + if len(define[2].strip()): + # skip non-integral defines, we can support them right now + try: + int(define[2],0) + except: + continue + defines += " "*4 + define[1] + " = " + define[2] + "\n" + + + # Get structs + rs = REstructs.finditer(text) + + fileName = os.path.basename(fileName) + print fileName + for r in rs: + name = r.group('name')[2:] + desc = r.group('desc') + + # Skip some structs + if name in skiplist: + continue + + text = r.group('code') + + # Clean desc + desc = REdesc.sub('', desc) + + desc = "See '"+ fileName +"' for details." #TODO + + # Remove #ifdef __cplusplus + text = RErmifdef.sub('', text) + + # Whether the struct contains more than just POD + primitive = text.find('C_STRUCT') == -1 + + # Restructure + text = RErestruc.sub(restructure, text) + # Replace comments + text = RErpcom.sub('# \g<line>', text) + text = text.replace("),#", "),\n#") + text = text.replace("#", "\n#") + text = "".join([l for l in text.splitlines(True) if not l.strip().endswith("#")]) # remove empty comment lines + + # Whether it's selfreferencing: ex. struct Node { Node* parent; }; + selfreferencing = text.find('POINTER('+name+')') != -1 + + complex = name == "Scene" + + # Create description + description = "" + for line in desc.split('\n'): + description += " "*4 + line.strip() + "\n" + description = description.rstrip() + + # Create fields + fields = "" + for line in text.split('\n'): + fields += " "*12 + line.strip() + "\n" + fields = fields.strip() + + if selfreferencing: + templ = templateSR + else: + templ = template + + # Put it all together + text = templ.replace('$NAME$', name) + text = text.replace('$DESCRIPTION$', description) + text = text.replace('$FIELDS$', fields) + + if ((name.lower() == fileName.split('.')[0][2:].lower()) and (name != 'Material')) or name == "String": + text = text.replace('$DEFINES$', defines) + else: + text = text.replace('$DEFINES$', '') + + + result.append((primitive, selfreferencing, complex, text)) + + return result + +text = "#-*- coding: UTF-8 -*-\n\n" +text += "from ctypes import POINTER, c_int, c_uint, c_size_t, c_char, c_float, Structure, c_char_p, c_double, c_ubyte\n\n" + +structs1 = "" +structs2 = "" +structs3 = "" +structs4 = "" + +path = '../../../include/assimp' +files = os.listdir (path) +#files = ["aiScene.h", "aiTypes.h"] +for fileName in files: + if fileName.endswith('.h'): + for struct in Structify(os.path.join(path, fileName)): + primitive, sr, complex, struct = struct + if primitive: + structs1 += struct + elif sr: + structs2 += struct + elif complex: + structs4 += struct + else: + structs3 += struct + +text += structs1 + structs2 + structs3 + structs4 + +file = open('structs.py', 'w') +file.write(text) +file.close() + +print("Generation done. 
You can now review the file 'structs.py' and merge it.") diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/__init__.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/__init__.py new file mode 100644 index 0000000..bb67a43 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/__init__.py @@ -0,0 +1 @@ +from .core import * diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/core.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/core.py new file mode 100644 index 0000000..35ad882 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/core.py @@ -0,0 +1,556 @@ +""" +PyAssimp + +This is the main-module of PyAssimp. +""" + +import sys +if sys.version_info < (2,6): + raise RuntimeError('pyassimp: need python 2.6 or newer') + +# xrange was renamed range in Python 3 and the original range from Python 2 was removed. +# To keep compatibility with both Python 2 and 3, xrange is set to range for version 3.0 and up. +if sys.version_info >= (3,0): + xrange = range + + +try: + import numpy +except ImportError: + numpy = None +import logging +import ctypes +from contextlib import contextmanager +logger = logging.getLogger("pyassimp") +# attach default null handler to logger so it doesn't complain +# even if you don't attach another handler to logger +logger.addHandler(logging.NullHandler()) + +from . import structs +from . import helper +from . import postprocess +from .errors import AssimpError + +class AssimpLib(object): + """ + Assimp-Singleton + """ + load, load_mem, export, export_blob, release, dll = helper.search_library() +_assimp_lib = AssimpLib() + +def make_tuple(ai_obj, type = None): + res = None + + #notes: + # ai_obj._fields_ = [ ("attr", c_type), ... ] + # getattr(ai_obj, e[0]).__class__ == float + + if isinstance(ai_obj, structs.Matrix4x4): + if numpy: + res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4)) + #import pdb;pdb.set_trace() + else: + res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] + res = [res[i:i+4] for i in xrange(0,16,4)] + elif isinstance(ai_obj, structs.Matrix3x3): + if numpy: + res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3)) + else: + res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] + res = [res[i:i+3] for i in xrange(0,9,3)] + else: + if numpy: + res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]) + else: + res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_] + + return res + +# Returns unicode object for Python 2, and str object for Python 3. +def _convert_assimp_string(assimp_string): + if sys.version_info >= (3, 0): + return str(assimp_string.data, errors='ignore') + else: + return unicode(assimp_string.data, errors='ignore') + +# It is faster and more correct to have an init function for each assimp class +def _init_face(aiFace): + aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)] +assimp_struct_inits = { structs.Face : _init_face } + +def call_init(obj, caller = None): + if helper.hasattr_silent(obj,'contents'): #pointer + _init(obj.contents, obj, caller) + else: + _init(obj,parent=caller) + +def _is_init_type(obj): + + if obj and helper.hasattr_silent(obj,'contents'): #pointer + return _is_init_type(obj[0]) + # null-pointer case that arises when we reach a mesh attribute + # like mBitangents which use mNumVertices rather than mNumBitangents + # so it breaks the 'is iterable' check. + # Basically: + # FIXME! 
+ elif not bool(obj): + return False + tname = obj.__class__.__name__ + return not (tname[:2] == 'c_' or tname == 'Structure' \ + or tname == 'POINTER') and not isinstance(obj, (int, str, bytes)) + +def _init(self, target = None, parent = None): + """ + Custom initialize() for C structs, adds safely accessible member functionality. + + :param target: set the object which receive the added methods. Useful when manipulating + pointers, to skip the intermediate 'contents' deferencing. + """ + if not target: + target = self + + dirself = dir(self) + for m in dirself: + + if m.startswith("_"): + continue + + if m.startswith('mNum'): + if 'm' + m[4:] in dirself: + continue # will be processed later on + else: + name = m[1:].lower() + + obj = getattr(self, m) + setattr(target, name, obj) + continue + + if m == 'mName': + target.name = str(_convert_assimp_string(self.mName)) + target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + getattr(x, 'name','') + ")" + target.__class__.__str__ = lambda x: getattr(x, 'name', '') + continue + + name = m[1:].lower() + + obj = getattr(self, m) + + # Create tuples + if isinstance(obj, structs.assimp_structs_as_tuple): + setattr(target, name, make_tuple(obj)) + logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower()) + continue + + if m.startswith('m') and len(m) > 1 and m[1].upper() == m[1]: + + if name == "parent": + setattr(target, name, parent) + logger.debug("Added a parent as self." + name) + continue + + if helper.hasattr_silent(self, 'mNum' + m[1:]): + + length = getattr(self, 'mNum' + m[1:]) + + # -> special case: properties are + # stored as a dict. + if m == 'mProperties': + setattr(target, name, _get_properties(obj, length)) + continue + + + if not length: # empty! + setattr(target, name, []) + logger.debug(str(self) + ": " + name + " is an empty list.") + continue + + + try: + if obj._type_ in structs.assimp_structs_as_tuple: + if numpy: + setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32)) + + logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name) + else: + setattr(target, name, [make_tuple(obj[i]) for i in range(length)]) + + logger.debug(str(self) + ": Added a list of lists (type "+ str(type(obj)) + ") as self." + name) + + else: + setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array? + + logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")") + + # initialize array elements + try: + init = assimp_struct_inits[type(obj[0])] + except KeyError: + if _is_init_type(obj[0]): + for e in getattr(target, name): + call_init(e, target) + else: + for e in getattr(target, name): + init(e) + + + except IndexError: + logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.") + sys.exit(1) + + except ValueError as e: + + logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.") + if "setting an array element with a sequence" in str(e): + logger.error("Note that pyassimp does not currently " + "support meshes with mixed triangles " + "and quads. 
Try to load your mesh with" + " a post-processing to triangulate your" + " faces.") + raise e + + + + else: # starts with 'm' but not iterable + setattr(target, m, obj) + logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")") + + if _is_init_type(obj): + call_init(obj, target) + + if isinstance(self, structs.Mesh): + _finalize_mesh(self, target) + + if isinstance(self, structs.Texture): + _finalize_texture(self, target) + + if isinstance(self, structs.Metadata): + _finalize_metadata(self, target) + + + return self + + +def pythonize_assimp(type, obj, scene): + """ This method modify the Assimp data structures + to make them easier to work with in Python. + + Supported operations: + - MESH: replace a list of mesh IDs by reference to these meshes + - ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node. + + :param type: the type of modification to operate (cf above) + :param obj: the input object to modify + :param scene: a reference to the whole scene + """ + + if type == "MESH": + meshes = [] + for i in obj: + meshes.append(scene.meshes[i]) + return meshes + + if type == "ADDTRANSFORMATION": + def getnode(node, name): + if node.name == name: return node + for child in node.children: + n = getnode(child, name) + if n: return n + + node = getnode(scene.rootnode, obj.name) + if not node: + raise AssimpError("Object " + str(obj) + " has no associated node!") + setattr(obj, "transformation", node.transformation) + +def recur_pythonize(node, scene): + ''' + Recursively call pythonize_assimp on + nodes tree to apply several post-processing to + pythonize the assimp datastructures. + ''' + node.meshes = pythonize_assimp("MESH", node.meshes, scene) + for mesh in node.meshes: + mesh.material = scene.materials[mesh.materialindex] + for cam in scene.cameras: + pythonize_assimp("ADDTRANSFORMATION", cam, scene) + for c in node.children: + recur_pythonize(c, scene) + +def release(scene): + ''' + Release resources of a loaded scene. + ''' + _assimp_lib.release(ctypes.pointer(scene)) + +@contextmanager +def load(filename, + file_type = None, + processing = postprocess.aiProcess_Triangulate): + ''' + Load a model into a scene. On failure throws AssimpError. + + Arguments + --------- + filename: Either a filename or a file object to load model from. + If a file object is passed, file_type MUST be specified + Otherwise Assimp has no idea which importer to use. + This is named 'filename' so as to not break legacy code. + processing: assimp postprocessing parameters. Verbose keywords are imported + from postprocessing, and the parameters can be combined bitwise to + generate the final processing value. Note that the default value will + triangulate quad faces. Example of generating other possible values: + processing = (pyassimp.postprocess.aiProcess_Triangulate | + pyassimp.postprocess.aiProcess_OptimizeMeshes) + file_type: string of file extension, such as 'stl' + + Returns + --------- + Scene object with model data + ''' + + if hasattr(filename, 'read'): + # This is the case where a file object has been passed to load. 
+ # It is calling the following function: + # const aiScene* aiImportFileFromMemory(const char* pBuffer, + # unsigned int pLength, + # unsigned int pFlags, + # const char* pHint) + if file_type is None: + raise AssimpError('File type must be specified when passing file objects!') + data = filename.read() + model = _assimp_lib.load_mem(data, + len(data), + processing, + file_type) + else: + # a filename string has been passed + model = _assimp_lib.load(filename.encode(sys.getfilesystemencoding()), processing) + + if not model: + raise AssimpError('Could not import file!') + scene = _init(model.contents) + recur_pythonize(scene.rootnode, scene) + try: + yield scene + finally: + release(scene) + +def export(scene, + filename, + file_type = None, + processing = postprocess.aiProcess_Triangulate): + ''' + Export a scene. On failure throws AssimpError. + + Arguments + --------- + scene: scene to export. + filename: Filename that the scene should be exported to. + file_type: string of file exporter to use. For example "collada". + processing: assimp postprocessing parameters. Verbose keywords are imported + from postprocessing, and the parameters can be combined bitwise to + generate the final processing value. Note that the default value will + triangulate quad faces. Example of generating other possible values: + processing = (pyassimp.postprocess.aiProcess_Triangulate | + pyassimp.postprocess.aiProcess_OptimizeMeshes) + + ''' + + exportStatus = _assimp_lib.export(ctypes.pointer(scene), file_type.encode("ascii"), filename.encode(sys.getfilesystemencoding()), processing) + + if exportStatus != 0: + raise AssimpError('Could not export scene!') + +def export_blob(scene, + file_type = None, + processing = postprocess.aiProcess_Triangulate): + ''' + Export a scene and return a blob in the correct format. On failure throws AssimpError. + + Arguments + --------- + scene: scene to export. + file_type: string of file exporter to use. For example "collada". + processing: assimp postprocessing parameters. Verbose keywords are imported + from postprocessing, and the parameters can be combined bitwise to + generate the final processing value. Note that the default value will + triangulate quad faces. Example of generating other possible values: + processing = (pyassimp.postprocess.aiProcess_Triangulate | + pyassimp.postprocess.aiProcess_OptimizeMeshes) + Returns + --------- + Pointer to structs.ExportDataBlob + ''' + exportBlobPtr = _assimp_lib.export_blob(ctypes.pointer(scene), file_type.encode("ascii"), processing) + + if exportBlobPtr == 0: + raise AssimpError('Could not export scene to blob!') + return exportBlobPtr + +def _finalize_texture(tex, target): + setattr(target, "achformathint", tex.achFormatHint) + if numpy: + data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)]) + else: + data = [make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)] + setattr(target, "data", data) + +def _finalize_mesh(mesh, target): + """ Building of meshes is a bit specific. + + We override here the various datasets that can + not be process as regular fields. 
+ + For instance, the length of the normals array is + mNumVertices (no mNumNormals is available) + """ + nb_vertices = getattr(mesh, "mNumVertices") + + def fill(name): + mAttr = getattr(mesh, name) + if numpy: + if mAttr: + data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32) + setattr(target, name[1:].lower(), data) + else: + setattr(target, name[1:].lower(), numpy.array([], dtype="float32")) + else: + if mAttr: + data = [make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)] + setattr(target, name[1:].lower(), data) + else: + setattr(target, name[1:].lower(), []) + + def fillarray(name): + mAttr = getattr(mesh, name) + + data = [] + for index, mSubAttr in enumerate(mAttr): + if mSubAttr: + data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)]) + + if numpy: + setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32)) + else: + setattr(target, name[1:].lower(), data) + + fill("mNormals") + fill("mTangents") + fill("mBitangents") + + fillarray("mColors") + fillarray("mTextureCoords") + + # prepare faces + if numpy: + faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32) + else: + faces = [f.indices for f in target.faces] + setattr(target, 'faces', faces) + +def _init_metadata_entry(entry): + entry.type = entry.mType + if entry.type == structs.MetadataEntry.AI_BOOL: + entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_bool)).contents.value + elif entry.type == structs.MetadataEntry.AI_INT32: + entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_int32)).contents.value + elif entry.type == structs.MetadataEntry.AI_UINT64: + entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_uint64)).contents.value + elif entry.type == structs.MetadataEntry.AI_FLOAT: + entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_float)).contents.value + elif entry.type == structs.MetadataEntry.AI_DOUBLE: + entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_double)).contents.value + elif entry.type == structs.MetadataEntry.AI_AISTRING: + assimp_string = ctypes.cast(entry.mData, ctypes.POINTER(structs.String)).contents + entry.data = _convert_assimp_string(assimp_string) + elif entry.type == structs.MetadataEntry.AI_AIVECTOR3D: + assimp_vector = ctypes.cast(entry.mData, ctypes.POINTER(structs.Vector3D)).contents + entry.data = make_tuple(assimp_vector) + + return entry + +def _finalize_metadata(metadata, target): + """ Building the metadata object is a bit specific. + + Firstly, there are two separate arrays: one with metadata keys and one + with metadata values, and there are no corresponding mNum* attributes, + so the C arrays are not converted to Python arrays using the generic + code in the _init function. + + Secondly, a metadata entry value has to be cast according to declared + metadata entry type. 
+ """ + length = metadata.mNumProperties + setattr(target, 'keys', [str(_convert_assimp_string(metadata.mKeys[i])) for i in range(length)]) + setattr(target, 'values', [_init_metadata_entry(metadata.mValues[i]) for i in range(length)]) + +class PropertyGetter(dict): + def __getitem__(self, key): + semantic = 0 + if isinstance(key, tuple): + key, semantic = key + + return dict.__getitem__(self, (key, semantic)) + + def keys(self): + for k in dict.keys(self): + yield k[0] + + def __iter__(self): + return self.keys() + + def items(self): + for k, v in dict.items(self): + yield k[0], v + + +def _get_properties(properties, length): + """ + Convenience Function to get the material properties as a dict + and values in a python format. + """ + result = {} + #read all properties + for p in [properties[i] for i in range(length)]: + #the name + p = p.contents + key = str(_convert_assimp_string(p.mKey)) + key = (key.split('.')[1], p.mSemantic) + + #the data + if p.mType == 1: + arr = ctypes.cast(p.mData, + ctypes.POINTER(ctypes.c_float * int(p.mDataLength/ctypes.sizeof(ctypes.c_float))) + ).contents + value = [x for x in arr] + elif p.mType == 3: #string can't be an array + value = _convert_assimp_string(ctypes.cast(p.mData, ctypes.POINTER(structs.MaterialPropertyString)).contents) + + elif p.mType == 4: + arr = ctypes.cast(p.mData, + ctypes.POINTER(ctypes.c_int * int(p.mDataLength/ctypes.sizeof(ctypes.c_int))) + ).contents + value = [x for x in arr] + else: + value = p.mData[:p.mDataLength] + + if len(value) == 1: + [value] = value + + result[key] = value + + return PropertyGetter(result) + +def decompose_matrix(matrix): + if not isinstance(matrix, structs.Matrix4x4): + raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!") + + scaling = structs.Vector3D() + rotation = structs.Quaternion() + position = structs.Vector3D() + + _assimp_lib.dll.aiDecomposeMatrix(ctypes.pointer(matrix), + ctypes.byref(scaling), + ctypes.byref(rotation), + ctypes.byref(position)) + return scaling._init(), rotation._init(), position._init() + diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/errors.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/errors.py new file mode 100644 index 0000000..e017b51 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/errors.py @@ -0,0 +1,11 @@ +#-*- coding: UTF-8 -*- + +""" +All possible errors. +""" + +class AssimpError(BaseException): + """ + If an internal error occurs. + """ + pass diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/formats.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/formats.py new file mode 100644 index 0000000..5d454e5 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/formats.py @@ -0,0 +1,41 @@ +FORMATS = ["CSM", + "LWS", + "B3D", + "COB", + "PLY", + "IFC", + "OFF", + "SMD", + "IRRMESH", + "3D", + "DAE", + "MDL", + "HMP", + "TER", + "WRL", + "XML", + "NFF", + "AC", + "OBJ", + "3DS", + "STL", + "IRR", + "Q3O", + "Q3D", + "MS3D", + "Q3S", + "ZGL", + "MD2", + "X", + "BLEND", + "XGL", + "MD5MESH", + "MAX", + "LXO", + "DXF", + "BVH", + "LWO", + "NDO"] + +def available_formats(): + return FORMATS diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/helper.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/helper.py new file mode 100644 index 0000000..7c14f60 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/helper.py @@ -0,0 +1,283 @@ +#-*- coding: UTF-8 -*- + +""" +Some fancy helper functions. 
+""" + +import os +import ctypes +import operator + +from distutils.sysconfig import get_python_lib +import re +import sys + +try: import numpy +except ImportError: numpy = None + +import logging;logger = logging.getLogger("pyassimp") + +from .errors import AssimpError + +additional_dirs, ext_whitelist = [],[] + +# populate search directories and lists of allowed file extensions +# depending on the platform we're running on. +if os.name=='posix': + additional_dirs.append('./') + additional_dirs.append('/usr/lib/') + additional_dirs.append('/usr/lib/x86_64-linux-gnu/') + additional_dirs.append('/usr/lib/aarch64-linux-gnu/') + additional_dirs.append('/usr/local/lib/') + + if 'LD_LIBRARY_PATH' in os.environ: + additional_dirs.extend([item for item in os.environ['LD_LIBRARY_PATH'].split(':') if item]) + + # check if running from anaconda. + anaconda_keywords = ("conda", "continuum") + if any(k in sys.version.lower() for k in anaconda_keywords): + cur_path = get_python_lib() + pattern = re.compile('.*\/lib\/') + conda_lib = pattern.match(cur_path).group() + logger.info("Adding Anaconda lib path:"+ conda_lib) + additional_dirs.append(conda_lib) + + # note - this won't catch libassimp.so.N.n, but + # currently there's always a symlink called + # libassimp.so in /usr/local/lib. + ext_whitelist.append('.so') + # libassimp.dylib in /usr/local/lib + ext_whitelist.append('.dylib') + +elif os.name=='nt': + ext_whitelist.append('.dll') + path_dirs = os.environ['PATH'].split(';') + additional_dirs.extend(path_dirs) + +def vec2tuple(x): + """ Converts a VECTOR3D to a Tuple """ + return (x.x, x.y, x.z) + +def transform(vector3, matrix4x4): + """ Apply a transformation matrix on a 3D vector. + + :param vector3: array with 3 elements + :param matrix4x4: 4x4 matrix + """ + if numpy: + return numpy.dot(matrix4x4, numpy.append(vector3, 1.)) + else: + m0,m1,m2,m3 = matrix4x4; x,y,z = vector3 + return [ + m0[0]*x + m0[1]*y + m0[2]*z + m0[3], + m1[0]*x + m1[1]*y + m1[2]*z + m1[3], + m2[0]*x + m2[1]*y + m2[2]*z + m2[3], + m3[0]*x + m3[1]*y + m3[2]*z + m3[3] + ] + +def _inv(matrix4x4): + m0,m1,m2,m3 = matrix4x4 + + det = m0[3]*m1[2]*m2[1]*m3[0] - m0[2]*m1[3]*m2[1]*m3[0] - \ + m0[3]*m1[1]*m2[2]*m3[0] + m0[1]*m1[3]*m2[2]*m3[0] + \ + m0[2]*m1[1]*m2[3]*m3[0] - m0[1]*m1[2]*m2[3]*m3[0] - \ + m0[3]*m1[2]*m2[0]*m3[1] + m0[2]*m1[3]*m2[0]*m3[1] + \ + m0[3]*m1[0]*m2[2]*m3[1] - m0[0]*m1[3]*m2[2]*m3[1] - \ + m0[2]*m1[0]*m2[3]*m3[1] + m0[0]*m1[2]*m2[3]*m3[1] + \ + m0[3]*m1[1]*m2[0]*m3[2] - m0[1]*m1[3]*m2[0]*m3[2] - \ + m0[3]*m1[0]*m2[1]*m3[2] + m0[0]*m1[3]*m2[1]*m3[2] + \ + m0[1]*m1[0]*m2[3]*m3[2] - m0[0]*m1[1]*m2[3]*m3[2] - \ + m0[2]*m1[1]*m2[0]*m3[3] + m0[1]*m1[2]*m2[0]*m3[3] + \ + m0[2]*m1[0]*m2[1]*m3[3] - m0[0]*m1[2]*m2[1]*m3[3] - \ + m0[1]*m1[0]*m2[2]*m3[3] + m0[0]*m1[1]*m2[2]*m3[3] + + return[[( m1[2]*m2[3]*m3[1] - m1[3]*m2[2]*m3[1] + m1[3]*m2[1]*m3[2] - m1[1]*m2[3]*m3[2] - m1[2]*m2[1]*m3[3] + m1[1]*m2[2]*m3[3]) /det, + ( m0[3]*m2[2]*m3[1] - m0[2]*m2[3]*m3[1] - m0[3]*m2[1]*m3[2] + m0[1]*m2[3]*m3[2] + m0[2]*m2[1]*m3[3] - m0[1]*m2[2]*m3[3]) /det, + ( m0[2]*m1[3]*m3[1] - m0[3]*m1[2]*m3[1] + m0[3]*m1[1]*m3[2] - m0[1]*m1[3]*m3[2] - m0[2]*m1[1]*m3[3] + m0[1]*m1[2]*m3[3]) /det, + ( m0[3]*m1[2]*m2[1] - m0[2]*m1[3]*m2[1] - m0[3]*m1[1]*m2[2] + m0[1]*m1[3]*m2[2] + m0[2]*m1[1]*m2[3] - m0[1]*m1[2]*m2[3]) /det], + [( m1[3]*m2[2]*m3[0] - m1[2]*m2[3]*m3[0] - m1[3]*m2[0]*m3[2] + m1[0]*m2[3]*m3[2] + m1[2]*m2[0]*m3[3] - m1[0]*m2[2]*m3[3]) /det, + ( m0[2]*m2[3]*m3[0] - m0[3]*m2[2]*m3[0] + m0[3]*m2[0]*m3[2] - m0[0]*m2[3]*m3[2] - 
m0[2]*m2[0]*m3[3] + m0[0]*m2[2]*m3[3]) /det, + ( m0[3]*m1[2]*m3[0] - m0[2]*m1[3]*m3[0] - m0[3]*m1[0]*m3[2] + m0[0]*m1[3]*m3[2] + m0[2]*m1[0]*m3[3] - m0[0]*m1[2]*m3[3]) /det, + ( m0[2]*m1[3]*m2[0] - m0[3]*m1[2]*m2[0] + m0[3]*m1[0]*m2[2] - m0[0]*m1[3]*m2[2] - m0[2]*m1[0]*m2[3] + m0[0]*m1[2]*m2[3]) /det], + [( m1[1]*m2[3]*m3[0] - m1[3]*m2[1]*m3[0] + m1[3]*m2[0]*m3[1] - m1[0]*m2[3]*m3[1] - m1[1]*m2[0]*m3[3] + m1[0]*m2[1]*m3[3]) /det, + ( m0[3]*m2[1]*m3[0] - m0[1]*m2[3]*m3[0] - m0[3]*m2[0]*m3[1] + m0[0]*m2[3]*m3[1] + m0[1]*m2[0]*m3[3] - m0[0]*m2[1]*m3[3]) /det, + ( m0[1]*m1[3]*m3[0] - m0[3]*m1[1]*m3[0] + m0[3]*m1[0]*m3[1] - m0[0]*m1[3]*m3[1] - m0[1]*m1[0]*m3[3] + m0[0]*m1[1]*m3[3]) /det, + ( m0[3]*m1[1]*m2[0] - m0[1]*m1[3]*m2[0] - m0[3]*m1[0]*m2[1] + m0[0]*m1[3]*m2[1] + m0[1]*m1[0]*m2[3] - m0[0]*m1[1]*m2[3]) /det], + [( m1[2]*m2[1]*m3[0] - m1[1]*m2[2]*m3[0] - m1[2]*m2[0]*m3[1] + m1[0]*m2[2]*m3[1] + m1[1]*m2[0]*m3[2] - m1[0]*m2[1]*m3[2]) /det, + ( m0[1]*m2[2]*m3[0] - m0[2]*m2[1]*m3[0] + m0[2]*m2[0]*m3[1] - m0[0]*m2[2]*m3[1] - m0[1]*m2[0]*m3[2] + m0[0]*m2[1]*m3[2]) /det, + ( m0[2]*m1[1]*m3[0] - m0[1]*m1[2]*m3[0] - m0[2]*m1[0]*m3[1] + m0[0]*m1[2]*m3[1] + m0[1]*m1[0]*m3[2] - m0[0]*m1[1]*m3[2]) /det, + ( m0[1]*m1[2]*m2[0] - m0[2]*m1[1]*m2[0] + m0[2]*m1[0]*m2[1] - m0[0]*m1[2]*m2[1] - m0[1]*m1[0]*m2[2] + m0[0]*m1[1]*m2[2]) /det]] + +def get_bounding_box(scene): + bb_min = [1e10, 1e10, 1e10] # x,y,z + bb_max = [-1e10, -1e10, -1e10] # x,y,z + inv = numpy.linalg.inv if numpy else _inv + return get_bounding_box_for_node(scene.rootnode, bb_min, bb_max, inv(scene.rootnode.transformation)) + +def get_bounding_box_for_node(node, bb_min, bb_max, transformation): + + if numpy: + transformation = numpy.dot(transformation, node.transformation) + else: + t0,t1,t2,t3 = transformation + T0,T1,T2,T3 = node.transformation + transformation = [ [ + t0[0]*T0[0] + t0[1]*T1[0] + t0[2]*T2[0] + t0[3]*T3[0], + t0[0]*T0[1] + t0[1]*T1[1] + t0[2]*T2[1] + t0[3]*T3[1], + t0[0]*T0[2] + t0[1]*T1[2] + t0[2]*T2[2] + t0[3]*T3[2], + t0[0]*T0[3] + t0[1]*T1[3] + t0[2]*T2[3] + t0[3]*T3[3] + ],[ + t1[0]*T0[0] + t1[1]*T1[0] + t1[2]*T2[0] + t1[3]*T3[0], + t1[0]*T0[1] + t1[1]*T1[1] + t1[2]*T2[1] + t1[3]*T3[1], + t1[0]*T0[2] + t1[1]*T1[2] + t1[2]*T2[2] + t1[3]*T3[2], + t1[0]*T0[3] + t1[1]*T1[3] + t1[2]*T2[3] + t1[3]*T3[3] + ],[ + t2[0]*T0[0] + t2[1]*T1[0] + t2[2]*T2[0] + t2[3]*T3[0], + t2[0]*T0[1] + t2[1]*T1[1] + t2[2]*T2[1] + t2[3]*T3[1], + t2[0]*T0[2] + t2[1]*T1[2] + t2[2]*T2[2] + t2[3]*T3[2], + t2[0]*T0[3] + t2[1]*T1[3] + t2[2]*T2[3] + t2[3]*T3[3] + ],[ + t3[0]*T0[0] + t3[1]*T1[0] + t3[2]*T2[0] + t3[3]*T3[0], + t3[0]*T0[1] + t3[1]*T1[1] + t3[2]*T2[1] + t3[3]*T3[1], + t3[0]*T0[2] + t3[1]*T1[2] + t3[2]*T2[2] + t3[3]*T3[2], + t3[0]*T0[3] + t3[1]*T1[3] + t3[2]*T2[3] + t3[3]*T3[3] + ] ] + + for mesh in node.meshes: + for v in mesh.vertices: + v = transform(v, transformation) + bb_min[0] = min(bb_min[0], v[0]) + bb_min[1] = min(bb_min[1], v[1]) + bb_min[2] = min(bb_min[2], v[2]) + bb_max[0] = max(bb_max[0], v[0]) + bb_max[1] = max(bb_max[1], v[1]) + bb_max[2] = max(bb_max[2], v[2]) + + + for child in node.children: + bb_min, bb_max = get_bounding_box_for_node(child, bb_min, bb_max, transformation) + + return bb_min, bb_max + +def try_load_functions(library_path, dll): + ''' + Try to bind to aiImportFile and aiReleaseImport + + Arguments + --------- + library_path: path to current lib + dll: ctypes handle to library + + Returns + --------- + If unsuccessful: + None + If successful: + Tuple containing (library_path, + load from filename function, + 
load from memory function, + export to filename function, + export to blob function, + release function, + ctypes handle to assimp library) + ''' + + try: + load = dll.aiImportFile + release = dll.aiReleaseImport + load_mem = dll.aiImportFileFromMemory + export = dll.aiExportScene + export2blob = dll.aiExportSceneToBlob + except AttributeError: + #OK, this is a library, but it doesn't have the functions we need + return None + + # library found! + from .structs import Scene, ExportDataBlob + load.restype = ctypes.POINTER(Scene) + load_mem.restype = ctypes.POINTER(Scene) + export2blob.restype = ctypes.POINTER(ExportDataBlob) + return (library_path, load, load_mem, export, export2blob, release, dll) + +def search_library(): + ''' + Loads the assimp library. + Throws exception AssimpError if no library_path is found + + Returns: tuple, (load from filename function, + load from memory function, + export to filename function, + export to blob function, + release function, + dll) + ''' + #this path + folder = os.path.dirname(__file__) + + # silence 'DLL not found' message boxes on win + try: + ctypes.windll.kernel32.SetErrorMode(0x8007) + except AttributeError: + pass + + candidates = [] + # test every file + for curfolder in [folder]+additional_dirs: + if os.path.isdir(curfolder): + for filename in os.listdir(curfolder): + # our minimum requirement for candidates is that + # they should contain 'assimp' somewhere in + # their name + if filename.lower().find('assimp')==-1 : + continue + is_out=1 + for et in ext_whitelist: + if et in filename.lower(): + is_out=0 + break + if is_out: + continue + + library_path = os.path.join(curfolder, filename) + logger.debug('Try ' + library_path) + try: + dll = ctypes.cdll.LoadLibrary(library_path) + except Exception as e: + logger.warning(str(e)) + # OK, this except is evil. But different OSs will throw different + # errors. So just ignore any errors. + continue + # see if the functions we need are in the dll + loaded = try_load_functions(library_path, dll) + if loaded: candidates.append(loaded) + + if not candidates: + # no library found + raise AssimpError("assimp library not found") + else: + # get the newest library_path + candidates = map(lambda x: (os.lstat(x[0])[-2], x), candidates) + res = max(candidates, key=operator.itemgetter(0))[1] + logger.debug('Using assimp library located at ' + res[0]) + + # XXX: if there are 1000 dll/so files containing 'assimp' + # in their name, do we have all of them in our address + # space now until gc kicks in? + + # XXX: take version postfix of the .so on linux? + return res[1:] + +def hasattr_silent(object, name): + """ + Calls hasttr() with the given parameters and preserves the legacy (pre-Python 3.2) + functionality of silently catching exceptions. + + Returns the result of hasatter() or False if an exception was raised. + """ + + try: + if not object: + return False + return hasattr(object, name) + except AttributeError: + return False diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/material.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/material.py new file mode 100644 index 0000000..a36e50a --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/material.py @@ -0,0 +1,89 @@ +# Dummy value. +# +# No texture, but the value to be used as 'texture semantic' +# (#aiMaterialProperty::mSemantic) for all material properties +# # not* related to textures. +# +aiTextureType_NONE = 0x0 + +# The texture is combined with the result of the diffuse +# lighting equation. 
+# +aiTextureType_DIFFUSE = 0x1 + +# The texture is combined with the result of the specular +# lighting equation. +# +aiTextureType_SPECULAR = 0x2 + +# The texture is combined with the result of the ambient +# lighting equation. +# +aiTextureType_AMBIENT = 0x3 + +# The texture is added to the result of the lighting +# calculation. It isn't influenced by incoming light. +# +aiTextureType_EMISSIVE = 0x4 + +# The texture is a height map. +# +# By convention, higher gray-scale values stand for +# higher elevations from the base height. +# +aiTextureType_HEIGHT = 0x5 + +# The texture is a (tangent space) normal-map. +# +# Again, there are several conventions for tangent-space +# normal maps. Assimp does (intentionally) not +# distinguish here. +# +aiTextureType_NORMALS = 0x6 + +# The texture defines the glossiness of the material. +# +# The glossiness is in fact the exponent of the specular +# (phong) lighting equation. Usually there is a conversion +# function defined to map the linear color values in the +# texture to a suitable exponent. Have fun. +# +aiTextureType_SHININESS = 0x7 + +# The texture defines per-pixel opacity. +# +# Usually 'white' means opaque and 'black' means +# 'transparency'. Or quite the opposite. Have fun. +# +aiTextureType_OPACITY = 0x8 + +# Displacement texture +# +# The exact purpose and format is application-dependent. +# Higher color values stand for higher vertex displacements. +# +aiTextureType_DISPLACEMENT = 0x9 + +# Lightmap texture (aka Ambient Occlusion) +# +# Both 'Lightmaps' and dedicated 'ambient occlusion maps' are +# covered by this material property. The texture contains a +# scaling value for the final color value of a pixel. Its +# intensity is not affected by incoming light. +# +aiTextureType_LIGHTMAP = 0xA + +# Reflection texture +# +# Contains the color of a perfect mirror reflection. +# Rarely used, almost never for real-time applications. +# +aiTextureType_REFLECTION = 0xB + +# Unknown texture +# +# A texture reference that does not match any of the definitions +# above is considered to be 'unknown'. It is still imported +# but is excluded from any further postprocessing. +# +aiTextureType_UNKNOWN = 0xC diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/postprocess.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/postprocess.py new file mode 100644 index 0000000..0c55d67 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/postprocess.py @@ -0,0 +1,530 @@ +# <hr>Calculates the tangents and bitangents for the imported meshes. +# +# Does nothing if a mesh does not have normals. You might want this post +# processing step to be executed if you plan to use tangent space calculations +# such as normal mapping applied to the meshes. There's a config setting, +# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify +# a maximum smoothing angle for the algorithm. However, usually you'll +# want to leave it at the default value. +# +aiProcess_CalcTangentSpace = 0x1 + +## <hr>Identifies and joins identical vertex data sets within all +# imported meshes. +# +# After this step is run, each mesh contains unique vertices, +# so a vertex may be used by multiple faces. You usually want +# to use this post processing step. If your application deals with +# indexed geometry, this step is compulsory or you'll just waste rendering +# time. <b>If this flag is not specified<b>, no vertices are referenced by +# more than one face and <b>no index buffer is required<b> for rendering. 
+# +aiProcess_JoinIdenticalVertices = 0x2 + +## <hr>Converts all the imported data to a left-handed coordinate space. +# +# By default the data is returned in a right-handed coordinate space (which +# OpenGL prefers). In this space, +X points to the right, +# +Z points towards the viewer, and +Y points upwards. In the DirectX +# coordinate space +X points to the right, +Y points upwards, and +Z points +# away from the viewer. +# +# You'll probably want to consider this flag if you use Direct3D for +# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this +# setting and bundles all conversions typically required for D3D-based +# applications. +# +aiProcess_MakeLeftHanded = 0x4 + +## <hr>Triangulates all faces of all meshes. +# +# By default the imported mesh data might contain faces with more than 3 +# indices. For rendering you'll usually want all faces to be triangles. +# This post processing step splits up faces with more than 3 indices into +# triangles. Line and point primitives are #not# modified! If you want +# 'triangles only' with no other kinds of primitives, try the following +# solution: +# <ul> +# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li> +# <li>Ignore all point and line meshes when you process assimp's output<li> +# <ul> +# +aiProcess_Triangulate = 0x8 + +## <hr>Removes some parts of the data structure (animations, materials, +# light sources, cameras, textures, vertex components). +# +# The components to be removed are specified in a separate +# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful +# if you don't need all parts of the output structure. Vertex colors +# are rarely used today for example... Calling this step to remove unneeded +# data from the pipeline as early as possible results in increased +# performance and a more optimized output data structure. +# This step is also useful if you want to force Assimp to recompute +# normals or tangents. The corresponding steps don't recompute them if +# they're already there (loaded from the source asset). By using this +# step you can make sure they are NOT there. +# +# This flag is a poor one, mainly because its purpose is usually +# misunderstood. Consider the following case: a 3D model has been exported +# from a CAD app, and it has per-face vertex colors. Vertex positions can't be +# shared, thus the #aiProcess_JoinIdenticalVertices step fails to +# optimize the data because of these nasty little vertex colors. +# Most apps don't even process them, so it's all for nothing. By using +# this step, unneeded components are excluded as early as possible +# thus opening more room for internal optimizations. +# +aiProcess_RemoveComponent = 0x10 + +## <hr>Generates normals for all faces of all meshes. +# +# This is ignored if normals are already there at the time this flag +# is evaluated. Model importers try to load them from the source file, so +# they're usually already there. Face normals are shared between all points +# of a single face, so a single point can have multiple normals, which +# forces the library to duplicate vertices in some cases. +# #aiProcess_JoinIdenticalVertices is #senseless# then. +# +# This flag may not be specified together with #aiProcess_GenSmoothNormals. +# +aiProcess_GenNormals = 0x20 + +## <hr>Generates smooth normals for all vertices in the mesh. +# +# This is ignored if normals are already there at the time this flag +# is evaluated. Model importers try to load them from the source file, so +# they're usually already there. 
+# +# This flag may not be specified together with +# #aiProcess_GenNormals. There's a configuration option, +# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify +# an angle maximum for the normal smoothing algorithm. Normals exceeding +# this limit are not smoothed, resulting in a 'hard' seam between two faces. +# Using a decent angle here (e.g. 80 degrees) results in very good visual +# appearance. +# +aiProcess_GenSmoothNormals = 0x40 + +## <hr>Splits large meshes into smaller sub-meshes. +# +# This is quite useful for real-time rendering, where the number of triangles +# which can be maximally processed in a single draw-call is limited +# by the video driverhardware. The maximum vertex buffer is usually limited +# too. Both requirements can be met with this step: you may specify both a +# triangle and vertex limit for a single mesh. +# +# The split limits can (and should!) be set through the +# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt> +# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and +# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>. +# +# Note that splitting is generally a time-consuming task, but only if there's +# something to split. The use of this step is recommended for most users. +# +aiProcess_SplitLargeMeshes = 0x80 + +## <hr>Removes the node graph and pre-transforms all vertices with +# the local transformation matrices of their nodes. +# +# The output scene still contains nodes, however there is only a +# root node with children, each one referencing only one mesh, +# and each mesh referencing one material. For rendering, you can +# simply render all meshes in order - you don't need to pay +# attention to local transformations and the node hierarchy. +# Animations are removed during this step. +# This step is intended for applications without a scenegraph. +# The step CAN cause some problems: if e.g. a mesh of the asset +# contains normals and another, using the same material index, does not, +# they will be brought together, but the first meshes's part of +# the normal list is zeroed. However, these artifacts are rare. +# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property +# can be set to normalize the scene's spatial dimension to the -1...1 +# range. +# +aiProcess_PreTransformVertices = 0x100 + +## <hr>Limits the number of bones simultaneously affecting a single vertex +# to a maximum value. +# +# If any vertex is affected by more than the maximum number of bones, the least +# important vertex weights are removed and the remaining vertex weights are +# renormalized so that the weights still sum up to 1. +# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in +# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to +# supply your own limit to the post processing step. +# +# If you intend to perform the skinning in hardware, this post processing +# step might be of interest to you. +# +aiProcess_LimitBoneWeights = 0x200 + +## <hr>Validates the imported scene data structure. +# This makes sure that all indices are valid, all animations and +# bones are linked correctly, all material references are correct .. etc. +# +# It is recommended that you capture Assimp's log output if you use this flag, +# so you can easily find out what's wrong if a file fails the +# validation. The validator is quite strict and will find #all# +# inconsistencies in the data structure... 
It is recommended that plugin
+# developers use it to debug their loaders. There are two types of
+# validation failures:
+# <ul>
+# <li>Error: There's something wrong with the imported data. Further
+# postprocessing is not possible and the data is not usable at all.
+# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
+# carry the error message around.</li>
+# <li>Warning: There are some minor issues (e.g. 1000000 animation
+# keyframes with the same time), but further postprocessing and use
+# of the data structure is still safe. Warning details are written
+# to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING</tt> is set
+# in #aiScene::mFlags</li>
+# </ul>
+#
+# This post-processing step is not time-consuming. Its use is not
+# compulsory, but recommended.
+#
+aiProcess_ValidateDataStructure = 0x400
+
+## <hr>Reorders triangles for better vertex cache locality.
+#
+# The step tries to improve the ACMR (average post-transform vertex cache
+# miss ratio) for all meshes. The implementation runs in O(n) and is
+# roughly based on the 'tipsify' algorithm (see <a href="
+# http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf">this
+# paper</a>).
+#
+# If you intend to render huge models in hardware, this step might
+# be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE</tt> config
+# setting can be used to fine-tune the cache optimization.
+#
+aiProcess_ImproveCacheLocality = 0x800
+
+## <hr>Searches for redundant/unreferenced materials and removes them.
+#
+# This is especially useful in combination with the
+# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
+# Both join small meshes with equal characteristics, but they can't do
+# their work if two meshes have different materials. Because several
+# material settings are lost during Assimp's import filters
+# (and because many exporters don't check for redundant materials), huge
+# models often have materials which are defined several times with
+# exactly the same settings.
+#
+# Several material settings not contributing to the final appearance of
+# a surface are ignored in all comparisons (e.g. the material name).
+# So, if you're passing additional information through the
+# content pipeline (probably using #magic# material names), don't
+# specify this flag. Alternatively take a look at the
+# <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST</tt> setting.
+#
+aiProcess_RemoveRedundantMaterials = 0x1000
+
+## <hr>This step tries to determine which meshes have normal vectors
+# that are facing inwards and inverts them.
+#
+# The algorithm is simple but effective:
+# the bounding box of all vertices + their normals is compared against
+# the volume of the bounding box of all vertices without their normals.
+# This works well for most objects; problems might occur with planar
+# surfaces. However, the step tries to filter such cases.
+# The step inverts all in-facing normals. Generally it is recommended
+# to enable this step, although the result is not always correct.
+#
+aiProcess_FixInfacingNormals = 0x2000
+
+## <hr>This step splits meshes with more than one primitive type into
+# homogeneous sub-meshes.
+#
+# The step is executed after the triangulation step. After the step
+# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
+# especially useful for real-time rendering where point and line
+# primitives are often ignored or rendered separately.
+# You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE</tt> option to specify which
+# primitive types you need.
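Since #aiProcess_SortByPType leaves exactly one primitive-type bit set per mesh, a loader can branch on `mPrimitiveTypes` directly. A sketch, with the usual aiPrimitiveType bit values assumed (they are not defined in this module):

```python
# Assumed aiPrimitiveType bit values, mirroring assimp's mesh.h.
aiPrimitiveType_POINT    = 0x1
aiPrimitiveType_LINE     = 0x2
aiPrimitiveType_TRIANGLE = 0x4
aiPrimitiveType_POLYGON  = 0x8

def describe_primitives(primitive_types):
    """Decode a Mesh.mPrimitiveTypes bitmask; after aiProcess_SortByPType
    exactly one of these bits is set per mesh."""
    names = []
    if primitive_types & aiPrimitiveType_POINT:    names.append('points')
    if primitive_types & aiPrimitiveType_LINE:     names.append('lines')
    if primitive_types & aiPrimitiveType_TRIANGLE: names.append('triangles')
    if primitive_types & aiPrimitiveType_POLYGON:  names.append('polygons')
    return names

print(describe_primitives(aiPrimitiveType_TRIANGLE | aiPrimitiveType_LINE))
```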
This can be used to easily exclude +# lines and points, which are rarely used, from the import. +# +aiProcess_SortByPType = 0x8000 + +## <hr>This step searches all meshes for degenerate primitives and +# converts them to proper lines or points. +# +# A face is 'degenerate' if one or more of its points are identical. +# To have the degenerate stuff not only detected and collapsed but +# removed, try one of the following procedures: +# <br><b>1.<b> (if you support lines and points for rendering but don't +# want the degenerates)<br> +# <ul> +# <li>Specify the #aiProcess_FindDegenerates flag. +# <li> +# <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE<tt> option to 1. This will +# cause the step to remove degenerate triangles from the import +# as soon as they're detected. They won't pass any further +# pipeline steps. +# <li> +# <ul> +# <br><b>2.<b>(if you don't support lines and points at all)<br> +# <ul> +# <li>Specify the #aiProcess_FindDegenerates flag. +# <li> +# <li>Specify the #aiProcess_SortByPType flag. This moves line and +# point primitives to separate meshes. +# <li> +# <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE<tt> option to +# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES +# @endcode to cause SortByPType to reject point +# and line meshes from the scene. +# <li> +# <ul> +# @note Degenerate polygons are not necessarily evil and that's why +# they're not removed by default. There are several file formats which +# don't support lines or points, and some exporters bypass the +# format specification and write them as degenerate triangles instead. +# +aiProcess_FindDegenerates = 0x10000 + +## <hr>This step searches all meshes for invalid data, such as zeroed +# normal vectors or invalid UV coords and removesfixes them. This is +# intended to get rid of some common exporter errors. +# +# This is especially useful for normals. If they are invalid, and +# the step recognizes this, they will be removed and can later +# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br> +# The step will also remove meshes that are infinitely small and reduce +# animation tracks consisting of hundreds if redundant keys to a single +# key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY<tt> config property decides +# the accuracy of the check for duplicate animation tracks. +# +aiProcess_FindInvalidData = 0x20000 + +## <hr>This step converts non-UV mappings (such as spherical or +# cylindrical mapping) to proper texture coordinate channels. +# +# Most applications will support UV mapping only, so you will +# probably want to specify this step in every case. Note that Assimp is not +# always able to match the original mapping implementation of the +# 3D app which produced a model perfectly. It's always better to let the +# modelling app compute the UV channels - 3ds max, Maya, Blender, +# LightWave, and Modo do this for example. +# +# @note If this step is not requested, you'll need to process the +# <tt>#AI_MATKEY_MAPPING<tt> material property in order to display all assets +# properly. +# +aiProcess_GenUVCoords = 0x40000 + +## <hr>This step applies per-texture UV transformations and bakes +# them into stand-alone vtexture coordinate channels. +# +# UV transformations are specified per-texture - see the +# <tt>#AI_MATKEY_UVTRANSFORM<tt> material key for more information. +# This step processes all textures with +# transformed input UV coordinates and generates a new (pre-transformed) UV channel +# which replaces the old channel. 
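What #aiProcess_FindDegenerates means by 'degenerate' can be expressed in one line; this is an illustrative check, not the library's code:

```python
def is_degenerate(face_indices):
    """A face is 'degenerate' in the sense used by aiProcess_FindDegenerates
    when at least two of its indices reference the same vertex."""
    return len(set(face_indices)) < len(face_indices)

print(is_degenerate([0, 1, 2]))  # False: proper triangle
print(is_degenerate([0, 1, 1]))  # True: collapses to a line
```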
Most applications won't support UV +# transformations, so you will probably want to specify this step. +# +# @note UV transformations are usually implemented in real-time apps by +# transforming texture coordinates at vertex shader stage with a 3x3 +# (homogenous) transformation matrix. +# +aiProcess_TransformUVCoords = 0x80000 + +## <hr>This step searches for duplicate meshes and replaces them +# with references to the first mesh. +# +# This step takes a while, so don't use it if speed is a concern. +# Its main purpose is to workaround the fact that many export +# file formats don't support instanced meshes, so exporters need to +# duplicate meshes. This step removes the duplicates again. Please +# note that Assimp does not currently support per-node material +# assignment to meshes, which means that identical meshes with +# different materials are currently #not# joined, although this is +# planned for future versions. +# +aiProcess_FindInstances = 0x100000 + +## <hr>A postprocessing step to reduce the number of meshes. +# +# This will, in fact, reduce the number of draw calls. +# +# This is a very effective optimization and is recommended to be used +# together with #aiProcess_OptimizeGraph, if possible. The flag is fully +# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType. +# +aiProcess_OptimizeMeshes = 0x200000 + + +## <hr>A postprocessing step to optimize the scene hierarchy. +# +# Nodes without animations, bones, lights or cameras assigned are +# collapsed and joined. +# +# Node names can be lost during this step. If you use special 'tag nodes' +# to pass additional information through your content pipeline, use the +# <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST<tt> setting to specify a list of node +# names you want to be kept. Nodes matching one of the names in this list won't +# be touched or modified. +# +# Use this flag with caution. Most simple files will be collapsed to a +# single node, so complex hierarchies are usually completely lost. This is not +# useful for editor environments, but probably a very effective +# optimization if you just want to get the model data, convert it to your +# own format, and render it as fast as possible. +# +# This flag is designed to be used with #aiProcess_OptimizeMeshes for best +# results. +# +# @note 'Crappy' scenes with thousands of extremely small meshes packed +# in deeply nested nodes exist for almost all file formats. +# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph +# usually fixes them all and makes them renderable. +# +aiProcess_OptimizeGraph = 0x400000 + +## <hr>This step flips all UV coordinates along the y-axis and adjusts +# material settings and bitangents accordingly. +# +# <b>Output UV coordinate system:<b> +# @code +# 0y|0y ---------- 1x|0y +# | | +# | | +# | | +# 0x|1y ---------- 1x|1y +# @endcode +# +# You'll probably want to consider this flag if you use Direct3D for +# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this +# setting and bundles all conversions typically required for D3D-based +# applications. +# +aiProcess_FlipUVs = 0x800000 + +## <hr>This step adjusts the output face winding order to be CW. +# +# The default face winding order is counter clockwise (CCW). +# +# <b>Output face order:<b> +# @code +# x2 +# +# x0 +# x1 +# @endcode +# +aiProcess_FlipWindingOrder = 0x1000000 + +## <hr>This step splits meshes with many bones into sub-meshes so that each +# su-bmesh has fewer or as many bones as a given limit. 
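The effect of #aiProcess_FlipUVs and #aiProcess_FlipWindingOrder is simple enough to mimic by hand, for instance when deciding whether you need the flags at all. A small numpy sketch of the idea:

```python
import numpy

def flip_uvs(uv):
    """What aiProcess_FlipUVs does to an (N, 2) array of texture coordinates:
    mirror the V axis so the origin moves to the upper-left corner."""
    flipped = numpy.array(uv, dtype=numpy.float32)
    flipped[:, 1] = 1.0 - flipped[:, 1]
    return flipped

def flip_winding(face):
    """What aiProcess_FlipWindingOrder does per face: reverse the index order,
    turning CCW faces into CW ones."""
    return list(reversed(face))

print(flip_uvs([[0.25, 0.0], [1.0, 1.0]]))
print(flip_winding([0, 1, 2]))
```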
+# +aiProcess_SplitByBoneCount = 0x2000000 + +## <hr>This step removes bones losslessly or according to some threshold. +# +# In some cases (i.e. formats that require it) exporters are forced to +# assign dummy bone weights to otherwise static meshes assigned to +# animated meshes. Full, weight-based skinning is expensive while +# animating nodes is extremely cheap, so this step is offered to clean up +# the data in that regard. +# +# Use <tt>#AI_CONFIG_PP_DB_THRESHOLD<tt> to control this. +# Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE<tt> if you want bones removed if and +# only if all bones within the scene qualify for removal. +# +aiProcess_Debone = 0x4000000 + +aiProcess_GenEntityMeshes = 0x100000 +aiProcess_OptimizeAnimations = 0x200000 +aiProcess_FixTexturePaths = 0x200000 +aiProcess_EmbedTextures = 0x10000000, + +## @def aiProcess_ConvertToLeftHanded + # @brief Shortcut flag for Direct3D-based applications. + # + # Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and + # #aiProcess_FlipWindingOrder flags. + # The output data matches Direct3D's conventions: left-handed geometry, upper-left + # origin for UV coordinates and finally clockwise face order, suitable for CCW culling. + # + # @deprecated + # +aiProcess_ConvertToLeftHanded = ( \ + aiProcess_MakeLeftHanded | \ + aiProcess_FlipUVs | \ + aiProcess_FlipWindingOrder | \ + 0 ) + + +## @def aiProcessPreset_TargetRealtimeUse_Fast + # @brief Default postprocess configuration optimizing the data for real-time rendering. + # + # Applications would want to use this preset to load models on end-user PCs, + # maybe for direct use in game. + # + # If you're using DirectX, don't forget to combine this value with + # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations + # in your application apply the #aiProcess_TransformUVCoords step, too. + # @note Please take the time to read the docs for the steps enabled by this preset. + # Some of them offer further configurable properties, while some of them might not be of + # use for you so it might be better to not specify them. + # +aiProcessPreset_TargetRealtime_Fast = ( \ + aiProcess_CalcTangentSpace | \ + aiProcess_GenNormals | \ + aiProcess_JoinIdenticalVertices | \ + aiProcess_Triangulate | \ + aiProcess_GenUVCoords | \ + aiProcess_SortByPType | \ + 0 ) + + ## @def aiProcessPreset_TargetRealtime_Quality + # @brief Default postprocess configuration optimizing the data for real-time rendering. + # + # Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration + # performs some extra optimizations to improve rendering speed and + # to minimize memory usage. It could be a good choice for a level editor + # environment where import speed is not so important. + # + # If you're using DirectX, don't forget to combine this value with + # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations + # in your application apply the #aiProcess_TransformUVCoords step, too. + # @note Please take the time to read the docs for the steps enabled by this preset. + # Some of them offer further configurable properties, while some of them might not be + # of use for you so it might be better to not specify them. 
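A typical Direct3D-style import combines one of the real-time presets with the left-handed shortcut flag defined above. A minimal sketch, again assuming `pyassimp.load()` takes the bitmask via its `processing` argument and reusing the hypothetical `hello.3ds`:

```python
from pyassimp import load
from pyassimp.postprocess import (aiProcessPreset_TargetRealtime_Fast,
                                  aiProcess_ConvertToLeftHanded)

# Real-time preset plus left-handed geometry, flipped UVs and CW winding.
flags = aiProcessPreset_TargetRealtime_Fast | aiProcess_ConvertToLeftHanded
with load('hello.3ds', processing=flags) as scene:
    print('%d meshes after post-processing' % len(scene.meshes))
```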
+ # +aiProcessPreset_TargetRealtime_Quality = ( \ + aiProcess_CalcTangentSpace | \ + aiProcess_GenSmoothNormals | \ + aiProcess_JoinIdenticalVertices | \ + aiProcess_ImproveCacheLocality | \ + aiProcess_LimitBoneWeights | \ + aiProcess_RemoveRedundantMaterials | \ + aiProcess_SplitLargeMeshes | \ + aiProcess_Triangulate | \ + aiProcess_GenUVCoords | \ + aiProcess_SortByPType | \ + aiProcess_FindDegenerates | \ + aiProcess_FindInvalidData | \ + 0 ) + + ## @def aiProcessPreset_TargetRealtime_MaxQuality + # @brief Default postprocess configuration optimizing the data for real-time rendering. + # + # This preset enables almost every optimization step to achieve perfectly + # optimized data. It's your choice for level editor environments where import speed + # is not important. + # + # If you're using DirectX, don't forget to combine this value with + # the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations + # in your application, apply the #aiProcess_TransformUVCoords step, too. + # @note Please take the time to read the docs for the steps enabled by this preset. + # Some of them offer further configurable properties, while some of them might not be + # of use for you so it might be better to not specify them. + # +aiProcessPreset_TargetRealtime_MaxQuality = ( \ + aiProcessPreset_TargetRealtime_Quality | \ + aiProcess_FindInstances | \ + aiProcess_ValidateDataStructure | \ + aiProcess_OptimizeMeshes | \ + 0 ) + + diff --git a/src/mesh/assimp-master/port/PyAssimp/pyassimp/structs.py b/src/mesh/assimp-master/port/PyAssimp/pyassimp/structs.py new file mode 100644 index 0000000..e1fba19 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/pyassimp/structs.py @@ -0,0 +1,1135 @@ +#-*- coding: utf-8 -*- + +from ctypes import POINTER, c_void_p, c_uint, c_char, c_float, Structure, c_double, c_ubyte, c_size_t, c_uint32 + + +class Vector2D(Structure): + """ + See 'vector2.h' for details. + """ + + + _fields_ = [ + ("x", c_float),("y", c_float), + ] + +class Matrix3x3(Structure): + """ + See 'matrix3x3.h' for details. + """ + + + _fields_ = [ + ("a1", c_float),("a2", c_float),("a3", c_float), + ("b1", c_float),("b2", c_float),("b3", c_float), + ("c1", c_float),("c2", c_float),("c3", c_float), + ] + +class Texel(Structure): + """ + See 'texture.h' for details. + """ + + _fields_ = [ + ("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte), + ] + +class Color4D(Structure): + """ + See 'color4.h' for details. + """ + + + _fields_ = [ + # Red, green, blue and alpha color values + ("r", c_float),("g", c_float),("b", c_float),("a", c_float), + ] + +class Plane(Structure): + """ + See 'types.h' for details. + """ + + _fields_ = [ + # Plane equation + ("a", c_float),("b", c_float),("c", c_float),("d", c_float), + ] + +class Color3D(Structure): + """ + See 'types.h' for details. + """ + + _fields_ = [ + # Red, green and blue color values + ("r", c_float),("g", c_float),("b", c_float), + ] + +class String(Structure): + """ + See 'types.h' for details. + """ + + MAXLEN = 1024 + + _fields_ = [ + # Binary length of the string excluding the terminal 0. This is NOT the + # logical length of strings containing UTF-8 multibyte sequences! It's + # the number of bytes from the beginning of the string to its end. + ("length", c_uint32), + + # String buffer. Size limit is MAXLEN + ("data", c_char*MAXLEN), + ] + +class MaterialPropertyString(Structure): + """ + See 'MaterialSystem.cpp' for details. 
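The `String` struct above mirrors aiString: `length` is a byte count and `data` a fixed-size buffer. A small sketch of building and decoding one by hand (the pyassimp wrapper normally does this for you):

```python
from pyassimp.structs import String

def aistring_to_str(s):
    """Decode an aiString-style struct: `length` is the byte length of `data`
    (not the character count for multi-byte UTF-8 text)."""
    return s.data[:s.length].decode('utf-8', errors='replace')

name = String()
name.data = b'Scene'          # ctypes pads the c_char array with NULs
name.length = len(b'Scene')
print(aistring_to_str(name))  # -> 'Scene'
```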
+ + The size of length is truncated to 4 bytes on 64-bit platforms when used as a + material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details). + """ + + MAXLEN = 1024 + + _fields_ = [ + # Binary length of the string excluding the terminal 0. This is NOT the + # logical length of strings containing UTF-8 multibyte sequences! It's + # the number of bytes from the beginning of the string to its end. + ("length", c_uint32), + + # String buffer. Size limit is MAXLEN + ("data", c_char*MAXLEN), + ] + +class MemoryInfo(Structure): + """ + See 'types.h' for details. + """ + + _fields_ = [ + # Storage allocated for texture data + ("textures", c_uint), + + # Storage allocated for material data + ("materials", c_uint), + + # Storage allocated for mesh data + ("meshes", c_uint), + + # Storage allocated for node data + ("nodes", c_uint), + + # Storage allocated for animation data + ("animations", c_uint), + + # Storage allocated for camera data + ("cameras", c_uint), + + # Storage allocated for light data + ("lights", c_uint), + + # Total storage allocated for the full import. + ("total", c_uint), + ] + +class Quaternion(Structure): + """ + See 'quaternion.h' for details. + """ + + + _fields_ = [ + # w,x,y,z components of the quaternion + ("w", c_float),("x", c_float),("y", c_float),("z", c_float), + ] + +class Face(Structure): + """ + See 'mesh.h' for details. + """ + + _fields_ = [ + # Number of indices defining this face. + # The maximum value for this member is + #AI_MAX_FACE_INDICES. + ("mNumIndices", c_uint), + + # Pointer to the indices array. Size of the array is given in numIndices. + ("mIndices", POINTER(c_uint)), + ] + +class VertexWeight(Structure): + """ + See 'mesh.h' for details. + """ + + _fields_ = [ + # Index of the vertex which is influenced by the bone. + ("mVertexId", c_uint), + + # The strength of the influence in the range (0...1). + # The influence from all bones at one vertex amounts to 1. + ("mWeight", c_float), + ] + +class Matrix4x4(Structure): + """ + See 'matrix4x4.h' for details. + """ + + + _fields_ = [ + ("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float), + ("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float), + ("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float), + ("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float), + ] + +class Vector3D(Structure): + """ + See 'vector3.h' for details. + """ + + + _fields_ = [ + ("x", c_float),("y", c_float),("z", c_float), + ] + +class MeshKey(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # The time of this key + ("mTime", c_double), + + # Index into the aiMesh::mAnimMeshes array of the + # mesh corresponding to the + #aiMeshAnim hosting this + # key frame. The referenced anim mesh is evaluated + # according to the rules defined in the docs for + #aiAnimMesh. + ("mValue", c_uint), + ] + +class MetadataEntry(Structure): + """ + See 'metadata.h' for details + """ + AI_BOOL = 0 + AI_INT32 = 1 + AI_UINT64 = 2 + AI_FLOAT = 3 + AI_DOUBLE = 4 + AI_AISTRING = 5 + AI_AIVECTOR3D = 6 + AI_META_MAX = 7 + _fields_ = [ + # The type field uniquely identifies the underlying type of the data field + ("mType", c_uint), + ("mData", c_void_p), + ] + +class Metadata(Structure): + """ + See 'metadata.h' for details + """ + _fields_ = [ + # Length of the mKeys and mValues arrays, respectively + ("mNumProperties", c_uint), + + # Arrays of keys, may not be NULL. Entries in this array may not be NULL + # as well. 
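Reading these raw structs from Python is mostly pointer arithmetic via ctypes. A sketch that copies a row-major `Matrix4x4` into numpy and collects a `Face`'s indices; the helper names are illustrative, not part of pyassimp:

```python
import numpy
from pyassimp.structs import Matrix4x4, Face

def matrix_to_numpy(m):
    """Copy a row-major Matrix4x4 struct into a 4x4 numpy array."""
    return numpy.array([[m.a1, m.a2, m.a3, m.a4],
                        [m.b1, m.b2, m.b3, m.b4],
                        [m.c1, m.c2, m.c3, m.c4],
                        [m.d1, m.d2, m.d3, m.d4]], dtype=numpy.float32)

def face_indices(face):
    """Read the mNumIndices entries behind the mIndices pointer of a Face."""
    return [face.mIndices[i] for i in range(face.mNumIndices)]
```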
+ ("mKeys", POINTER(String)), + + # Arrays of values, may not be NULL. Entries in this array may be NULL + # if the corresponding property key has no assigned value. + ("mValues", POINTER(MetadataEntry)), + ] + +class Node(Structure): + """ + See 'scene.h' for details. + """ + + +Node._fields_ = [ + # The name of the node. + # The name might be empty (length of zero) but all nodes which + # need to be accessed afterwards by bones or anims are usually named. + # Multiple nodes may have the same name, but nodes which are accessed + # by bones (see + #aiBone and + #aiMesh::mBones) *must* be unique. + # Cameras and lights are assigned to a specific node name - if there + # are multiple nodes with this name, they're assigned to each of them. + # <br> + # There are no limitations regarding the characters contained in + # this text. You should be able to handle stuff like whitespace, tabs, + # linefeeds, quotation marks, ampersands, ... . + ("mName", String), + + # The transformation relative to the node's parent. + ("mTransformation", Matrix4x4), + + # Parent node. NULL if this node is the root node. + ("mParent", POINTER(Node)), + + # The number of child nodes of this node. + ("mNumChildren", c_uint), + + # The child nodes of this node. NULL if mNumChildren is 0. + ("mChildren", POINTER(POINTER(Node))), + + # The number of meshes of this node. + ("mNumMeshes", c_uint), + + # The meshes of this node. Each entry is an index into the mesh + ("mMeshes", POINTER(c_uint)), + + # Metadata associated with this node or NULL if there is no metadata. + # Whether any metadata is generated depends on the source file format. + ("mMetadata", POINTER(Metadata)), + ] + +class Light(Structure): + """ + See 'light.h' for details. + """ + + + _fields_ = [ + # The name of the light source. + # There must be a node in the scenegraph with the same name. + # This node specifies the position of the light in the scene + # hierarchy and can be animated. + ("mName", String), + + # The type of the light source. + # aiLightSource_UNDEFINED is not a valid value for this member. + ("mType", c_uint), + + # Position of the light source in space. Relative to the + # transformation of the node corresponding to the light. + # The position is undefined for directional lights. + ("mPosition", Vector3D), + + # Direction of the light source in space. Relative to the + # transformation of the node corresponding to the light. + # The direction is undefined for point lights. The vector + # may be normalized, but it needn't. + ("mDirection", Vector3D), + + # Up direction of the light source in space. Relative to the + # transformation of the node corresponding to the light. + # + # The direction is undefined for point lights. The vector + # may be normalized, but it needn't. + ("mUp", Vector3D), + + # Constant light attenuation factor. + # The intensity of the light source at a given distance 'd' from + # the light's position is + # @code + # Atten = 1/( att0 + att1 + # d + att2 + # d*d) + # @endcode + # This member corresponds to the att0 variable in the equation. + # Naturally undefined for directional lights. + ("mAttenuationConstant", c_float), + + # Linear light attenuation factor. + # The intensity of the light source at a given distance 'd' from + # the light's position is + # @code + # Atten = 1/( att0 + att1 + # d + att2 + # d*d) + # @endcode + # This member corresponds to the att1 variable in the equation. + # Naturally undefined for directional lights. + ("mAttenuationLinear", c_float), + + # Quadratic light attenuation factor. 
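With `mNumChildren` and the `POINTER(POINTER(Node))` array, the raw node hierarchy can be walked recursively. An illustrative sketch (pyassimp's wrapper exposes the same data more conveniently as `node.children`):

```python
def walk_nodes(node, indent=0):
    """Recursively print a raw Node hierarchy. `node` is a dereferenced
    pyassimp.structs.Node, e.g. scene.mRootNode.contents."""
    print('  ' * indent + node.mName.data.decode('utf-8', errors='replace'))
    for i in range(node.mNumChildren):
        walk_nodes(node.mChildren[i].contents, indent + 1)
```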
+ # The intensity of the light source at a given distance 'd' from + # the light's position is + # @code + # Atten = 1/( att0 + att1 + # d + att2 + # d*d) + # @endcode + # This member corresponds to the att2 variable in the equation. + # Naturally undefined for directional lights. + ("mAttenuationQuadratic", c_float), + + # Diffuse color of the light source + # The diffuse light color is multiplied with the diffuse + # material color to obtain the final color that contributes + # to the diffuse shading term. + ("mColorDiffuse", Color3D), + + # Specular color of the light source + # The specular light color is multiplied with the specular + # material color to obtain the final color that contributes + # to the specular shading term. + ("mColorSpecular", Color3D), + + # Ambient color of the light source + # The ambient light color is multiplied with the ambient + # material color to obtain the final color that contributes + # to the ambient shading term. Most renderers will ignore + # this value it, is just a remaining of the fixed-function pipeline + # that is still supported by quite many file formats. + ("mColorAmbient", Color3D), + + # Inner angle of a spot light's light cone. + # The spot light has maximum influence on objects inside this + # angle. The angle is given in radians. It is 2PI for point + # lights and undefined for directional lights. + ("mAngleInnerCone", c_float), + + # Outer angle of a spot light's light cone. + # The spot light does not affect objects outside this angle. + # The angle is given in radians. It is 2PI for point lights and + # undefined for directional lights. The outer angle must be + # greater than or equal to the inner angle. + # It is assumed that the application uses a smooth + # interpolation between the inner and the outer cone of the + # spot light. + ("mAngleOuterCone", c_float), + + # Size of area light source. + ("mSize", Vector2D), + ] + +class Texture(Structure): + """ + See 'texture.h' for details. + """ + + + _fields_ = [ + # Width of the texture, in pixels + # If mHeight is zero the texture is compressed in a format + # like JPEG. In this case mWidth specifies the size of the + # memory area pcData is pointing to, in bytes. + ("mWidth", c_uint), + + # Height of the texture, in pixels + # If this value is zero, pcData points to an compressed texture + # in any format (e.g. JPEG). + ("mHeight", c_uint), + + # A hint from the loader to make it easier for applications + # to determine the type of embedded textures. + # + # If mHeight != 0 this member is show how data is packed. Hint will consist of + # two parts: channel order and channel bitness (count of the bits for every + # color channel). For simple parsing by the viewer it's better to not omit + # absent color channel and just use 0 for bitness. For example: + # 1. Image contain RGBA and 8 bit per channel, achFormatHint == "rgba8888"; + # 2. Image contain ARGB and 8 bit per channel, achFormatHint == "argb8888"; + # 3. Image contain RGB and 5 bit for R and B channels and 6 bit for G channel, + # achFormatHint == "rgba5650"; + # 4. One color image with B channel and 1 bit for it, achFormatHint == "rgba0010"; + # If mHeight == 0 then achFormatHint is set set to '\\0\\0\\0\\0' if the loader has no additional + # information about the texture file format used OR the + # file extension of the format without a trailing dot. If there + # are multiple file extensions for a format, the shortest + # extension is chosen (JPEG maps to 'jpg', not to 'jpeg'). + # E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. 
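The attenuation formula quoted in the comments above translates directly into code; a tiny sketch with made-up coefficients:

```python
def attenuation(d, att0, att1, att2):
    """Point/spot light attenuation as described above:
    Atten = 1 / (att0 + att1*d + att2*d*d)."""
    return 1.0 / (att0 + att1 * d + att2 * d * d)

# e.g. constant = 1.0, linear = 0.09, quadratic = 0.032 at distance 5
print(attenuation(5.0, 1.0, 0.09, 0.032))
```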
All characters are lower-case. + # The fourth character will always be '\\0'. + ("achFormatHint", c_char*9), + + # Data of the texture. + # Points to an array of mWidth + # mHeight aiTexel's. + # The format of the texture data is always ARGB8888 to + # make the implementation for user of the library as easy + # as possible. If mHeight = 0 this is a pointer to a memory + # buffer of size mWidth containing the compressed texture + # data. Good luck, have fun! + ("pcData", POINTER(Texel)), + + # Texture original filename + # Used to get the texture reference + ("mFilename", String), + ] + +class Ray(Structure): + """ + See 'types.h' for details. + """ + + _fields_ = [ + # Position and direction of the ray + ("pos", Vector3D),("dir", Vector3D), + ] + +class UVTransform(Structure): + """ + See 'material.h' for details. + """ + + _fields_ = [ + # Translation on the u and v axes. + # The default value is (0|0). + ("mTranslation", Vector2D), + + # Scaling on the u and v axes. + # The default value is (1|1). + ("mScaling", Vector2D), + + # Rotation - in counter-clockwise direction. + # The rotation angle is specified in radians. The + # rotation center is 0.5f|0.5f. The default value + # 0.f. + ("mRotation", c_float), + ] + +class MaterialProperty(Structure): + """ + See 'material.h' for details. + """ + + _fields_ = [ + # Specifies the name of the property (key) + # Keys are generally case insensitive. + ("mKey", String), + + # Textures: Specifies their exact usage semantic. + # For non-texture properties, this member is always 0 + # (or, better-said, + #aiTextureType_NONE). + ("mSemantic", c_uint), + + # Textures: Specifies the index of the texture. + # For non-texture properties, this member is always 0. + ("mIndex", c_uint), + + # Size of the buffer mData is pointing to, in bytes. + # This value may not be 0. + ("mDataLength", c_uint), + + # Type information for the property. + # Defines the data layout inside the data buffer. This is used + # by the library internally to perform debug checks and to + # utilize proper type conversions. + # (It's probably a hacky solution, but it works.) + ("mType", c_uint), + + # Binary buffer to hold the property's value. + # The size of the buffer is always mDataLength. + ("mData", POINTER(c_char)), + ] + +class Material(Structure): + """ + See 'material.h' for details. + """ + + _fields_ = [ + # List of all material properties loaded. + ("mProperties", POINTER(POINTER(MaterialProperty))), + + # Number of properties in the data base + ("mNumProperties", c_uint), + + # Storage allocated + ("mNumAllocated", c_uint), + ] + +class Bone(Structure): + """ + See 'mesh.h' for details. + """ + + _fields_ = [ + # The name of the bone. + ("mName", String), + + # The number of vertices affected by this bone + # The maximum value for this member is + #AI_MAX_BONE_WEIGHTS. + ("mNumWeights", c_uint), + + # The vertices affected by this bone + ("mWeights", POINTER(VertexWeight)), + + # Matrix that transforms from mesh space to bone space in bind pose + ("mOffsetMatrix", Matrix4x4), + ] + + +class AnimMesh(Structure): + """ + See 'mesh.h' for details. + """ + + AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8 + AI_MAX_NUMBER_OF_COLOR_SETS = 0x8 + + _fields_ = [ + # Anim Mesh name + ("mName", String), + + # Replacement for aiMesh::mVertices. If this array is non-NULL, + # it *must* contain mNumVertices entries. 
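The `mHeight == 0` convention for compressed embedded textures can be handled with a small helper; this is a sketch using plain ctypes, not an official pyassimp API:

```python
import ctypes

def embedded_texture_bytes(tex):
    """If mHeight is 0 the texture is compressed (e.g. PNG/JPEG) and mWidth is
    the byte size of the buffer behind pcData; otherwise pcData points to
    mWidth * mHeight Texel entries (4 bytes each, ARGB8888)."""
    if tex.mHeight == 0:
        return ctypes.string_at(tex.pcData, tex.mWidth)
    return ctypes.string_at(tex.pcData, tex.mWidth * tex.mHeight * 4)
```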
The corresponding + # array in the host mesh must be non-NULL as well - animation + # meshes may neither add or nor remove vertex components (if + # a replacement array is NULL and the corresponding source + # array is not, the source data is taken instead) + ("mVertices", POINTER(Vector3D)), + + # Replacement for aiMesh::mNormals. + ("mNormals", POINTER(Vector3D)), + + # Replacement for aiMesh::mTangents. + ("mTangents", POINTER(Vector3D)), + + # Replacement for aiMesh::mBitangents. + ("mBitangents", POINTER(Vector3D)), + + # Replacement for aiMesh::mColors + ("mColors", POINTER(Color4D) * AI_MAX_NUMBER_OF_COLOR_SETS), + + # Replacement for aiMesh::mTextureCoords + ("mTextureCoords", POINTER(Vector3D) * AI_MAX_NUMBER_OF_TEXTURECOORDS), + + # The number of vertices in the aiAnimMesh, and thus the length of all + # the member arrays. + # + # This has always the same value as the mNumVertices property in the + # corresponding aiMesh. It is duplicated here merely to make the length + # of the member arrays accessible even if the aiMesh is not known, e.g. + # from language bindings. + ("mNumVertices", c_uint), + + # Weight of the AnimMesh. + ("mWeight", c_float), + ] + + +class Mesh(Structure): + """ + See 'mesh.h' for details. + """ + + AI_MAX_FACE_INDICES = 0x7fff + AI_MAX_BONE_WEIGHTS = 0x7fffffff + AI_MAX_VERTICES = 0x7fffffff + AI_MAX_FACES = 0x7fffffff + AI_MAX_NUMBER_OF_COLOR_SETS = 0x8 + AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8 + + _fields_ = [ # Bitwise combination of the members of the + #aiPrimitiveType enum. + # This specifies which types of primitives are present in the mesh. + # The "SortByPrimitiveType"-Step can be used to make sure the + # output meshes consist of one primitive type each. + ("mPrimitiveTypes", c_uint), + + # The number of vertices in this mesh. + # This is also the size of all of the per-vertex data arrays. + # The maximum value for this member is + #AI_MAX_VERTICES. + ("mNumVertices", c_uint), + + # The number of primitives (triangles, polygons, lines) in this mesh. + # This is also the size of the mFaces array. + # The maximum value for this member is + #AI_MAX_FACES. + ("mNumFaces", c_uint), + + # Vertex positions. + # This array is always present in a mesh. The array is + # mNumVertices in size. + ("mVertices", POINTER(Vector3D)), + + # Vertex normals. + # The array contains normalized vectors, NULL if not present. + # The array is mNumVertices in size. Normals are undefined for + # point and line primitives. A mesh consisting of points and + # lines only may not have normal vectors. Meshes with mixed + # primitive types (i.e. lines and triangles) may have normals, + # but the normals for vertices that are only referenced by + # point or line primitives are undefined and set to QNaN (WARN: + # qNaN compares to inequal to *everything*, even to qNaN itself. + # Using code like this to check whether a field is qnan is: + # @code + #define IS_QNAN(f) (f != f) + # @endcode + # still dangerous because even 1.f == 1.f could evaluate to false! ( + # remember the subtleties of IEEE754 artithmetics). Use stuff like + # @c fpclassify instead. + # @note Normal vectors computed by Assimp are always unit-length. + # However, this needn't apply for normals that have been taken + # directly from the model file. + ("mNormals", POINTER(Vector3D)), + + # Vertex tangents. + # The tangent of a vertex points in the direction of the positive + # X texture axis. The array contains normalized vectors, NULL if + # not present. The array is mNumVertices in size. 
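Accessing the per-vertex arrays of the raw `Mesh` follows the same pattern: a pointer plus `mNumVertices`. An illustrative reader (the wrapper normally gives you `mesh.vertices` as a numpy array instead):

```python
def raw_vertices(mesh):
    """Read vertex positions straight from the ctypes Mesh: mVertices points
    to mNumVertices Vector3D entries."""
    return [(mesh.mVertices[i].x, mesh.mVertices[i].y, mesh.mVertices[i].z)
            for i in range(mesh.mNumVertices)]
```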
A mesh consisting + # of points and lines only may not have normal vectors. Meshes with + # mixed primitive types (i.e. lines and triangles) may have + # normals, but the normals for vertices that are only referenced by + # point or line primitives are undefined and set to qNaN. See + # the + #mNormals member for a detailed discussion of qNaNs. + # @note If the mesh contains tangents, it automatically also + # contains bitangents (the bitangent is just the cross product of + # tangent and normal vectors). + ("mTangents", POINTER(Vector3D)), + + # Vertex bitangents. + # The bitangent of a vertex points in the direction of the positive + # Y texture axis. The array contains normalized vectors, NULL if not + # present. The array is mNumVertices in size. + # @note If the mesh contains tangents, it automatically also contains + # bitangents. + ("mBitangents", POINTER(Vector3D)), + + # Vertex color sets. + # A mesh may contain 0 to + #AI_MAX_NUMBER_OF_COLOR_SETS vertex + # colors per vertex. NULL if not present. Each array is + # mNumVertices in size if present. + ("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS), + + # Vertex texture coords, also known as UV channels. + # A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per + # vertex. NULL if not present. The array is mNumVertices in size. + ("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS), + + # Specifies the number of components for a given UV channel. + # Up to three channels are supported (UVW, for accessing volume + # or cube maps). If the value is 2 for a given channel n, the + # component p.z of mTextureCoords[n][p] is set to 0.0f. + # If the value is 1 for a given channel, p.y is set to 0.0f, too. + # @note 4D coords are not supported + ("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS), + + # The faces the mesh is constructed from. + # Each face refers to a number of vertices by their indices. + # This array is always present in a mesh, its size is given + # in mNumFaces. If the + #AI_SCENE_FLAGS_NON_VERBOSE_FORMAT + # is NOT set each face references an unique set of vertices. + ("mFaces", POINTER(Face)), + + # The number of bones this mesh contains. + # Can be 0, in which case the mBones array is NULL. + ("mNumBones", c_uint), + + # The bones of this mesh. + # A bone consists of a name by which it can be found in the + # frame hierarchy and a set of vertex weights. + ("mBones", POINTER(POINTER(Bone))), + + # The material used by this mesh. + # A mesh does use only a single material. If an imported model uses + # multiple materials, the import splits up the mesh. Use this value + # as index into the scene's material list. + ("mMaterialIndex", c_uint), + + # Name of the mesh. Meshes can be named, but this is not a + # requirement and leaving this field empty is totally fine. + # There are mainly three uses for mesh names: + # - some formats name nodes and meshes independently. + # - importers tend to split meshes up to meet the + # one-material-per-mesh requirement. Assigning + # the same (dummy) name to each of the result meshes + # aids the caller at recovering the original mesh + # partitioning. + # - Vertex animations refer to meshes by their names. + ("mName", String), + + # The number of attachment meshes. Note! Currently only works with Collada loader. + ("mNumAnimMeshes", c_uint), + + # Attachment meshes for this mesh, for vertex-based animation. + # Attachment meshes carry replacement data for some of the + # mesh'es vertex components (usually positions, normals). + # Note! 
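The bone layout described above (each bone owns a list of `VertexWeight`, not the other way around) is often inverted for skinning. A sketch that regroups weights per vertex; the helper is hypothetical:

```python
from collections import defaultdict

def vertex_bone_weights(mesh):
    """Collect (bone name, weight) pairs per vertex index from the raw Mesh,
    using mNumBones/mBones and each bone's mWeights array."""
    weights = defaultdict(list)
    for b in range(mesh.mNumBones):
        bone = mesh.mBones[b].contents
        name = bone.mName.data.decode('utf-8', errors='replace')
        for w in range(bone.mNumWeights):
            vw = bone.mWeights[w]
            weights[vw.mVertexId].append((name, vw.mWeight))
    return weights
```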
Currently only works with Collada loader. + ("mAnimMeshes", POINTER(POINTER(AnimMesh))), + + # Method of morphing when animeshes are specified. + ("mMethod", c_uint), + + ] + +class Camera(Structure): + """ + See 'camera.h' for details. + """ + + + _fields_ = [ + # The name of the camera. + # There must be a node in the scenegraph with the same name. + # This node specifies the position of the camera in the scene + # hierarchy and can be animated. + ("mName", String), + + # Position of the camera relative to the coordinate space + # defined by the corresponding node. + # The default value is 0|0|0. + ("mPosition", Vector3D), + + # 'Up' - vector of the camera coordinate system relative to + # the coordinate space defined by the corresponding node. + # The 'right' vector of the camera coordinate system is + # the cross product of the up and lookAt vectors. + # The default value is 0|1|0. The vector + # may be normalized, but it needn't. + ("mUp", Vector3D), + + # 'LookAt' - vector of the camera coordinate system relative to + # the coordinate space defined by the corresponding node. + # This is the viewing direction of the user. + # The default value is 0|0|1. The vector + # may be normalized, but it needn't. + ("mLookAt", Vector3D), + + # Half horizontal field of view angle, in radians. + # The field of view angle is the angle between the center + # line of the screen and the left or right border. + # The default value is 1/4PI. + ("mHorizontalFOV", c_float), + + # Distance of the near clipping plane from the camera. + # The value may not be 0.f (for arithmetic reasons to prevent + # a division through zero). The default value is 0.1f. + ("mClipPlaneNear", c_float), + + # Distance of the far clipping plane from the camera. + # The far clipping plane must, of course, be further away than the + # near clipping plane. The default value is 1000.f. The ratio + # between the near and the far plane should not be too + # large (between 1000-10000 should be ok) to avoid floating-point + # inaccuracies which could lead to z-fighting. + ("mClipPlaneFar", c_float), + + # Screen aspect ratio. + # This is the ration between the width and the height of the + # screen. Typical values are 4/3, 1/2 or 1/1. This value is + # 0 if the aspect ratio is not defined in the source file. + # 0 is also the default value. + ("mAspect", c_float), + ] + +class VectorKey(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # The time of this key + ("mTime", c_double), + + # The value of this key + ("mValue", Vector3D), + ] + +class QuatKey(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # The time of this key + ("mTime", c_double), + + # The value of this key + ("mValue", Quaternion), + ] + +class MeshMorphKey(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # The time of this key + ("mTime", c_double), + + # The values and weights at the time of this key + ("mValues", POINTER(c_uint)), + ("mWeights", POINTER(c_double)), + + # The number of values and weights + ("mNumValuesAndWeights", c_uint), + + ] + +class NodeAnim(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # The name of the node affected by this animation. The node + # must exist and it must be unique. + ("mNodeName", String), + + # The number of position keys + ("mNumPositionKeys", c_uint), + + # The position keys of this animation channel. Positions are + # specified as 3D vector. The array is mNumPositionKeys in size. 
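A viewer usually turns the `Camera` fields into a projection matrix. A sketch under the conventions stated above (mHorizontalFOV is the half horizontal angle, mAspect is width/height); this mirrors the standard symmetric OpenGL frustum, not code taken from the Assimp sources:

```python
import math
import numpy

def projection_from_camera(hfov_half, aspect, near, far):
    """Build an OpenGL-style projection matrix from aiCamera-like parameters."""
    right = near * math.tan(hfov_half)   # half width of the near plane
    top = right / aspect                 # half height of the near plane
    return numpy.array([
        [near / right, 0.0,        0.0,                          0.0],
        [0.0,          near / top, 0.0,                          0.0],
        [0.0,          0.0,        -(far + near) / (far - near), -2.0 * far * near / (far - near)],
        [0.0,          0.0,        -1.0,                         0.0]], dtype=numpy.float32)
```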
+ # If there are position keys, there will also be at least one + # scaling and one rotation key. + ("mPositionKeys", POINTER(VectorKey)), + + # The number of rotation keys + ("mNumRotationKeys", c_uint), + + # The rotation keys of this animation channel. Rotations are + # given as quaternions, which are 4D vectors. The array is + # mNumRotationKeys in size. + # If there are rotation keys, there will also be at least one + # scaling and one position key. + ("mRotationKeys", POINTER(QuatKey)), + + # The number of scaling keys + ("mNumScalingKeys", c_uint), + + # The scaling keys of this animation channel. Scalings are + # specified as 3D vector. The array is mNumScalingKeys in size. + # If there are scaling keys, there will also be at least one + # position and one rotation key. + ("mScalingKeys", POINTER(VectorKey)), + + # Defines how the animation behaves before the first + # key is encountered. + # The default value is aiAnimBehaviour_DEFAULT (the original + # transformation matrix of the affected node is used). + ("mPreState", c_uint), + + # Defines how the animation behaves after the last + # key was processed. + # The default value is aiAnimBehaviour_DEFAULT (the original + # transformation matrix of the affected node is taken). + ("mPostState", c_uint), + ] + +class MeshAnim(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # Name of the mesh to be animated. An empty string is not allowed, + # animated meshes need to be named (not necessarily uniquely, + # the name can basically serve as wild-card to select a group + # of meshes with similar animation setup) + ("mName", String), + + # Size of the #mKeys array. Must be 1, at least. + ("mNumKeys", c_uint), + + # Key frames of the animation. May not be NULL. + ("mKeys", POINTER(MeshKey)), + ] + +class MeshMorphAnim(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # Name of the mesh to be animated. An empty string is not allowed, + # animated meshes need to be named (not necessarily uniquely, + # the name can basically serve as wildcard to select a group + # of meshes with similar animation setup) + ("mName", String), + + # Size of the #mKeys array. Must be 1, at least. + ("mNumKeys", c_uint), + + # Key frames of the animation. May not be NULL. + ("mKeys", POINTER(MeshMorphKey)), + ] + + +class Animation(Structure): + """ + See 'anim.h' for details. + """ + + _fields_ = [ + # The name of the animation. If the modeling package this data was + # exported from does support only a single animation channel, this + # name is usually empty (length is zero). + ("mName", String), + + # Duration of the animation in ticks. + ("mDuration", c_double), + + # Ticks per second. 0 if not specified in the imported file + ("mTicksPerSecond", c_double), + + # The number of bone animation channels. Each channel affects + # a single node. + ("mNumChannels", c_uint), + + # The node animation channels. Each channel affects a single node. + # The array is mNumChannels in size. + ("mChannels", POINTER(POINTER(NodeAnim))), + + # The number of mesh animation channels. Each channel affects + # a single mesh and defines vertex-based animation. + ("mNumMeshChannels", c_uint), + + # The mesh animation channels. Each channel affects a single mesh. + # The array is mNumMeshChannels in size. + ("mMeshChannels", POINTER(POINTER(MeshAnim))), + + # The number of mesh animation channels. Each channel affects + # a single mesh and defines morphing animation. + ("mNumMorphMeshChannels", c_uint), + + # The morph mesh animation channels. 
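Sampling a `NodeAnim` channel means finding the two bracketing keys and interpolating. A simplified sketch for the position track, given a Python list of `VectorKey` (e.g. `[anim.mPositionKeys[i] for i in range(anim.mNumPositionKeys)]`); it clamps outside the key range instead of honouring mPreState/mPostState:

```python
def sample_position(position_keys, t):
    """Linearly interpolate a position track at time t (in ticks)."""
    if t <= position_keys[0].mTime:
        v = position_keys[0].mValue
        return (v.x, v.y, v.z)
    for k0, k1 in zip(position_keys, position_keys[1:]):
        if k0.mTime <= t <= k1.mTime:
            span = k1.mTime - k0.mTime
            f = (t - k0.mTime) / span if span > 0.0 else 0.0
            return (k0.mValue.x + f * (k1.mValue.x - k0.mValue.x),
                    k0.mValue.y + f * (k1.mValue.y - k0.mValue.y),
                    k0.mValue.z + f * (k1.mValue.z - k0.mValue.z))
    v = position_keys[-1].mValue
    return (v.x, v.y, v.z)
```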
Each channel affects a single mesh. + # The array is mNumMorphMeshChannels in size. + ("mMorphMeshChannels", POINTER(POINTER(MeshMorphAnim))), + + ] + +class ExportDataBlob(Structure): + """ + See 'cexport.h' for details. + + Note that the '_fields_' definition is outside the class to allow the 'next' field to be recursive + """ + pass + +ExportDataBlob._fields_ = [ + # Size of the data in bytes + ("size", c_size_t), + + # The data. + ("data", c_void_p), + + # Name of the blob. An empty string always + # indicates the first (and primary) blob, + # which contains the actual file data. + # Any other blobs are auxiliary files produced + # by exporters (i.e. material files). Existence + # of such files depends on the file format. Most + # formats don't split assets across multiple files. + # + # If used, blob names usually contain the file + # extension that should be used when writing + # the data to disc. + ("name", String), + + # Pointer to the next blob in the chain or NULL if there is none. + ("next", POINTER(ExportDataBlob)), + ] + + +class Scene(Structure): + """ + See 'aiScene.h' for details. + """ + + AI_SCENE_FLAGS_INCOMPLETE = 0x1 + AI_SCENE_FLAGS_VALIDATED = 0x2 + AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4 + AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8 + AI_SCENE_FLAGS_TERRAIN = 0x10 + AI_SCENE_FLAGS_ALLOW_SHARED = 0x20 + + _fields_ = [ + # Any combination of the AI_SCENE_FLAGS_XXX flags. By default + # this value is 0, no flags are set. Most applications will + # want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE + # bit set. + ("mFlags", c_uint), + + # The root node of the hierarchy. + # There will always be at least the root node if the import + # was successful (and no special flags have been set). + # Presence of further nodes depends on the format and content + # of the imported file. + ("mRootNode", POINTER(Node)), + + # The number of meshes in the scene. + ("mNumMeshes", c_uint), + + # The array of meshes. + # Use the indices given in the aiNode structure to access + # this array. The array is mNumMeshes in size. If the + # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always + # be at least ONE material. + ("mMeshes", POINTER(POINTER(Mesh))), + + # The number of materials in the scene. + ("mNumMaterials", c_uint), + + # The array of materials. + # Use the index given in each aiMesh structure to access this + # array. The array is mNumMaterials in size. If the + # AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always + # be at least ONE material. + ("mMaterials", POINTER(POINTER(Material))), + + # The number of animations in the scene. + ("mNumAnimations", c_uint), + + # The array of animations. + # All animations imported from the given file are listed here. + # The array is mNumAnimations in size. + ("mAnimations", POINTER(POINTER(Animation))), + + # The number of textures embedded into the file + ("mNumTextures", c_uint), + + # The array of embedded textures. + # Not many file formats embed their textures into the file. + # An example is Quake's MDL format (which is also used by + # some GameStudio versions) + ("mTextures", POINTER(POINTER(Texture))), + + # The number of light sources in the scene. Light sources + # are fully optional, in most cases this attribute will be 0 + ("mNumLights", c_uint), + + # The array of light sources. + # All light sources imported from the given file are + # listed here. The array is mNumLights in size. + ("mLights", POINTER(POINTER(Light))), + + # The number of cameras in the scene. 
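The `next` pointer makes `ExportDataBlob` a linked list; walking it looks like this (a sketch only, the generator name is mine):

```python
def iter_blobs(blob_ptr):
    """Walk the chain of ExportDataBlob: the first blob is the main file,
    any following blobs are auxiliary files (e.g. material libraries)."""
    while blob_ptr:                      # a NULL ctypes pointer is falsy
        blob = blob_ptr.contents
        name = blob.name.data.decode('utf-8', errors='replace') or '<main file>'
        yield name, blob.size
        blob_ptr = blob.next
```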
Cameras + # are fully optional, in most cases this attribute will be 0 + ("mNumCameras", c_uint), + + # The array of cameras. + # All cameras imported from the given file are listed here. + # The array is mNumCameras in size. The first camera in the + # array (if existing) is the default camera view into + # the scene. + ("mCameras", POINTER(POINTER(Camera))), + + # This data contains global metadata which belongs to the scene like + # unit-conversions, versions, vendors or other model-specific data. This + # can be used to store format-specific metadata as well. + ("mMetadata", POINTER(Metadata)), + + # Internal data, do not touch + ("mPrivate", POINTER(c_char)), + ] + +assimp_structs_as_tuple = (Matrix4x4, + Matrix3x3, + Vector2D, + Vector3D, + Color3D, + Color4D, + Quaternion, + Plane, + Texel) diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/3d_viewer.py b/src/mesh/assimp-master/port/PyAssimp/scripts/3d_viewer.py new file mode 100755 index 0000000..08a6266 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/3d_viewer.py @@ -0,0 +1,1318 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +""" This program loads a model with PyASSIMP, and display it. + +Based on: +- pygame code from http://3dengine.org/Spectator_%28PyOpenGL%29 +- http://www.lighthouse3d.com/tutorials +- http://www.songho.ca/opengl/gl_transform.html +- http://code.activestate.com/recipes/325391/ +- ASSIMP's C++ SimpleOpenGL viewer + +Authors: Séverin Lemaignan, 2012-2016 +""" +import sys +import logging + +logger = logging.getLogger("pyassimp") +gllogger = logging.getLogger("OpenGL") +gllogger.setLevel(logging.WARNING) +logging.basicConfig(level=logging.INFO) + +import OpenGL + +OpenGL.ERROR_CHECKING = False +OpenGL.ERROR_LOGGING = False +# OpenGL.ERROR_ON_COPY = True +# OpenGL.FULL_LOGGING = True +from OpenGL.GL import * +from OpenGL.arrays import vbo +from OpenGL.GL import shaders + +import pygame +import pygame.font +import pygame.image + +import math, random +from numpy import linalg + +import pyassimp +from pyassimp.postprocess import * +from pyassimp.helper import * +import transformations + +ROTATION_180_X = numpy.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=numpy.float32) + +# rendering mode +BASE = "BASE" +COLORS = "COLORS" +SILHOUETTE = "SILHOUETTE" +HELPERS = "HELPERS" + +# Entities type +ENTITY = "entity" +CAMERA = "camera" +MESH = "mesh" + +FLAT_VERTEX_SHADER_120 = """ +#version 120 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; + +uniform vec4 u_materialDiffuse; + +attribute vec3 a_vertex; + +varying vec4 v_color; + +void main(void) +{ + v_color = u_materialDiffuse; + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +FLAT_VERTEX_SHADER_130 = """ +#version 130 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; + +uniform vec4 u_materialDiffuse; + +in vec3 a_vertex; + +out vec4 v_color; + +void main(void) +{ + v_color = u_materialDiffuse; + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +BASIC_VERTEX_SHADER_120 = """ +#version 120 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; + +uniform vec4 u_materialDiffuse; + +attribute vec3 a_vertex; +attribute vec3 a_normal; + +varying vec4 v_color; + +void main(void) +{ + // Now the normal is in world space, as we pass the light in world space. 
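The mFlags advice above (reject incomplete scenes) is a one-liner against the raw `Scene` struct:

```python
from pyassimp.structs import Scene

def scene_is_usable(scene):
    """Most applications reject scenes with AI_SCENE_FLAGS_INCOMPLETE set,
    as recommended in the mFlags comment above."""
    return not (scene.mFlags & Scene.AI_SCENE_FLAGS_INCOMPLETE)
```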
+ vec3 normal = u_normalMatrix * a_normal; + + float dist = distance(a_vertex, u_lightPos); + + // go to https://www.desmos.com/calculator/nmnaud1hrw to play with the parameters + // att is not used for now + float att=1.0/(1.0+0.8*dist*dist); + + vec3 surf2light = normalize(u_lightPos - a_vertex); + vec3 norm = normalize(normal); + float dcont=max(0.0,dot(norm,surf2light)); + + float ambient = 0.3; + float intensity = dcont + 0.3 + ambient; + + v_color = u_materialDiffuse * intensity; + + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +BASIC_VERTEX_SHADER_130 = """ +#version 130 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; + +uniform vec4 u_materialDiffuse; + +in vec3 a_vertex; +in vec3 a_normal; + +out vec4 v_color; + +void main(void) +{ + // Now the normal is in world space, as we pass the light in world space. + vec3 normal = u_normalMatrix * a_normal; + + float dist = distance(a_vertex, u_lightPos); + + // go to https://www.desmos.com/calculator/nmnaud1hrw to play with the parameters + // att is not used for now + float att=1.0/(1.0+0.8*dist*dist); + + vec3 surf2light = normalize(u_lightPos - a_vertex); + vec3 norm = normalize(normal); + float dcont=max(0.0,dot(norm,surf2light)); + + float ambient = 0.3; + float intensity = dcont + 0.3 + ambient; + + v_color = u_materialDiffuse * intensity; + + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +BASIC_FRAGMENT_SHADER_120 = """ +#version 120 + +varying vec4 v_color; + +void main() { + gl_FragColor = v_color; +} +""" + +BASIC_FRAGMENT_SHADER_130 = """ +#version 130 + +in vec4 v_color; + +void main() { + gl_FragColor = v_color; +} +""" + +GOOCH_VERTEX_SHADER_120 = """ +#version 120 + +// attributes +attribute vec3 a_vertex; // xyz - position +attribute vec3 a_normal; // xyz - normal + +// uniforms +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; +uniform vec3 u_camPos; + +// output data from vertex to fragment shader +varying vec3 o_normal; +varying vec3 o_lightVector; + +/////////////////////////////////////////////////////////////////// + +void main(void) +{ + // transform position and normal to world space + vec4 positionWorld = u_modelMatrix * vec4(a_vertex, 1.0); + vec3 normalWorld = u_normalMatrix * a_normal; + + // calculate and pass vectors required for lighting + o_lightVector = u_lightPos - positionWorld.xyz; + o_normal = normalWorld; + + // project world space position to the screen and output it + gl_Position = u_viewProjectionMatrix * positionWorld; +} +""" + +GOOCH_VERTEX_SHADER_130 = """ +#version 130 + +// attributes +in vec3 a_vertex; // xyz - position +in vec3 a_normal; // xyz - normal + +// uniforms +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; +uniform vec3 u_camPos; + +// output data from vertex to fragment shader +out vec3 o_normal; +out vec3 o_lightVector; + +/////////////////////////////////////////////////////////////////// + +void main(void) +{ + // transform position and normal to world space + vec4 positionWorld = u_modelMatrix * vec4(a_vertex, 1.0); + vec3 normalWorld = u_normalMatrix * a_normal; + + // calculate and pass vectors required for lighting + o_lightVector = u_lightPos - positionWorld.xyz; + o_normal = normalWorld; + + // project world space position to the screen and output it + gl_Position = 
u_viewProjectionMatrix * positionWorld; +} +""" + +GOOCH_FRAGMENT_SHADER_120 = """ +#version 120 + +// data from vertex shader +varying vec3 o_normal; +varying vec3 o_lightVector; + +// diffuse color of the object +uniform vec4 u_materialDiffuse; +// cool color of gooch shading +uniform vec3 u_coolColor; +// warm color of gooch shading +uniform vec3 u_warmColor; +// how much to take from object color in final cool color +uniform float u_alpha; +// how much to take from object color in final warm color +uniform float u_beta; + +/////////////////////////////////////////////////////////// + +void main(void) +{ + // normlize vectors for lighting + vec3 normalVector = normalize(o_normal); + vec3 lightVector = normalize(o_lightVector); + // intensity of diffuse lighting [-1, 1] + float diffuseLighting = dot(lightVector, normalVector); + // map intensity of lighting from range [-1; 1] to [0, 1] + float interpolationValue = (1.0 + diffuseLighting)/2; + + ////////////////////////////////////////////////////////////////// + + // cool color mixed with color of the object + vec3 coolColorMod = u_coolColor + vec3(u_materialDiffuse) * u_alpha; + // warm color mixed with color of the object + vec3 warmColorMod = u_warmColor + vec3(u_materialDiffuse) * u_beta; + // interpolation of cool and warm colors according + // to lighting intensity. The lower the light intensity, + // the larger part of the cool color is used + vec3 colorOut = mix(coolColorMod, warmColorMod, interpolationValue); + + ////////////////////////////////////////////////////////////////// + + // save color + gl_FragColor.rgb = colorOut; + gl_FragColor.a = 1; +} +""" + +GOOCH_FRAGMENT_SHADER_130 = """ +#version 130 + +// data from vertex shader +in vec3 o_normal; +in vec3 o_lightVector; + +// diffuse color of the object +uniform vec4 u_materialDiffuse; +// cool color of gooch shading +uniform vec3 u_coolColor; +// warm color of gooch shading +uniform vec3 u_warmColor; +// how much to take from object color in final cool color +uniform float u_alpha; +// how much to take from object color in final warm color +uniform float u_beta; + +// output to framebuffer +out vec4 resultingColor; + +/////////////////////////////////////////////////////////// + +void main(void) +{ + // normlize vectors for lighting + vec3 normalVector = normalize(o_normal); + vec3 lightVector = normalize(o_lightVector); + // intensity of diffuse lighting [-1, 1] + float diffuseLighting = dot(lightVector, normalVector); + // map intensity of lighting from range [-1; 1] to [0, 1] + float interpolationValue = (1.0 + diffuseLighting)/2; + + ////////////////////////////////////////////////////////////////// + + // cool color mixed with color of the object + vec3 coolColorMod = u_coolColor + vec3(u_materialDiffuse) * u_alpha; + // warm color mixed with color of the object + vec3 warmColorMod = u_warmColor + vec3(u_materialDiffuse) * u_beta; + // interpolation of cool and warm colors according + // to lighting intensity. 
The lower the light intensity, + // the larger part of the cool color is used + vec3 colorOut = mix(coolColorMod, warmColorMod, interpolationValue); + + ////////////////////////////////////////////////////////////////// + + // save color + resultingColor.rgb = colorOut; + resultingColor.a = 1; +} +""" + +SILHOUETTE_VERTEX_SHADER_120 = """ +#version 120 + +attribute vec3 a_vertex; // xyz - position +attribute vec3 a_normal; // xyz - normal + +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelViewMatrix; +uniform vec4 u_materialDiffuse; +uniform float u_bordersize; // width of the border + +varying vec4 v_color; + +void main(void){ + v_color = u_materialDiffuse; + float distToCamera = -(u_modelViewMatrix * vec4(a_vertex, 1.0)).z; + vec4 tPos = vec4(a_vertex + a_normal * u_bordersize * distToCamera, 1.0); + gl_Position = u_viewProjectionMatrix * u_modelMatrix * tPos; +} +""" + +SILHOUETTE_VERTEX_SHADER_130 = """ +#version 130 + +in vec3 a_vertex; // xyz - position +in vec3 a_normal; // xyz - normal + +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelViewMatrix; +uniform vec4 u_materialDiffuse; +uniform float u_bordersize; // width of the border + +out vec4 v_color; + +void main(void){ + v_color = u_materialDiffuse; + float distToCamera = -(u_modelViewMatrix * vec4(a_vertex, 1.0)).z; + vec4 tPos = vec4(a_vertex + a_normal * u_bordersize * distToCamera, 1.0); + gl_Position = u_viewProjectionMatrix * u_modelMatrix * tPos; +} +""" +DEFAULT_CLIP_PLANE_NEAR = 0.001 +DEFAULT_CLIP_PLANE_FAR = 1000.0 + + +def get_world_transform(scene, node): + if node == scene.rootnode: + return numpy.identity(4, dtype=numpy.float32) + + parents = reversed(_get_parent_chain(scene, node, [])) + parent_transform = reduce(numpy.dot, [p.transformation for p in parents]) + return numpy.dot(parent_transform, node.transformation) + + +def _get_parent_chain(scene, node, parents): + parent = node.parent + + parents.append(parent) + + if parent == scene.rootnode: + return parents + + return _get_parent_chain(scene, parent, parents) + + +class DefaultCamera: + def __init__(self, w, h, fov): + self.name = "default camera" + self.type = CAMERA + self.clipplanenear = DEFAULT_CLIP_PLANE_NEAR + self.clipplanefar = DEFAULT_CLIP_PLANE_FAR + self.aspect = w / h + self.horizontalfov = fov * math.pi / 180 + self.transformation = numpy.array([[0.68, -0.32, 0.65, 7.48], + [0.73, 0.31, -0.61, -6.51], + [-0.01, 0.89, 0.44, 5.34], + [0., 0., 0., 1.]], dtype=numpy.float32) + + self.transformation = numpy.dot(self.transformation, ROTATION_180_X) + + def __str__(self): + return self.name + + +class PyAssimp3DViewer: + base_name = "PyASSIMP 3D viewer" + + def __init__(self, model, w=1024, h=768): + + self.w = w + self.h = h + + pygame.init() + pygame.display.set_caption(self.base_name) + pygame.display.set_mode((w, h), pygame.OPENGL | pygame.DOUBLEBUF) + + glClearColor(0.18, 0.18, 0.18, 1.0) + + shader_compilation_succeeded = False + try: + self.set_shaders_v130() + self.prepare_shaders() + except RuntimeError, message: + sys.stderr.write("%s\n" % message) + sys.stdout.write("Could not compile shaders in version 1.30, trying version 1.20\n") + + if not shader_compilation_succeeded: + self.set_shaders_v120() + self.prepare_shaders() + + self.scene = None + self.meshes = {} # stores the OpenGL vertex/faces/normals buffers pointers + + self.node2colorid = {} # stores a color ID for each node. 
Useful for mouse picking and visibility checking + self.colorid2node = {} # reverse dict of node2colorid + + self.currently_selected = None + self.moving = False + self.moving_situation = None + + self.default_camera = DefaultCamera(self.w, self.h, fov=70) + self.cameras = [self.default_camera] + + self.current_cam_index = 0 + self.current_cam = self.default_camera + self.set_camera_projection() + + self.load_model(model) + + # user interactions + self.focal_point = [0, 0, 0] + self.is_rotating = False + self.is_panning = False + self.is_zooming = False + + def set_shaders_v120(self): + self.BASIC_VERTEX_SHADER = BASIC_VERTEX_SHADER_120 + self.FLAT_VERTEX_SHADER = FLAT_VERTEX_SHADER_120 + self.SILHOUETTE_VERTEX_SHADER = SILHOUETTE_VERTEX_SHADER_120 + self.GOOCH_VERTEX_SHADER = GOOCH_VERTEX_SHADER_120 + + self.BASIC_FRAGMENT_SHADER = BASIC_FRAGMENT_SHADER_120 + self.GOOCH_FRAGMENT_SHADER = GOOCH_FRAGMENT_SHADER_120 + + def set_shaders_v130(self): + self.BASIC_VERTEX_SHADER = BASIC_VERTEX_SHADER_130 + self.FLAT_VERTEX_SHADER = FLAT_VERTEX_SHADER_130 + self.SILHOUETTE_VERTEX_SHADER = SILHOUETTE_VERTEX_SHADER_130 + self.GOOCH_VERTEX_SHADER = GOOCH_VERTEX_SHADER_130 + + self.BASIC_FRAGMENT_SHADER = BASIC_FRAGMENT_SHADER_130 + self.GOOCH_FRAGMENT_SHADER = GOOCH_FRAGMENT_SHADER_130 + + def prepare_shaders(self): + + ### Base shader + vertex = shaders.compileShader(self.BASIC_VERTEX_SHADER, GL_VERTEX_SHADER) + fragment = shaders.compileShader(self.BASIC_FRAGMENT_SHADER, GL_FRAGMENT_SHADER) + + self.shader = shaders.compileProgram(vertex, fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_normalMatrix', + 'u_lightPos', + 'u_materialDiffuse'), + ('a_vertex', + 'a_normal'), self.shader) + + ### Flat shader + flatvertex = shaders.compileShader(self.FLAT_VERTEX_SHADER, GL_VERTEX_SHADER) + self.flatshader = shaders.compileProgram(flatvertex, fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_materialDiffuse',), + ('a_vertex',), self.flatshader) + + ### Silhouette shader + silh_vertex = shaders.compileShader(self.SILHOUETTE_VERTEX_SHADER, GL_VERTEX_SHADER) + self.silhouette_shader = shaders.compileProgram(silh_vertex, fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_modelViewMatrix', + 'u_materialDiffuse', + 'u_bordersize' # width of the silhouette + ), + ('a_vertex', + 'a_normal'), self.silhouette_shader) + + ### Gooch shader + gooch_vertex = shaders.compileShader(self.GOOCH_VERTEX_SHADER, GL_VERTEX_SHADER) + gooch_fragment = shaders.compileShader(self.GOOCH_FRAGMENT_SHADER, GL_FRAGMENT_SHADER) + self.gooch_shader = shaders.compileProgram(gooch_vertex, gooch_fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_normalMatrix', + 'u_lightPos', + 'u_materialDiffuse', + 'u_coolColor', + 'u_warmColor', + 'u_alpha', + 'u_beta' + ), + ('a_vertex', + 'a_normal'), self.gooch_shader) + + @staticmethod + def set_shader_accessors(uniforms, attributes, shader): + # add accessors to the shaders uniforms and attributes + for uniform in uniforms: + location = glGetUniformLocation(shader, uniform) + if location in (None, -1): + raise RuntimeError('No uniform: %s (maybe it is not used ' + 'anymore and has been optimized out by' + ' the shader compiler)' % uniform) + setattr(shader, uniform, location) + + for attribute in attributes: + location = glGetAttribLocation(shader, attribute) + if location in (None, -1): + raise RuntimeError('No attribute: %s' % attribute) 
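+ # cache the resolved location directly on the shader object
+ # (e.g. shader.a_vertex, shader.u_modelMatrix) so the rendering
+ # code can reuse it every frame without querying OpenGL again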
+ setattr(shader, attribute, location) + + @staticmethod + def prepare_gl_buffers(mesh): + + mesh.gl = {} + + # Fill the buffer for vertex and normals positions + v = numpy.array(mesh.vertices, 'f') + n = numpy.array(mesh.normals, 'f') + + mesh.gl["vbo"] = vbo.VBO(numpy.hstack((v, n))) + + # Fill the buffer for vertex positions + mesh.gl["faces"] = glGenBuffers(1) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["faces"]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER, + numpy.array(mesh.faces, dtype=numpy.int32), + GL_STATIC_DRAW) + + mesh.gl["nbfaces"] = len(mesh.faces) + + # Unbind buffers + glBindBuffer(GL_ARRAY_BUFFER, 0) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) + + @staticmethod + def get_rgb_from_colorid(colorid): + r = (colorid >> 0) & 0xff + g = (colorid >> 8) & 0xff + b = (colorid >> 16) & 0xff + + return r, g, b + + def get_color_id(self): + id = random.randint(0, 256 * 256 * 256) + if id not in self.colorid2node: + return id + else: + return self.get_color_id() + + def glize(self, scene, node): + + logger.info("Loading node <%s>" % node) + node.selected = True if self.currently_selected and self.currently_selected == node else False + + node.transformation = node.transformation.astype(numpy.float32) + + if node.meshes: + node.type = MESH + colorid = self.get_color_id() + self.colorid2node[colorid] = node + self.node2colorid[node.name] = colorid + + elif node.name in [c.name for c in scene.cameras]: + + # retrieve the ASSIMP camera object + [cam] = [c for c in scene.cameras if c.name == node.name] + node.type = CAMERA + logger.info("Added camera <%s>" % node.name) + logger.info("Camera position: %.3f, %.3f, %.3f" % tuple(node.transformation[:, 3][:3].tolist())) + self.cameras.append(node) + node.clipplanenear = cam.clipplanenear + node.clipplanefar = cam.clipplanefar + + if numpy.allclose(cam.lookat, [0, 0, -1]) and numpy.allclose(cam.up, [0, 1, 0]): # Cameras in .blend files + + # Rotate by 180deg around X to have Z pointing forward + node.transformation = numpy.dot(node.transformation, ROTATION_180_X) + else: + raise RuntimeError( + "I do not know how to normalize this camera orientation: lookat=%s, up=%s" % (cam.lookat, cam.up)) + + if cam.aspect == 0.0: + logger.warning("Camera aspect not set. Setting to default 4:3") + node.aspect = 1.333 + else: + node.aspect = cam.aspect + + node.horizontalfov = cam.horizontalfov + + else: + node.type = ENTITY + + for child in node.children: + self.glize(scene, child) + + def load_model(self, path, postprocess=aiProcessPreset_TargetRealtime_MaxQuality): + logger.info("Loading model:" + path + "...") + + if postprocess: + self.scene = pyassimp.load(path, processing=postprocess) + else: + self.scene = pyassimp.load(path) + logger.info("Done.") + + scene = self.scene + # log some statistics + logger.info(" meshes: %d" % len(scene.meshes)) + logger.info(" total faces: %d" % sum([len(mesh.faces) for mesh in scene.meshes])) + logger.info(" materials: %d" % len(scene.materials)) + self.bb_min, self.bb_max = get_bounding_box(self.scene) + logger.info(" bounding box:" + str(self.bb_min) + " - " + str(self.bb_max)) + + self.scene_center = [(a + b) / 2. 
for a, b in zip(self.bb_min, self.bb_max)] + + for index, mesh in enumerate(scene.meshes): + self.prepare_gl_buffers(mesh) + + self.glize(scene, scene.rootnode) + + # Finally release the model + pyassimp.release(scene) + logger.info("Ready for 3D rendering!") + + def cycle_cameras(self): + + self.current_cam_index = (self.current_cam_index + 1) % len(self.cameras) + self.current_cam = self.cameras[self.current_cam_index] + self.set_camera_projection(self.current_cam) + logger.info("Switched to camera <%s>" % self.current_cam) + + def set_overlay_projection(self): + glViewport(0, 0, self.w, self.h) + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + glOrtho(0.0, self.w - 1.0, 0.0, self.h - 1.0, -1.0, 1.0) + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + + def set_camera_projection(self, camera=None): + + if not camera: + camera = self.current_cam + + znear = camera.clipplanenear or DEFAULT_CLIP_PLANE_NEAR + zfar = camera.clipplanefar or DEFAULT_CLIP_PLANE_FAR + aspect = camera.aspect + fov = camera.horizontalfov + + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + + # Compute gl frustrum + tangent = math.tan(fov / 2.) + h = znear * tangent + w = h * aspect + + # params: left, right, bottom, top, near, far + glFrustum(-w, w, -h, h, znear, zfar) + # equivalent to: + # gluPerspective(fov * 180/math.pi, aspect, znear, zfar) + + self.projection_matrix = glGetFloatv(GL_PROJECTION_MATRIX).transpose() + + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + + def render_colors(self): + + glEnable(GL_DEPTH_TEST) + glDepthFunc(GL_LEQUAL) + + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) + glEnable(GL_CULL_FACE) + + glUseProgram(self.flatshader) + + glUniformMatrix4fv(self.flatshader.u_viewProjectionMatrix, 1, GL_TRUE, + numpy.dot(self.projection_matrix, self.view_matrix)) + + self.recursive_render(self.scene.rootnode, self.flatshader, mode=COLORS) + + glUseProgram(0) + + def get_hovered_node(self, mousex, mousey): + """ + Attention: The performances of this method relies heavily on the size of the display! + """ + + # mouse out of the window? 
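+ # (if so, nothing can be hovered; otherwise the scene is re-rendered
+ # off-screen with one flat color per node, see render_colors() and
+ # get_color_id(), and the pixel under the mouse is mapped back to a
+ # node through self.colorid2node)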
+ if mousex < 0 or mousex >= self.w or mousey < 0 or mousey >= self.h: + return None + + self.render_colors() + # Capture image from the OpenGL buffer + buf = (GLubyte * (3 * self.w * self.h))(0) + glReadPixels(0, 0, self.w, self.h, GL_RGB, GL_UNSIGNED_BYTE, buf) + + # Reinterpret the RGB pixel buffer as a 1-D array of 24bits colors + a = numpy.ndarray(len(buf), numpy.dtype('>u1'), buf) + colors = numpy.zeros(len(buf) / 3, numpy.dtype('<u4')) + for i in range(3): + colors.view(dtype='>u1')[i::4] = a.view(dtype='>u1')[i::3] + + colorid = colors[mousex + mousey * self.w] + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + + if colorid in self.colorid2node: + return self.colorid2node[colorid] + + def render(self, wireframe=False, twosided=False): + + glEnable(GL_DEPTH_TEST) + glDepthFunc(GL_LEQUAL) + + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE if wireframe else GL_FILL) + glDisable(GL_CULL_FACE) if twosided else glEnable(GL_CULL_FACE) + + self.render_grid() + + self.recursive_render(self.scene.rootnode, None, mode=HELPERS) + + ### First, the silhouette + + if False: + shader = self.silhouette_shader + + # glDepthMask(GL_FALSE) + glCullFace(GL_FRONT) # cull front faces + + glUseProgram(shader) + glUniform1f(shader.u_bordersize, 0.01) + + glUniformMatrix4fv(shader.u_viewProjectionMatrix, 1, GL_TRUE, + numpy.dot(self.projection_matrix, self.view_matrix)) + + self.recursive_render(self.scene.rootnode, shader, mode=SILHOUETTE) + + glUseProgram(0) + + ### Then, inner shading + # glDepthMask(GL_TRUE) + glCullFace(GL_BACK) + + use_gooch = False + if use_gooch: + shader = self.gooch_shader + + glUseProgram(shader) + glUniform3f(shader.u_lightPos, -.5, -.5, .5) + + ##### GOOCH specific + glUniform3f(shader.u_coolColor, 159.0 / 255, 148.0 / 255, 255.0 / 255) + glUniform3f(shader.u_warmColor, 255.0 / 255, 75.0 / 255, 75.0 / 255) + glUniform1f(shader.u_alpha, .25) + glUniform1f(shader.u_beta, .25) + ######### + else: + shader = self.shader + glUseProgram(shader) + glUniform3f(shader.u_lightPos, -.5, -.5, .5) + + glUniformMatrix4fv(shader.u_viewProjectionMatrix, 1, GL_TRUE, + numpy.dot(self.projection_matrix, self.view_matrix)) + + self.recursive_render(self.scene.rootnode, shader) + + glUseProgram(0) + + def render_axis(self, + transformation=numpy.identity(4, dtype=numpy.float32), + label=None, + size=0.2, + selected=False): + m = transformation.transpose() # OpenGL row major + + glPushMatrix() + glMultMatrixf(m) + + glLineWidth(3 if selected else 1) + + size = 2 * size if selected else size + + glBegin(GL_LINES) + + # draw line for x axis + glColor3f(1.0, 0.0, 0.0) + glVertex3f(0.0, 0.0, 0.0) + glVertex3f(size, 0.0, 0.0) + + # draw line for y axis + glColor3f(0.0, 1.0, 0.0) + glVertex3f(0.0, 0.0, 0.0) + glVertex3f(0.0, size, 0.0) + + # draw line for Z axis + glColor3f(0.0, 0.0, 1.0) + glVertex3f(0.0, 0.0, 0.0) + glVertex3f(0.0, 0.0, size) + + glEnd() + + if label: + self.showtext(label) + + glPopMatrix() + + @staticmethod + def render_camera(camera, transformation): + + m = transformation.transpose() # OpenGL row major + + aspect = camera.aspect + + u = 0.1 # unit size (in m) + l = 3 * u # length of the camera cone + f = 3 * u # aperture of the camera cone + + glPushMatrix() + glMultMatrixf(m) + + glLineWidth(2) + glBegin(GL_LINE_STRIP) + + glColor3f(.2, .2, .2) + + glVertex3f(u, u, -u) + glVertex3f(u, -u, -u) + glVertex3f(-u, -u, -u) + glVertex3f(-u, u, -u) + glVertex3f(u, u, -u) + + glVertex3f(u, u, 0.0) + glVertex3f(u, -u, 0.0) + glVertex3f(-u, -u, 0.0) + glVertex3f(-u, u, 0.0) + glVertex3f(u, u, 
0.0) + + glVertex3f(f * aspect, f, l) + glVertex3f(f * aspect, -f, l) + glVertex3f(-f * aspect, -f, l) + glVertex3f(-f * aspect, f, l) + glVertex3f(f * aspect, f, l) + + glEnd() + + glBegin(GL_LINE_STRIP) + glVertex3f(u, -u, -u) + glVertex3f(u, -u, 0.0) + glVertex3f(f * aspect, -f, l) + glEnd() + + glBegin(GL_LINE_STRIP) + glVertex3f(-u, -u, -u) + glVertex3f(-u, -u, 0.0) + glVertex3f(-f * aspect, -f, l) + glEnd() + + glBegin(GL_LINE_STRIP) + glVertex3f(-u, u, -u) + glVertex3f(-u, u, 0.0) + glVertex3f(-f * aspect, f, l) + glEnd() + + glPopMatrix() + + @staticmethod + def render_grid(): + + glLineWidth(1) + glColor3f(0.5, 0.5, 0.5) + glBegin(GL_LINES) + for i in range(-10, 11): + glVertex3f(i, -10.0, 0.0) + glVertex3f(i, 10.0, 0.0) + + for i in range(-10, 11): + glVertex3f(-10.0, i, 0.0) + glVertex3f(10.0, i, 0.0) + glEnd() + + def recursive_render(self, node, shader, mode=BASE, with_normals=True): + """ Main recursive rendering method. + """ + + normals = with_normals + + if mode == COLORS: + normals = False + + + if not hasattr(node, "selected"): + node.selected = False + + m = get_world_transform(self.scene, node) + + # HELPERS mode + ### + if mode == HELPERS: + # if node.type == ENTITY: + self.render_axis(m, + label=node.name if node != self.scene.rootnode else None, + selected=node.selected if hasattr(node, "selected") else False) + + if node.type == CAMERA: + self.render_camera(node, m) + + for child in node.children: + self.recursive_render(child, shader, mode) + + return + + # Mesh rendering modes + ### + if node.type == MESH: + + for mesh in node.meshes: + + stride = 24 # 6 * 4 bytes + + if node.selected and mode == SILHOUETTE: + glUniform4f(shader.u_materialDiffuse, 1.0, 0.0, 0.0, 1.0) + glUniformMatrix4fv(shader.u_modelViewMatrix, 1, GL_TRUE, + numpy.dot(self.view_matrix, m)) + + else: + if mode == COLORS: + colorid = self.node2colorid[node.name] + r, g, b = self.get_rgb_from_colorid(colorid) + glUniform4f(shader.u_materialDiffuse, r / 255.0, g / 255.0, b / 255.0, 1.0) + elif mode == SILHOUETTE: + glUniform4f(shader.u_materialDiffuse, .0, .0, .0, 1.0) + else: + if node.selected: + diffuse = (1.0, 0.0, 0.0, 1.0) # selected nodes in red + else: + diffuse = mesh.material.properties["diffuse"] + if len(diffuse) == 3: # RGB instead of expected RGBA + diffuse.append(1.0) + glUniform4f(shader.u_materialDiffuse, *diffuse) + # if ambient: + # glUniform4f( shader.Material_ambient, *mat["ambient"] ) + + if mode == BASE: # not in COLORS or SILHOUETTE + normal_matrix = linalg.inv(numpy.dot(self.view_matrix, m)[0:3, 0:3]).transpose() + glUniformMatrix3fv(shader.u_normalMatrix, 1, GL_TRUE, normal_matrix) + + glUniformMatrix4fv(shader.u_modelMatrix, 1, GL_TRUE, m) + + vbo = mesh.gl["vbo"] + vbo.bind() + + glEnableVertexAttribArray(shader.a_vertex) + if normals: + glEnableVertexAttribArray(shader.a_normal) + + glVertexAttribPointer( + shader.a_vertex, + 3, GL_FLOAT, False, stride, vbo + ) + + if normals: + glVertexAttribPointer( + shader.a_normal, + 3, GL_FLOAT, False, stride, vbo + 12 + ) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["faces"]) + glDrawElements(GL_TRIANGLES, mesh.gl["nbfaces"] * 3, GL_UNSIGNED_INT, None) + + vbo.unbind() + glDisableVertexAttribArray(shader.a_vertex) + + if normals: + glDisableVertexAttribArray(shader.a_normal) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) + + for child in node.children: + self.recursive_render(child, shader, mode) + + + def switch_to_overlay(self): + glPushMatrix() + self.set_overlay_projection() + + def switch_from_overlay(self): + 
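+ # restore the perspective projection of the current camera and pop
+ # the modelview matrix pushed by switch_to_overlay()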
self.set_camera_projection() + glPopMatrix() + + def select_node(self, node): + self.currently_selected = node + self.update_node_select(self.scene.rootnode) + + def update_node_select(self, node): + if node is self.currently_selected: + node.selected = True + else: + node.selected = False + + for child in node.children: + self.update_node_select(child) + + def loop(self): + + pygame.display.flip() + + if not self.process_events(): + return False # ESC has been pressed + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + + return True + + def process_events(self): + + LEFT_BUTTON = 1 + MIDDLE_BUTTON = 2 + RIGHT_BUTTON = 3 + WHEEL_UP = 4 + WHEEL_DOWN = 5 + + dx, dy = pygame.mouse.get_rel() + mousex, mousey = pygame.mouse.get_pos() + + zooming_one_shot = False + + ok = True + + for evt in pygame.event.get(): + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == LEFT_BUTTON: + hovered = self.get_hovered_node(mousex, self.h - mousey) + if hovered: + if self.currently_selected and self.currently_selected == hovered: + self.select_node(None) + else: + logger.info("Node %s selected" % hovered) + self.select_node(hovered) + else: + self.is_rotating = True + if evt.type == pygame.MOUSEBUTTONUP and evt.button == LEFT_BUTTON: + self.is_rotating = False + + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == MIDDLE_BUTTON: + self.is_panning = True + if evt.type == pygame.MOUSEBUTTONUP and evt.button == MIDDLE_BUTTON: + self.is_panning = False + + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == RIGHT_BUTTON: + self.is_zooming = True + if evt.type == pygame.MOUSEBUTTONUP and evt.button == RIGHT_BUTTON: + self.is_zooming = False + + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button in [WHEEL_UP, WHEEL_DOWN]: + zooming_one_shot = True + self.is_zooming = True + dy = -10 if evt.button == WHEEL_UP else 10 + + if evt.type == pygame.KEYDOWN: + ok = (ok and self.process_keystroke(evt.key, evt.mod)) + + self.controls_3d(dx, dy, zooming_one_shot) + + return ok + + def process_keystroke(self, key, mod): + + # process arrow keys if an object is selected + if self.currently_selected: + up = 0 + strafe = 0 + + if key == pygame.K_UP: + up = 1 + if key == pygame.K_DOWN: + up = -1 + if key == pygame.K_LEFT: + strafe = -1 + if key == pygame.K_RIGHT: + strafe = 1 + + self.move_selected_node(up, strafe) + + if key == pygame.K_f: + pygame.display.toggle_fullscreen() + + if key == pygame.K_TAB: + self.cycle_cameras() + + if key in [pygame.K_ESCAPE, pygame.K_q]: + return False + + return True + + def controls_3d(self, dx, dy, zooming_one_shot=False): + + CAMERA_TRANSLATION_FACTOR = 0.01 + CAMERA_ROTATION_FACTOR = 0.01 + + if not (self.is_rotating or self.is_panning or self.is_zooming): + return + + current_pos = self.current_cam.transformation[:3, 3].copy() + distance = numpy.linalg.norm(self.focal_point - current_pos) + + if self.is_rotating: + """ Orbiting the camera is implemented the following way: + + - the rotation is split into a rotation around the *world* Z axis + (controlled by the horizontal mouse motion along X) and a + rotation around the *X* axis of the camera (pitch) *shifted to + the focal origin* (the world origin for now). This is controlled + by the vertical motion of the mouse (Y axis). 
+ + - as a result, the resulting transformation of the camera in the + world frame C' is: + C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹ + + where: + - C is the original camera transformation in the world frame, + - Rz is the rotation along the Z axis (in the world frame) + - T is the translation camera -> world (ie, the inverse of the + translation part of C + - Rx is the rotation around X in the (translated) camera frame + """ + + rotation_camera_x = dy * CAMERA_ROTATION_FACTOR + rotation_world_z = dx * CAMERA_ROTATION_FACTOR + world_z_rotation = transformations.euler_matrix(0, 0, rotation_world_z) + cam_x_rotation = transformations.euler_matrix(rotation_camera_x, 0, 0) + + after_world_z_rotation = numpy.dot(world_z_rotation, self.current_cam.transformation) + + inverse_transformation = transformations.inverse_matrix(after_world_z_rotation) + + translation = transformations.translation_matrix( + transformations.decompose_matrix(inverse_transformation)[3]) + inverse_translation = transformations.inverse_matrix(translation) + + new_inverse = numpy.dot(inverse_translation, inverse_transformation) + new_inverse = numpy.dot(cam_x_rotation, new_inverse) + new_inverse = numpy.dot(translation, new_inverse) + + self.current_cam.transformation = transformations.inverse_matrix(new_inverse).astype(numpy.float32) + + if self.is_panning: + tx = -dx * CAMERA_TRANSLATION_FACTOR * distance + ty = dy * CAMERA_TRANSLATION_FACTOR * distance + cam_transform = transformations.translation_matrix((tx, ty, 0)).astype(numpy.float32) + self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform) + + if self.is_zooming: + tz = dy * CAMERA_TRANSLATION_FACTOR * distance + cam_transform = transformations.translation_matrix((0, 0, tz)).astype(numpy.float32) + self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform) + + if zooming_one_shot: + self.is_zooming = False + + self.update_view_camera() + + def update_view_camera(self): + + self.view_matrix = linalg.inv(self.current_cam.transformation) + + # Rotate by 180deg around X to have Z pointing backward (OpenGL convention) + self.view_matrix = numpy.dot(ROTATION_180_X, self.view_matrix) + + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + glMultMatrixf(self.view_matrix.transpose()) + + def move_selected_node(self, up, strafe): + self.currently_selected.transformation[0][3] += strafe + self.currently_selected.transformation[2][3] += up + + @staticmethod + def showtext(text, x=0, y=0, z=0, size=20): + + # TODO: alpha blending does not work... 
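+ # the commented-out calls below sketch the intended approach (enable
+ # GL_BLEND with a source-alpha blend function); they are left disabled
+ # because the overlay text does not blend correctly yet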
+ # glEnable(GL_BLEND) + # glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) + + font = pygame.font.Font(None, size) + text_surface = font.render(text, True, (10, 10, 10, 255), + (255 * 0.18, 255 * 0.18, 255 * 0.18, 0)) + text_data = pygame.image.tostring(text_surface, "RGBA", True) + glRasterPos3d(x, y, z) + glDrawPixels(text_surface.get_width(), + text_surface.get_height(), + GL_RGBA, GL_UNSIGNED_BYTE, + text_data) + + # glDisable(GL_BLEND) + + +def main(model, width, height): + app = PyAssimp3DViewer(model, w=width, h=height) + + clock = pygame.time.Clock() + + while app.loop(): + + app.update_view_camera() + + ## Main rendering + app.render() + + ## GUI text display + app.switch_to_overlay() + app.showtext("Active camera: %s" % str(app.current_cam), 10, app.h - 30) + if app.currently_selected: + app.showtext("Selected node: %s" % app.currently_selected, 10, app.h - 50) + pos = app.h - 70 + + app.showtext("(%sm, %sm, %sm)" % (app.currently_selected.transformation[0, 3], + app.currently_selected.transformation[1, 3], + app.currently_selected.transformation[2, 3]), 30, pos) + + app.switch_from_overlay() + + # Make sure we do not go over 30fps + clock.tick(30) + + logger.info("Quitting! Bye bye!") + + +######################################################################### +######################################################################### + +if __name__ == '__main__': + if not len(sys.argv) > 1: + print("Usage: " + __file__ + " <model>") + sys.exit(2) + + main(model=sys.argv[1], width=1024, height=768) diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/3d_viewer_py3.py b/src/mesh/assimp-master/port/PyAssimp/scripts/3d_viewer_py3.py new file mode 100755 index 0000000..fcee637 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/3d_viewer_py3.py @@ -0,0 +1,1316 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +""" This program loads a model with PyASSIMP, and display it. 
+ +Based on: +- pygame code from http://3dengine.org/Spectator_%28PyOpenGL%29 +- http://www.lighthouse3d.com/tutorials +- http://www.songho.ca/opengl/gl_transform.html +- http://code.activestate.com/recipes/325391/ +- ASSIMP's C++ SimpleOpenGL viewer + +Authors: Séverin Lemaignan, 2012-2016 +""" +import sys +import logging + +from functools import reduce + +logger = logging.getLogger("pyassimp") +gllogger = logging.getLogger("OpenGL") +gllogger.setLevel(logging.WARNING) +logging.basicConfig(level=logging.INFO) + +import OpenGL + +OpenGL.ERROR_CHECKING = False +OpenGL.ERROR_LOGGING = False +# OpenGL.ERROR_ON_COPY = True +# OpenGL.FULL_LOGGING = True +from OpenGL.GL import * +from OpenGL.arrays import vbo +from OpenGL.GL import shaders + +import pygame +import pygame.font +import pygame.image + +import math, random +from numpy import linalg + +import pyassimp +from pyassimp.postprocess import * +from pyassimp.helper import * +import transformations + +ROTATION_180_X = numpy.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=numpy.float32) + +# rendering mode +BASE = "BASE" +COLORS = "COLORS" +SILHOUETTE = "SILHOUETTE" +HELPERS = "HELPERS" + +# Entities type +ENTITY = "entity" +CAMERA = "camera" +MESH = "mesh" + +FLAT_VERTEX_SHADER_120 = """ +#version 120 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; + +uniform vec4 u_materialDiffuse; + +attribute vec3 a_vertex; + +varying vec4 v_color; + +void main(void) +{ + v_color = u_materialDiffuse; + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +FLAT_VERTEX_SHADER_130 = """ +#version 130 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; + +uniform vec4 u_materialDiffuse; + +in vec3 a_vertex; + +out vec4 v_color; + +void main(void) +{ + v_color = u_materialDiffuse; + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +BASIC_VERTEX_SHADER_120 = """ +#version 120 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; + +uniform vec4 u_materialDiffuse; + +attribute vec3 a_vertex; +attribute vec3 a_normal; + +varying vec4 v_color; + +void main(void) +{ + // Now the normal is in world space, as we pass the light in world space. + vec3 normal = u_normalMatrix * a_normal; + + float dist = distance(a_vertex, u_lightPos); + + // go to https://www.desmos.com/calculator/nmnaud1hrw to play with the parameters + // att is not used for now + float att=1.0/(1.0+0.8*dist*dist); + + vec3 surf2light = normalize(u_lightPos - a_vertex); + vec3 norm = normalize(normal); + float dcont=max(0.0,dot(norm,surf2light)); + + float ambient = 0.3; + float intensity = dcont + 0.3 + ambient; + + v_color = u_materialDiffuse * intensity; + + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +BASIC_VERTEX_SHADER_130 = """ +#version 130 + +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; + +uniform vec4 u_materialDiffuse; + +in vec3 a_vertex; +in vec3 a_normal; + +out vec4 v_color; + +void main(void) +{ + // Now the normal is in world space, as we pass the light in world space. 
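+ // (u_normalMatrix is filled on the Python side, in recursive_render(),
+ // with the inverse-transpose of the upper-left 3x3 block of the
+ // model-view matrix)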
+ vec3 normal = u_normalMatrix * a_normal; + + float dist = distance(a_vertex, u_lightPos); + + // go to https://www.desmos.com/calculator/nmnaud1hrw to play with the parameters + // att is not used for now + float att=1.0/(1.0+0.8*dist*dist); + + vec3 surf2light = normalize(u_lightPos - a_vertex); + vec3 norm = normalize(normal); + float dcont=max(0.0,dot(norm,surf2light)); + + float ambient = 0.3; + float intensity = dcont + 0.3 + ambient; + + v_color = u_materialDiffuse * intensity; + + gl_Position = u_viewProjectionMatrix * u_modelMatrix * vec4(a_vertex, 1.0); +} +""" + +BASIC_FRAGMENT_SHADER_120 = """ +#version 120 + +varying vec4 v_color; + +void main() { + gl_FragColor = v_color; +} +""" + +BASIC_FRAGMENT_SHADER_130 = """ +#version 130 + +in vec4 v_color; + +void main() { + gl_FragColor = v_color; +} +""" + +GOOCH_VERTEX_SHADER_120 = """ +#version 120 + +// attributes +attribute vec3 a_vertex; // xyz - position +attribute vec3 a_normal; // xyz - normal + +// uniforms +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; +uniform vec3 u_camPos; + +// output data from vertex to fragment shader +varying vec3 o_normal; +varying vec3 o_lightVector; + +/////////////////////////////////////////////////////////////////// + +void main(void) +{ + // transform position and normal to world space + vec4 positionWorld = u_modelMatrix * vec4(a_vertex, 1.0); + vec3 normalWorld = u_normalMatrix * a_normal; + + // calculate and pass vectors required for lighting + o_lightVector = u_lightPos - positionWorld.xyz; + o_normal = normalWorld; + + // project world space position to the screen and output it + gl_Position = u_viewProjectionMatrix * positionWorld; +} +""" + +GOOCH_VERTEX_SHADER_130 = """ +#version 130 + +// attributes +in vec3 a_vertex; // xyz - position +in vec3 a_normal; // xyz - normal + +// uniforms +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat3 u_normalMatrix; +uniform vec3 u_lightPos; +uniform vec3 u_camPos; + +// output data from vertex to fragment shader +out vec3 o_normal; +out vec3 o_lightVector; + +/////////////////////////////////////////////////////////////////// + +void main(void) +{ + // transform position and normal to world space + vec4 positionWorld = u_modelMatrix * vec4(a_vertex, 1.0); + vec3 normalWorld = u_normalMatrix * a_normal; + + // calculate and pass vectors required for lighting + o_lightVector = u_lightPos - positionWorld.xyz; + o_normal = normalWorld; + + // project world space position to the screen and output it + gl_Position = u_viewProjectionMatrix * positionWorld; +} +""" + +GOOCH_FRAGMENT_SHADER_120 = """ +#version 120 + +// data from vertex shader +varying vec3 o_normal; +varying vec3 o_lightVector; + +// diffuse color of the object +uniform vec4 u_materialDiffuse; +// cool color of gooch shading +uniform vec3 u_coolColor; +// warm color of gooch shading +uniform vec3 u_warmColor; +// how much to take from object color in final cool color +uniform float u_alpha; +// how much to take from object color in final warm color +uniform float u_beta; + +/////////////////////////////////////////////////////////// + +void main(void) +{ + // normlize vectors for lighting + vec3 normalVector = normalize(o_normal); + vec3 lightVector = normalize(o_lightVector); + // intensity of diffuse lighting [-1, 1] + float diffuseLighting = dot(lightVector, normalVector); + // map intensity of lighting from range [-1; 1] to [0, 1] + float interpolationValue = (1.0 + 
diffuseLighting)/2; + + ////////////////////////////////////////////////////////////////// + + // cool color mixed with color of the object + vec3 coolColorMod = u_coolColor + vec3(u_materialDiffuse) * u_alpha; + // warm color mixed with color of the object + vec3 warmColorMod = u_warmColor + vec3(u_materialDiffuse) * u_beta; + // interpolation of cool and warm colors according + // to lighting intensity. The lower the light intensity, + // the larger part of the cool color is used + vec3 colorOut = mix(coolColorMod, warmColorMod, interpolationValue); + + ////////////////////////////////////////////////////////////////// + + // save color + gl_FragColor.rgb = colorOut; + gl_FragColor.a = 1; +} +""" + +GOOCH_FRAGMENT_SHADER_130 = """ +#version 130 + +// data from vertex shader +in vec3 o_normal; +in vec3 o_lightVector; + +// diffuse color of the object +uniform vec4 u_materialDiffuse; +// cool color of gooch shading +uniform vec3 u_coolColor; +// warm color of gooch shading +uniform vec3 u_warmColor; +// how much to take from object color in final cool color +uniform float u_alpha; +// how much to take from object color in final warm color +uniform float u_beta; + +// output to framebuffer +out vec4 resultingColor; + +/////////////////////////////////////////////////////////// + +void main(void) +{ + // normlize vectors for lighting + vec3 normalVector = normalize(o_normal); + vec3 lightVector = normalize(o_lightVector); + // intensity of diffuse lighting [-1, 1] + float diffuseLighting = dot(lightVector, normalVector); + // map intensity of lighting from range [-1; 1] to [0, 1] + float interpolationValue = (1.0 + diffuseLighting)/2; + + ////////////////////////////////////////////////////////////////// + + // cool color mixed with color of the object + vec3 coolColorMod = u_coolColor + vec3(u_materialDiffuse) * u_alpha; + // warm color mixed with color of the object + vec3 warmColorMod = u_warmColor + vec3(u_materialDiffuse) * u_beta; + // interpolation of cool and warm colors according + // to lighting intensity. 
The lower the light intensity, + // the larger part of the cool color is used + vec3 colorOut = mix(coolColorMod, warmColorMod, interpolationValue); + + ////////////////////////////////////////////////////////////////// + + // save color + resultingColor.rgb = colorOut; + resultingColor.a = 1; +} +""" + +SILHOUETTE_VERTEX_SHADER_120 = """ +#version 120 + +attribute vec3 a_vertex; // xyz - position +attribute vec3 a_normal; // xyz - normal + +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelViewMatrix; +uniform vec4 u_materialDiffuse; +uniform float u_bordersize; // width of the border + +varying vec4 v_color; + +void main(void){ + v_color = u_materialDiffuse; + float distToCamera = -(u_modelViewMatrix * vec4(a_vertex, 1.0)).z; + vec4 tPos = vec4(a_vertex + a_normal * u_bordersize * distToCamera, 1.0); + gl_Position = u_viewProjectionMatrix * u_modelMatrix * tPos; +} +""" + +SILHOUETTE_VERTEX_SHADER_130 = """ +#version 130 + +in vec3 a_vertex; // xyz - position +in vec3 a_normal; // xyz - normal + +uniform mat4 u_modelMatrix; +uniform mat4 u_viewProjectionMatrix; +uniform mat4 u_modelViewMatrix; +uniform vec4 u_materialDiffuse; +uniform float u_bordersize; // width of the border + +out vec4 v_color; + +void main(void){ + v_color = u_materialDiffuse; + float distToCamera = -(u_modelViewMatrix * vec4(a_vertex, 1.0)).z; + vec4 tPos = vec4(a_vertex + a_normal * u_bordersize * distToCamera, 1.0); + gl_Position = u_viewProjectionMatrix * u_modelMatrix * tPos; +} +""" +DEFAULT_CLIP_PLANE_NEAR = 0.001 +DEFAULT_CLIP_PLANE_FAR = 1000.0 + + +def get_world_transform(scene, node): + if node == scene.rootnode: + return numpy.identity(4, dtype=numpy.float32) + + parents = reversed(_get_parent_chain(scene, node, [])) + parent_transform = reduce(numpy.dot, [p.transformation for p in parents]) + return numpy.dot(parent_transform, node.transformation) + + +def _get_parent_chain(scene, node, parents): + parent = node.parent + + parents.append(parent) + + if parent == scene.rootnode: + return parents + + return _get_parent_chain(scene, parent, parents) + + +class DefaultCamera: + def __init__(self, w, h, fov): + self.name = "default camera" + self.type = CAMERA + self.clipplanenear = DEFAULT_CLIP_PLANE_NEAR + self.clipplanefar = DEFAULT_CLIP_PLANE_FAR + self.aspect = w / h + self.horizontalfov = fov * math.pi / 180 + self.transformation = numpy.array([[0.68, -0.32, 0.65, 7.48], + [0.73, 0.31, -0.61, -6.51], + [-0.01, 0.89, 0.44, 5.34], + [0., 0., 0., 1.]], dtype=numpy.float32) + + self.transformation = numpy.dot(self.transformation, ROTATION_180_X) + + def __str__(self): + return self.name + + +class PyAssimp3DViewer: + base_name = "PyASSIMP 3D viewer" + + def __init__(self, model, w=1024, h=768): + + self.w = w + self.h = h + + pygame.init() + pygame.display.set_caption(self.base_name) + pygame.display.set_mode((w, h), pygame.OPENGL | pygame.DOUBLEBUF) + + glClearColor(0.18, 0.18, 0.18, 1.0) + + shader_compilation_succeeded = False + try: + self.set_shaders_v130() + self.prepare_shaders() + except RuntimeError as message: + sys.stderr.write("%s\n" % message) + sys.stdout.write("Could not compile shaders in version 1.30, trying version 1.20\n") + + if not shader_compilation_succeeded: + self.set_shaders_v120() + self.prepare_shaders() + + self.scene = None + self.meshes = {} # stores the OpenGL vertex/faces/normals buffers pointers + + self.node2colorid = {} # stores a color ID for each node. 
Useful for mouse picking and visibility checking + self.colorid2node = {} # reverse dict of node2colorid + + self.currently_selected = None + self.moving = False + self.moving_situation = None + + self.default_camera = DefaultCamera(self.w, self.h, fov=70) + self.cameras = [self.default_camera] + + self.current_cam_index = 0 + self.current_cam = self.default_camera + self.set_camera_projection() + + self.load_model(model) + + # user interactions + self.focal_point = [0, 0, 0] + self.is_rotating = False + self.is_panning = False + self.is_zooming = False + + def set_shaders_v120(self): + self.BASIC_VERTEX_SHADER = BASIC_VERTEX_SHADER_120 + self.FLAT_VERTEX_SHADER = FLAT_VERTEX_SHADER_120 + self.SILHOUETTE_VERTEX_SHADER = SILHOUETTE_VERTEX_SHADER_120 + self.GOOCH_VERTEX_SHADER = GOOCH_VERTEX_SHADER_120 + + self.BASIC_FRAGMENT_SHADER = BASIC_FRAGMENT_SHADER_120 + self.GOOCH_FRAGMENT_SHADER = GOOCH_FRAGMENT_SHADER_120 + + def set_shaders_v130(self): + self.BASIC_VERTEX_SHADER = BASIC_VERTEX_SHADER_130 + self.FLAT_VERTEX_SHADER = FLAT_VERTEX_SHADER_130 + self.SILHOUETTE_VERTEX_SHADER = SILHOUETTE_VERTEX_SHADER_130 + self.GOOCH_VERTEX_SHADER = GOOCH_VERTEX_SHADER_130 + + self.BASIC_FRAGMENT_SHADER = BASIC_FRAGMENT_SHADER_130 + self.GOOCH_FRAGMENT_SHADER = GOOCH_FRAGMENT_SHADER_130 + + def prepare_shaders(self): + + ### Base shader + vertex = shaders.compileShader(self.BASIC_VERTEX_SHADER, GL_VERTEX_SHADER) + fragment = shaders.compileShader(self.BASIC_FRAGMENT_SHADER, GL_FRAGMENT_SHADER) + + self.shader = shaders.compileProgram(vertex, fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_normalMatrix', + 'u_lightPos', + 'u_materialDiffuse'), + ('a_vertex', + 'a_normal'), self.shader) + + ### Flat shader + flatvertex = shaders.compileShader(self.FLAT_VERTEX_SHADER, GL_VERTEX_SHADER) + self.flatshader = shaders.compileProgram(flatvertex, fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_materialDiffuse',), + ('a_vertex',), self.flatshader) + + ### Silhouette shader + silh_vertex = shaders.compileShader(self.SILHOUETTE_VERTEX_SHADER, GL_VERTEX_SHADER) + self.silhouette_shader = shaders.compileProgram(silh_vertex, fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_modelViewMatrix', + 'u_materialDiffuse', + 'u_bordersize' # width of the silhouette + ), + ('a_vertex', + 'a_normal'), self.silhouette_shader) + + ### Gooch shader + gooch_vertex = shaders.compileShader(self.GOOCH_VERTEX_SHADER, GL_VERTEX_SHADER) + gooch_fragment = shaders.compileShader(self.GOOCH_FRAGMENT_SHADER, GL_FRAGMENT_SHADER) + self.gooch_shader = shaders.compileProgram(gooch_vertex, gooch_fragment) + + self.set_shader_accessors(('u_modelMatrix', + 'u_viewProjectionMatrix', + 'u_normalMatrix', + 'u_lightPos', + 'u_materialDiffuse', + 'u_coolColor', + 'u_warmColor', + 'u_alpha', + 'u_beta' + ), + ('a_vertex', + 'a_normal'), self.gooch_shader) + + @staticmethod + def set_shader_accessors(uniforms, attributes, shader): + # add accessors to the shaders uniforms and attributes + for uniform in uniforms: + location = glGetUniformLocation(shader, uniform) + if location in (None, -1): + raise RuntimeError('No uniform: %s (maybe it is not used ' + 'anymore and has been optimized out by' + ' the shader compiler)' % uniform) + setattr(shader, uniform, location) + + for attribute in attributes: + location = glGetAttribLocation(shader, attribute) + if location in (None, -1): + raise RuntimeError('No attribute: %s' % attribute) 
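+ # cache the resolved location directly on the shader object
+ # (e.g. shader.a_vertex, shader.u_modelMatrix) so the rendering
+ # code can reuse it every frame without querying OpenGL again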
+ setattr(shader, attribute, location) + + @staticmethod + def prepare_gl_buffers(mesh): + + mesh.gl = {} + + # Fill the buffer for vertex and normals positions + v = numpy.array(mesh.vertices, 'f') + n = numpy.array(mesh.normals, 'f') + + mesh.gl["vbo"] = vbo.VBO(numpy.hstack((v, n))) + + # Fill the buffer for vertex positions + mesh.gl["faces"] = glGenBuffers(1) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["faces"]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER, + numpy.array(mesh.faces, dtype=numpy.int32), + GL_STATIC_DRAW) + + mesh.gl["nbfaces"] = len(mesh.faces) + + # Unbind buffers + glBindBuffer(GL_ARRAY_BUFFER, 0) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) + + @staticmethod + def get_rgb_from_colorid(colorid): + r = (colorid >> 0) & 0xff + g = (colorid >> 8) & 0xff + b = (colorid >> 16) & 0xff + + return r, g, b + + def get_color_id(self): + id = random.randint(0, 256 * 256 * 256) + if id not in self.colorid2node: + return id + else: + return self.get_color_id() + + def glize(self, scene, node): + + logger.info("Loading node <%s>" % node) + node.selected = True if self.currently_selected and self.currently_selected == node else False + + node.transformation = node.transformation.astype(numpy.float32) + + if node.meshes: + node.type = MESH + colorid = self.get_color_id() + self.colorid2node[colorid] = node + self.node2colorid[node.name] = colorid + + elif node.name in [c.name for c in scene.cameras]: + + # retrieve the ASSIMP camera object + [cam] = [c for c in scene.cameras if c.name == node.name] + node.type = CAMERA + logger.info("Added camera <%s>" % node.name) + logger.info("Camera position: %.3f, %.3f, %.3f" % tuple(node.transformation[:, 3][:3].tolist())) + self.cameras.append(node) + node.clipplanenear = cam.clipplanenear + node.clipplanefar = cam.clipplanefar + + if numpy.allclose(cam.lookat, [0, 0, -1]) and numpy.allclose(cam.up, [0, 1, 0]): # Cameras in .blend files + + # Rotate by 180deg around X to have Z pointing forward + node.transformation = numpy.dot(node.transformation, ROTATION_180_X) + else: + raise RuntimeError( + "I do not know how to normalize this camera orientation: lookat=%s, up=%s" % (cam.lookat, cam.up)) + + if cam.aspect == 0.0: + logger.warning("Camera aspect not set. Setting to default 4:3") + node.aspect = 1.333 + else: + node.aspect = cam.aspect + + node.horizontalfov = cam.horizontalfov + + else: + node.type = ENTITY + + for child in node.children: + self.glize(scene, child) + + def load_model(self, path, postprocess=aiProcessPreset_TargetRealtime_MaxQuality): + logger.info("Loading model:" + path + "...") + + if postprocess: + self.scene = pyassimp.load(path, processing=postprocess) + else: + self.scene = pyassimp.load(path) + logger.info("Done.") + + scene = self.scene + # log some statistics + logger.info(" meshes: %d" % len(scene.meshes)) + logger.info(" total faces: %d" % sum([len(mesh.faces) for mesh in scene.meshes])) + logger.info(" materials: %d" % len(scene.materials)) + self.bb_min, self.bb_max = get_bounding_box(self.scene) + logger.info(" bounding box:" + str(self.bb_min) + " - " + str(self.bb_max)) + + self.scene_center = [(a + b) / 2. 
for a, b in zip(self.bb_min, self.bb_max)] + + for index, mesh in enumerate(scene.meshes): + self.prepare_gl_buffers(mesh) + + self.glize(scene, scene.rootnode) + + # Finally release the model + pyassimp.release(scene) + logger.info("Ready for 3D rendering!") + + def cycle_cameras(self): + + self.current_cam_index = (self.current_cam_index + 1) % len(self.cameras) + self.current_cam = self.cameras[self.current_cam_index] + self.set_camera_projection(self.current_cam) + logger.info("Switched to camera <%s>" % self.current_cam) + + def set_overlay_projection(self): + glViewport(0, 0, self.w, self.h) + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + glOrtho(0.0, self.w - 1.0, 0.0, self.h - 1.0, -1.0, 1.0) + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + + def set_camera_projection(self, camera=None): + + if not camera: + camera = self.current_cam + + znear = camera.clipplanenear or DEFAULT_CLIP_PLANE_NEAR + zfar = camera.clipplanefar or DEFAULT_CLIP_PLANE_FAR + aspect = camera.aspect + fov = camera.horizontalfov + + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + + # Compute gl frustrum + tangent = math.tan(fov / 2.) + h = znear * tangent + w = h * aspect + + # params: left, right, bottom, top, near, far + glFrustum(-w, w, -h, h, znear, zfar) + # equivalent to: + # gluPerspective(fov * 180/math.pi, aspect, znear, zfar) + + self.projection_matrix = glGetFloatv(GL_PROJECTION_MATRIX).transpose() + + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + + def render_colors(self): + + glEnable(GL_DEPTH_TEST) + glDepthFunc(GL_LEQUAL) + + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) + glEnable(GL_CULL_FACE) + + glUseProgram(self.flatshader) + + glUniformMatrix4fv(self.flatshader.u_viewProjectionMatrix, 1, GL_TRUE, + numpy.dot(self.projection_matrix, self.view_matrix)) + + self.recursive_render(self.scene.rootnode, self.flatshader, mode=COLORS) + + glUseProgram(0) + + def get_hovered_node(self, mousex, mousey): + """ + Attention: The performances of this method relies heavily on the size of the display! + """ + + # mouse out of the window? 
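+ # (if so, nothing can be hovered; otherwise the scene is re-rendered
+ # off-screen with one flat color per node, see render_colors() and
+ # get_color_id(), and the pixel under the mouse is mapped back to a
+ # node through self.colorid2node)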
+ if mousex < 0 or mousex >= self.w or mousey < 0 or mousey >= self.h: + return None + + self.render_colors() + # Capture image from the OpenGL buffer + buf = (GLubyte * (3 * self.w * self.h))(0) + glReadPixels(0, 0, self.w, self.h, GL_RGB, GL_UNSIGNED_BYTE, buf) + + # Reinterpret the RGB pixel buffer as a 1-D array of 24bits colors + a = numpy.ndarray(len(buf), numpy.dtype('>u1'), buf) + colors = numpy.zeros(len(buf) // 3, numpy.dtype('<u4')) + for i in range(3): + colors.view(dtype='>u1')[i::4] = a.view(dtype='>u1')[i::3] + + colorid = colors[mousex + mousey * self.w] + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + + if colorid in self.colorid2node: + return self.colorid2node[colorid] + + def render(self, wireframe=False, twosided=False): + + glEnable(GL_DEPTH_TEST) + glDepthFunc(GL_LEQUAL) + + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE if wireframe else GL_FILL) + glDisable(GL_CULL_FACE) if twosided else glEnable(GL_CULL_FACE) + + self.render_grid() + + self.recursive_render(self.scene.rootnode, None, mode=HELPERS) + + ### First, the silhouette + + if False: + shader = self.silhouette_shader + + # glDepthMask(GL_FALSE) + glCullFace(GL_FRONT) # cull front faces + + glUseProgram(shader) + glUniform1f(shader.u_bordersize, 0.01) + + glUniformMatrix4fv(shader.u_viewProjectionMatrix, 1, GL_TRUE, + numpy.dot(self.projection_matrix, self.view_matrix)) + + self.recursive_render(self.scene.rootnode, shader, mode=SILHOUETTE) + + glUseProgram(0) + + ### Then, inner shading + # glDepthMask(GL_TRUE) + glCullFace(GL_BACK) + + use_gooch = False + if use_gooch: + shader = self.gooch_shader + + glUseProgram(shader) + glUniform3f(shader.u_lightPos, -.5, -.5, .5) + + ##### GOOCH specific + glUniform3f(shader.u_coolColor, 159.0 / 255, 148.0 / 255, 255.0 / 255) + glUniform3f(shader.u_warmColor, 255.0 / 255, 75.0 / 255, 75.0 / 255) + glUniform1f(shader.u_alpha, .25) + glUniform1f(shader.u_beta, .25) + ######### + else: + shader = self.shader + glUseProgram(shader) + glUniform3f(shader.u_lightPos, -.5, -.5, .5) + + glUniformMatrix4fv(shader.u_viewProjectionMatrix, 1, GL_TRUE, + numpy.dot(self.projection_matrix, self.view_matrix)) + + self.recursive_render(self.scene.rootnode, shader) + + glUseProgram(0) + + def render_axis(self, + transformation=numpy.identity(4, dtype=numpy.float32), + label=None, + size=0.2, + selected=False): + m = transformation.transpose() # OpenGL row major + + glPushMatrix() + glMultMatrixf(m) + + glLineWidth(3 if selected else 1) + + size = 2 * size if selected else size + + glBegin(GL_LINES) + + # draw line for x axis + glColor3f(1.0, 0.0, 0.0) + glVertex3f(0.0, 0.0, 0.0) + glVertex3f(size, 0.0, 0.0) + + # draw line for y axis + glColor3f(0.0, 1.0, 0.0) + glVertex3f(0.0, 0.0, 0.0) + glVertex3f(0.0, size, 0.0) + + # draw line for Z axis + glColor3f(0.0, 0.0, 1.0) + glVertex3f(0.0, 0.0, 0.0) + glVertex3f(0.0, 0.0, size) + + glEnd() + + if label: + self.showtext(label) + + glPopMatrix() + + @staticmethod + def render_camera(camera, transformation): + + m = transformation.transpose() # OpenGL row major + + aspect = camera.aspect + + u = 0.1 # unit size (in m) + l = 3 * u # length of the camera cone + f = 3 * u # aperture of the camera cone + + glPushMatrix() + glMultMatrixf(m) + + glLineWidth(2) + glBegin(GL_LINE_STRIP) + + glColor3f(.2, .2, .2) + + glVertex3f(u, u, -u) + glVertex3f(u, -u, -u) + glVertex3f(-u, -u, -u) + glVertex3f(-u, u, -u) + glVertex3f(u, u, -u) + + glVertex3f(u, u, 0.0) + glVertex3f(u, -u, 0.0) + glVertex3f(-u, -u, 0.0) + glVertex3f(-u, u, 0.0) + glVertex3f(u, 
u, 0.0) + + glVertex3f(f * aspect, f, l) + glVertex3f(f * aspect, -f, l) + glVertex3f(-f * aspect, -f, l) + glVertex3f(-f * aspect, f, l) + glVertex3f(f * aspect, f, l) + + glEnd() + + glBegin(GL_LINE_STRIP) + glVertex3f(u, -u, -u) + glVertex3f(u, -u, 0.0) + glVertex3f(f * aspect, -f, l) + glEnd() + + glBegin(GL_LINE_STRIP) + glVertex3f(-u, -u, -u) + glVertex3f(-u, -u, 0.0) + glVertex3f(-f * aspect, -f, l) + glEnd() + + glBegin(GL_LINE_STRIP) + glVertex3f(-u, u, -u) + glVertex3f(-u, u, 0.0) + glVertex3f(-f * aspect, f, l) + glEnd() + + glPopMatrix() + + @staticmethod + def render_grid(): + + glLineWidth(1) + glColor3f(0.5, 0.5, 0.5) + glBegin(GL_LINES) + for i in range(-10, 11): + glVertex3f(i, -10.0, 0.0) + glVertex3f(i, 10.0, 0.0) + + for i in range(-10, 11): + glVertex3f(-10.0, i, 0.0) + glVertex3f(10.0, i, 0.0) + glEnd() + + def recursive_render(self, node, shader, mode=BASE, with_normals=True): + """ Main recursive rendering method. + """ + + normals = with_normals + + if mode == COLORS: + normals = False + + + if not hasattr(node, "selected"): + node.selected = False + + m = get_world_transform(self.scene, node) + + # HELPERS mode + ### + if mode == HELPERS: + # if node.type == ENTITY: + self.render_axis(m, + label=node.name if node != self.scene.rootnode else None, + selected=node.selected if hasattr(node, "selected") else False) + + if node.type == CAMERA: + self.render_camera(node, m) + + for child in node.children: + self.recursive_render(child, shader, mode) + + return + + # Mesh rendering modes + ### + if node.type == MESH: + + for mesh in node.meshes: + + stride = 24 # 6 * 4 bytes + + if node.selected and mode == SILHOUETTE: + glUniform4f(shader.u_materialDiffuse, 1.0, 0.0, 0.0, 1.0) + glUniformMatrix4fv(shader.u_modelViewMatrix, 1, GL_TRUE, + numpy.dot(self.view_matrix, m)) + + else: + if mode == COLORS: + colorid = self.node2colorid[node.name] + r, g, b = self.get_rgb_from_colorid(colorid) + glUniform4f(shader.u_materialDiffuse, r / 255.0, g / 255.0, b / 255.0, 1.0) + elif mode == SILHOUETTE: + glUniform4f(shader.u_materialDiffuse, .0, .0, .0, 1.0) + else: + if node.selected: + diffuse = (1.0, 0.0, 0.0, 1.0) # selected nodes in red + else: + diffuse = mesh.material.properties["diffuse"] + if len(diffuse) == 3: # RGB instead of expected RGBA + diffuse.append(1.0) + glUniform4f(shader.u_materialDiffuse, *diffuse) + # if ambient: + # glUniform4f( shader.Material_ambient, *mat["ambient"] ) + + if mode == BASE: # not in COLORS or SILHOUETTE + normal_matrix = linalg.inv(numpy.dot(self.view_matrix, m)[0:3, 0:3]).transpose() + glUniformMatrix3fv(shader.u_normalMatrix, 1, GL_TRUE, normal_matrix) + + glUniformMatrix4fv(shader.u_modelMatrix, 1, GL_TRUE, m) + + vbo = mesh.gl["vbo"] + vbo.bind() + + glEnableVertexAttribArray(shader.a_vertex) + if normals: + glEnableVertexAttribArray(shader.a_normal) + + glVertexAttribPointer( + shader.a_vertex, + 3, GL_FLOAT, False, stride, vbo + ) + + if normals: + glVertexAttribPointer( + shader.a_normal, + 3, GL_FLOAT, False, stride, vbo + 12 + ) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["faces"]) + glDrawElements(GL_TRIANGLES, mesh.gl["nbfaces"] * 3, GL_UNSIGNED_INT, None) + + vbo.unbind() + glDisableVertexAttribArray(shader.a_vertex) + + if normals: + glDisableVertexAttribArray(shader.a_normal) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) + + for child in node.children: + self.recursive_render(child, shader, mode) + + + def switch_to_overlay(self): + glPushMatrix() + self.set_overlay_projection() + + def switch_from_overlay(self): + 
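+ # restore the perspective projection of the current camera and pop
+ # the modelview matrix pushed by switch_to_overlay()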
self.set_camera_projection() + glPopMatrix() + + def select_node(self, node): + self.currently_selected = node + self.update_node_select(self.scene.rootnode) + + def update_node_select(self, node): + if node is self.currently_selected: + node.selected = True + else: + node.selected = False + + for child in node.children: + self.update_node_select(child) + + def loop(self): + + pygame.display.flip() + + if not self.process_events(): + return False # ESC has been pressed + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + + return True + + def process_events(self): + + LEFT_BUTTON = 1 + MIDDLE_BUTTON = 2 + RIGHT_BUTTON = 3 + WHEEL_UP = 4 + WHEEL_DOWN = 5 + + dx, dy = pygame.mouse.get_rel() + mousex, mousey = pygame.mouse.get_pos() + + zooming_one_shot = False + + ok = True + + for evt in pygame.event.get(): + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == LEFT_BUTTON: + hovered = self.get_hovered_node(mousex, self.h - mousey) + if hovered: + if self.currently_selected and self.currently_selected == hovered: + self.select_node(None) + else: + logger.info("Node %s selected" % hovered) + self.select_node(hovered) + else: + self.is_rotating = True + if evt.type == pygame.MOUSEBUTTONUP and evt.button == LEFT_BUTTON: + self.is_rotating = False + + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == MIDDLE_BUTTON: + self.is_panning = True + if evt.type == pygame.MOUSEBUTTONUP and evt.button == MIDDLE_BUTTON: + self.is_panning = False + + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button == RIGHT_BUTTON: + self.is_zooming = True + if evt.type == pygame.MOUSEBUTTONUP and evt.button == RIGHT_BUTTON: + self.is_zooming = False + + if evt.type == pygame.MOUSEBUTTONDOWN and evt.button in [WHEEL_UP, WHEEL_DOWN]: + zooming_one_shot = True + self.is_zooming = True + dy = -10 if evt.button == WHEEL_UP else 10 + + if evt.type == pygame.KEYDOWN: + ok = (ok and self.process_keystroke(evt.key, evt.mod)) + + self.controls_3d(dx, dy, zooming_one_shot) + + return ok + + def process_keystroke(self, key, mod): + + # process arrow keys if an object is selected + if self.currently_selected: + up = 0 + strafe = 0 + + if key == pygame.K_UP: + up = 1 + if key == pygame.K_DOWN: + up = -1 + if key == pygame.K_LEFT: + strafe = -1 + if key == pygame.K_RIGHT: + strafe = 1 + + self.move_selected_node(up, strafe) + + if key == pygame.K_f: + pygame.display.toggle_fullscreen() + + if key == pygame.K_TAB: + self.cycle_cameras() + + if key in [pygame.K_ESCAPE, pygame.K_q]: + return False + + return True + + def controls_3d(self, dx, dy, zooming_one_shot=False): + """ Orbiting the camera is implemented the following way: + + - the rotation is split into a rotation around the *world* Z axis + (controlled by the horizontal mouse motion along X) and a + rotation around the *X* axis of the camera (pitch) *shifted to + the focal origin* (the world origin for now). This is controlled + by the vertical motion of the mouse (Y axis). 
+ - as a result, the resulting transformation of the camera in the + world frame C' is: + C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹ + where: + - C is the original camera transformation in the world frame, + - Rz is the rotation along the Z axis (in the world frame) + - T is the translation camera -> world (ie, the inverse of the + translation part of C + - Rx is the rotation around X in the (translated) camera frame """ + + CAMERA_TRANSLATION_FACTOR = 0.01 + CAMERA_ROTATION_FACTOR = 0.01 + + if not (self.is_rotating or self.is_panning or self.is_zooming): + return + + current_pos = self.current_cam.transformation[:3, 3].copy() + distance = numpy.linalg.norm(self.focal_point - current_pos) + + if self.is_rotating: + rotation_camera_x = dy * CAMERA_ROTATION_FACTOR + rotation_world_z = dx * CAMERA_ROTATION_FACTOR + world_z_rotation = transformations.euler_matrix(0, 0, rotation_world_z) + cam_x_rotation = transformations.euler_matrix(rotation_camera_x, 0, 0) + + after_world_z_rotation = numpy.dot(world_z_rotation, self.current_cam.transformation) + + inverse_transformation = transformations.inverse_matrix(after_world_z_rotation) + + translation = transformations.translation_matrix( + transformations.decompose_matrix(inverse_transformation)[3]) + inverse_translation = transformations.inverse_matrix(translation) + + new_inverse = numpy.dot(inverse_translation, inverse_transformation) + new_inverse = numpy.dot(cam_x_rotation, new_inverse) + new_inverse = numpy.dot(translation, new_inverse) + + self.current_cam.transformation = transformations.inverse_matrix(new_inverse).astype(numpy.float32) + + if self.is_panning: + tx = -dx * CAMERA_TRANSLATION_FACTOR * distance + ty = dy * CAMERA_TRANSLATION_FACTOR * distance + cam_transform = transformations.translation_matrix((tx, ty, 0)).astype(numpy.float32) + self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform) + + if self.is_zooming: + tz = dy * CAMERA_TRANSLATION_FACTOR * distance + cam_transform = transformations.translation_matrix((0, 0, tz)).astype(numpy.float32) + self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform) + + if zooming_one_shot: + self.is_zooming = False + + self.update_view_camera() + + def update_view_camera(self): + + self.view_matrix = linalg.inv(self.current_cam.transformation) + + # Rotate by 180deg around X to have Z pointing backward (OpenGL convention) + self.view_matrix = numpy.dot(ROTATION_180_X, self.view_matrix) + + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + glMultMatrixf(self.view_matrix.transpose()) + + def move_selected_node(self, up, strafe): + self.currently_selected.transformation[0][3] += strafe + self.currently_selected.transformation[2][3] += up + + @staticmethod + def showtext(text, x=0, y=0, z=0, size=20): + + # TODO: alpha blending does not work... 
+ # glEnable(GL_BLEND) + # glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) + + font = pygame.font.Font(None, size) + text_surface = font.render(text, True, (10, 10, 10, 255), + (255 * 0.18, 255 * 0.18, 255 * 0.18, 0)) + text_data = pygame.image.tostring(text_surface, "RGBA", True) + glRasterPos3d(x, y, z) + glDrawPixels(text_surface.get_width(), + text_surface.get_height(), + GL_RGBA, GL_UNSIGNED_BYTE, + text_data) + + # glDisable(GL_BLEND) + + +def main(model, width, height): + app = PyAssimp3DViewer(model, w=width, h=height) + + clock = pygame.time.Clock() + + while app.loop(): + + app.update_view_camera() + + ## Main rendering + app.render() + + ## GUI text display + app.switch_to_overlay() + app.showtext("Active camera: %s" % str(app.current_cam), 10, app.h - 30) + if app.currently_selected: + app.showtext("Selected node: %s" % app.currently_selected, 10, app.h - 50) + pos = app.h - 70 + + app.showtext("(%sm, %sm, %sm)" % (app.currently_selected.transformation[0, 3], + app.currently_selected.transformation[1, 3], + app.currently_selected.transformation[2, 3]), 30, pos) + + app.switch_from_overlay() + + # Make sure we do not go over 30fps + clock.tick(30) + + logger.info("Quitting! Bye bye!") + + +######################################################################### +######################################################################### + +if __name__ == '__main__': + if not len(sys.argv) > 1: + print("Usage: " + __file__ + " <model>") + sys.exit(2) + + main(model=sys.argv[1], width=1024, height=768) diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/README.md b/src/mesh/assimp-master/port/PyAssimp/scripts/README.md new file mode 100644 index 0000000..42caa27 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/README.md @@ -0,0 +1,13 @@ +pyassimp examples +================= + +- `sample.py`: shows how to load a model with pyassimp, and display some statistics. +- `3d_viewer.py`: an OpenGL 3D viewer that requires shaders +- `fixed_pipeline_3d_viewer`: an OpenGL 3D viewer using the old fixed-pipeline. + Only for illustration example. Base new projects on `3d_viewer.py`. + + +Requirements for the 3D viewers: + +- `pyopengl` (on Ubuntu/Debian, `sudo apt-get install python-opengl`) +- `pygame` (on Ubuntu/Debian, `sudo apt-get install python-pygame`) diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/fixed_pipeline_3d_viewer.py b/src/mesh/assimp-master/port/PyAssimp/scripts/fixed_pipeline_3d_viewer.py new file mode 100755 index 0000000..c2f6ceb --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/fixed_pipeline_3d_viewer.py @@ -0,0 +1,372 @@ +#!/usr/bin/env python +#-*- coding: UTF-8 -*- + +""" This program demonstrates the use of pyassimp to load and +render objects with OpenGL. + +'c' cycles between cameras (if any available) +'q' to quit + +This example mixes 'old' OpenGL fixed-function pipeline with +Vertex Buffer Objects. + +Materials are supported but textures are currently ignored. 
+ +For a more advanced example (with shaders + keyboard/mouse +controls), check scripts/sdl_viewer.py + +Author: Séverin Lemaignan, 2012 + +This sample is based on several sources, including: + - http://www.lighthouse3d.com/tutorials + - http://www.songho.ca/opengl/gl_transform.html + - http://code.activestate.com/recipes/325391/ + - ASSIMP's C++ SimpleOpenGL viewer +""" + +import sys +from OpenGL.GLUT import * +from OpenGL.GLU import * +from OpenGL.GL import * + +import logging +logger = logging.getLogger("pyassimp_opengl") +logging.basicConfig(level=logging.INFO) + +import math +import numpy + +import pyassimp +from pyassimp.postprocess import * +from pyassimp.helper import * + + +name = 'pyassimp OpenGL viewer' +height = 600 +width = 900 + +class GLRenderer(): + def __init__(self): + + self.scene = None + + self.using_fixed_cam = False + self.current_cam_index = 0 + + # store the global scene rotation + self.angle = 0. + + # for FPS calculation + self.prev_time = 0 + self.prev_fps_time = 0 + self.frames = 0 + + def prepare_gl_buffers(self, mesh): + """ Creates 3 buffer objets for each mesh, + to store the vertices, the normals, and the faces + indices. + """ + + mesh.gl = {} + + # Fill the buffer for vertex positions + mesh.gl["vertices"] = glGenBuffers(1) + glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["vertices"]) + glBufferData(GL_ARRAY_BUFFER, + mesh.vertices, + GL_STATIC_DRAW) + + # Fill the buffer for normals + mesh.gl["normals"] = glGenBuffers(1) + glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["normals"]) + glBufferData(GL_ARRAY_BUFFER, + mesh.normals, + GL_STATIC_DRAW) + + + # Fill the buffer for vertex positions + mesh.gl["triangles"] = glGenBuffers(1) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["triangles"]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER, + mesh.faces, + GL_STATIC_DRAW) + + # Unbind buffers + glBindBuffer(GL_ARRAY_BUFFER,0) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0) + + def load_model(self, path, postprocess = None): + logger.info("Loading model:" + path + "...") + + if postprocess: + self.scene = pyassimp.load(path, processing=postprocess) + else: + self.scene = pyassimp.load(path) + logger.info("Done.") + + scene = self.scene + #log some statistics + logger.info(" meshes: %d" % len(scene.meshes)) + logger.info(" total faces: %d" % sum([len(mesh.faces) for mesh in scene.meshes])) + logger.info(" materials: %d" % len(scene.materials)) + self.bb_min, self.bb_max = get_bounding_box(self.scene) + logger.info(" bounding box:" + str(self.bb_min) + " - " + str(self.bb_max)) + + self.scene_center = [(a + b) / 2. for a, b in zip(self.bb_min, self.bb_max)] + + for index, mesh in enumerate(scene.meshes): + self.prepare_gl_buffers(mesh) + + # Finally release the model + pyassimp.release(scene) + + def cycle_cameras(self): + self.current_cam_index + if not self.scene.cameras: + return None + self.current_cam_index = (self.current_cam_index + 1) % len(self.scene.cameras) + cam = self.scene.cameras[self.current_cam_index] + logger.info("Switched to camera " + str(cam)) + return cam + + def set_default_camera(self): + + if not self.using_fixed_cam: + glLoadIdentity() + + gluLookAt(0.,0.,3., + 0.,0.,-5., + 0.,1.,0.) + + + + def set_camera(self, camera): + + if not camera: + return + + self.using_fixed_cam = True + + znear = camera.clipplanenear + zfar = camera.clipplanefar + aspect = camera.aspect + fov = camera.horizontalfov + + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + + # Compute gl frustrum + tangent = math.tan(fov/2.) 
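+        # Near-plane half-extents for glFrustum: h = znear * tan(fov / 2),
+        # scaled by the aspect ratio below to obtain the half-width.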
+ h = znear * tangent + w = h * aspect + + # params: left, right, bottom, top, near, far + glFrustum(-w, w, -h, h, znear, zfar) + # equivalent to: + #gluPerspective(fov * 180/math.pi, aspect, znear, zfar) + + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + + cam = transform(camera.position, camera.transformation) + at = transform(camera.lookat, camera.transformation) + gluLookAt(cam[0], cam[2], -cam[1], + at[0], at[2], -at[1], + 0, 1, 0) + + def fit_scene(self, restore = False): + """ Compute a scale factor and a translation to fit and center + the whole geometry on the screen. + """ + + x_max = self.bb_max[0] - self.bb_min[0] + y_max = self.bb_max[1] - self.bb_min[1] + tmp = max(x_max, y_max) + z_max = self.bb_max[2] - self.bb_min[2] + tmp = max(z_max, tmp) + + if not restore: + tmp = 1. / tmp + + logger.info("Scaling the scene by %.03f" % tmp) + glScalef(tmp, tmp, tmp) + + # center the model + direction = -1 if not restore else 1 + glTranslatef( direction * self.scene_center[0], + direction * self.scene_center[1], + direction * self.scene_center[2] ) + + return x_max, y_max, z_max + + def apply_material(self, mat): + """ Apply an OpenGL, using one OpenGL display list per material to cache + the operation. + """ + + if not hasattr(mat, "gl_mat"): # evaluate once the mat properties, and cache the values in a glDisplayList. + diffuse = numpy.array(mat.properties.get("diffuse", [0.8, 0.8, 0.8, 1.0])) + specular = numpy.array(mat.properties.get("specular", [0., 0., 0., 1.0])) + ambient = numpy.array(mat.properties.get("ambient", [0.2, 0.2, 0.2, 1.0])) + emissive = numpy.array(mat.properties.get("emissive", [0., 0., 0., 1.0])) + shininess = min(mat.properties.get("shininess", 1.0), 128) + wireframe = mat.properties.get("wireframe", 0) + twosided = mat.properties.get("twosided", 1) + + setattr(mat, "gl_mat", glGenLists(1)) + glNewList(mat.gl_mat, GL_COMPILE) + + glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, diffuse) + glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, specular) + glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT, ambient) + glMaterialfv(GL_FRONT_AND_BACK, GL_EMISSION, emissive) + glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, shininess) + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE if wireframe else GL_FILL) + glDisable(GL_CULL_FACE) if twosided else glEnable(GL_CULL_FACE) + + glEndList() + + glCallList(mat.gl_mat) + + + + def do_motion(self): + + gl_time = glutGet(GLUT_ELAPSED_TIME) + + self.angle = (gl_time - self.prev_time) * 0.1 + + self.prev_time = gl_time + + # Compute FPS + self.frames += 1 + if gl_time - self.prev_fps_time >= 1000: + current_fps = self.frames * 1000 / (gl_time - self.prev_fps_time) + logger.info('%.0f fps' % current_fps) + self.frames = 0 + self.prev_fps_time = gl_time + + glutPostRedisplay() + + def recursive_render(self, node): + """ Main recursive rendering method. 
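+
+        Applies the node's transformation, draws each of its meshes from the
+        prepared vertex, normal and index buffers with the cached material,
+        then recurses into the node's children.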
+ """ + + # save model matrix and apply node transformation + glPushMatrix() + m = node.transformation.transpose() # OpenGL row major + glMultMatrixf(m) + + for mesh in node.meshes: + self.apply_material(mesh.material) + + glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["vertices"]) + glEnableClientState(GL_VERTEX_ARRAY) + glVertexPointer(3, GL_FLOAT, 0, None) + + glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["normals"]) + glEnableClientState(GL_NORMAL_ARRAY) + glNormalPointer(GL_FLOAT, 0, None) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["triangles"]) + glDrawElements(GL_TRIANGLES,len(mesh.faces) * 3, GL_UNSIGNED_INT, None) + + glDisableClientState(GL_VERTEX_ARRAY) + glDisableClientState(GL_NORMAL_ARRAY) + + glBindBuffer(GL_ARRAY_BUFFER, 0) + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) + + for child in node.children: + self.recursive_render(child) + + glPopMatrix() + + + def display(self): + """ GLUT callback to redraw OpenGL surface + """ + glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT) + + glRotatef(self.angle,0.,1.,0.) + self.recursive_render(self.scene.rootnode) + + glutSwapBuffers() + self.do_motion() + return + + #################################################################### + ## GLUT keyboard and mouse callbacks ## + #################################################################### + def onkeypress(self, key, x, y): + if key == 'c': + self.fit_scene(restore = True) + self.set_camera(self.cycle_cameras()) + if key == 'q': + sys.exit(0) + + def render(self, filename=None, fullscreen = False, autofit = True, postprocess = None): + """ + + :param autofit: if true, scale the scene to fit the whole geometry + in the viewport. + """ + + # First initialize the openGL context + glutInit(sys.argv) + glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH) + if not fullscreen: + glutInitWindowSize(width, height) + glutCreateWindow(name) + else: + glutGameModeString("1024x768") + if glutGameModeGet(GLUT_GAME_MODE_POSSIBLE): + glutEnterGameMode() + else: + print("Fullscreen mode not available!") + sys.exit(1) + + self.load_model(filename, postprocess = postprocess) + + + glClearColor(0.1,0.1,0.1,1.) + #glShadeModel(GL_SMOOTH) + + glEnable(GL_LIGHTING) + + glEnable(GL_CULL_FACE) + glEnable(GL_DEPTH_TEST) + + glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE) + glEnable(GL_NORMALIZE) + glEnable(GL_LIGHT0) + + glutDisplayFunc(self.display) + + + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + gluPerspective(35.0, width/float(height) , 0.10, 100.0) + glMatrixMode(GL_MODELVIEW) + self.set_default_camera() + + if autofit: + # scale the whole asset to fit into our view frustum· + self.fit_scene() + + glPushMatrix() + + glutKeyboardFunc(self.onkeypress) + glutIgnoreKeyRepeat(1) + + glutMainLoop() + + +if __name__ == '__main__': + if not len(sys.argv) > 1: + print("Usage: " + __file__ + " <model>") + sys.exit(0) + + glrender = GLRenderer() + glrender.render(sys.argv[1], fullscreen = False, postprocess = aiProcessPreset_TargetRealtime_MaxQuality) + diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/quicktest.py b/src/mesh/assimp-master/port/PyAssimp/scripts/quicktest.py new file mode 100755 index 0000000..cbeccb4 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/quicktest.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +#-*- coding: UTF-8 -*- + +""" +This module uses the sample.py script to load all test models it finds. + +Note: this is not an exhaustive test suite, it does not check the +data structures in detail. 
It just verifies whether basic +loading and querying of 3d models using pyassimp works. +""" + +import os +import sys + +# Make the development (ie. GIT repo) version of PyAssimp available for import. +sys.path.insert(0, '..') + +import sample +from pyassimp import errors + +# Paths to model files. +basepaths = [os.path.join('..', '..', '..', 'test', 'models'), + os.path.join('..', '..', '..', 'test', 'models-nonbsd')] + +# Valid extensions for 3D model files. +extensions = ['.3ds', '.x', '.lwo', '.obj', '.md5mesh', '.dxf', '.ply', '.stl', + '.dae', '.md5anim', '.lws', '.irrmesh', '.nff', '.off', '.blend'] + + +def run_tests(): + ok, err = 0, 0 + for path in basepaths: + print("Looking for models in %s..." % path) + for root, dirs, files in os.walk(path): + for afile in files: + base, ext = os.path.splitext(afile) + if ext in extensions: + try: + sample.main(os.path.join(root, afile)) + ok += 1 + except errors.AssimpError as error: + # Assimp error is fine; this is a controlled case. + print(error) + err += 1 + except Exception: + print("Error encountered while loading <%s>" + % os.path.join(root, afile)) + print('** Loaded %s models, got controlled errors for %s files' + % (ok, err)) + + +if __name__ == '__main__': + run_tests() diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/sample.py b/src/mesh/assimp-master/port/PyAssimp/scripts/sample.py new file mode 100755 index 0000000..3cd4b3e --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/sample.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +#-*- coding: UTF-8 -*- + +""" +This module demonstrates the functionality of PyAssimp. +""" + +import sys +import logging +logging.basicConfig(level=logging.INFO) + +import pyassimp +import pyassimp.postprocess + +def recur_node(node,level = 0): + print(" " + "\t" * level + "- " + str(node)) + for child in node.children: + recur_node(child, level + 1) + + +def main(filename=None): + + scene = pyassimp.load(filename, processing=pyassimp.postprocess.aiProcess_Triangulate) + + #the model we load + print("MODEL:" + filename) + print + + #write some statistics + print("SCENE:") + print(" meshes:" + str(len(scene.meshes))) + print(" materials:" + str(len(scene.materials))) + print(" textures:" + str(len(scene.textures))) + print + + print("NODES:") + recur_node(scene.rootnode) + + print + print("MESHES:") + for index, mesh in enumerate(scene.meshes): + print(" MESH" + str(index+1)) + print(" material id:" + str(mesh.materialindex+1)) + print(" vertices:" + str(len(mesh.vertices))) + print(" first 3 verts:\n" + str(mesh.vertices[:3])) + if mesh.normals.any(): + print(" first 3 normals:\n" + str(mesh.normals[:3])) + else: + print(" no normals") + print(" colors:" + str(len(mesh.colors))) + tcs = mesh.texturecoords + if tcs.any(): + for tc_index, tc in enumerate(tcs): + print(" texture-coords "+ str(tc_index) + ":" + str(len(tcs[tc_index])) + "first3:" + str(tcs[tc_index][:3])) + + else: + print(" no texture coordinates") + print(" uv-component-count:" + str(len(mesh.numuvcomponents))) + print(" faces:" + str(len(mesh.faces)) + " -> first:\n" + str(mesh.faces[:3])) + print(" bones:" + str(len(mesh.bones)) + " -> first:" + str([str(b) for b in mesh.bones[:3]])) + print + + print("MATERIALS:") + for index, material in enumerate(scene.materials): + print(" MATERIAL (id:" + str(index+1) + ")") + for key, value in material.properties.items(): + print(" %s: %s" % (key, value)) + print + + print("TEXTURES:") + for index, texture in enumerate(scene.textures): + print(" TEXTURE" + str(index+1)) + 
print(" width:" + str(texture.width)) + print(" height:" + str(texture.height)) + print(" hint:" + str(texture.achformathint)) + print(" data (size):" + str(len(texture.data))) + + # Finally release the model + pyassimp.release(scene) + +def usage(): + print("Usage: sample.py <3d model>") + +if __name__ == "__main__": + + if len(sys.argv) != 2: + usage() + else: + main(sys.argv[1]) diff --git a/src/mesh/assimp-master/port/PyAssimp/scripts/transformations.py b/src/mesh/assimp-master/port/PyAssimp/scripts/transformations.py new file mode 100644 index 0000000..bf0cac9 --- /dev/null +++ b/src/mesh/assimp-master/port/PyAssimp/scripts/transformations.py @@ -0,0 +1,1705 @@ +# -*- coding: utf-8 -*- +# transformations.py + +# Copyright (c) 2006, Christoph Gohlke +# Copyright (c) 2006-2009, The Regents of the University of California +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Homogeneous Transformation Matrices and Quaternions. + +A library for calculating 4x4 matrices for translating, rotating, reflecting, +scaling, shearing, projecting, orthogonalizing, and superimposing arrays of +3D homogeneous coordinates as well as for converting between rotation matrices, +Euler angles, and quaternions. Also includes an Arcball control object and +functions to decompose transformation matrices. + +:Authors: + `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__, + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 20090418 + +Requirements +------------ + +* `Python 2.6 <http://www.python.org>`__ +* `Numpy 1.3 <http://numpy.scipy.org>`__ +* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__ + (optional implementation of some functions in C) + +Notes +----- + +Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using +numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using +numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively +numpy.dot(v, M.T) for shape (\*, 4) "array of points". + +Calculations are carried out with numpy.float64 precision. 
+ +This Python implementation is not optimized for speed. + +Vector, point, quaternion, and matrix function arguments are expected to be +"array like", i.e. tuple, list, or numpy arrays. + +Return types are numpy arrays unless specified otherwise. + +Angles are in radians unless specified otherwise. + +Quaternions ix+jy+kz+w are represented as [x, y, z, w]. + +Use the transpose of transformation matrices for OpenGL glMultMatrixd(). + +A triple of Euler angles can be applied/interpreted in 24 ways, which can +be specified using a 4 character string or encoded 4-tuple: + + *Axes 4-string*: e.g. 'sxyz' or 'ryxy' + + - first character : rotations are applied to 's'tatic or 'r'otating frame + - remaining characters : successive rotation axis 'x', 'y', or 'z' + + *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) + + - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. + - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed + by 'z', or 'z' is followed by 'x'. Otherwise odd (1). + - repetition : first and last axis are same (1) or different (0). + - frame : rotations are applied to static (0) or rotating (1) frame. + +References +---------- + +(1) Matrices and transformations. Ronald Goldman. + In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. +(2) More matrices and transformations: shear and pseudo-perspective. + Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(3) Decomposing a matrix into simple transformations. Spencer Thomas. + In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(4) Recovering the data from the transformation matrix. Ronald Goldman. + In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. +(5) Euler angle conversion. Ken Shoemake. + In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. +(6) Arcball rotation control. Ken Shoemake. + In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. +(7) Representing attitude: Euler angles, unit quaternions, and rotation + vectors. James Diebel. 2006. +(8) A discussion of the solution for the best rotation to relate two sets + of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. +(9) Closed-form solution of absolute orientation using unit quaternions. + BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642. +(10) Quaternions. Ken Shoemake. + http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf +(11) From quaternion to matrix and back. JMP van Waveren. 2005. + http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm +(12) Uniform random rotations. Ken Shoemake. + In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. 
+ + +Examples +-------- + +>>> alpha, beta, gamma = 0.123, -1.234, 2.345 +>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1) +>>> I = identity_matrix() +>>> Rx = rotation_matrix(alpha, xaxis) +>>> Ry = rotation_matrix(beta, yaxis) +>>> Rz = rotation_matrix(gamma, zaxis) +>>> R = concatenate_matrices(Rx, Ry, Rz) +>>> euler = euler_from_matrix(R, 'rxyz') +>>> numpy.allclose([alpha, beta, gamma], euler) +True +>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') +>>> is_same_transform(R, Re) +True +>>> al, be, ga = euler_from_matrix(Re, 'rxyz') +>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) +True +>>> qx = quaternion_about_axis(alpha, xaxis) +>>> qy = quaternion_about_axis(beta, yaxis) +>>> qz = quaternion_about_axis(gamma, zaxis) +>>> q = quaternion_multiply(qx, qy) +>>> q = quaternion_multiply(q, qz) +>>> Rq = quaternion_matrix(q) +>>> is_same_transform(R, Rq) +True +>>> S = scale_matrix(1.23, origin) +>>> T = translation_matrix((1, 2, 3)) +>>> Z = shear_matrix(beta, xaxis, origin, zaxis) +>>> R = random_rotation_matrix(numpy.random.rand(3)) +>>> M = concatenate_matrices(T, R, Z, S) +>>> scale, shear, angles, trans, persp = decompose_matrix(M) +>>> numpy.allclose(scale, 1.23) +True +>>> numpy.allclose(trans, (1, 2, 3)) +True +>>> numpy.allclose(shear, (0, math.tan(beta), 0)) +True +>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) +True +>>> M1 = compose_matrix(scale, shear, angles, trans, persp) +>>> is_same_transform(M, M1) +True + +""" + +from __future__ import division + +import warnings +import math + +import numpy + +# Documentation in HTML format can be generated with Epydoc +__docformat__ = "restructuredtext en" + + +def identity_matrix(): + """Return 4x4 identity/unit matrix. + + >>> I = identity_matrix() + >>> numpy.allclose(I, numpy.dot(I, I)) + True + >>> numpy.sum(I), numpy.trace(I) + (4.0, 4.0) + >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64)) + True + + """ + return numpy.identity(4, dtype=numpy.float64) + + +def translation_matrix(direction): + """Return matrix to translate by direction vector. + + >>> v = numpy.random.random(3) - 0.5 + >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) + True + + """ + M = numpy.identity(4) + M[:3, 3] = direction[:3] + return M + + +def translation_from_matrix(matrix): + """Return translation vector from translation matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = translation_from_matrix(translation_matrix(v0)) + >>> numpy.allclose(v0, v1) + True + + """ + return numpy.array(matrix, copy=False)[:3, 3].copy() + + +def reflection_matrix(point, normal): + """Return matrix to mirror at plane defined by point and normal vector. + + >>> v0 = numpy.random.random(4) - 0.5 + >>> v0[3] = 1.0 + >>> v1 = numpy.random.random(3) - 0.5 + >>> R = reflection_matrix(v0, v1) + >>> numpy.allclose(2., numpy.trace(R)) + True + >>> numpy.allclose(v0, numpy.dot(R, v0)) + True + >>> v2 = v0.copy() + >>> v2[:3] += v1 + >>> v3 = v0.copy() + >>> v2[:3] -= v1 + >>> numpy.allclose(v2, numpy.dot(R, v3)) + True + + """ + normal = unit_vector(normal[:3]) + M = numpy.identity(4) + M[:3, :3] -= 2.0 * numpy.outer(normal, normal) + M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal + return M + + +def reflection_from_matrix(matrix): + """Return mirror plane point and normal vector from reflection matrix. 
+ + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = numpy.random.random(3) - 0.5 + >>> M0 = reflection_matrix(v0, v1) + >>> point, normal = reflection_from_matrix(M0) + >>> M1 = reflection_matrix(point, normal) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + # normal: unit eigenvector corresponding to eigenvalue -1 + l, V = numpy.linalg.eig(M[:3, :3]) + i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue -1") + normal = numpy.real(V[:, i[0]]).squeeze() + # point: any unit eigenvector corresponding to eigenvalue 1 + l, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return point, normal + + +def rotation_matrix(angle, direction, point=None): + """Return matrix to rotate about axis defined by point and direction. + + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> is_same_transform(R0, R1) + True + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(-angle, -direc, point) + >>> is_same_transform(R0, R1) + True + >>> I = numpy.identity(4, numpy.float64) + >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) + True + >>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2, + ... direc, point))) + True + + """ + sina = math.sin(angle) + cosa = math.cos(angle) + direction = unit_vector(direction[:3]) + # rotation matrix around unit vector + R = numpy.array(((cosa, 0.0, 0.0), + (0.0, cosa, 0.0), + (0.0, 0.0, cosa)), dtype=numpy.float64) + R += numpy.outer(direction, direction) * (1.0 - cosa) + direction *= sina + R += numpy.array((( 0.0, -direction[2], direction[1]), + ( direction[2], 0.0, -direction[0]), + (-direction[1], direction[0], 0.0)), + dtype=numpy.float64) + M = numpy.identity(4) + M[:3, :3] = R + if point is not None: + # rotation not around origin + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + M[:3, 3] = point - numpy.dot(R, point) + return M + + +def rotation_from_matrix(matrix): + """Return rotation angle and axis from rotation matrix. 
+ + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> angle, direc, point = rotation_from_matrix(R0) + >>> R1 = rotation_matrix(angle, direc, point) + >>> is_same_transform(R0, R1) + True + + """ + R = numpy.array(matrix, dtype=numpy.float64, copy=False) + R33 = R[:3, :3] + # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 + l, W = numpy.linalg.eig(R33.T) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + direction = numpy.real(W[:, i[-1]]).squeeze() + # point: unit eigenvector of R33 corresponding to eigenvalue of 1 + l, Q = numpy.linalg.eig(R) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(Q[:, i[-1]]).squeeze() + point /= point[3] + # rotation angle depending on direction + cosa = (numpy.trace(R33) - 1.0) / 2.0 + if abs(direction[2]) > 1e-8: + sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] + elif abs(direction[1]) > 1e-8: + sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] + else: + sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] + angle = math.atan2(sina, cosa) + return angle, direction, point + + +def scale_matrix(factor, origin=None, direction=None): + """Return matrix to scale by factor around origin in direction. + + Use factor -1 for point symmetry. + + >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0 + >>> v[3] = 1.0 + >>> S = scale_matrix(-1.234) + >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) + True + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S = scale_matrix(factor, origin) + >>> S = scale_matrix(factor, origin, direct) + + """ + if direction is None: + # uniform scaling + M = numpy.array(((factor, 0.0, 0.0, 0.0), + (0.0, factor, 0.0, 0.0), + (0.0, 0.0, factor, 0.0), + (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64) + if origin is not None: + M[:3, 3] = origin[:3] + M[:3, 3] *= 1.0 - factor + else: + # nonuniform scaling + direction = unit_vector(direction[:3]) + factor = 1.0 - factor + M = numpy.identity(4) + M[:3, :3] -= factor * numpy.outer(direction, direction) + if origin is not None: + M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction + return M + + +def scale_from_matrix(matrix): + """Return scaling factor, origin and direction from scaling matrix. 
+ + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S0 = scale_matrix(factor, origin) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + >>> S0 = scale_matrix(factor, origin, direct) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + factor = numpy.trace(M33) - 2.0 + try: + # direction: unit eigenvector corresponding to eigenvalue factor + l, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0] + direction = numpy.real(V[:, i]).squeeze() + direction /= vector_norm(direction) + except IndexError: + # uniform scaling + factor = (factor + 2.0) / 3.0 + direction = None + # origin: any eigenvector corresponding to eigenvalue 1 + l, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + origin = numpy.real(V[:, i[-1]]).squeeze() + origin /= origin[3] + return factor, origin, direction + + +def projection_matrix(point, normal, direction=None, + perspective=None, pseudo=False): + """Return matrix to project onto plane defined by point and normal. + + Using either perspective point, projection direction, or none of both. + + If pseudo is True, perspective projections will preserve relative depth + such that Perspective = dot(Orthogonal, PseudoPerspective). + + >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) + >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) + True + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> P1 = projection_matrix(point, normal, direction=direct) + >>> P2 = projection_matrix(point, normal, perspective=persp) + >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> is_same_transform(P2, numpy.dot(P0, P3)) + True + >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) + >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 + >>> v0[3] = 1.0 + >>> v1 = numpy.dot(P, v0) + >>> numpy.allclose(v1[1], v0[1]) + True + >>> numpy.allclose(v1[0], 3.0-v1[1]) + True + + """ + M = numpy.identity(4) + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + normal = unit_vector(normal[:3]) + if perspective is not None: + # perspective projection + perspective = numpy.array(perspective[:3], dtype=numpy.float64, + copy=False) + M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) + M[:3, :3] -= numpy.outer(perspective, normal) + if pseudo: + # preserve relative depth + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) + else: + M[:3, 3] = numpy.dot(point, normal) * perspective + M[3, :3] = -normal + M[3, 3] = numpy.dot(perspective, normal) + elif direction is not None: + # parallel projection + direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) + scale = numpy.dot(direction, normal) + M[:3, :3] -= numpy.outer(direction, normal) / scale + M[:3, 3] = direction * (numpy.dot(point, normal) / scale) + else: + # orthogonal projection + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, 
normal) * normal + return M + + +def projection_from_matrix(matrix, pseudo=False): + """Return projection plane and perspective point from projection matrix. + + Return values are same as arguments for projection_matrix function: + point, normal, direction, perspective, and pseudo. + + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, direct) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) + >>> result = projection_from_matrix(P0, pseudo=False) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> result = projection_from_matrix(P0, pseudo=True) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + l, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] + if not pseudo and len(i): + # point: any eigenvector corresponding to eigenvalue 1 + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + # direction: unit eigenvector corresponding to eigenvalue 0 + l, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 0") + direction = numpy.real(V[:, i[0]]).squeeze() + direction /= vector_norm(direction) + # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 + l, V = numpy.linalg.eig(M33.T) + i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] + if len(i): + # parallel projection + normal = numpy.real(V[:, i[0]]).squeeze() + normal /= vector_norm(normal) + return point, normal, direction, None, False + else: + # orthogonal projection, where normal equals direction vector + return point, direction, None, None, False + else: + # perspective projection + i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] + if not len(i): + raise ValueError( + "no eigenvector not corresponding to eigenvalue 0") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + normal = - M[3, :3] + perspective = M[:3, 3] / numpy.dot(point[:3], normal) + if pseudo: + perspective -= normal + return point, normal, None, perspective, pseudo + + +def clip_matrix(left, right, bottom, top, near, far, perspective=False): + """Return matrix to obtain normalized device coordinates from frustrum. + + The frustrum bounds are axis-aligned along x (left, right), + y (bottom, top) and z (near, far). + + Normalized device coordinates are in range [-1, 1] if coordinates are + inside the frustrum. + + If perspective is True the frustrum is a truncated pyramid with the + perspective point at origin and direction along z axis, otherwise an + orthographic canonical view volume (a box). + + Homogeneous coordinates transformed by the perspective clip matrix + need to be dehomogenized (divided by w coordinate). 
+ + >>> frustrum = numpy.random.rand(6) + >>> frustrum[1] += frustrum[0] + >>> frustrum[3] += frustrum[2] + >>> frustrum[5] += frustrum[4] + >>> M = clip_matrix(*frustrum, perspective=False) + >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) + array([-1., -1., -1., 1.]) + >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]) + array([ 1., 1., 1., 1.]) + >>> M = clip_matrix(*frustrum, perspective=True) + >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) + >>> v / v[3] + array([-1., -1., -1., 1.]) + >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0]) + >>> v / v[3] + array([ 1., 1., -1., 1.]) + + """ + if left >= right or bottom >= top or near >= far: + raise ValueError("invalid frustrum") + if perspective: + if near <= _EPS: + raise ValueError("invalid frustrum: near <= 0") + t = 2.0 * near + M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0), + (0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0), + (0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)), + (0.0, 0.0, -1.0, 0.0)) + else: + M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)), + (0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)), + (0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)), + (0.0, 0.0, 0.0, 1.0)) + return numpy.array(M, dtype=numpy.float64) + + +def shear_matrix(angle, direction, point, normal): + """Return matrix to shear by angle along direction vector on shear plane. + + The shear plane is defined by a point and normal vector. The direction + vector must be orthogonal to the plane's normal vector. + + A point P is transformed by the shear matrix into P" such that + the vector P-P" is parallel to the direction vector and its extent is + given by the angle of P-P'-P", where P' is the orthogonal projection + of P onto the shear plane. + + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S = shear_matrix(angle, direct, point, normal) + >>> numpy.allclose(1.0, numpy.linalg.det(S)) + True + + """ + normal = unit_vector(normal[:3]) + direction = unit_vector(direction[:3]) + if abs(numpy.dot(normal, direction)) > 1e-6: + raise ValueError("direction and normal vectors are not orthogonal") + angle = math.tan(angle) + M = numpy.identity(4) + M[:3, :3] += angle * numpy.outer(direction, normal) + M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction + return M + + +def shear_from_matrix(matrix): + """Return shear angle, direction and plane from shear matrix. 
+ + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S0 = shear_matrix(angle, direct, point, normal) + >>> angle, direct, point, normal = shear_from_matrix(S0) + >>> S1 = shear_matrix(angle, direct, point, normal) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + # normal: cross independent eigenvectors corresponding to the eigenvalue 1 + l, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0] + if len(i) < 2: + raise ValueError("No two linear independent eigenvectors found %s" % l) + V = numpy.real(V[:, i]).squeeze().T + lenorm = -1.0 + for i0, i1 in ((0, 1), (0, 2), (1, 2)): + n = numpy.cross(V[i0], V[i1]) + l = vector_norm(n) + if l > lenorm: + lenorm = l + normal = n + normal /= lenorm + # direction and angle + direction = numpy.dot(M33 - numpy.identity(3), normal) + angle = vector_norm(direction) + direction /= angle + angle = math.atan(angle) + # point: eigenvector corresponding to eigenvalue 1 + l, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return angle, direction, point, normal + + +def decompose_matrix(matrix): + """Return sequence of transformations from transformation matrix. + + matrix : array_like + Non-degenerative homogeneous transformation matrix + + Return tuple of: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + Raise ValueError if matrix is of wrong type or degenerative. 
+ + >>> T0 = translation_matrix((1, 2, 3)) + >>> scale, shear, angles, trans, persp = decompose_matrix(T0) + >>> T1 = translation_matrix(trans) + >>> numpy.allclose(T0, T1) + True + >>> S = scale_matrix(0.123) + >>> scale, shear, angles, trans, persp = decompose_matrix(S) + >>> scale[0] + 0.123 + >>> R0 = euler_matrix(1, 2, 3) + >>> scale, shear, angles, trans, persp = decompose_matrix(R0) + >>> R1 = euler_matrix(*angles) + >>> numpy.allclose(R0, R1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=True).T + if abs(M[3, 3]) < _EPS: + raise ValueError("M[3, 3] is zero") + M /= M[3, 3] + P = M.copy() + P[:, 3] = 0, 0, 0, 1 + if not numpy.linalg.det(P): + raise ValueError("Matrix is singular") + + scale = numpy.zeros((3, ), dtype=numpy.float64) + shear = [0, 0, 0] + angles = [0, 0, 0] + + if any(abs(M[:3, 3]) > _EPS): + perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) + M[:, 3] = 0, 0, 0, 1 + else: + perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64) + + translate = M[3, :3].copy() + M[3, :3] = 0 + + row = M[:3, :3].copy() + scale[0] = vector_norm(row[0]) + row[0] /= scale[0] + shear[0] = numpy.dot(row[0], row[1]) + row[1] -= row[0] * shear[0] + scale[1] = vector_norm(row[1]) + row[1] /= scale[1] + shear[0] /= scale[1] + shear[1] = numpy.dot(row[0], row[2]) + row[2] -= row[0] * shear[1] + shear[2] = numpy.dot(row[1], row[2]) + row[2] -= row[1] * shear[2] + scale[2] = vector_norm(row[2]) + row[2] /= scale[2] + shear[1:] /= scale[2] + + if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: + scale *= -1 + row *= -1 + + angles[1] = math.asin(-row[0, 2]) + if math.cos(angles[1]): + angles[0] = math.atan2(row[1, 2], row[2, 2]) + angles[2] = math.atan2(row[0, 1], row[0, 0]) + else: + #angles[0] = math.atan2(row[1, 0], row[1, 1]) + angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[2] = 0.0 + + return scale, shear, angles, translate, perspective + + +def compose_matrix(scale=None, shear=None, angles=None, translate=None, + perspective=None): + """Return transformation matrix from sequence of transformations. + + This is the inverse of the decompose_matrix function. 
+ + Sequence of transformations: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + >>> scale = numpy.random.random(3) - 0.5 + >>> shear = numpy.random.random(3) - 0.5 + >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) + >>> trans = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(4) - 0.5 + >>> M0 = compose_matrix(scale, shear, angles, trans, persp) + >>> result = decompose_matrix(M0) + >>> M1 = compose_matrix(*result) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.identity(4) + if perspective is not None: + P = numpy.identity(4) + P[3, :] = perspective[:4] + M = numpy.dot(M, P) + if translate is not None: + T = numpy.identity(4) + T[:3, 3] = translate[:3] + M = numpy.dot(M, T) + if angles is not None: + R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + M = numpy.dot(M, R) + if shear is not None: + Z = numpy.identity(4) + Z[1, 2] = shear[2] + Z[0, 2] = shear[1] + Z[0, 1] = shear[0] + M = numpy.dot(M, Z) + if scale is not None: + S = numpy.identity(4) + S[0, 0] = scale[0] + S[1, 1] = scale[1] + S[2, 2] = scale[2] + M = numpy.dot(M, S) + M /= M[3, 3] + return M + + +def orthogonalization_matrix(lengths, angles): + """Return orthogonalization matrix for crystallographic cell coordinates. + + Angles are expected in degrees. + + The de-orthogonalization matrix is the inverse. + + >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.)) + >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) + True + >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) + >>> numpy.allclose(numpy.sum(O), 43.063229) + True + + """ + a, b, c = lengths + angles = numpy.radians(angles) + sina, sinb, _ = numpy.sin(angles) + cosa, cosb, cosg = numpy.cos(angles) + co = (cosa * cosb - cosg) / (sina * sinb) + return numpy.array(( + ( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0), + (-a*sinb*co, b*sina, 0.0, 0.0), + ( a*cosb, b*cosa, c, 0.0), + ( 0.0, 0.0, 0.0, 1.0)), + dtype=numpy.float64) + + +def superimposition_matrix(v0, v1, scaling=False, usesvd=True): + """Return matrix to transform given vector set into second vector set. + + v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors. + + If usesvd is True, the weighted sum of squared deviations (RMSD) is + minimized according to the algorithm by W. Kabsch [8]. Otherwise the + quaternion based algorithm by B. Horn [9] is used (slower when using + this Python implementation). + + The returned matrix performs rotation, translation and uniform scaling + (if specified). 
+ + >>> v0 = numpy.random.rand(3, 10) + >>> M = superimposition_matrix(v0, v0) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1)) + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0 + >>> v0[3] = 1.0 + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> S = scale_matrix(random.random()) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> M = concatenate_matrices(T, R, S) + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1) + >>> M = superimposition_matrix(v0, v1, scaling=True) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64) + >>> v[:, :, 0] = v0 + >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] + v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] + + if v0.shape != v1.shape or v0.shape[1] < 3: + raise ValueError("Vector sets are of wrong shape or type.") + + # move centroids to origin + t0 = numpy.mean(v0, axis=1) + t1 = numpy.mean(v1, axis=1) + v0 = v0 - t0.reshape(3, 1) + v1 = v1 - t1.reshape(3, 1) + + if usesvd: + # Singular Value Decomposition of covariance matrix + u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) + # rotation matrix from SVD orthonormal bases + R = numpy.dot(u, vh) + if numpy.linalg.det(R) < 0.0: + # R does not constitute right handed system + R -= numpy.outer(u[:, 2], vh[2, :]*2.0) + s[-1] *= -1.0 + # homogeneous transformation matrix + M = numpy.identity(4) + M[:3, :3] = R + else: + # compute symmetric matrix N + xx, yy, zz = numpy.sum(v0 * v1, axis=1) + xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) + xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) + N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx), + (yz-zy, xx-yy-zz, xy+yx, zx+xz), + (zx-xz, xy+yx, -xx+yy-zz, yz+zy), + (xy-yx, zx+xz, yz+zy, -xx-yy+zz)) + # quaternion: eigenvector corresponding to most positive eigenvalue + l, V = numpy.linalg.eig(N) + q = V[:, numpy.argmax(l)] + q /= vector_norm(q) # unit quaternion + q = numpy.roll(q, -1) # move w component to end + # homogeneous transformation matrix + M = quaternion_matrix(q) + + # scale: ratio of rms deviations from centroid + if scaling: + v0 *= v0 + v1 *= v1 + M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) + + # translation + M[:3, 3] = t1 + T = numpy.identity(4) + T[:3, 3] = -t0 + M = numpy.dot(M, T) + return M + + +def euler_matrix(ai, aj, ak, axes='sxyz'): + """Return homogeneous rotation matrix from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> R = euler_matrix(1, 2, 3, 'syxz') + >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) + True + >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) + >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) + True + >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + >>> for axes in _TUPLE2AXES.keys(): + ... 
R = euler_matrix(ai, aj, ak, axes) + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] + except (AttributeError, KeyError): + _ = _TUPLE2AXES[axes] + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + if frame: + ai, ak = ak, ai + if parity: + ai, aj, ak = -ai, -aj, -ak + + si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) + ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + cc, cs = ci*ck, ci*sk + sc, ss = si*ck, si*sk + + M = numpy.identity(4) + if repetition: + M[i, i] = cj + M[i, j] = sj*si + M[i, k] = sj*ci + M[j, i] = sj*sk + M[j, j] = -cj*ss+cc + M[j, k] = -cj*cs-sc + M[k, i] = -sj*ck + M[k, j] = cj*sc+cs + M[k, k] = cj*cc-ss + else: + M[i, i] = cj*ck + M[i, j] = sj*sc-cs + M[i, k] = sj*cc+ss + M[j, i] = cj*sk + M[j, j] = sj*ss+cc + M[j, k] = sj*cs-sc + M[k, i] = -sj + M[k, j] = cj*si + M[k, k] = cj*ci + return M + + +def euler_from_matrix(matrix, axes='sxyz'): + """Return Euler angles from rotation matrix for specified axis sequence. + + axes : One of 24 axis sequences as string or encoded tuple + + Note that many Euler angle triplets can describe one matrix. + + >>> R0 = euler_matrix(1, 2, 3, 'syxz') + >>> al, be, ga = euler_from_matrix(R0, 'syxz') + >>> R1 = euler_matrix(al, be, ga, 'syxz') + >>> numpy.allclose(R0, R1) + True + >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R0 = euler_matrix(axes=axes, *angles) + ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) + ... if not numpy.allclose(R0, R1): print axes, "failed" + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _ = _TUPLE2AXES[axes] + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] + if repetition: + sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) + if sy > _EPS: + ax = math.atan2( M[i, j], M[i, k]) + ay = math.atan2( sy, M[i, i]) + az = math.atan2( M[j, i], -M[k, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2( sy, M[i, i]) + az = 0.0 + else: + cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) + if cy > _EPS: + ax = math.atan2( M[k, j], M[k, k]) + ay = math.atan2(-M[k, i], cy) + az = math.atan2( M[j, i], M[i, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2(-M[k, i], cy) + az = 0.0 + + if parity: + ax, ay, az = -ax, -ay, -az + if frame: + ax, az = az, ax + return ax, ay, az + + +def euler_from_quaternion(quaternion, axes='sxyz'): + """Return Euler angles from quaternion for specified axis sequence. + + >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947]) + >>> numpy.allclose(angles, [0.123, 0, 0]) + True + + """ + return euler_from_matrix(quaternion_matrix(quaternion), axes) + + +def quaternion_from_euler(ai, aj, ak, axes='sxyz'): + """Return quaternion from Euler angles and axis sequence. 
+ + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') + >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953]) + True + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _ = _TUPLE2AXES[axes] + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + if frame: + ai, ak = ak, ai + if parity: + aj = -aj + + ai /= 2.0 + aj /= 2.0 + ak /= 2.0 + ci = math.cos(ai) + si = math.sin(ai) + cj = math.cos(aj) + sj = math.sin(aj) + ck = math.cos(ak) + sk = math.sin(ak) + cc = ci*ck + cs = ci*sk + sc = si*ck + ss = si*sk + + quaternion = numpy.empty((4, ), dtype=numpy.float64) + if repetition: + quaternion[i] = cj*(cs + sc) + quaternion[j] = sj*(cc + ss) + quaternion[k] = sj*(cs - sc) + quaternion[3] = cj*(cc - ss) + else: + quaternion[i] = cj*sc - sj*cs + quaternion[j] = cj*ss + sj*cc + quaternion[k] = cj*cs - sj*sc + quaternion[3] = cj*cc + sj*ss + if parity: + quaternion[j] *= -1 + + return quaternion + + +def quaternion_about_axis(angle, axis): + """Return quaternion for rotation about axis. + + >>> q = quaternion_about_axis(0.123, (1, 0, 0)) + >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) + True + + """ + quaternion = numpy.zeros((4, ), dtype=numpy.float64) + quaternion[:3] = axis[:3] + qlen = vector_norm(quaternion) + if qlen > _EPS: + quaternion *= math.sin(angle/2.0) / qlen + quaternion[3] = math.cos(angle/2.0) + return quaternion + + +def quaternion_matrix(quaternion): + """Return homogeneous rotation matrix from quaternion. + + >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947]) + >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0))) + True + + """ + q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True) + nq = numpy.dot(q, q) + if nq < _EPS: + return numpy.identity(4) + q *= math.sqrt(2.0 / nq) + q = numpy.outer(q, q) + return numpy.array(( + (1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0), + ( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0), + ( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0), + ( 0.0, 0.0, 0.0, 1.0) + ), dtype=numpy.float64) + + +def quaternion_from_matrix(matrix): + """Return quaternion from rotation matrix. + + >>> R = rotation_matrix(0.123, (1, 2, 3)) + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095]) + True + + """ + q = numpy.empty((4, ), dtype=numpy.float64) + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] + t = numpy.trace(M) + if t > M[3, 3]: + q[3] = t + q[2] = M[1, 0] - M[0, 1] + q[1] = M[0, 2] - M[2, 0] + q[0] = M[2, 1] - M[1, 2] + else: + i, j, k = 0, 1, 2 + if M[1, 1] > M[0, 0]: + i, j, k = 1, 2, 0 + if M[2, 2] > M[i, i]: + i, j, k = 2, 0, 1 + t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] + q[i] = t + q[j] = M[i, j] + M[j, i] + q[k] = M[k, i] + M[i, k] + q[3] = M[k, j] - M[j, k] + q *= 0.5 / math.sqrt(t * M[3, 3]) + return q + + +def quaternion_multiply(quaternion1, quaternion0): + """Return multiplication of two quaternions. 
+
+    >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
+    >>> numpy.allclose(q, [-44, -14, 48, 28])
+    True
+
+    """
+    x0, y0, z0, w0 = quaternion0
+    x1, y1, z1, w1 = quaternion1
+    return numpy.array((
+         x1*w0 + y1*z0 - z1*y0 + w1*x0,
+        -x1*z0 + y1*w0 + z1*x0 + w1*y0,
+         x1*y0 - y1*x0 + z1*w0 + w1*z0,
+        -x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64)
+
+
+def quaternion_conjugate(quaternion):
+    """Return conjugate of quaternion.
+
+    >>> q0 = random_quaternion()
+    >>> q1 = quaternion_conjugate(q0)
+    >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
+    True
+
+    """
+    return numpy.array((-quaternion[0], -quaternion[1],
+                        -quaternion[2], quaternion[3]), dtype=numpy.float64)
+
+
+def quaternion_inverse(quaternion):
+    """Return inverse of quaternion.
+
+    >>> q0 = random_quaternion()
+    >>> q1 = quaternion_inverse(q0)
+    >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1])
+    True
+
+    """
+    return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion)
+
+
+def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
+    """Return spherical linear interpolation between two quaternions.
+
+    >>> q0 = random_quaternion()
+    >>> q1 = random_quaternion()
+    >>> q = quaternion_slerp(q0, q1, 0.0)
+    >>> numpy.allclose(q, q0)
+    True
+    >>> q = quaternion_slerp(q0, q1, 1.0, 1)
+    >>> numpy.allclose(q, q1)
+    True
+    >>> q = quaternion_slerp(q0, q1, 0.5)
+    >>> angle = math.acos(numpy.dot(q0, q))
+    >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
+        numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
+    True
+
+    """
+    q0 = unit_vector(quat0[:4])
+    q1 = unit_vector(quat1[:4])
+    if fraction == 0.0:
+        return q0
+    elif fraction == 1.0:
+        return q1
+    d = numpy.dot(q0, q1)
+    if abs(abs(d) - 1.0) < _EPS:
+        return q0
+    if shortestpath and d < 0.0:
+        # invert rotation
+        d = -d
+        q1 *= -1.0
+    angle = math.acos(d) + spin * math.pi
+    if abs(angle) < _EPS:
+        return q0
+    isin = 1.0 / math.sin(angle)
+    q0 *= math.sin((1.0 - fraction) * angle) * isin
+    q1 *= math.sin(fraction * angle) * isin
+    q0 += q1
+    return q0
+
+
+def random_quaternion(rand=None):
+    """Return uniform random unit quaternion.
+
+    rand: array like or None
+        Three independent random variables that are uniformly distributed
+        between 0 and 1.
+
+    >>> q = random_quaternion()
+    >>> numpy.allclose(1.0, vector_norm(q))
+    True
+    >>> q = random_quaternion(numpy.random.random(3))
+    >>> q.shape
+    (4,)
+
+    """
+    if rand is None:
+        rand = numpy.random.rand(3)
+    else:
+        assert len(rand) == 3
+    r1 = numpy.sqrt(1.0 - rand[0])
+    r2 = numpy.sqrt(rand[0])
+    pi2 = math.pi * 2.0
+    t1 = pi2 * rand[1]
+    t2 = pi2 * rand[2]
+    return numpy.array((numpy.sin(t1)*r1,
+                        numpy.cos(t1)*r1,
+                        numpy.sin(t2)*r2,
+                        numpy.cos(t2)*r2), dtype=numpy.float64)
+
+
+def random_rotation_matrix(rand=None):
+    """Return uniform random rotation matrix.
+
+    rand: array like
+        Three independent random variables that are uniformly distributed
+        between 0 and 1 for each returned quaternion.
+
+    >>> R = random_rotation_matrix()
+    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
+    True
+
+    """
+    return quaternion_matrix(random_quaternion(rand))
+
+
+class Arcball(object):
+    """Virtual Trackball Control.
+
+    >>> ball = Arcball()
+    >>> ball = Arcball(initial=numpy.identity(4))
+    >>> ball.place([320, 320], 320)
+    >>> ball.down([500, 250])
+    >>> ball.drag([475, 275])
+    >>> R = ball.matrix()
+    >>> numpy.allclose(numpy.sum(R), 3.90583455)
+    True
+    >>> ball = Arcball(initial=[0, 0, 0, 1])
+    >>> ball.place([320, 320], 320)
+    >>> ball.setaxes([1,1,0], [-1, 1, 0])
+    >>> ball.setconstrain(True)
+    >>> ball.down([400, 200])
+    >>> ball.drag([200, 400])
+    >>> R = ball.matrix()
+    >>> numpy.allclose(numpy.sum(R), 0.2055924)
+    True
+    >>> ball.next()
+
+    """
+
+    def __init__(self, initial=None):
+        """Initialize virtual trackball control.
+
+        initial : quaternion or rotation matrix
+
+        """
+        self._axis = None
+        self._axes = None
+        self._radius = 1.0
+        self._center = [0.0, 0.0]
+        self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
+        self._constrain = False
+
+        if initial is None:
+            self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
+        else:
+            initial = numpy.array(initial, dtype=numpy.float64)
+            if initial.shape == (4, 4):
+                self._qdown = quaternion_from_matrix(initial)
+            elif initial.shape == (4, ):
+                initial /= vector_norm(initial)
+                self._qdown = initial
+            else:
+                raise ValueError("initial not a quaternion or matrix.")
+
+        self._qnow = self._qpre = self._qdown
+
+    def place(self, center, radius):
+        """Place Arcball, e.g. when window size changes.
+
+        center : sequence[2]
+            Window coordinates of trackball center.
+        radius : float
+            Radius of trackball in window coordinates.
+
+        """
+        self._radius = float(radius)
+        self._center[0] = center[0]
+        self._center[1] = center[1]
+
+    def setaxes(self, *axes):
+        """Set axes to constrain rotations."""
+        if axes is None:
+            self._axes = None
+        else:
+            self._axes = [unit_vector(axis) for axis in axes]
+
+    def setconstrain(self, constrain):
+        """Set state of constrain to axis mode."""
+        self._constrain = constrain == True
+
+    def getconstrain(self):
+        """Return state of constrain to axis mode."""
+        return self._constrain
+
+    def down(self, point):
+        """Set initial cursor window coordinates and pick constrain-axis."""
+        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
+        self._qdown = self._qpre = self._qnow
+
+        if self._constrain and self._axes is not None:
+            self._axis = arcball_nearest_axis(self._vdown, self._axes)
+            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
+        else:
+            self._axis = None
+
+    def drag(self, point):
+        """Update current cursor window coordinates."""
+        vnow = arcball_map_to_sphere(point, self._center, self._radius)
+
+        if self._axis is not None:
+            vnow = arcball_constrain_to_axis(vnow, self._axis)
+
+        self._qpre = self._qnow
+
+        t = numpy.cross(self._vdown, vnow)
+        if numpy.dot(t, t) < _EPS:
+            self._qnow = self._qdown
+        else:
+            q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)]
+            self._qnow = quaternion_multiply(q, self._qdown)
+
+    def next(self, acceleration=0.0):
+        """Continue rotation in direction of last drag."""
+        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
+        self._qpre, self._qnow = self._qnow, q
+
+    def matrix(self):
+        """Return homogeneous rotation matrix."""
+        return quaternion_matrix(self._qnow)
+
+
+def arcball_map_to_sphere(point, center, radius):
+    """Return unit sphere coordinates from window coordinates."""
+    v = numpy.array(((point[0] - center[0]) / radius,
+                     (center[1] - point[1]) / radius,
+                     0.0), dtype=numpy.float64)
+    n = v[0]*v[0] + v[1]*v[1]
+    if n > 1.0:
+        v /= math.sqrt(n)  # position outside of sphere
+    else:
+        v[2] = math.sqrt(1.0 - n)
+    return v
+
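+# Illustrative usage sketch (not part of the upstream transformations.py):
+# how the Arcball above is typically driven from mouse events, and how its
+# result feeds back into the Euler/quaternion helpers defined in this file.
+# The window centre, radius and cursor coordinates below are made-up values.
+#
+#     ball = Arcball()
+#     ball.place([320, 240], 240)         # trackball centre and radius, in pixels
+#     ball.down([400, 200])               # mouse button pressed
+#     ball.drag([350, 260])               # cursor moved while the button is held
+#     R = ball.matrix()                   # 4x4 homogeneous rotation matrix
+#     ai, aj, ak = euler_from_matrix(R)   # back to Euler angles (default 'sxyz')
+#     q = quaternion_from_matrix(R)       # or to a quaternion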
+
+def arcball_constrain_to_axis(point, axis):
+    """Return sphere point perpendicular to axis."""
+    v = numpy.array(point, dtype=numpy.float64, copy=True)
+    a = numpy.array(axis, dtype=numpy.float64, copy=True)
+    v -= a * numpy.dot(a, v)  # on plane
+    n = vector_norm(v)
+    if n > _EPS:
+        if v[2] < 0.0:
+            v *= -1.0
+        v /= n
+        return v
+    if a[2] == 1.0:
+        return numpy.array([1, 0, 0], dtype=numpy.float64)
+    return unit_vector([-a[1], a[0], 0])
+
+
+def arcball_nearest_axis(point, axes):
+    """Return axis, which arc is nearest to point."""
+    point = numpy.array(point, dtype=numpy.float64, copy=False)
+    nearest = None
+    mx = -1.0
+    for axis in axes:
+        t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
+        if t > mx:
+            nearest = axis
+            mx = t
+    return nearest
+
+
+# epsilon for testing whether a number is close to zero
+_EPS = numpy.finfo(float).eps * 4.0
+
+# axis sequences for Euler angles
+_NEXT_AXIS = [1, 2, 0, 1]
+
+# map axes strings to/from tuples of inner axis, parity, repetition, frame
+_AXES2TUPLE = {
+    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
+    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
+    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
+    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
+    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
+    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
+    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
+    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
+
+_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
+
+# helper functions
+
+def vector_norm(data, axis=None, out=None):
+    """Return length, i.e. euclidean norm, of ndarray along axis.
+
+    >>> v = numpy.random.random(3)
+    >>> n = vector_norm(v)
+    >>> numpy.allclose(n, numpy.linalg.norm(v))
+    True
+    >>> v = numpy.random.rand(6, 5, 3)
+    >>> n = vector_norm(v, axis=-1)
+    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
+    True
+    >>> n = vector_norm(v, axis=1)
+    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
+    True
+    >>> v = numpy.random.rand(5, 4, 3)
+    >>> n = numpy.empty((5, 3), dtype=numpy.float64)
+    >>> vector_norm(v, axis=1, out=n)
+    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
+    True
+    >>> vector_norm([])
+    0.0
+    >>> vector_norm([1.0])
+    1.0
+
+    """
+    data = numpy.array(data, dtype=numpy.float64, copy=True)
+    if out is None:
+        if data.ndim == 1:
+            return math.sqrt(numpy.dot(data, data))
+        data *= data
+        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
+        numpy.sqrt(out, out)
+        return out
+    else:
+        data *= data
+        numpy.sum(data, axis=axis, out=out)
+        numpy.sqrt(out, out)
+
+
+def unit_vector(data, axis=None, out=None):
+    """Return ndarray normalized by length, i.e. euclidean norm, along axis.
+
+    >>> v0 = numpy.random.random(3)
+    >>> v1 = unit_vector(v0)
+    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
+    True
+    >>> v0 = numpy.random.rand(5, 4, 3)
+    >>> v1 = unit_vector(v0, axis=-1)
+    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
+    >>> numpy.allclose(v1, v2)
+    True
+    >>> v1 = unit_vector(v0, axis=1)
+    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
+    >>> numpy.allclose(v1, v2)
+    True
+    >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
+    >>> unit_vector(v0, axis=1, out=v1)
+    >>> numpy.allclose(v1, v2)
+    True
+    >>> list(unit_vector([]))
+    []
+    >>> list(unit_vector([1.0]))
+    [1.0]
+
+    """
+    if out is None:
+        data = numpy.array(data, dtype=numpy.float64, copy=True)
+        if data.ndim == 1:
+            data /= math.sqrt(numpy.dot(data, data))
+            return data
+    else:
+        if out is not data:
+            out[:] = numpy.array(data, copy=False)
+        data = out
+    length = numpy.atleast_1d(numpy.sum(data*data, axis))
+    numpy.sqrt(length, length)
+    if axis is not None:
+        length = numpy.expand_dims(length, axis)
+    data /= length
+    if out is None:
+        return data
+
+
+def random_vector(size):
+    """Return array of random doubles in the half-open interval [0.0, 1.0).
+
+    >>> v = random_vector(10000)
+    >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0)
+    True
+    >>> v0 = random_vector(10)
+    >>> v1 = random_vector(10)
+    >>> numpy.any(v0 == v1)
+    False
+
+    """
+    return numpy.random.random(size)
+
+
+def inverse_matrix(matrix):
+    """Return inverse of square transformation matrix.
+
+    >>> M0 = random_rotation_matrix()
+    >>> M1 = inverse_matrix(M0.T)
+    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
+    True
+    >>> for size in range(1, 7):
+    ...     M0 = numpy.random.rand(size, size)
+    ...     M1 = inverse_matrix(M0)
+    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
+
+    """
+    return numpy.linalg.inv(matrix)
+
+
+def concatenate_matrices(*matrices):
+    """Return concatenation of series of transformation matrices.
+
+    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
+    >>> numpy.allclose(M, concatenate_matrices(M))
+    True
+    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
+    True
+
+    """
+    M = numpy.identity(4)
+    for i in matrices:
+        M = numpy.dot(M, i)
+    return M
+
+
+def is_same_transform(matrix0, matrix1):
+    """Return True if two matrices perform same transformation.
+
+    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
+    True
+    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
+    False
+
+    """
+    matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
+    matrix0 /= matrix0[3, 3]
+    matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
+    matrix1 /= matrix1[3, 3]
+    return numpy.allclose(matrix0, matrix1)
+
+
+def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
+    """Try import all public attributes from module into global namespace.
+
+    Existing attributes with name clashes are renamed with prefix.
+    Attributes starting with underscore are ignored by default.
+
+    Return True on successful import.
+
+    """
+    try:
+        module = __import__(module_name)
+    except ImportError:
+        if warn:
+            warnings.warn("Failed to import module " + module_name)
+    else:
+        for attr in dir(module):
+            if ignore and attr.startswith(ignore):
+                continue
+            if prefix:
+                if attr in globals():
+                    globals()[prefix + attr] = globals()[attr]
+                elif warn:
+                    warnings.warn("No Python implementation of " + attr)
+            globals()[attr] = getattr(module, attr)
+        return True
diff --git a/src/mesh/assimp-master/port/PyAssimp/setup.py b/src/mesh/assimp-master/port/PyAssimp/setup.py
new file mode 100644
index 0000000..a3497d6
--- /dev/null
+++ b/src/mesh/assimp-master/port/PyAssimp/setup.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+from distutils.core import setup
+
+def readme():
+    with open('README.rst') as f:
+        return f.read()
+
+setup(name='pyassimp',
+      version='4.1.4',
+      license='ISC',
+      description='Python bindings for the Open Asset Import Library (ASSIMP)',
+      long_description=readme(),
+      url='https://github.com/assimp/assimp',
+      author='ASSIMP developers',
+      author_email='assimp-discussions@lists.sourceforge.net',
+      maintainer='Séverin Lemaignan',
+      maintainer_email='severin@guakamole.org',
+      packages=['pyassimp'],
+      data_files=[
+          ('share/pyassimp', ['README.rst']),
+          ('share/examples/pyassimp', ['scripts/' + f for f in os.listdir('scripts/')])
+      ],
+      requires=['numpy']
+      )